From 86fc1c8ccdb6ae2ca6183f46223b10760d1ebaee Mon Sep 17 00:00:00 2001
From: Philipp Defner
Date: Tue, 30 Jan 2024 12:39:10 +0100
Subject: [PATCH 1/3] Update dependencies, update staticcheck

---
 .github/workflows/go.yml | 4 +-
 go.mod | 160 +-
 go.sum | 426 +-
 .../go/cloudsqlconn/CHANGELOG.md | 70 +
 .../go/cloudsqlconn/README.md | 97 +-
 .../go/cloudsqlconn/connect_tls_117.go | 44 -
 .../go/cloudsqlconn/connect_tls_other.go | 42 -
 .../go/cloudsqlconn/dialer.go | 186 +-
 .../cloud.google.com/go/cloudsqlconn/doc.go | 167 +
 .../go/cloudsqlconn/instance/conn_name.go | 76 +
 .../internal/cloudsql/instance.go | 147 +-
 .../cloudsqlconn/internal/cloudsql/refresh.go | 36 +-
 .../go/cloudsqlconn/internal/trace/trace.go | 6 +-
 .../go/cloudsqlconn/options.go | 28 +-
 .../cloudsqlconn/postgres/pgxv4/postgres.go | 29 +-
 .../go/cloudsqlconn/version.txt | 2 +-
 .../go/compute/internal/version.go | 2 +-
 .../azure-sdk-for-go/sdk/azcore/CHANGELOG.md | 170 +
 .../Azure/azure-sdk-for-go/sdk/azcore/core.go | 65 +-
 .../Azure/azure-sdk-for-go/sdk/azcore/doc.go | 7 +
 .../sdk/azcore/internal/exported/exported.go | 108 +
 .../sdk/azcore/internal/exported/pipeline.go | 20 -
 .../sdk/azcore/internal/exported/request.go | 31 +
 .../internal/exported/response_error.go | 51 +-
 .../sdk/azcore/internal/pollers/fake/fake.go | 133 +
 .../sdk/azcore/internal/shared/constants.go | 14 +-
 .../sdk/azcore/internal/shared/shared.go | 101 +-
 .../sdk/azcore/policy/policy.go | 27 +-
 .../sdk/azcore/runtime/pager.go | 59 +-
 .../sdk/azcore/runtime/pipeline.go | 36 +-
 .../sdk/azcore/runtime/policy_bearer_token.go | 39 +-
 .../sdk/azcore/runtime/policy_http_header.go | 3 +-
 .../sdk/azcore/runtime/policy_http_trace.go | 143 +
 .../azcore/runtime/policy_include_response.go | 5 +-
 .../azcore/runtime/policy_key_credential.go | 57 +
 .../sdk/azcore/runtime/policy_logging.go | 3 +-
 .../sdk/azcore/runtime/policy_retry.go | 16 +-
 .../azcore/runtime/policy_sas_credential.go | 47 +
 .../sdk/azcore/runtime/policy_telemetry.go | 4 +
 .../sdk/azcore/runtime/poller.go | 103 +-
 .../sdk/azcore/runtime/request.go | 111 +-
 .../sdk/azcore/runtime/response.go | 30 +-
 .../runtime/transport_default_dialer_other.go | 15 +
 .../runtime/transport_default_dialer_wasm.go | 15 +
 .../runtime/transport_default_http_client.go | 17 +-
 .../sdk/azcore/tracing/tracing.go | 61 +-
 .../sdk/internal/errorinfo/errorinfo.go | 30 +
 .../sdk/storage/azblob/CHANGELOG.md | 81 +
 .../sdk/storage/azblob/README.md | 148 +-
 .../sdk/storage/azblob/appendblob/client.go | 77 +-
 .../sdk/storage/azblob/appendblob/models.go | 6 +-
 .../sdk/storage/azblob/assets.json | 2 +-
 .../sdk/storage/azblob/blob/client.go | 232 +-
 .../sdk/storage/azblob/blob/constants.go | 6 +
 .../sdk/storage/azblob/blob/models.go | 23 +-
 .../sdk/storage/azblob/blob/responses.go | 3 +
 .../storage/azblob/bloberror/error_codes.go | 3 +
 .../storage/azblob/blockblob/chunkwriting.go | 72 +-
 .../sdk/storage/azblob/blockblob/client.go | 122 +-
 .../sdk/storage/azblob/blockblob/constants.go | 13 +
 .../sdk/storage/azblob/blockblob/models.go | 75 +-
 .../sdk/storage/azblob/blockblob/responses.go | 3 +
 .../sdk/storage/azblob/ci.yml | 6 +
 .../sdk/storage/azblob/client.go | 5 +-
 .../sdk/storage/azblob/common.go | 2 +-
 .../storage/azblob/container/batch_builder.go | 94 +
 .../sdk/storage/azblob/container/client.go | 139 +-
 .../sdk/storage/azblob/container/models.go | 96 +
 .../sdk/storage/azblob/container/responses.go | 25 +
 .../sdk/storage/azblob/doc.go | 43 +-
 .../storage/azblob/internal/base/clients.go | 63 +-
 .../azblob/internal/exported/blob_batch.go | 279 +
 .../azblob/internal/exported/exported.go | 2 +-
 .../azblob/internal/exported/log_events.go | 8 +-
 .../exported/shared_key_credential.go | 2 +-
 .../azblob/internal/exported/version.go | 2 +-
 .../internal/generated/appendblob_client.go | 19 +-
 .../azblob/internal/generated/autorest.md | 100 +-
 .../azblob/internal/generated/blob_client.go | 33 +-
 .../internal/generated/block_blob_client.go | 19 +-
 .../azblob/internal/generated/constants.go | 9 +
 .../internal/generated/container_client.go | 19 +-
 .../azblob/internal/generated/models.go | 76 +
 .../internal/generated/pageblob_client.go | 19 +-
 .../internal/generated/service_client.go | 19 +-
 .../generated/zz_appendblob_client.go | 305 +-
 .../internal/generated/zz_blob_client.go | 1533 +-
 .../internal/generated/zz_blockblob_client.go | 415 +-
 .../azblob/internal/generated/zz_constants.go | 129 +-
 .../internal/generated/zz_container_client.go | 731 +-
 .../azblob/internal/generated/zz_models.go | 1217 +-
 .../internal/generated/zz_models_serde.go | 125 +-
 .../azblob/internal/generated/zz_options.go | 1469 +
 .../internal/generated/zz_pageblob_client.go | 576 +-
 .../internal/generated/zz_response_types.go | 170 +-
 .../internal/generated/zz_service_client.go | 206 +-
 .../internal/generated/zz_time_rfc1123.go | 23 +-
 .../internal/generated/zz_time_rfc3339.go | 35 +-
 .../internal/generated/zz_xml_helper.go | 26 +-
 .../azblob/internal/shared/batch_transfer.go | 16 +-
 .../azblob/internal/shared/buffer_manager.go | 70 +
 .../internal/shared/challenge_policy.go | 113 +
 .../shared}/mmf_unix.go | 14 +-
 .../shared}/mmf_windows.go | 22 +-
 .../storage/azblob/internal/shared/shared.go | 91 +-
 .../sdk/storage/azblob/log.go | 7 +-
 .../sdk/storage/azblob/pageblob/client.go | 52 +-
 .../sdk/storage/azblob/sas/account.go | 63 +-
 .../sdk/storage/azblob/sas/query_params.go | 13 +-
 .../sdk/storage/azblob/sas/service.go | 84 +-
 .../storage/azblob/service/batch_builder.go | 94 +
 .../sdk/storage/azblob/service/client.go | 120 +-
 .../sdk/storage/azblob/service/models.go | 60 +
 .../sdk/storage/azblob/service/responses.go | 19 +
 .../ClickHouse/ch-go/proto/block.go | 4 +-
 .../ClickHouse/ch-go/proto/col_arr.go | 12 +-
 .../ClickHouse/ch-go/proto/col_auto.go | 6 +
 .../ClickHouse/ch-go/proto/col_auto_gen.go | 42 +
 .../ClickHouse/ch-go/proto/col_bool.go | 4 +
 .../ClickHouse/ch-go/proto/col_bool_safe.go | 2 +-
 .../ClickHouse/ch-go/proto/col_bool_unsafe.go | 2 +-
 .../ClickHouse/ch-go/proto/col_date.go | 10 +
 .../ClickHouse/ch-go/proto/col_date32.go | 10 +
 .../ch-go/proto/col_date32_safe_gen.go | 2 +-
 .../ch-go/proto/col_date32_unsafe_gen.go | 2 +-
 .../ch-go/proto/col_date_safe_gen.go | 2 +-
 .../ch-go/proto/col_date_unsafe_gen.go | 2 +-
 .../ClickHouse/ch-go/proto/col_datetime.go | 10 +
 .../ClickHouse/ch-go/proto/col_datetime64.go | 15 +
 .../ch-go/proto/col_datetime64_safe_gen.go | 2 +-
 .../ch-go/proto/col_datetime64_unsafe_gen.go | 2 +-
 .../ch-go/proto/col_datetime_safe_gen.go | 2 +-
 .../ch-go/proto/col_datetime_unsafe_gen.go | 2 +-
 .../ch-go/proto/col_decimal128_gen.go | 5 +
 .../ch-go/proto/col_decimal128_safe_gen.go | 2 +-
 .../ch-go/proto/col_decimal128_unsafe_gen.go | 2 +-
 .../ch-go/proto/col_decimal256_gen.go | 5 +
 .../ch-go/proto/col_decimal256_safe_gen.go | 2 +-
 .../ch-go/proto/col_decimal256_unsafe_gen.go | 2 +-
 .../ch-go/proto/col_decimal32_gen.go | 5 +
 .../ch-go/proto/col_decimal32_safe_gen.go | 2 +-
 .../ch-go/proto/col_decimal32_unsafe_gen.go | 2 +-
 .../ch-go/proto/col_decimal64_gen.go | 5 +
 .../ch-go/proto/col_decimal64_safe_gen.go | 2 +-
 .../ch-go/proto/col_decimal64_unsafe_gen.go | 2 +-
 .../ClickHouse/ch-go/proto/col_enum.go | 4 +
 .../ClickHouse/ch-go/proto/col_enum16_gen.go | 5 +
 .../ch-go/proto/col_enum16_safe_gen.go | 2 +-
 .../ch-go/proto/col_enum16_unsafe_gen.go | 2 +-
 .../ClickHouse/ch-go/proto/col_enum8_gen.go | 5 +
 .../ch-go/proto/col_enum8_safe_gen.go | 2 +-
 .../ch-go/proto/col_enum8_unsafe_gen.go | 2 +-
 .../ClickHouse/ch-go/proto/col_fixed_str.go | 6 +
 .../ch-go/proto/col_fixedstr128_gen.go | 71 +
 .../ch-go/proto/col_fixedstr128_safe_gen.go | 55 +
 .../ch-go/proto/col_fixedstr128_unsafe_gen.go | 45 +
 .../ch-go/proto/col_fixedstr16_gen.go | 71 +
 .../ch-go/proto/col_fixedstr16_safe_gen.go | 55 +
 .../ch-go/proto/col_fixedstr16_unsafe_gen.go | 45 +
 .../ch-go/proto/col_fixedstr256_gen.go | 71 +
 .../ch-go/proto/col_fixedstr256_safe_gen.go | 55 +
 .../ch-go/proto/col_fixedstr256_unsafe_gen.go | 45 +
 .../ch-go/proto/col_fixedstr32_gen.go | 71 +
 .../ch-go/proto/col_fixedstr32_safe_gen.go | 55 +
 .../ch-go/proto/col_fixedstr32_unsafe_gen.go | 45 +
 .../ch-go/proto/col_fixedstr512_gen.go | 71 +
 .../ch-go/proto/col_fixedstr512_safe_gen.go | 55 +
 .../ch-go/proto/col_fixedstr512_unsafe_gen.go | 45 +
 .../ch-go/proto/col_fixedstr64_gen.go | 71 +
 .../ch-go/proto/col_fixedstr64_safe_gen.go | 55 +
 .../ch-go/proto/col_fixedstr64_unsafe_gen.go | 45 +
 .../ch-go/proto/col_fixedstr8_gen.go | 71 +
 .../ch-go/proto/col_fixedstr8_safe_gen.go | 55 +
 .../ch-go/proto/col_fixedstr8_unsafe_gen.go | 45 +
 .../ClickHouse/ch-go/proto/col_float32_gen.go | 5 +
 .../ch-go/proto/col_float32_safe_gen.go | 2 +-
 .../ch-go/proto/col_float32_unsafe_gen.go | 2 +-
 .../ClickHouse/ch-go/proto/col_float64_gen.go | 5 +
 .../ch-go/proto/col_float64_safe_gen.go | 2 +-
 .../ch-go/proto/col_float64_unsafe_gen.go | 2 +-
 .../ClickHouse/ch-go/proto/col_int128_gen.go | 5 +
 .../ch-go/proto/col_int128_safe_gen.go | 2 +-
 .../ch-go/proto/col_int128_unsafe_gen.go | 2 +-
 .../ClickHouse/ch-go/proto/col_int16_gen.go | 5 +
 .../ch-go/proto/col_int16_safe_gen.go | 2 +-
 .../ch-go/proto/col_int16_unsafe_gen.go | 2 +-
 .../ClickHouse/ch-go/proto/col_int256_gen.go | 5 +
 .../ch-go/proto/col_int256_safe_gen.go | 2 +-
 .../ch-go/proto/col_int256_unsafe_gen.go | 2 +-
 .../ClickHouse/ch-go/proto/col_int32_gen.go | 5 +
 .../ch-go/proto/col_int32_safe_gen.go | 2 +-
 .../ch-go/proto/col_int32_unsafe_gen.go | 2 +-
 .../ClickHouse/ch-go/proto/col_int64_gen.go | 5 +
 .../ch-go/proto/col_int64_safe_gen.go | 2 +-
 .../ch-go/proto/col_int64_unsafe_gen.go | 2 +-
 .../ClickHouse/ch-go/proto/col_int8_gen.go | 5 +
 .../ch-go/proto/col_int8_safe_gen.go | 2 +-
 .../ch-go/proto/col_int8_unsafe_gen.go | 2 +-
 .../ClickHouse/ch-go/proto/col_ipv4_gen.go | 5 +
 .../ch-go/proto/col_ipv4_safe_gen.go | 2 +-
 .../ch-go/proto/col_ipv4_unsafe_gen.go | 2 +-
 .../ClickHouse/ch-go/proto/col_ipv6_gen.go | 5 +
 .../ch-go/proto/col_ipv6_safe_gen.go | 2 +-
 .../ch-go/proto/col_ipv6_unsafe_gen.go | 2 +-
 .../ch-go/proto/col_low_cardinality.go | 56 +-
 .../ClickHouse/ch-go/proto/col_map.go | 70 +
 .../ClickHouse/ch-go/proto/col_nothing.go | 6 +-
 .../ClickHouse/ch-go/proto/col_nullable.go | 2 +-
 .../ClickHouse/ch-go/proto/col_raw_of.go | 2 +-
 .../ClickHouse/ch-go/proto/col_str.go | 18 +-
 .../ClickHouse/ch-go/proto/col_uint128_gen.go | 5 +
 .../ch-go/proto/col_uint128_safe_gen.go | 2 +-
 .../ch-go/proto/col_uint128_unsafe_gen.go | 2 +-
 .../ClickHouse/ch-go/proto/col_uint16_gen.go | 5 +
 .../ch-go/proto/col_uint16_safe_gen.go | 2 +-
 .../ch-go/proto/col_uint16_unsafe_gen.go | 2 +-
 .../ClickHouse/ch-go/proto/col_uint256_gen.go | 5 +
 .../ch-go/proto/col_uint256_safe_gen.go | 2 +-
 .../ch-go/proto/col_uint256_unsafe_gen.go | 2 +-
 .../ClickHouse/ch-go/proto/col_uint32_gen.go | 5 +
 .../ch-go/proto/col_uint32_safe_gen.go | 2 +-
 .../ch-go/proto/col_uint32_unsafe_gen.go | 2 +-
 .../ClickHouse/ch-go/proto/col_uint64_gen.go | 5 +
 .../ch-go/proto/col_uint64_safe_gen.go | 2 +-
 .../ch-go/proto/col_uint64_unsafe_gen.go | 2 +-
 .../ClickHouse/ch-go/proto/col_uint8_gen.go | 5 +
 .../ClickHouse/ch-go/proto/col_uuid.go | 10 +
 .../ClickHouse/ch-go/proto/col_uuid_safe.go | 2 +-
 .../ClickHouse/ch-go/proto/col_uuid_unsafe.go | 2 +-
 .../ClickHouse/ch-go/proto/column.go | 1 +
 .../ClickHouse/ch-go/proto/date32.go | 9 +-
 .../ClickHouse/ch-go/proto/profile_enum.go | 32 +-
 .../ClickHouse/ch-go/proto/profile_events.go | 14 +-
 .../ClickHouse/ch-go/proto/proto.go | 2 +-
 .../ClickHouse/ch-go/proto/reader.go | 6 -
 .../ClickHouse/ch-go/proto/server_log.go | 14 +-
 .../ClickHouse/ch-go/proto/slice_unsafe.go | 2 +-
 .../ClickHouse/clickhouse-go/v2/.gitignore | 7 +-
 .../ClickHouse/clickhouse-go/v2/CHANGELOG.md | 213 +
 .../ClickHouse/clickhouse-go/v2/README.md | 20 +-
 .../ClickHouse/clickhouse-go/v2/TYPES.md | 134 +-
 .../ClickHouse/clickhouse-go/v2/bind.go | 117 +-
 .../ClickHouse/clickhouse-go/v2/clickhouse.go | 37 +-
 .../clickhouse-go/v2/clickhouse_options.go | 19 +
 .../clickhouse-go/v2/clickhouse_rows.go | 2 +-
 .../clickhouse-go/v2/clickhouse_std.go | 19 +-
 .../clickhouse-go/v2/client_info.go | 2 +-
 .../ClickHouse/clickhouse-go/v2/conn.go | 2 -
 .../clickhouse-go/v2/conn_async_insert.go | 13 +-
 .../ClickHouse/clickhouse-go/v2/conn_batch.go | 127 +-
 .../ClickHouse/clickhouse-go/v2/conn_http.go | 15 +-
 .../v2/conn_http_async_insert.go | 10 +-
 .../clickhouse-go/v2/conn_http_batch.go | 11 +-
 .../clickhouse-go/v2/conn_send_query.go | 21 +-
 .../clickhouse-go/v2/context_watchdog.go | 47 +
 .../clickhouse-go/v2/contributors/list | 146 +-
 .../clickhouse-go/v2/lib/column/array.go | 65 +-
 .../clickhouse-go/v2/lib/column/array_gen.go | 175 +
 .../clickhouse-go/v2/lib/column/bigint.go | 25 +
 .../clickhouse-go/v2/lib/column/bool.go | 27 +-
 .../clickhouse-go/v2/lib/column/column_gen.go | 308 +-
 .../clickhouse-go/v2/lib/column/date.go | 25 +
 .../clickhouse-go/v2/lib/column/date32.go | 25 +
 .../clickhouse-go/v2/lib/column/datetime.go | 25 +
 .../clickhouse-go/v2/lib/column/datetime64.go | 29 +-
 .../clickhouse-go/v2/lib/column/decimal.go | 25 +
 .../clickhouse-go/v2/lib/column/enum16.go | 31 +
 .../clickhouse-go/v2/lib/column/enum8.go | 26 +
 .../v2/lib/column/fixed_string.go | 28 +-
 .../v2/lib/column/geo_multi_polygon.go | 25 +
 .../clickhouse-go/v2/lib/column/geo_point.go | 25 +
 .../v2/lib/column/geo_polygon.go | 25 +
 .../clickhouse-go/v2/lib/column/geo_ring.go | 25 +
 .../clickhouse-go/v2/lib/column/ipv4.go | 48 +-
 .../clickhouse-go/v2/lib/column/ipv6.go | 38 +
 .../clickhouse-go/v2/lib/column/json.go | 9 +-
 .../clickhouse-go/v2/lib/column/map.go | 68 +-
 .../clickhouse-go/v2/lib/column/string.go | 33 +-
 .../clickhouse-go/v2/lib/column/tuple.go | 43 +-
 .../clickhouse-go/v2/lib/column/uuid.go | 26 +
 .../clickhouse-go/v2/lib/driver/driver.go | 5 +-
 .../clickhouse-go/v2/lib/driver/options.go | 13 +
 .../clickhouse-go/v2/lib/proto/block.go | 2 +-
 .../clickhouse-go/v2/lib/proto/const.go | 37 +-
 .../clickhouse-go/v2/lib/proto/progress.go | 16 +-
 .../ClickHouse/clickhouse-go/v2/main.tf | 59 +
 .../github.com/andybalholm/brotli/README.md | 7 +
 .../andybalholm/brotli/bitwriter.go | 56 +
 .../andybalholm/brotli/brotli_bit_stream.go | 311 +-
 .../brotli/compress_fragment_two_pass.go | 51 +-
 .../github.com/andybalholm/brotli/encoder.go | 168 +
 .../brotli/entropy_encode_static.go | 5 +
 vendor/github.com/andybalholm/brotli/http.go | 10 +-
 .../andybalholm/brotli/matchfinder/emitter.go | 45 +
 .../andybalholm/brotli/matchfinder/m0.go | 169 +
 .../andybalholm/brotli/matchfinder/m4.go | 297 +
 .../brotli/matchfinder/matchfinder.go | 103 +
 .../brotli/matchfinder/textencoder.go | 53 +
 .../github.com/andybalholm/brotli/writer.go | 43 +
 .../apache/arrow/go/v12/arrow/array/list.go | 589 -
 .../apache/arrow/go/v12/parquet/.gitignore | 31 -
 .../arrow/go/v12/parquet/compress/brotli.go | 115 -
 .../arrow/go/v12/parquet/compress/compress.go | 156 -
 .../arrow/go/v12/parquet/compress/gzip.go | 98 -
 .../arrow/go/v12/parquet/compress/snappy.go | 62 -
 .../arrow/go/v12/parquet/compress/zstd.go | 112 -
 .../apache/arrow/go/v12/parquet/doc.go | 70 -
 .../go/v12/parquet/encryption_properties.go | 711 -
 .../v12/parquet/internal/debug/assert_off.go | 24 -
 .../go/v12/parquet/internal/debug/doc.go | 23 -
 .../go/v12/parquet/internal/debug/log_off.go | 24 -
 .../gen-go/parquet/GoUnusedProtection__.go | 6 -
 .../internal/gen-go/parquet/parquet-consts.go | 23 -
 .../internal/gen-go/parquet/parquet.go | 10967 ----
 .../internal/gen-go/parquet/staticcheck.conf | 17 -
 .../arrow/go/v12/parquet/reader_properties.go | 88 -
 .../apache/arrow/go/v12/parquet/types.go | 391 -
 .../arrow/go/v12/parquet/version_string.go | 25 -
 .../arrow/go/v12/parquet/writer_properties.go | 533 -
 .../apache/arrow/go/{v12 => v14}/LICENSE.txt | 11 -
 .../arrow/go/{v12 => v14}/arrow/.editorconfig | 0
 .../arrow/go/{v12 => v14}/arrow/.gitignore | 0
 .../arrow/go/{v12 => v14}/arrow/Gopkg.lock | 0
 .../arrow/go/{v12 => v14}/arrow/Gopkg.toml | 0
 .../arrow/go/{v12 => v14}/arrow/Makefile | 0
 .../arrow/go/{v12 => v14}/arrow/array.go | 22 +-
 .../go/{v12 => v14}/arrow/array/array.go | 13 +-
 .../go/{v12 => v14}/arrow/array/binary.go | 23 +-
 .../{v12 => v14}/arrow/array/binarybuilder.go | 39 +-
 .../go/{v12 => v14}/arrow/array/boolean.go | 19 +-
 .../arrow/array/booleanbuilder.go | 39 +-
 .../{v12 => v14}/arrow/array/bufferbuilder.go | 6 +-
 .../arrow/array/bufferbuilder_byte.go | 2 +-
 .../arrow/array/bufferbuilder_numeric.gen.go | 6 +-
 .../array/bufferbuilder_numeric.gen.go.tmpl | 6 +-
 .../go/{v12 => v14}/arrow/array/builder.go | 40 +-
 .../go/{v12 => v14}/arrow/array/compare.go | 190 +-
 .../go/{v12 => v14}/arrow/array/concat.go | 215 +-
 .../arrow/go/{v12 => v14}/arrow/array/data.go | 6 +-
 .../go/{v12 => v14}/arrow/array/decimal128.go | 47 +-
 .../go/{v12 => v14}/arrow/array/decimal256.go | 47 +-
 .../go/{v12 => v14}/arrow/array/dictionary.go | 280 +-
 .../arrow/go/{v12 => v14}/arrow/array/diff.go | 2 +-
 .../arrow/go/{v12 => v14}/arrow/array/doc.go | 0
 .../go/{v12 => v14}/arrow/array/encoded.go | 113 +-
 .../go/{v12 => v14}/arrow/array/extension.go | 59 +-
 .../arrow/array/extension_builder.go | 0
 .../arrow/array/fixed_size_list.go | 65 +-
 .../arrow/array/fixedsize_binary.go | 13 +-
 .../arrow/array/fixedsize_binarybuilder.go | 37 +-
 .../go/{v12 => v14}/arrow/array/float16.go | 14 +-
 .../arrow/array/float16_builder.go | 38 +-
 .../go/{v12 => v14}/arrow/array/interval.go | 126 +-
 .../{v12 => v14}/arrow/array/json_reader.go | 8 +-
 .../apache/arrow/go/v14/arrow/array/list.go | 1688 +
 .../arrow/go/{v12 => v14}/arrow/array/map.go | 46 +-
 .../arrow/go/{v12 => v14}/arrow/array/null.go | 30 +-
 .../{v12 => v14}/arrow/array/numeric.gen.go | 240 +-
 .../arrow/array/numeric.gen.go.tmpl | 40 +-
 .../arrow/array/numericbuilder.gen.go | 675 +-
 .../arrow/array/numericbuilder.gen.go.tmpl | 104 +-
 .../array/numericbuilder.gen_test.go.tmpl | 66 +-
 .../go/{v12 => v14}/arrow/array/record.go | 8 +-
 .../go/{v12 => v14}/arrow/array/string.go | 62 +-
 .../go/{v12 => v14}/arrow/array/struct.go | 63 +-
 .../go/{v12 => v14}/arrow/array/table.go | 52 +-
 .../arrow/go/v14/arrow/array/timestamp.go | 381 +
 .../go/{v12 => v14}/arrow/array/union.go | 93 +-
 .../arrow/go/{v12 => v14}/arrow/array/util.go | 51 +-
 .../go/{v12 => v14}/arrow/arrio/arrio.go | 2 +-
 .../go/{v12 => v14}/arrow/bitutil/Makefile | 0
 .../{v12 => v14}/arrow/bitutil/bitmap_ops.go | 0
 .../arrow/bitutil/bitmap_ops_amd64.go | 0
 .../arrow/bitutil/bitmap_ops_arm64.go | 0
 .../arrow/bitutil/bitmap_ops_avx2_amd64.go | 0
 .../arrow/bitutil/bitmap_ops_avx2_amd64.s | 0
 .../arrow/bitutil/bitmap_ops_noasm.go | 0
 .../arrow/bitutil/bitmap_ops_ppc64le.go | 0
 .../arrow/bitutil/bitmap_ops_s390x.go | 0
 .../arrow/bitutil/bitmap_ops_sse4_amd64.go | 0
 .../arrow/bitutil/bitmap_ops_sse4_amd64.s | 0
 .../go/{v12 => v14}/arrow/bitutil/bitmaps.go | 6 +-
 .../go/{v12 => v14}/arrow/bitutil/bitutil.go | 15 +-
 .../arrow/bitutil/endian_default.go | 0
 .../arrow/bitutil/endian_s390x.go | 0
 .../arrow/go/{v12 => v14}/arrow/compare.go | 32 +-
 .../{v12 => v14}/arrow/compute/arithmetic.go | 23 +-
 .../go/{v12 => v14}/arrow/compute/cast.go | 10 +-
 .../go/{v12 => v14}/arrow/compute/datum.go | 6 +-
 .../arrow/compute/datumkind_string.go | 0
 .../go/{v12 => v14}/arrow/compute/doc.go | 0
 .../go/{v12 => v14}/arrow/compute/exec.go | 12 +-
 .../arrow/compute}/exec/hash_util.go | 0
 .../arrow/compute}/exec/kernel.go | 8 +-
 .../arrow/compute}/exec/span.go | 10 +-
 .../arrow/compute}/exec/utils.go | 24 +-
 .../go/{v12 => v14}/arrow/compute/executor.go | 43 +-
 .../{v12 => v14}/arrow/compute/expression.go | 27 +-
 .../go/{v12 => v14}/arrow/compute/fieldref.go | 4 +-
 .../arrow/compute/funckind_string.go | 0
 .../{v12 => v14}/arrow/compute/functions.go | 4 +-
 .../arrow/compute/internal/kernels/Makefile | 0
 .../internal/kernels/base_arithmetic.go | 10 +-
 .../internal/kernels/base_arithmetic_amd64.go | 4 +-
 .../kernels/base_arithmetic_avx2_amd64.go | 2 +-
 .../kernels/base_arithmetic_avx2_amd64.s | 0
 .../kernels/base_arithmetic_sse4_amd64.go | 2 +-
 .../kernels/base_arithmetic_sse4_amd64.s | 0
 .../kernels/basic_arithmetic_noasm.go | 2 +-
 .../compute/internal/kernels/boolean_cast.go | 6 +-
 .../arrow/compute/internal/kernels/cast.go | 7 +-
 .../compute/internal/kernels/cast_numeric.go | 2 +-
 .../internal/kernels/cast_numeric_amd64.go | 0
 .../kernels/cast_numeric_avx2_amd64.go | 2 +-
 .../kernels/cast_numeric_avx2_amd64.s | 0
 .../kernels/cast_numeric_neon_arm64.go | 2 +-
 .../kernels/cast_numeric_neon_arm64.s | 0
 .../kernels/cast_numeric_sse4_amd64.go | 2 +-
 .../kernels/cast_numeric_sse4_amd64.s | 0
 .../compute/internal/kernels/cast_temporal.go | 8 +-
 .../kernels/compareoperator_string.go | 0
 .../internal/kernels/constant_factor.go | 0
 .../internal/kernels/constant_factor_amd64.go | 0
 .../kernels/constant_factor_avx2_amd64.go | 0
 .../kernels/constant_factor_avx2_amd64.s | 0
 .../kernels/constant_factor_sse4_amd64.go | 0
 .../kernels/constant_factor_sse4_amd64.s | 0
 .../arrow/compute/internal/kernels/doc.go | 0
 .../arrow/compute/internal/kernels/helpers.go | 14 +-
 .../compute/internal/kernels/numeric_cast.go | 14 +-
 .../compute/internal/kernels/rounding.go | 10 +-
 .../internal/kernels/roundmode_string.go | 0
 .../internal/kernels/scalar_arithmetic.go | 14 +-
 .../internal/kernels/scalar_boolean.go | 6 +-
 .../kernels/scalar_comparison_amd64.go | 4 +-
 .../kernels/scalar_comparison_avx2_amd64.go | 2 +-
 .../kernels/scalar_comparison_avx2_amd64.s | 0
 .../kernels/scalar_comparison_noasm.go | 2 +-
 .../kernels/scalar_comparison_sse4_amd64.go | 2 +-
 .../kernels/scalar_comparison_sse4_amd64.s | 0
 .../internal/kernels/scalar_comparisons.go | 16 +-
 .../compute/internal/kernels/string_casts.go | 12 +-
 .../arrow/compute/internal/kernels/types.go | 8 +-
 .../compute/internal/kernels/vector_hash.go | 14 +-
 .../internal/kernels/vector_run_end_encode.go | 16 +-
 .../internal/kernels/vector_selection.go | 14 +-
 .../go/{v12 => v14}/arrow/compute/registry.go | 2 +-
 .../{v12 => v14}/arrow/compute/scalar_bool.go | 6 +-
 .../arrow/compute/scalar_compare.go | 6 +-
 .../{v12 => v14}/arrow/compute/selection.go | 8 +-
 .../go/{v12 => v14}/arrow/compute/utils.go | 12 +-
 .../{v12 => v14}/arrow/compute/vector_hash.go | 4 +-
 .../arrow/compute/vector_run_ends.go | 4 +-
 .../arrow/go/{v12 => v14}/arrow/datatype.go | 17 +-
 .../go/{v12 => v14}/arrow/datatype_binary.go | 0
 .../go/{v12 => v14}/arrow/datatype_encoded.go | 0
 .../{v12 => v14}/arrow/datatype_extension.go | 0
 .../{v12 => v14}/arrow/datatype_fixedwidth.go | 70 +-
 .../go/{v12 => v14}/arrow/datatype_nested.go | 301 +-
 .../go/{v12 => v14}/arrow/datatype_null.go | 0
 .../arrow/datatype_numeric.gen.go | 0
 .../arrow/datatype_numeric.gen.go.tmpl | 0
 .../arrow/datatype_numeric.gen.go.tmpldata | 0
 .../arrow/decimal128/decimal128.go | 66 +-
 .../arrow/decimal256/decimal256.go | 49 +-
 .../apache/arrow/go/{v12 => v14}/arrow/doc.go | 11 +-
 .../{v12 => v14}/arrow/encoded/ree_utils.go | 2 +-
 .../arrow/go/{v12 => v14}/arrow/endian/big.go | 0
 .../go/{v12 => v14}/arrow/endian/endian.go | 4 +-
 .../go/{v12 => v14}/arrow/endian/little.go | 0
 .../arrow/go/{v12 => v14}/arrow/errors.go | 1 +
 .../go/{v12 => v14}/arrow/float16/float16.go | 95 +
 .../arrow/internal/debug/assert_off.go | 0
 .../arrow/internal/debug/assert_on.go | 0
 .../{v12 => v14}/arrow/internal/debug/doc.go | 0
 .../arrow/internal/debug/log_off.go | 0
 .../arrow/internal/debug/log_on.go | 0
 .../{v12 => v14}/arrow/internal/debug/util.go | 0
 .../arrow/internal/dictutils/dict.go | 6 +-
 .../arrow/internal/flatbuf/Binary.go | 0
 .../v14/arrow/internal/flatbuf/BinaryView.go | 57 +
 .../arrow/internal/flatbuf/Block.go | 0
 .../arrow/internal/flatbuf/BodyCompression.go | 0
 .../internal/flatbuf/BodyCompressionMethod.go | 0
 .../arrow/internal/flatbuf/Bool.go | 0
 .../arrow/internal/flatbuf/Buffer.go | 0
 .../arrow/internal/flatbuf/CompressionType.go | 0
 .../arrow/internal/flatbuf/Date.go | 0
 .../arrow/internal/flatbuf/DateUnit.go | 0
 .../arrow/internal/flatbuf/Decimal.go | 0
 .../arrow/internal/flatbuf/DictionaryBatch.go | 0
 .../internal/flatbuf/DictionaryEncoding.go | 0
 .../arrow/internal/flatbuf/DictionaryKind.go | 0
 .../arrow/internal/flatbuf/Duration.go | 0
 .../arrow/internal/flatbuf/Endianness.go | 0
 .../arrow/internal/flatbuf/Feature.go | 0
 .../arrow/internal/flatbuf/Field.go | 0
 .../arrow/internal/flatbuf/FieldNode.go | 0
 .../arrow/internal/flatbuf/FixedSizeBinary.go | 0
 .../arrow/internal/flatbuf/FixedSizeList.go | 0
 .../arrow/internal/flatbuf/FloatingPoint.go | 0
 .../arrow/internal/flatbuf/Footer.go | 0
 .../arrow/internal/flatbuf/Int.go | 0
 .../arrow/internal/flatbuf/Interval.go | 0
 .../arrow/internal/flatbuf/IntervalUnit.go | 0
 .../arrow/internal/flatbuf/KeyValue.go | 0
 .../arrow/internal/flatbuf/LargeBinary.go | 0
 .../arrow/internal/flatbuf/LargeList.go | 0
 .../arrow/internal/flatbuf/LargeListView.go | 52 +
 .../arrow/internal/flatbuf/LargeUtf8.go | 0
 .../arrow/internal/flatbuf/List.go | 0
 .../go/v14/arrow/internal/flatbuf/ListView.go | 53 +
 .../arrow/internal/flatbuf/Map.go | 0
 .../arrow/internal/flatbuf/Message.go | 0
 .../arrow/internal/flatbuf/MessageHeader.go | 0
 .../arrow/internal/flatbuf/MetadataVersion.go | 0
 .../arrow/internal/flatbuf/Null.go | 0
 .../arrow/internal/flatbuf/Precision.go | 0
 .../arrow/internal/flatbuf/RecordBatch.go | 62 +-
 .../arrow/internal/flatbuf/RunEndEncoded.go | 0
 .../internal/flatbuf/RunLengthEncoded.go | 0
 .../arrow/internal/flatbuf/Schema.go | 0
 .../flatbuf/SparseMatrixCompressedAxis.go | 0
 .../internal/flatbuf/SparseMatrixIndexCSR.go | 0
 .../internal/flatbuf/SparseMatrixIndexCSX.go | 0
 .../arrow/internal/flatbuf/SparseTensor.go | 0
 .../internal/flatbuf/SparseTensorIndex.go | 0
 .../internal/flatbuf/SparseTensorIndexCOO.go | 0
 .../internal/flatbuf/SparseTensorIndexCSF.go | 0
 .../arrow/internal/flatbuf/Struct_.go | 0
 .../arrow/internal/flatbuf/Tensor.go | 0
 .../arrow/internal/flatbuf/TensorDim.go | 0
 .../arrow/internal/flatbuf/Time.go | 0
 .../arrow/internal/flatbuf/TimeUnit.go | 0
 .../arrow/internal/flatbuf/Timestamp.go | 0
 .../arrow/internal/flatbuf/Type.go | 12 +
 .../arrow/internal/flatbuf/Union.go | 0
 .../arrow/internal/flatbuf/UnionMode.go | 0
 .../arrow/internal/flatbuf/Utf8.go | 0
 .../go/v14/arrow/internal/flatbuf/Utf8View.go | 57 +
 .../go/{v12 => v14}/arrow/internal/utils.go | 4 +-
 .../go/{v12 => v14}/arrow/ipc/compression.go | 6 +-
 .../go/{v12 => v14}/arrow/ipc/endian_swap.go | 6 +-
 .../go/{v12 => v14}/arrow/ipc/file_reader.go | 42 +-
 .../go/{v12 => v14}/arrow/ipc/file_writer.go | 10 +-
 .../arrow/go/{v12 => v14}/arrow/ipc/ipc.go | 8 +-
 .../go/{v12 => v14}/arrow/ipc/message.go | 6 +-
 .../go/{v12 => v14}/arrow/ipc/metadata.go | 38 +-
 .../arrow/go/{v12 => v14}/arrow/ipc/reader.go | 15 +-
 .../arrow/go/{v12 => v14}/arrow/ipc/writer.go | 153 +-
 .../go/{v12 => v14}/arrow/memory/Makefile | 0
 .../go/{v12 => v14}/arrow/memory/allocator.go | 0
 .../go/{v12 => v14}/arrow/memory/buffer.go | 2 +-
 .../arrow/memory/cgo_allocator.go | 2 +-
 .../arrow/memory/cgo_allocator_defaults.go | 0
 .../arrow/memory/cgo_allocator_logging.go | 0
 .../arrow/memory/checked_allocator.go | 86 +-
 .../arrow/memory/default_allocator.go | 0
 .../arrow/memory/default_mallocator.go | 2 +-
 .../arrow/go/{v12 => v14}/arrow/memory/doc.go | 0
 .../{v12 => v14}/arrow/memory/go_allocator.go | 5 +-
 .../memory/internal/cgoalloc/allocator.cc | 0
 .../memory/internal/cgoalloc/allocator.go | 3 +-
 .../memory/internal/cgoalloc/allocator.h | 0
 .../arrow/memory/internal/cgoalloc/helpers.h | 0
 .../arrow/memory/mallocator/doc.go | 0
 .../arrow/memory/mallocator/mallocator.go | 0
 .../go/{v12 => v14}/arrow/memory/memory.go | 0
 .../{v12 => v14}/arrow/memory/memory_amd64.go | 0
 .../{v12 => v14}/arrow/memory/memory_arm64.go | 0
 .../arrow/memory/memory_avx2_amd64.go | 0
 .../arrow/memory/memory_avx2_amd64.s | 0
 .../arrow/memory/memory_js_wasm.go | 0
 .../arrow/memory/memory_neon_arm64.go | 0
 .../arrow/memory/memory_neon_arm64.s | 0
 .../{v12 => v14}/arrow/memory/memory_noasm.go | 0
 .../arrow/memory/memory_sse4_amd64.go | 0
 .../arrow/memory/memory_sse4_amd64.s | 0
 .../go/{v12 => v14}/arrow/memory/util.go | 0
 .../go/{v12 => v14}/arrow/numeric.schema.json | 0
 .../go/{v12 => v14}/arrow/numeric.tmpldata | 12 -
 .../arrow/go/{v12 => v14}/arrow/record.go | 2 +-
 .../go/{v12 => v14}/arrow/scalar/append.go | 10 +-
 .../go/{v12 => v14}/arrow/scalar/binary.go | 4 +-
 .../go/{v12 => v14}/arrow/scalar/compare.go | 2 +-
 .../go/{v12 => v14}/arrow/scalar/nested.go | 23 +-
 .../{v12 => v14}/arrow/scalar/numeric.gen.go | 6 +-
 .../arrow/scalar/numeric.gen.go.tmpl | 0
 .../arrow/scalar/numeric.gen.go.tmpldata | 0
 .../arrow/scalar/numeric.gen_test.go.tmpl | 4 +-
 .../go/{v12 => v14}/arrow/scalar/parse.go | 14 +-
 .../go/{v12 => v14}/arrow/scalar/scalar.go | 22 +-
 .../go/{v12 => v14}/arrow/scalar/temporal.go | 2 +-
 .../arrow/go/{v12 => v14}/arrow/schema.go | 31 +-
 .../arrow/go/{v12 => v14}/arrow/table.go | 6 +-
 .../arrow/go/{v12 => v14}/arrow/tools.go | 0
 .../go/{v12 => v14}/arrow/type_string.go | 8 +-
 .../{v12 => v14}/arrow/type_traits_boolean.go | 2 +-
 .../arrow/type_traits_decimal128.go | 20 +-
 .../arrow/type_traits_decimal256.go | 20 +-
 .../{v12 => v14}/arrow/type_traits_float16.go | 20 +-
 .../arrow/type_traits_interval.go | 52 +-
 .../arrow/type_traits_numeric.gen.go | 321 +-
 .../arrow/type_traits_numeric.gen.go.tmpl | 18 +-
 .../type_traits_numeric.gen_test.go.tmpl | 2 +-
 .../go/v14/arrow/type_traits_timestamp.go | 59 +
 .../go/{v12 => v14}/arrow/unionmode_string.go | 0
 .../internal/bitutils/bit_block_counter.go | 4 +-
 .../internal/bitutils/bit_run_reader.go | 6 +-
 .../internal/bitutils/bit_set_run_reader.go | 4 +-
 .../internal/bitutils/bitmap_generate.go | 2 +-
 .../go/v14/internal/hashing/hash_funcs.go | 90 +
 .../internal/hashing/hash_string.go} | 18 +-
 .../internal/hashing/hash_string_go1.19.go} | 23 +-
 .../internal/hashing/types.tmpldata | 0
 .../internal/hashing/xxh3_memo_table.gen.go | 56 +-
 .../hashing/xxh3_memo_table.gen.go.tmpl | 10 +-
 .../internal/hashing/xxh3_memo_table.go | 131 +-
 .../apache/arrow/go/v14/internal/json/json.go | 51 +
 .../internal/json/json_tinygo.go} | 41 +-
 .../go/{v12 => v14}/internal/utils/Makefile | 0
 .../{v12 => v14}/internal/utils/buf_reader.go | 0
 .../internal/utils/endians_default.go | 0
 .../internal/utils/endians_s390x.go | 0
 .../go/{v12 => v14}/internal/utils/math.go | 0
 .../go/{v12 => v14}/internal/utils/min_max.go | 0
 .../internal/utils/min_max_amd64.go | 0
 .../internal/utils/min_max_arm64.go | 0
 .../internal/utils/min_max_avx2_amd64.go | 0
 .../internal/utils/min_max_avx2_amd64.s | 0
 .../internal/utils/min_max_neon_arm64.go | 0
 .../internal/utils/min_max_neon_arm64.s | 0
 .../internal/utils/min_max_noasm.go | 0
 .../internal/utils/min_max_ppc64le.go | 0
 .../internal/utils/min_max_s390x.go | 0
 .../internal/utils/min_max_sse4_amd64.go | 0
 .../internal/utils/min_max_sse4_amd64.s | 0
 .../internal/utils/transpose_ints.go | 0
 .../internal/utils/transpose_ints.go.tmpl | 0
 .../internal/utils/transpose_ints.tmpldata | 0
 .../internal/utils/transpose_ints_amd64.go | 0
 .../utils/transpose_ints_amd64.go.tmpl | 0
 .../internal/utils/transpose_ints_arm64.go | 0
 .../utils/transpose_ints_avx2_amd64.go | 0
 .../utils/transpose_ints_avx2_amd64.s | 0
 .../internal/utils/transpose_ints_def.go | 2 +-
 .../internal/utils/transpose_ints_noasm.go | 0
 .../utils/transpose_ints_noasm.go.tmpl | 0
 .../internal/utils/transpose_ints_ppc64le.go | 0
 .../internal/utils/transpose_ints_s390x.go | 0
 .../utils/transpose_ints_s390x.go.tmpl | 0
 .../utils/transpose_ints_simd.go.tmpl | 0
 .../utils/transpose_ints_sse4_amd64.go | 0
 .../utils/transpose_ints_sse4_amd64.s | 0
 vendor/github.com/apache/thrift/NOTICE | 5 -
 .../lib/go/thrift/application_exception.go | 183 -
 .../thrift/lib/go/thrift/binary_protocol.go | 548 -
 .../apache/thrift/lib/go/thrift/buf_pool.go | 52 -
 .../lib/go/thrift/buffered_transport.go | 99 -
 .../apache/thrift/lib/go/thrift/client.go | 109 -
 .../thrift/lib/go/thrift/compact_protocol.go | 846 -
 .../thrift/lib/go/thrift/configuration.go | 378 -
 .../apache/thrift/lib/go/thrift/context.go | 24 -
 .../thrift/lib/go/thrift/debug_protocol.go | 447 -
 .../thrift/lib/go/thrift/deserializer.go | 121 -
 .../apache/thrift/lib/go/thrift/exception.go | 116 -
 .../thrift/lib/go/thrift/framed_transport.go | 250 -
 .../thrift/lib/go/thrift/header_context.go | 110 -
 .../thrift/lib/go/thrift/header_protocol.go | 351 -
 .../thrift/lib/go/thrift/header_transport.go | 816 -
 .../thrift/lib/go/thrift/http_client.go | 256 -
 .../thrift/lib/go/thrift/http_transport.go | 74 -
 .../lib/go/thrift/iostream_transport.go | 222 -
 .../thrift/lib/go/thrift/json_protocol.go | 564 -
 .../apache/thrift/lib/go/thrift/logger.go | 69 -
 .../thrift/lib/go/thrift/memory_buffer.go | 80 -
 .../thrift/lib/go/thrift/messagetype.go | 31 -
 .../apache/thrift/lib/go/thrift/middleware.go | 109 -
 .../lib/go/thrift/multiplexed_protocol.go | 237 -
 .../apache/thrift/lib/go/thrift/numeric.go | 164 -
 .../apache/thrift/lib/go/thrift/pointerize.go | 52 -
 .../thrift/lib/go/thrift/processor_factory.go | 80 -
 .../apache/thrift/lib/go/thrift/protocol.go | 184 -
 .../lib/go/thrift/protocol_exception.go | 104 -
 .../thrift/lib/go/thrift/protocol_factory.go | 25 -
 .../thrift/lib/go/thrift/response_helper.go | 94 -
 .../thrift/lib/go/thrift/rich_transport.go | 71 -
 .../apache/thrift/lib/go/thrift/serializer.go | 136 -
 .../apache/thrift/lib/go/thrift/server.go | 35 -
 .../thrift/lib/go/thrift/server_socket.go | 137 -
 .../thrift/lib/go/thrift/server_transport.go | 34 -
 .../lib/go/thrift/simple_json_protocol.go | 1334 -
 .../thrift/lib/go/thrift/simple_server.go | 336 -
 .../apache/thrift/lib/go/thrift/socket.go | 241 -
 .../thrift/lib/go/thrift/socket_conn.go | 124 -
 .../lib/go/thrift/socket_non_unix_conn.go | 35 -
 .../thrift/lib/go/thrift/socket_unix_conn.go | 84 -
 .../thrift/lib/go/thrift/ssl_server_socket.go | 112 -
 .../apache/thrift/lib/go/thrift/ssl_socket.go | 262 -
 .../thrift/lib/go/thrift/staticcheck.conf | 4 -
 .../apache/thrift/lib/go/thrift/transport.go | 70 -
 .../lib/go/thrift/transport_exception.go | 131 -
 .../thrift/lib/go/thrift/transport_factory.go | 39 -
 .../apache/thrift/lib/go/thrift/type.go | 69 -
 .../thrift/lib/go/thrift/zlib_transport.go | 137 -
 .../aws/aws-sdk-go-v2/aws/config.go | 33 +-
 .../aws-sdk-go-v2/aws/go_module_metadata.go | 2 +-
 .../aws-sdk-go-v2/aws/middleware/metadata.go | 47 +-
 .../aws/middleware/private/metrics/metrics.go | 319 +
 .../aws/middleware/recursion_detection.go | 94 +
 .../aws/middleware/user_agent.go | 26 +-
 .../aws/protocol/eventstream/CHANGELOG.md | 36 +
 .../eventstream/go_module_metadata.go | 2 +-
 .../aws/aws-sdk-go-v2/aws/retry/middleware.go | 14 +-
 .../aws/retry/retryable_error.go | 15 +
 .../aws/aws-sdk-go-v2/aws/retryer.go | 2 +-
 .../aws/signer/internal/v4/headers.go | 2 +
 .../aws-sdk-go-v2/aws/signer/v4/middleware.go | 165 +-
 .../aws/aws-sdk-go-v2/aws/signer/v4/v4.go | 18 +-
 .../aws-sdk-go-v2/credentials/CHANGELOG.md | 186 +
 .../credentials/go_module_metadata.go | 2 +-
 .../feature/s3/manager/CHANGELOG.md | 264 +
 .../feature/s3/manager/bucket_region.go | 1 -
 .../feature/s3/manager/download.go | 4 +-
 .../feature/s3/manager/go_module_metadata.go | 2 +-
 .../feature/s3/manager/upload.go | 61 +-
 .../aws/aws-sdk-go-v2/internal/auth/auth.go | 45 +
 .../aws/aws-sdk-go-v2/internal/auth/scheme.go | 191 +
 .../auth/smithy/bearer_token_adapter.go | 43 +
 .../smithy/bearer_token_signer_adapter.go | 35 +
 .../auth/smithy/credentials_adapter.go | 46 +
 .../internal/auth/smithy/smithy.go | 2 +
 .../internal/auth/smithy/v4signer_adapter.go | 53 +
 .../internal/configsources/CHANGELOG.md | 93 +
 .../internal/configsources/endpoints.go | 57 +
 .../configsources/go_module_metadata.go | 2 +-
 .../aws-sdk-go-v2/internal/context/context.go | 39 +
 .../internal/endpoints/awsrulesfn/arn.go | 94 +
 .../internal/endpoints/awsrulesfn/doc.go | 3 +
 .../internal/endpoints/awsrulesfn/generate.go | 7 +
 .../internal/endpoints/awsrulesfn/host.go | 51 +
 .../endpoints/awsrulesfn/partition.go | 75 +
 .../endpoints/awsrulesfn/partitions.go | 381 +
 .../endpoints/awsrulesfn/partitions.json | 216 +
 .../internal/endpoints/endpoints.go | 201 +
 .../internal/endpoints/v2/CHANGELOG.md | 93 +
 .../endpoints/v2/go_module_metadata.go | 2 +-
 .../aws-sdk-go-v2/internal/v4a/CHANGELOG.md | 94 +
 .../internal/v4a/go_module_metadata.go | 2 +-
 .../aws-sdk-go-v2/internal/v4a/middleware.go | 27 +-
 .../aws/aws-sdk-go-v2/internal/v4a/smithy.go | 86 +
 .../internal/accept-encoding/CHANGELOG.md | 36 +
 .../accept-encoding/go_module_metadata.go | 2 +-
 .../service/internal/checksum/CHANGELOG.md | 93 +
 .../internal/checksum/go_module_metadata.go | 2 +-
 .../internal/checksum/middleware_add.go | 21 +-
 .../middleware_compute_input_checksum.go | 65 +-
 .../checksum/middleware_setup_context.go | 23 +-
 .../internal/presigned-url/CHANGELOG.md | 93 +
 .../presigned-url/go_module_metadata.go | 2 +-
 .../service/internal/s3shared/CHANGELOG.md | 94 +
 .../internal/s3shared/arn/arn_member.go | 32 +
 .../internal/s3shared/config/config.go | 19 +
 .../internal/s3shared/go_module_metadata.go | 2 +-
 .../aws/aws-sdk-go-v2/service/s3/CHANGELOG.md | 188 +
 .../aws-sdk-go-v2/service/s3/api_client.go | 444 +-
 .../service/s3/api_op_AbortMultipartUpload.go | 170 +-
 .../s3/api_op_CompleteMultipartUpload.go | 386 +-
 .../service/s3/api_op_CopyObject.go | 772 +-
 .../service/s3/api_op_CreateBucket.go | 279 +-
 .../s3/api_op_CreateMultipartUpload.go | 740 +-
 .../service/s3/api_op_CreateSession.go | 260 +
 .../service/s3/api_op_DeleteBucket.go | 93 +-
 ...i_op_DeleteBucketAnalyticsConfiguration.go | 85 +-
 .../service/s3/api_op_DeleteBucketCors.go | 71 +-
 .../s3/api_op_DeleteBucketEncryption.go | 78 +-
 ...teBucketIntelligentTieringConfiguration.go | 69 +-
 ...i_op_DeleteBucketInventoryConfiguration.go | 86 +-
 .../s3/api_op_DeleteBucketLifecycle.go | 81 +-
 ...api_op_DeleteBucketMetricsConfiguration.go | 99 +-
 .../api_op_DeleteBucketOwnershipControls.go | 70 +-
 .../service/s3/api_op_DeleteBucketPolicy.go | 120 +-
 .../s3/api_op_DeleteBucketReplication.go | 82 +-
 .../service/s3/api_op_DeleteBucketTagging.go | 68 +-
 .../service/s3/api_op_DeleteBucketWebsite.go | 84 +-
 .../service/s3/api_op_DeleteObject.go | 217 +-
 .../service/s3/api_op_DeleteObjectTagging.go | 100 +-
 .../service/s3/api_op_DeleteObjects.go | 271 +-
 .../s3/api_op_DeletePublicAccessBlock.go | 83 +-
 ...api_op_GetBucketAccelerateConfiguration.go | 98 +-
 .../service/s3/api_op_GetBucketAcl.go | 93 +-
 .../api_op_GetBucketAnalyticsConfiguration.go | 88 +-
 .../service/s3/api_op_GetBucketCors.go | 88 +-
 .../service/s3/api_op_GetBucketEncryption.go | 87 +-
 ...etBucketIntelligentTieringConfiguration.go | 67 +-
 .../api_op_GetBucketInventoryConfiguration.go | 83 +-
 .../api_op_GetBucketLifecycleConfiguration.go | 112 +-
 .../service/s3/api_op_GetBucketLocation.go | 101 +-
 .../service/s3/api_op_GetBucketLogging.go | 72 +-
 .../api_op_GetBucketMetricsConfiguration.go | 98 +-
 ...i_op_GetBucketNotificationConfiguration.go | 105 +-
 .../s3/api_op_GetBucketOwnershipControls.go | 76 +-
 .../service/s3/api_op_GetBucketPolicy.go | 127 +-
 .../s3/api_op_GetBucketPolicyStatus.go | 87 +-
 .../service/s3/api_op_GetBucketReplication.go | 86 +-
 .../s3/api_op_GetBucketRequestPayment.go | 63 +-
 .../service/s3/api_op_GetBucketTagging.go | 78 +-
 .../service/s3/api_op_GetBucketVersioning.go | 76 +-
 .../service/s3/api_op_GetBucketWebsite.go | 79 +-
 .../service/s3/api_op_GetObject.go | 558 +-
 .../service/s3/api_op_GetObjectAcl.go | 119 +-
 .../service/s3/api_op_GetObjectAttributes.go | 335 +-
 .../service/s3/api_op_GetObjectLegalHold.go | 82 +-
 .../s3/api_op_GetObjectLockConfiguration.go | 79 +-
 .../service/s3/api_op_GetObjectRetention.go | 82 +-
 .../service/s3/api_op_GetObjectTagging.go | 119 +-
 .../service/s3/api_op_GetObjectTorrent.go | 83 +-
 .../service/s3/api_op_GetPublicAccessBlock.go | 90 +-
 .../service/s3/api_op_HeadBucket.go | 191 +-
 .../service/s3/api_op_HeadObject.go | 551 +-
 ...pi_op_ListBucketAnalyticsConfigurations.go | 101 +-
 ...tBucketIntelligentTieringConfigurations.go | 69 +-
 ...pi_op_ListBucketInventoryConfigurations.go | 97 +-
 .../api_op_ListBucketMetricsConfigurations.go | 98 +-
 .../service/s3/api_op_ListBuckets.go | 38 +-
 .../service/s3/api_op_ListDirectoryBuckets.go | 291 +
 .../service/s3/api_op_ListMultipartUploads.go | 280 +-
 .../service/s3/api_op_ListObjectVersions.go | 159 +-
 .../service/s3/api_op_ListObjects.go | 191 +-
 .../service/s3/api_op_ListObjectsV2.go | 313 +-
 .../service/s3/api_op_ListParts.go | 271 +-
 ...api_op_PutBucketAccelerateConfiguration.go | 110 +-
 .../service/s3/api_op_PutBucketAcl.go | 301 +-
 .../api_op_PutBucketAnalyticsConfiguration.go | 141 +-
 .../service/s3/api_op_PutBucketCors.go | 141 +-
 .../service/s3/api_op_PutBucketEncryption.go | 132 +-
 ...utBucketIntelligentTieringConfiguration.go | 106 +-
 .../api_op_PutBucketInventoryConfiguration.go | 153 +-
 .../api_op_PutBucketLifecycleConfiguration.go | 162 +-
 .../service/s3/api_op_PutBucketLogging.go | 148 +-
 .../api_op_PutBucketMetricsConfiguration.go | 109 +-
 ...i_op_PutBucketNotificationConfiguration.go | 104 +-
 .../s3/api_op_PutBucketOwnershipControls.go | 79 +-
 .../service/s3/api_op_PutBucketPolicy.go | 162 +-
 .../service/s3/api_op_PutBucketReplication.go | 146 +-
 .../s3/api_op_PutBucketRequestPayment.go | 98 +-
 .../service/s3/api_op_PutBucketTagging.go | 166 +-
 .../service/s3/api_op_PutBucketVersioning.go | 128 +-
 .../service/s3/api_op_PutBucketWebsite.go | 187 +-
 .../service/s3/api_op_PutObject.go | 578 +-
 .../service/s3/api_op_PutObjectAcl.go | 355 +-
 .../service/s3/api_op_PutObjectLegalHold.go | 93 +-
 .../s3/api_op_PutObjectLockConfiguration.go | 111 +-
 .../service/s3/api_op_PutObjectRetention.go | 113 +-
 .../service/s3/api_op_PutObjectTagging.go | 180 +-
 .../service/s3/api_op_PutPublicAccessBlock.go | 108 +-
 .../service/s3/api_op_RestoreObject.go | 436 +-
 .../service/s3/api_op_SelectObjectContent.go | 219 +-
 .../service/s3/api_op_UploadPart.go | 405 +-
 .../service/s3/api_op_UploadPartCopy.go | 453 +-
 .../s3/api_op_WriteGetObjectResponse.go | 204 +-
 .../aws/aws-sdk-go-v2/service/s3/auth.go | 318 +
 .../service/s3/bucket_context.go | 47 +
 .../aws/aws-sdk-go-v2/service/s3/bucketer.go | 15 +
 .../service/s3/create_mpu_checksum.go | 36 +
 .../aws-sdk-go-v2/service/s3/deserializers.go | 1015 +-
 .../service/s3/endpoint_auth_resolver.go | 115 +
 .../aws/aws-sdk-go-v2/service/s3/endpoints.go | 5598 +-
 .../aws/aws-sdk-go-v2/service/s3/express.go | 9 +
 .../service/s3/express_default.go | 170 +
 .../service/s3/express_resolve.go | 44 +
 .../aws-sdk-go-v2/service/s3/generated.json | 9 +-
 .../service/s3/go_module_metadata.go | 2 +-
 .../service/s3/handwritten_paginators.go | 214 +
 .../s3/internal/customizations/context.go | 21 +
 .../s3/internal/customizations/express.go | 44 +
 .../internal/customizations/express_config.go | 18 +
 .../express_default_checksum.go | 42 +
 .../customizations/express_properties.go | 21 +
 .../internal/customizations/express_signer.go | 109 +
 .../customizations/express_signer_smithy.go | 61 +
 .../customizations/process_arn_resource.go | 4 +
 .../remove_bucket_middleware.go | 5 +
 .../customizations/s3_object_lambda.go | 4 +
 .../internal/customizations/signer_wrapper.go | 62 +-
 .../customizations/update_endpoint.go | 8 +-
 .../s3/internal/endpoints/endpoints.go | 186 +-
 .../aws/aws-sdk-go-v2/service/s3/options.go | 314 +
 .../s3/serialize_immutable_hostname_bucket.go | 77 +
 .../aws-sdk-go-v2/service/s3/serializers.go | 2409 +-
 .../aws-sdk-go-v2/service/s3/types/enums.go | 239 +-
 .../aws-sdk-go-v2/service/s3/types/errors.go | 19 +-
 .../aws-sdk-go-v2/service/s3/types/types.go | 2629 +-
 .../aws-sdk-go-v2/service/s3/validators.go | 51 +
 .../aws/aws-sdk-go/aws/auth/bearer/token.go | 50 +
 .../aws/aws-sdk-go/aws/client/client.go | 13 +-
 .../aws/client/metadata/client_info.go | 19 +-
 .../github.com/aws/aws-sdk-go/aws/config.go | 101 +-
 .../aws/corehandlers/awsinternal.go | 4 +
 .../aws-sdk-go/aws/corehandlers/user_agent.go | 10 +
 .../aws/credentials/endpointcreds/provider.go | 47 +-
 .../aws/credentials/processcreds/provider.go | 24 +-
 .../aws/credentials/ssocreds/provider.go | 75 +-
 .../credentials/ssocreds/sso_cached_token.go | 237 +
 .../credentials/ssocreds/token_provider.go | 148 +
 .../stscreds/assume_role_provider.go | 12 +-
 .../stscreds/web_identity_provider.go | 40 +-
 .../aws/aws-sdk-go/aws/defaults/defaults.go | 65 +-
 .../aws/aws-sdk-go/aws/ec2metadata/service.go | 10 +-
 .../aws/ec2metadata/token_provider.go | 28 +-
 .../aws/aws-sdk-go/aws/endpoints/decode.go | 69 +-
 .../aws/aws-sdk-go/aws/endpoints/defaults.go | 48692 +++++++++++++---
 .../aws/aws-sdk-go/aws/endpoints/doc.go | 55 +-
 .../aws/aws-sdk-go/aws/endpoints/endpoints.go | 113 +-
 .../aws/aws-sdk-go/aws/endpoints/v3model.go | 297 +-
 .../aws/endpoints/v3model_codegen.go | 104 +-
 .../github.com/aws/aws-sdk-go/aws/logger.go | 3 +
 .../aws/aws-sdk-go/aws/request/handlers.go | 5 +-
 .../aws/aws-sdk-go/aws/request/request.go | 9 +
 .../aws/aws-sdk-go/aws/request/retryer.go | 4 +-
 .../aws/aws-sdk-go/aws/session/credentials.go | 69 +-
 .../aws/aws-sdk-go/aws/session/doc.go | 78 +
 .../aws/aws-sdk-go/aws/session/env_config.go | 96 +
 .../aws/aws-sdk-go/aws/session/session.go | 126 +-
 .../aws-sdk-go/aws/session/shared_config.go | 253 +-
 .../aws/aws-sdk-go/aws/signer/v4/v4.go | 33 +-
 .../github.com/aws/aws-sdk-go/aws/version.go | 2 +-
 .../aws-sdk-go/internal/ini/literal_tokens.go | 77 +-
 .../aws/aws-sdk-go/internal/ini/value_util.go | 2 +-
 .../aws/aws-sdk-go/internal/ini/visitor.go | 6 +-
 .../internal/shareddefaults/shared_config.go | 18 +-
 .../shared_config_resolve_home.go | 18 +
 .../shared_config_resolve_home_go1.12.go | 13 +
 .../private/protocol/json/jsonutil/build.go | 29 +-
 .../protocol/json/jsonutil/unmarshal.go | 13 +
 .../private/protocol/jsonrpc/jsonrpc.go | 5 +-
 .../protocol/jsonrpc/unmarshal_error.go | 63 +-
 .../protocol/query/queryutil/queryutil.go | 34 +-
 .../private/protocol/query/unmarshal_error.go | 3 +-
 .../aws-sdk-go/private/protocol/rest/build.go | 45 +-
 .../private/protocol/rest/payload.go | 11 +-
 .../private/protocol/rest/unmarshal.go | 27 +-
 .../protocol/restjson/unmarshal_error.go | 133 +-
 .../private/protocol/xml/xmlutil/build.go | 32 +-
 .../private/protocol/xml/xmlutil/unmarshal.go | 18 +-
 .../aws/aws-sdk-go/service/athena/api.go | 18593 ++++--
 .../service/athena/athenaiface/interface.go | 200 +-
 .../aws/aws-sdk-go/service/athena/doc.go | 6 +-
 .../aws/aws-sdk-go/service/athena/errors.go | 17 +-
 .../aws/aws-sdk-go/service/athena/service.go | 38 +-
 .../aws/aws-sdk-go/service/sso/api.go | 209 +-
 .../aws/aws-sdk-go/service/sso/doc.go | 27 +-
 .../aws/aws-sdk-go/service/sso/service.go | 30 +-
 .../service/sso/ssoiface/interface.go | 46 +-
 .../aws/aws-sdk-go/service/ssooidc/api.go | 2252 +
 .../aws/aws-sdk-go/service/ssooidc/doc.go | 67 +
 .../aws/aws-sdk-go/service/ssooidc/errors.go | 115 +
 .../aws/aws-sdk-go/service/ssooidc/service.go | 106 +
 .../aws/aws-sdk-go/service/sts/api.go | 1148 +-
 .../aws/aws-sdk-go/service/sts/doc.go | 9 +-
 .../aws/aws-sdk-go/service/sts/errors.go | 2 +-
 .../aws/aws-sdk-go/service/sts/service.go | 34 +-
 .../service/sts/stsiface/interface.go | 46 +-
 vendor/github.com/aws/smithy-go/.gitignore | 4 +
 vendor/github.com/aws/smithy-go/CHANGELOG.md | 49 +
 vendor/github.com/aws/smithy-go/README.md | 15 +
 vendor/github.com/aws/smithy-go/auth/auth.go | 3 +
 .../github.com/aws/smithy-go/auth/identity.go | 47 +
 .../github.com/aws/smithy-go/auth/option.go | 25 +
 .../aws/smithy-go/auth/scheme_id.go | 20 +
 .../container/private/cache/cache.go | 19 +
 .../container/private/cache/lru/lru.go | 63 +
 .../smithy-go/encoding/httpbinding/encode.go | 13 +-
 .../aws/smithy-go/endpoints/endpoint.go | 23 +
 .../endpoints/private/rulesfn/doc.go | 4 +
 .../endpoints/private/rulesfn/strings.go | 25 +
 .../endpoints/private/rulesfn/uri.go | 130 +
 .../aws/smithy-go/go_module_metadata.go | 2 +-
 vendor/github.com/aws/smithy-go/properties.go | 62 +
 .../aws/smithy-go/transport/http/auth.go | 21 +
 .../smithy-go/transport/http/auth_schemes.go | 45 +
 .../http/middleware_header_comment.go | 81 +
 .../smithy-go/transport/http/properties.go | 80 +
 .../github.com/danieljoos/wincred/README.md | 2 +-
 vendor/github.com/danieljoos/wincred/sys.go | 29 +-
 .../github.com/danieljoos/wincred/wincred.go | 3 +
 .../github.com/dvsekhvalnov/jose2go/README.md | 97 +-
 .../github.com/dvsekhvalnov/jose2go/jose.go | 63 +-
 .../dvsekhvalnov/jose2go/keys/ecc/ecc.go | 78 +-
 .../dvsekhvalnov/jose2go/pbse2_hmac_aeskw.go | 51 +-
 .../github.com/elastic/go-sysinfo/.gitignore | 6 +-
 .../elastic/go-sysinfo/.golangci.yml | 16 +
 .../elastic/go-sysinfo/CHANGELOG.md | 153 -
 .../elastic/go-sysinfo/CONTRIBUTING.md | 16 +
 vendor/github.com/elastic/go-sysinfo/Makefile | 27 +-
 .../github.com/elastic/go-sysinfo/README.md | 9 +-
 .../go-sysinfo/internal/registry/registry.go | 6 +-
 .../providers/aix/boottime_aix_ppc64.go | 8 +-
 .../providers/aix/host_aix_ppc64.go | 27 +-
 .../providers/aix/kernel_aix_ppc64.go | 10 +-
 .../providers/aix/machineid_aix_ppc64.go | 7 +-
 .../go-sysinfo/providers/aix/os_aix_ppc64.go | 6 +-
 .../providers/aix/process_aix_ppc64.go | 27 +-
 .../providers/aix/ztypes_aix_ppc64.go | 1 -
 .../providers/darwin/arch_darwin.go | 11 +-
 .../providers/darwin/boottime_darwin.go | 13 +-
 .../providers/darwin/host_darwin.go | 62 +-
 .../providers/darwin/kernel_darwin.go | 6 +-
 .../providers/darwin/load_average_darwin.go | 44 +
 .../providers/darwin/machineid_darwin.go | 10 +-
 .../darwin/machineid_nocgo_darwin.go | 30 +
 .../providers/darwin/memory_darwin.go | 13 +-
 .../elastic/go-sysinfo/providers/darwin/os.go | 12 +-
 .../providers/darwin/process_cgo_darwin.go | 57 +
 .../providers/darwin/process_darwin.go | 163 +-
 .../providers/darwin/process_nocgo_darwin.go | 30 +
 .../providers/darwin/syscall_cgo_darwin.go | 71 +
 .../providers/darwin/syscall_darwin.go | 199 +-
 .../providers/darwin/syscall_nocgo_darwin.go | 39 +
 .../go-sysinfo/providers/linux/arch_linux.go | 5 +-
 .../providers/linux/boottime_linux.go | 2 +-
 .../providers/linux/capabilities_linux.go | 11 +-
 .../go-sysinfo/providers/linux/container.go | 5 +-
 .../go-sysinfo/providers/linux/host_linux.go | 33 +-
 .../providers/linux/kernel_linux.go | 5 +-
 .../go-sysinfo/providers/linux/machineid.go | 5 +-
 .../providers/linux/memory_linux.go | 6 +-
 .../elastic/go-sysinfo/providers/linux/os.go | 90 +-
 .../providers/linux/process_linux.go | 2 +-
 .../go-sysinfo/providers/linux/procnet.go | 13 +-
 .../providers/linux/seccomp_linux.go | 2 +-
 .../go-sysinfo/providers/linux/util.go | 36 +-
 .../go-sysinfo/providers/linux/vmstat.go | 7 +-
 .../go-sysinfo/providers/shared/fqdn.go | 77 +
 .../providers/windows/boottime_windows.go | 11 +-
 .../providers/windows/device_windows.go | 7 +-
 .../providers/windows/host_windows.go | 54 +-
 .../providers/windows/machineid_windows.go | 7 +-
 .../providers/windows/os_windows.go | 13 +-
 .../providers/windows/process_windows.go | 45 +-
 .../github.com/elastic/go-sysinfo/system.go | 1 +
 .../elastic/go-sysinfo/types/errors.go | 2 +-
 .../elastic/go-sysinfo/types/host.go | 9 +-
 .../elastic/go-sysinfo/types/process.go | 5 +
 .../elastic/go-windows/CHANGELOG.md | 9 +-
 vendor/github.com/elastic/go-windows/doc.go | 4 +-
 .../elastic/go-windows/zsyscall_windows.go | 10 +-
 .../github.com/felixge/httpsnoop/.gitignore | 0
 .../github.com/felixge/httpsnoop/LICENSE.txt | 19 +
 vendor/github.com/felixge/httpsnoop/Makefile | 10 +
 vendor/github.com/felixge/httpsnoop/README.md | 95 +
 .../felixge/httpsnoop/capture_metrics.go | 86 +
 vendor/github.com/felixge/httpsnoop/docs.go | 10 +
 .../httpsnoop/wrap_generated_gteq_1.8.go | 436 +
 .../httpsnoop/wrap_generated_lt_1.8.go | 278 +
 .../gabriel-vasile/mimetype/README.md | 3 -
 .../mimetype/internal/magic/binary.go | 38 +-
 .../mimetype/internal/magic/magic.go | 4 +-
 .../mimetype/internal/magic/text_csv.go | 18 +-
 .../gabriel-vasile/mimetype/mimetype.go | 3 +-
 .../mimetype/supported_mimes.md | 2 +-
 .../github.com/go-faster/errors/.golangci.yml | 4 -
 vendor/github.com/go-faster/errors/README.md | 30 +-
 .../github.com/go-faster/errors/join_go120.go | 20 +
 .../github.com/go-logfmt/logfmt/CHANGELOG.md | 46 +-
 vendor/github.com/go-logfmt/logfmt/README.md | 30 +-
 vendor/github.com/go-logfmt/logfmt/decode.go | 17 +
 vendor/github.com/go-logr/logr/.golangci.yaml | 26 +
 vendor/github.com/go-logr/logr/CHANGELOG.md | 6 +
 .../github.com/go-logr/logr/CONTRIBUTING.md | 17 +
 .../logr}/LICENSE | 0
 vendor/github.com/go-logr/logr/README.md | 406 +
 vendor/github.com/go-logr/logr/SECURITY.md | 18 +
 vendor/github.com/go-logr/logr/context.go | 33 +
 .../github.com/go-logr/logr/context_noslog.go | 49 +
 .../github.com/go-logr/logr/context_slog.go | 83 +
 vendor/github.com/go-logr/logr/discard.go | 24 +
 vendor/github.com/go-logr/logr/funcr/funcr.go | 911 +
 .../github.com/go-logr/logr/funcr/slogsink.go | 105 +
 vendor/github.com/go-logr/logr/logr.go | 520 +
 vendor/github.com/go-logr/logr/sloghandler.go | 192 +
 vendor/github.com/go-logr/logr/slogr.go | 100 +
 vendor/github.com/go-logr/logr/slogsink.go | 120 +
 .../{minio/c2goasm => go-logr/stdr}/LICENSE | 1 -
 vendor/github.com/go-logr/stdr/README.md | 6 +
 vendor/github.com/go-logr/stdr/stdr.go | 170 +
 vendor/github.com/goccy/go-json/CHANGELOG.md | 32 +
 .../goccy/go-json/internal/decoder/array.go | 4 +-
 .../goccy/go-json/internal/decoder/map.go | 2 +-
 .../goccy/go-json/internal/decoder/struct.go | 54 +-
 .../goccy/go-json/internal/encoder/code.go | 20 +-
 .../go-json/internal/encoder/compiler.go | 9 +-
 .../goccy/go-json/internal/encoder/opcode.go | 83 +
 .../goccy/go-json/internal/encoder/option.go | 1 +
 .../go-json/internal/encoder/vm/debug_vm.go | 6 +
 .../internal/encoder/vm_color_indent/util.go | 7 +-
 .../internal/encoder/vm_indent/util.go | 7 +-
 vendor/github.com/goccy/go-json/option.go | 7 +
 vendor/github.com/golang/snappy/.gitignore | 16 -
 vendor/github.com/golang/snappy/AUTHORS | 18 -
 vendor/github.com/golang/snappy/CONTRIBUTORS | 41 -
 vendor/github.com/golang/snappy/LICENSE | 27 -
 vendor/github.com/golang/snappy/README | 107 -
 vendor/github.com/golang/snappy/decode.go | 264 -
 .../github.com/golang/snappy/decode_amd64.s | 490 -
 .../github.com/golang/snappy/decode_arm64.s | 494 -
 vendor/github.com/golang/snappy/decode_asm.go | 15 -
 .../github.com/golang/snappy/decode_other.go | 115 -
 vendor/github.com/golang/snappy/encode.go | 289 -
 .../github.com/golang/snappy/encode_amd64.s | 730 -
 .../github.com/golang/snappy/encode_arm64.s | 722 -
 vendor/github.com/golang/snappy/encode_asm.go | 30 -
 .../github.com/golang/snappy/encode_other.go | 238 -
 vendor/github.com/golang/snappy/snappy.go | 98 -
 .../asm2plan9s => google/flatbuffers}/LICENSE | 0
 .../github.com/google/flatbuffers/go/lib.go | 20 +
 vendor/github.com/google/s2a-go/README.md | 7 +-
 .../internal/handshaker/service/service.go | 53 +-
 .../s2a-go/internal/record/ticketsender.go | 8 +-
 .../google/s2a-go/internal/v2/s2av2.go | 105 +-
 .../github.com/google/s2a-go/retry/retry.go | 144 +
 vendor/github.com/google/s2a-go/s2a.go | 47 +-
 .../github.com/google/s2a-go/s2a_options.go | 7 +
 .../s2a-go/testdata/mds_client_cert.pem | 19 +
 .../google/s2a-go/testdata/mds_client_key.pem | 28 +
 .../google/s2a-go/testdata/mds_root_cert.pem | 21 +
 .../s2a-go/testdata/mds_server_cert.pem | 21 +
 .../google/s2a-go/testdata/mds_server_key.pem | 28 +
 .../s2a-go/testdata/self_signed_cert.pem | 19 +
 .../s2a-go/testdata/self_signed_key.pem | 28 +
 vendor/github.com/google/uuid/.travis.yml | 9 -
 vendor/github.com/google/uuid/CHANGELOG.md | 41 +
 vendor/github.com/google/uuid/CONTRIBUTING.md | 16 +
 vendor/github.com/google/uuid/README.md | 10 +-
 vendor/github.com/google/uuid/hash.go | 6 +
 vendor/github.com/google/uuid/node_js.go | 2 +-
 vendor/github.com/google/uuid/time.go | 21 +-
 vendor/github.com/google/uuid/uuid.go | 89 +-
 vendor/github.com/google/uuid/version6.go | 56 +
 vendor/github.com/google/uuid/version7.go | 104 +
 .../client/client.go | 42 +-
 .../client/util/util.go | 9 +
 .../gax-go/v2/.release-please-manifest.json | 2 +-
 .../googleapis/gax-go/v2/CHANGES.md | 8 +
 .../googleapis/gax-go/v2/callctx/callctx.go | 74 +
 .../github.com/googleapis/gax-go/v2/header.go | 49 +
 .../googleapis/gax-go/v2/internal/version.go | 2 +-
 vendor/github.com/jackc/pgconn/CHANGELOG.md | 6 +
 vendor/github.com/jackc/pgconn/config.go | 6 +-
 vendor/github.com/jackc/pgconn/pgconn.go | 20 +-
 .../jackc/pgservicefile/.travis.yml | 9 -
 .../github.com/jackc/pgservicefile/README.md | 5 +-
 vendor/github.com/jackc/pgtype/CHANGELOG.md | 6 +
 vendor/github.com/jackc/pgtype/array.go | 2 +-
 .../github.com/jackc/pgtype/composite_type.go | 10 +-
 vendor/github.com/jackc/pgtype/convert.go | 9 +-
 .../github.com/jackc/pgtype/database_sql.go | 3 +-
 vendor/github.com/jackc/pgtype/hstore.go | 2 +-
 vendor/github.com/jackc/pgtype/int8.go | 6 -
 vendor/github.com/jackc/pgtype/json.go | 5 +-
 vendor/github.com/jackc/pgtype/numeric.go | 30 +-
 vendor/github.com/jackc/pgtype/point.go | 2 +-
 vendor/github.com/jackc/pgtype/uuid.go | 38 +-
 vendor/github.com/klauspost/asmfmt/.gitignore | 24 -
 .../klauspost/asmfmt/.goreleaser.yml | 77 -
 vendor/github.com/klauspost/asmfmt/LICENSE | 22 -
 vendor/github.com/klauspost/asmfmt/README.md | 113 -
 vendor/github.com/klauspost/asmfmt/asmfmt.go | 652 -
 .../klauspost/asmfmt/cmd/asmfmt/LICENSE | 27 -
 .../klauspost/asmfmt/cmd/asmfmt/doc.go | 43 -
 .../klauspost/asmfmt/cmd/asmfmt/main.go | 203 -
 .../klauspost/compress/.goreleaser.yml | 20 +-
 .../github.com/klauspost/compress/README.md | 83 +-
 .../github.com/klauspost/compress/SECURITY.md | 25 +
 .../klauspost/compress/flate/deflate.go | 989 -
 .../klauspost/compress/flate/dict_decoder.go | 184 -
 .../klauspost/compress/flate/fast_encoder.go | 216 -
 .../compress/flate/huffman_bit_writer.go | 1187 -
 .../klauspost/compress/flate/huffman_code.go | 417 -
 .../compress/flate/huffman_sortByFreq.go | 178 -
 .../compress/flate/huffman_sortByLiteral.go | 201 -
 .../klauspost/compress/flate/inflate.go | 793 -
 .../klauspost/compress/flate/inflate_gen.go | 1283 -
 .../klauspost/compress/flate/level1.go | 241 -
 .../klauspost/compress/flate/level2.go | 214 -
 .../klauspost/compress/flate/level3.go | 241 -
 .../klauspost/compress/flate/level4.go | 221 -
 .../klauspost/compress/flate/level5.go | 310 -
 .../klauspost/compress/flate/level6.go | 325 -
 .../klauspost/compress/flate/regmask_amd64.go | 37 -
 .../klauspost/compress/flate/regmask_other.go | 40 -
 .../klauspost/compress/flate/stateless.go | 318 -
 .../klauspost/compress/flate/token.go | 379 -
 .../klauspost/compress/fse/bitwriter.go | 3 +-
 .../klauspost/compress/fse/compress.go | 5 +-
 .../klauspost/compress/fse/decompress.go | 4 +-
 .../klauspost/compress/gzip/gunzip.go | 349 -
 .../klauspost/compress/gzip/gzip.go | 269 -
 .../klauspost/compress/huff0/bitwriter.go | 27 +-
 .../klauspost/compress/huff0/bytereader.go | 44 -
 .../klauspost/compress/huff0/compress.go | 28 +-
 .../klauspost/compress/huff0/decompress.go | 4 +-
 .../klauspost/compress/huff0/huff0.go | 4 +-
 .../compress/internal/snapref/encode_other.go | 34 +-
 vendor/github.com/klauspost/compress/s2sx.mod | 2 +-
 .../klauspost/compress/zstd/README.md | 4 +-
 .../klauspost/compress/zstd/bitreader.go | 34 +-
 .../klauspost/compress/zstd/bitwriter.go | 3 +-
 .../klauspost/compress/zstd/blockdec.go | 6 +-
 .../klauspost/compress/zstd/blockenc.go | 38 +-
 .../klauspost/compress/zstd/bytebuf.go | 4 +-
 .../klauspost/compress/zstd/decodeheader.go | 56 +-
 .../klauspost/compress/zstd/decoder.go | 7 +-
 .../compress/zstd/decoder_options.go | 2 +-
 .../klauspost/compress/zstd/dict.go | 422 +-
 .../klauspost/compress/zstd/enc_base.go | 3 +-
 .../klauspost/compress/zstd/enc_best.go | 297 +-
 .../klauspost/compress/zstd/enc_better.go | 17 +-
 .../klauspost/compress/zstd/enc_dfast.go | 2 +-
 .../klauspost/compress/zstd/enc_fast.go | 17 +-
 .../klauspost/compress/zstd/encoder.go | 91 +-
 .../compress/zstd/encoder_options.go | 12 +-
 .../klauspost/compress/zstd/framedec.go | 43 +-
 .../klauspost/compress/zstd/frameenc.go | 6 +-
 .../compress/zstd/fse_decoder_generic.go | 11 +-
 .../klauspost/compress/zstd/matchlen_amd64.go | 16 +
 .../klauspost/compress/zstd/matchlen_amd64.s | 68 +
 .../compress/zstd/matchlen_generic.go | 33 +
 .../klauspost/compress/zstd/seqdec.go | 28 +-
 .../klauspost/compress/zstd/seqdec_amd64.go | 17 +-
 .../klauspost/compress/zstd/seqdec_amd64.s | 388 +-
 .../klauspost/compress/zstd/seqdec_generic.go | 2 +-
 .../klauspost/compress/zstd/snappy.go | 5 +-
 .../klauspost/compress/zstd/zstd.go | 26 +-
 .../github.com/klauspost/cpuid/v2/README.md | 28 +-
 vendor/github.com/klauspost/cpuid/v2/cpuid.go | 86 +-
 .../klauspost/cpuid/v2/detect_x86.go | 1 +
 .../klauspost/cpuid/v2/featureid_string.go | 401 +-
 .../golang_protobuf_extensions/NOTICE | 1 -
 .../pbutil/.gitignore | 1 -
 .../pbutil/Makefile | 7 -
 .../pbutil/decode.go | 75 -
 .../pbutil/encode.go | 46 -
 vendor/github.com/minio/asm2plan9s/.gitignore | 2 -
 vendor/github.com/minio/asm2plan9s/README.md | 91 -
 .../github.com/minio/asm2plan9s/asm2plan9s.go | 252 -
 .../minio/asm2plan9s/asm2plan9s_amd64.go | 192 -
 .../minio/asm2plan9s/asm2plan9s_arm64.go | 122 -
 vendor/github.com/minio/asm2plan9s/example.s | 1 -
 vendor/github.com/minio/asm2plan9s/neon.asm | 1 -
 vendor/github.com/minio/asm2plan9s/yasm.go | 176 -
 vendor/github.com/minio/c2goasm/README.md | 203 -
 vendor/github.com/minio/c2goasm/arguments.go | 122 -
 vendor/github.com/minio/c2goasm/assembly.go | 412 -
 vendor/github.com/minio/c2goasm/c2goasm.go | 300 -
 vendor/github.com/minio/c2goasm/constants.go | 255 -
 vendor/github.com/minio/c2goasm/epilogue.go | 219 -
 vendor/github.com/minio/c2goasm/subroutine.go | 294 -
 vendor/github.com/paulmach/orb/CHANGELOG.md | 38 +
 vendor/github.com/pierrec/lz4/v4/README.md | 2 +-
 .../pierrec/lz4/v4/compressing_reader.go | 222 +
 .../lz4/v4/internal/lz4block/blocks.go | 5 +-
 .../lz4/v4/internal/lz4block/decode_arm64.s | 15 +-
 .../lz4/v4/internal/lz4stream/block.go | 4 +-
 vendor/github.com/pierrec/lz4/v4/options.go | 28 +
 vendor/github.com/pierrec/lz4/v4/writer.go | 4 +
 .../client_golang/prometheus/histogram.go | 56 +-
 .../client_golang/prometheus/labels.go | 2 +
 .../prometheus/process_collector_other.go | 4 +-
 .../prometheus/process_collector_wasip1.go | 26 +
 .../prometheus/client_model/go/metrics.pb.go | 3 +
 .../prometheus/common/expfmt/decode.go | 9 +-
 .../prometheus/common/expfmt/encode.go | 7 +-
 .../prometheus/common/expfmt/text_parse.go | 8 +-
 .../prometheus/common/model/alert.go | 4 +-
 .../prometheus/common/model/metadata.go | 28 +
 .../prometheus/common/model/metric.go | 10 +-
 .../prometheus/common/model/signature.go | 6 +-
 .../prometheus/common/model/silence.go | 2 +-
 .../prometheus/common/model/value.go | 16 +-
 .../prometheus/common/model/value_float.go | 14 +-
 .../prometheus/common/version/info.go | 12 +-
 .../prometheus/common/version/info_default.go | 2 +-
 .../prometheus/common/version/info_go118.go | 10 +-
 .../prometheus/procfs/Makefile.common | 2 +-
 .../prometheus/procfs/fs_statfs_notype.go | 4 +-
 .../prometheus/procfs/fs_statfs_type.go | 4 +-
 .../prometheus/procfs/mountstats.go | 83 +-
 .../prometheus/procfs/proc_fdinfo.go | 8 +-
 .../github.com/prometheus/procfs/proc_maps.go | 20 +-
 .../prometheus/procfs/proc_status.go | 21 +-
 .../github.com/segmentio/go-athena/README.md | 3 +
 vendor/github.com/sirupsen/logrus/README.md | 8 +-
 vendor/github.com/sirupsen/logrus/writer.go | 34 +-
 .../snowflakedb/gosnowflake/README.md | 9 +
 .../snowflakedb/gosnowflake/arrow_chunk.go | 16 +-
 .../snowflakedb/gosnowflake/async.go | 64 +-
 .../snowflakedb/gosnowflake/auth.go | 162 +-
 .../gosnowflake/authexternalbrowser.go | 59 +-
 .../snowflakedb/gosnowflake/authokta.go | 8 +-
 .../gosnowflake/azure_storage_client.go | 7 +-
 .../snowflakedb/gosnowflake/bind_uploader.go | 28 +-
 .../snowflakedb/gosnowflake/cacert.go | 1908 +-
 .../gosnowflake/chunk_downloader.go | 47 +-
 .../snowflakedb/gosnowflake/client.go | 6 +-
 .../gosnowflake/client_configuration.go | 148 +
 .../snowflakedb/gosnowflake/codecov.yml | 3 +
 .../snowflakedb/gosnowflake/connection.go | 153 +-
 .../gosnowflake/connection_util.go | 38 +-
 .../snowflakedb/gosnowflake/converter.go | 417 +-
 .../snowflakedb/gosnowflake/data1.txt.gz | 0
.../snowflakedb/gosnowflake/datatype.go | 48 +- .../github.com/snowflakedb/gosnowflake/doc.go | 159 +- .../snowflakedb/gosnowflake/driver.go | 10 +- .../github.com/snowflakedb/gosnowflake/dsn.go | 250 +- .../snowflakedb/gosnowflake/easy_logging.go | 154 + .../snowflakedb/gosnowflake/encrypt_util.go | 7 +- .../snowflakedb/gosnowflake/errors.go | 57 +- .../gosnowflake/file_transfer_agent.go | 90 +- .../snowflakedb/gosnowflake/file_util.go | 7 +- .../gosnowflake/gcs_storage_client.go | 38 +- .../snowflakedb/gosnowflake/heartbeat.go | 2 +- .../snowflakedb/gosnowflake/htap.go | 94 + .../gosnowflake/local_storage_client.go | 4 +- .../snowflakedb/gosnowflake/location.go | 2 +- .../github.com/snowflakedb/gosnowflake/log.go | 199 +- .../snowflakedb/gosnowflake/monitoring.go | 9 +- .../snowflakedb/gosnowflake/multistatement.go | 2 +- .../snowflakedb/gosnowflake/ocsp.go | 271 +- .../snowflakedb/gosnowflake/query.go | 21 + .../snowflakedb/gosnowflake/restful.go | 64 +- .../snowflakedb/gosnowflake/result.go | 18 + .../snowflakedb/gosnowflake/retry.go | 297 +- .../snowflakedb/gosnowflake/rows.go | 20 +- .../gosnowflake/s3_storage_client.go | 31 +- .../snowflakedb/gosnowflake/statement.go | 77 +- .../snowflakedb/gosnowflake/storage_client.go | 7 +- .../snowflakedb/gosnowflake/telemetry.go | 2 +- .../snowflakedb/gosnowflake/util.go | 79 +- .../snowflakedb/gosnowflake/version.go | 2 +- .../vertica/vertica-sql-go/CONTRIBUTING.md | 13 +- .../vertica/vertica-sql-go/README.md | 3 +- .../vertica/vertica-sql-go/connection.go | 19 +- .../vertica/vertica-sql-go/driver.go | 4 +- .../vertica-sql-go/msgs/festartupmsg.go | 18 +- .../net/http/otelhttp/LICENSE} | 1 - .../net/http/otelhttp/client.go | 61 + .../net/http/otelhttp/common.go | 46 + .../net/http/otelhttp/config.go | 207 + .../instrumentation/net/http/otelhttp/doc.go | 18 + .../net/http/otelhttp/handler.go | 283 + .../http/otelhttp/internal/semconvutil/gen.go | 21 + .../otelhttp/internal/semconvutil/httpconv.go | 602 + .../otelhttp/internal/semconvutil/netconv.go | 378 + .../net/http/otelhttp/labeler.go | 65 + .../net/http/otelhttp/transport.go | 193 + .../net/http/otelhttp/version.go | 28 + .../instrumentation/net/http/otelhttp/wrap.go | 99 + .../go.opentelemetry.io/otel/.codespellignore | 5 + vendor/go.opentelemetry.io/otel/.codespellrc | 10 + .../go.opentelemetry.io/otel/.gitattributes | 3 + vendor/go.opentelemetry.io/otel/.gitignore | 22 + vendor/go.opentelemetry.io/otel/.gitmodules | 3 + vendor/go.opentelemetry.io/otel/.golangci.yml | 296 + vendor/go.opentelemetry.io/otel/.lycheeignore | 6 + .../otel/.markdownlint.yaml | 29 + vendor/go.opentelemetry.io/otel/CHANGELOG.md | 2859 + vendor/go.opentelemetry.io/otel/CODEOWNERS | 17 + .../go.opentelemetry.io/otel/CONTRIBUTING.md | 645 + vendor/go.opentelemetry.io/otel/Makefile | 318 + vendor/go.opentelemetry.io/otel/README.md | 108 + vendor/go.opentelemetry.io/otel/RELEASING.md | 139 + vendor/go.opentelemetry.io/otel/VERSIONING.md | 224 + .../otel/attribute/filter.go | 60 + .../go.opentelemetry.io/otel/attribute/set.go | 114 +- .../otel/attribute/value.go | 16 +- .../otel/baggage/baggage.go | 744 + .../otel/baggage/context.go | 39 + .../go.opentelemetry.io/otel/baggage/doc.go | 20 + vendor/go.opentelemetry.io/otel/codes/doc.go | 2 +- vendor/go.opentelemetry.io/otel/doc.go | 34 + .../go.opentelemetry.io/otel/error_handler.go | 38 + .../go.opentelemetry.io/otel/get_main_pkgs.sh | 41 + vendor/go.opentelemetry.io/otel/handler.go | 48 + .../otel/internal/attribute/attribute.go | 82 +- 
.../otel/internal/baggage/baggage.go | 43 + .../otel/internal/baggage/context.go | 92 + .../go.opentelemetry.io/otel/internal/gen.go | 29 + .../otel/internal/global/handler.go | 102 + .../otel/internal/global/instruments.go | 371 + .../otel/internal/global/internal_logging.go | 69 + .../otel/internal/global/meter.go | 354 + .../otel/internal/global/propagator.go | 82 + .../otel/internal/global/state.go | 156 + .../otel/internal/global/trace.go | 199 + .../otel/internal_logging.go | 26 + vendor/go.opentelemetry.io/otel/metric.go | 53 + .../otel/metric}/LICENSE | 105 - .../otel/metric/asyncfloat64.go | 271 + .../otel/metric/asyncint64.go | 269 + .../go.opentelemetry.io/otel/metric/config.go | 92 + vendor/go.opentelemetry.io/otel/metric/doc.go | 170 + .../otel/metric/embedded/embedded.go | 234 + .../otel/metric/instrument.go | 357 + .../go.opentelemetry.io/otel/metric/meter.go | 212 + .../otel/metric/syncfloat64.go | 185 + .../otel/metric/syncint64.go | 185 + .../go.opentelemetry.io/otel/propagation.go | 31 + .../otel/propagation/baggage.go | 58 + .../otel/propagation/doc.go | 24 + .../otel/propagation/propagation.go | 153 + .../otel/propagation/trace_context.go | 167 + .../go.opentelemetry.io/otel/requirements.txt | 1 + .../otel/semconv/v1.20.0/attribute_group.go | 1209 + .../otel/semconv/v1.20.0/doc.go | 20 + .../otel/semconv/v1.20.0/event.go | 199 + .../otel/semconv/v1.20.0/exception.go | 20 + .../otel/semconv/v1.20.0/http.go | 21 + .../otel/semconv/v1.20.0/resource.go | 2071 + .../otel/semconv/v1.20.0/schema.go | 20 + .../otel/semconv/v1.20.0/trace.go | 2610 + vendor/go.opentelemetry.io/otel/trace.go | 47 + .../go.opentelemetry.io/otel/trace/config.go | 18 + vendor/go.opentelemetry.io/otel/trace/doc.go | 64 + .../otel/trace/embedded/embedded.go | 56 + vendor/go.opentelemetry.io/otel/trace/noop.go | 14 +- .../go.opentelemetry.io/otel/trace/trace.go | 40 +- .../otel/trace/tracestate.go | 199 +- .../otel/verify_examples.sh | 85 + .../otel/version.go} | 10 +- vendor/go.opentelemetry.io/otel/versions.yaml | 52 + .../x/crypto/chacha20/chacha_arm64.go | 1 - .../x/crypto/chacha20/chacha_arm64.s | 1 - .../x/crypto/chacha20/chacha_noasm.go | 1 - .../x/crypto/chacha20/chacha_ppc64le.go | 1 - .../x/crypto/chacha20/chacha_ppc64le.s | 1 - .../x/crypto/chacha20/chacha_s390x.go | 1 - .../x/crypto/chacha20/chacha_s390x.s | 1 - .../chacha20poly1305_amd64.go | 1 - .../chacha20poly1305/chacha20poly1305_amd64.s | 25 +- .../chacha20poly1305_noasm.go | 1 - vendor/golang.org/x/crypto/cryptobyte/asn1.go | 13 +- vendor/golang.org/x/crypto/hkdf/hkdf.go | 4 +- .../x/crypto/internal/alias/alias.go | 1 - .../x/crypto/internal/alias/alias_purego.go | 1 - .../x/crypto/internal/poly1305/bits_compat.go | 40 - .../x/crypto/internal/poly1305/bits_go1.13.go | 22 - .../x/crypto/internal/poly1305/mac_noasm.go | 1 - .../x/crypto/internal/poly1305/sum_amd64.go | 1 - .../x/crypto/internal/poly1305/sum_amd64.s | 1 - .../x/crypto/internal/poly1305/sum_generic.go | 43 +- .../x/crypto/internal/poly1305/sum_ppc64le.go | 1 - .../x/crypto/internal/poly1305/sum_ppc64le.s | 1 - .../x/crypto/internal/poly1305/sum_s390x.go | 1 - .../x/crypto/internal/poly1305/sum_s390x.s | 1 - vendor/golang.org/x/exp/slices/cmp.go | 44 + vendor/golang.org/x/exp/slices/slices.go | 417 +- vendor/golang.org/x/exp/slices/sort.go | 125 +- .../slices/{zsortfunc.go => zsortanyfunc.go} | 154 +- .../golang.org/x/exp/slices/zsortordered.go | 34 +- .../x/mod/internal/lazyregexp/lazyre.go | 2 +- vendor/golang.org/x/mod/module/module.go | 30 +- 
vendor/golang.org/x/mod/module/pseudo.go | 2 +- vendor/golang.org/x/mod/semver/semver.go | 6 +- vendor/golang.org/x/net/context/context.go | 56 - vendor/golang.org/x/net/context/go17.go | 73 - vendor/golang.org/x/net/context/go19.go | 21 - vendor/golang.org/x/net/context/pre_go17.go | 301 - vendor/golang.org/x/net/context/pre_go19.go | 110 - vendor/golang.org/x/net/http2/databuffer.go | 59 +- vendor/golang.org/x/net/http2/go111.go | 30 - vendor/golang.org/x/net/http2/go115.go | 27 - vendor/golang.org/x/net/http2/go118.go | 17 - vendor/golang.org/x/net/http2/not_go111.go | 21 - vendor/golang.org/x/net/http2/not_go115.go | 31 - vendor/golang.org/x/net/http2/not_go118.go | 17 - vendor/golang.org/x/net/http2/server.go | 24 +- vendor/golang.org/x/net/http2/transport.go | 33 +- vendor/golang.org/x/net/idna/go118.go | 1 - vendor/golang.org/x/net/idna/idna10.0.0.go | 1 - vendor/golang.org/x/net/idna/idna9.0.0.go | 1 - vendor/golang.org/x/net/idna/pre_go118.go | 1 - vendor/golang.org/x/net/idna/tables10.0.0.go | 1 - vendor/golang.org/x/net/idna/tables11.0.0.go | 1 - vendor/golang.org/x/net/idna/tables12.0.0.go | 1 - vendor/golang.org/x/net/idna/tables13.0.0.go | 1 - vendor/golang.org/x/net/idna/tables15.0.0.go | 1 - vendor/golang.org/x/net/idna/tables9.0.0.go | 1 - vendor/golang.org/x/net/idna/trie12.0.0.go | 1 - vendor/golang.org/x/net/idna/trie13.0.0.go | 1 - vendor/golang.org/x/oauth2/deviceauth.go | 198 + .../x/oauth2/google/appengine_gen1.go | 1 - .../x/oauth2/google/appengine_gen2_flex.go | 1 - vendor/golang.org/x/oauth2/google/default.go | 103 +- vendor/golang.org/x/oauth2/google/doc.go | 2 + vendor/golang.org/x/oauth2/google/google.go | 46 +- .../google/internal/externalaccount/aws.go | 47 +- .../externalaccount/basecredentials.go | 45 +- .../externalaccount/executablecredsource.go | 4 + .../externalaccount/filecredsource.go | 4 + .../google/internal/externalaccount/header.go | 64 + .../internal/externalaccount/urlcredsource.go | 4 + .../externalaccountauthorizeduser.go | 114 + .../clientauth.go | 8 +- .../sts_exchange.go | 42 +- .../x/oauth2/internal/client_appengine.go | 1 - vendor/golang.org/x/oauth2/internal/token.go | 70 +- vendor/golang.org/x/oauth2/oauth2.go | 33 +- vendor/golang.org/x/oauth2/pkce.go | 68 + vendor/golang.org/x/oauth2/token.go | 2 +- vendor/golang.org/x/sync/errgroup/errgroup.go | 3 + vendor/golang.org/x/sync/errgroup/go120.go | 1 - .../golang.org/x/sync/errgroup/pre_go120.go | 1 - vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s | 1 - vendor/golang.org/x/sys/cpu/cpu_aix.go | 1 - vendor/golang.org/x/sys/cpu/cpu_arm64.s | 1 - vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go | 1 - vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go | 1 - vendor/golang.org/x/sys/cpu/cpu_gc_x86.go | 2 - .../golang.org/x/sys/cpu/cpu_gccgo_arm64.go | 1 - .../golang.org/x/sys/cpu/cpu_gccgo_s390x.go | 1 - vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c | 2 - vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go | 2 - vendor/golang.org/x/sys/cpu/cpu_linux.go | 1 - .../golang.org/x/sys/cpu/cpu_linux_mips64x.go | 2 - .../golang.org/x/sys/cpu/cpu_linux_noinit.go | 1 - .../golang.org/x/sys/cpu/cpu_linux_ppc64x.go | 2 - vendor/golang.org/x/sys/cpu/cpu_loong64.go | 1 - vendor/golang.org/x/sys/cpu/cpu_mips64x.go | 1 - vendor/golang.org/x/sys/cpu/cpu_mipsx.go | 1 - vendor/golang.org/x/sys/cpu/cpu_other_arm.go | 1 - .../golang.org/x/sys/cpu/cpu_other_arm64.go | 1 - .../golang.org/x/sys/cpu/cpu_other_mips64x.go | 2 - .../golang.org/x/sys/cpu/cpu_other_ppc64x.go | 3 - .../golang.org/x/sys/cpu/cpu_other_riscv64.go | 1 - 
vendor/golang.org/x/sys/cpu/cpu_ppc64x.go | 1 - vendor/golang.org/x/sys/cpu/cpu_riscv64.go | 1 - vendor/golang.org/x/sys/cpu/cpu_s390x.s | 1 - vendor/golang.org/x/sys/cpu/cpu_wasm.go | 1 - vendor/golang.org/x/sys/cpu/cpu_x86.go | 1 - vendor/golang.org/x/sys/cpu/cpu_x86.s | 2 - vendor/golang.org/x/sys/cpu/endian_big.go | 1 - vendor/golang.org/x/sys/cpu/endian_little.go | 1 - .../x/sys/cpu/proc_cpuinfo_linux.go | 1 - .../x/sys/cpu/runtime_auxv_go121.go | 1 - .../golang.org/x/sys/cpu/syscall_aix_gccgo.go | 1 - .../x/sys/cpu/syscall_aix_ppc64_gc.go | 1 - vendor/golang.org/x/sys/execabs/execabs.go | 102 - .../golang.org/x/sys/execabs/execabs_go118.go | 18 - .../golang.org/x/sys/execabs/execabs_go119.go | 21 - .../golang.org/x/sys/plan9/pwd_go15_plan9.go | 1 - vendor/golang.org/x/sys/plan9/pwd_plan9.go | 1 - vendor/golang.org/x/sys/plan9/race.go | 1 - vendor/golang.org/x/sys/plan9/race0.go | 1 - vendor/golang.org/x/sys/plan9/str.go | 1 - vendor/golang.org/x/sys/plan9/syscall.go | 1 - .../x/sys/plan9/zsyscall_plan9_386.go | 1 - .../x/sys/plan9/zsyscall_plan9_amd64.go | 1 - .../x/sys/plan9/zsyscall_plan9_arm.go | 1 - vendor/golang.org/x/sys/unix/aliases.go | 2 - vendor/golang.org/x/sys/unix/asm_aix_ppc64.s | 1 - vendor/golang.org/x/sys/unix/asm_bsd_386.s | 2 - vendor/golang.org/x/sys/unix/asm_bsd_amd64.s | 2 - vendor/golang.org/x/sys/unix/asm_bsd_arm.s | 2 - vendor/golang.org/x/sys/unix/asm_bsd_arm64.s | 2 - vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s | 2 - .../golang.org/x/sys/unix/asm_bsd_riscv64.s | 2 - vendor/golang.org/x/sys/unix/asm_linux_386.s | 1 - .../golang.org/x/sys/unix/asm_linux_amd64.s | 1 - vendor/golang.org/x/sys/unix/asm_linux_arm.s | 1 - .../golang.org/x/sys/unix/asm_linux_arm64.s | 3 - .../golang.org/x/sys/unix/asm_linux_loong64.s | 3 - .../golang.org/x/sys/unix/asm_linux_mips64x.s | 3 - .../golang.org/x/sys/unix/asm_linux_mipsx.s | 3 - .../golang.org/x/sys/unix/asm_linux_ppc64x.s | 3 - .../golang.org/x/sys/unix/asm_linux_riscv64.s | 2 - .../golang.org/x/sys/unix/asm_linux_s390x.s | 3 - .../x/sys/unix/asm_openbsd_mips64.s | 1 - .../golang.org/x/sys/unix/asm_solaris_amd64.s | 1 - vendor/golang.org/x/sys/unix/asm_zos_s390x.s | 3 - vendor/golang.org/x/sys/unix/cap_freebsd.go | 1 - vendor/golang.org/x/sys/unix/constants.go | 1 - vendor/golang.org/x/sys/unix/dev_aix_ppc.go | 1 - vendor/golang.org/x/sys/unix/dev_aix_ppc64.go | 1 - vendor/golang.org/x/sys/unix/dev_zos.go | 1 - vendor/golang.org/x/sys/unix/dirent.go | 1 - vendor/golang.org/x/sys/unix/endian_big.go | 1 - vendor/golang.org/x/sys/unix/endian_little.go | 1 - vendor/golang.org/x/sys/unix/env_unix.go | 1 - vendor/golang.org/x/sys/unix/epoll_zos.go | 1 - vendor/golang.org/x/sys/unix/fcntl.go | 3 +- .../x/sys/unix/fcntl_linux_32bit.go | 1 - vendor/golang.org/x/sys/unix/fdset.go | 1 - vendor/golang.org/x/sys/unix/fstatfs_zos.go | 1 - vendor/golang.org/x/sys/unix/gccgo.go | 1 - vendor/golang.org/x/sys/unix/gccgo_c.c | 1 - .../x/sys/unix/gccgo_linux_amd64.go | 1 - vendor/golang.org/x/sys/unix/ifreq_linux.go | 1 - vendor/golang.org/x/sys/unix/ioctl_linux.go | 5 + vendor/golang.org/x/sys/unix/ioctl_signed.go | 1 - .../golang.org/x/sys/unix/ioctl_unsigned.go | 1 - vendor/golang.org/x/sys/unix/ioctl_zos.go | 1 - vendor/golang.org/x/sys/unix/mkerrors.sh | 41 +- vendor/golang.org/x/sys/unix/mmap_nomremap.go | 1 - vendor/golang.org/x/sys/unix/mremap.go | 1 - vendor/golang.org/x/sys/unix/pagesize_unix.go | 1 - .../golang.org/x/sys/unix/pledge_openbsd.go | 92 +- vendor/golang.org/x/sys/unix/ptrace_darwin.go | 1 - 
vendor/golang.org/x/sys/unix/ptrace_ios.go | 1 - vendor/golang.org/x/sys/unix/race.go | 1 - vendor/golang.org/x/sys/unix/race0.go | 1 - .../x/sys/unix/readdirent_getdents.go | 1 - .../x/sys/unix/readdirent_getdirentries.go | 1 - vendor/golang.org/x/sys/unix/sockcmsg_unix.go | 1 - .../x/sys/unix/sockcmsg_unix_other.go | 1 - vendor/golang.org/x/sys/unix/syscall.go | 1 - vendor/golang.org/x/sys/unix/syscall_aix.go | 4 +- .../golang.org/x/sys/unix/syscall_aix_ppc.go | 1 - .../x/sys/unix/syscall_aix_ppc64.go | 1 - vendor/golang.org/x/sys/unix/syscall_bsd.go | 3 +- .../x/sys/unix/syscall_darwin_amd64.go | 1 - .../x/sys/unix/syscall_darwin_arm64.go | 1 - .../x/sys/unix/syscall_darwin_libSystem.go | 1 - .../x/sys/unix/syscall_dragonfly_amd64.go | 1 - .../x/sys/unix/syscall_freebsd_386.go | 1 - .../x/sys/unix/syscall_freebsd_amd64.go | 1 - .../x/sys/unix/syscall_freebsd_arm.go | 1 - .../x/sys/unix/syscall_freebsd_arm64.go | 1 - .../x/sys/unix/syscall_freebsd_riscv64.go | 1 - vendor/golang.org/x/sys/unix/syscall_hurd.go | 1 - .../golang.org/x/sys/unix/syscall_hurd_386.go | 1 - .../golang.org/x/sys/unix/syscall_illumos.go | 1 - vendor/golang.org/x/sys/unix/syscall_linux.go | 33 +- .../x/sys/unix/syscall_linux_386.go | 1 - .../x/sys/unix/syscall_linux_alarm.go | 2 - .../x/sys/unix/syscall_linux_amd64.go | 1 - .../x/sys/unix/syscall_linux_amd64_gc.go | 1 - .../x/sys/unix/syscall_linux_arm.go | 1 - .../x/sys/unix/syscall_linux_arm64.go | 1 - .../golang.org/x/sys/unix/syscall_linux_gc.go | 1 - .../x/sys/unix/syscall_linux_gc_386.go | 1 - .../x/sys/unix/syscall_linux_gc_arm.go | 1 - .../x/sys/unix/syscall_linux_gccgo_386.go | 1 - .../x/sys/unix/syscall_linux_gccgo_arm.go | 1 - .../x/sys/unix/syscall_linux_loong64.go | 1 - .../x/sys/unix/syscall_linux_mips64x.go | 2 - .../x/sys/unix/syscall_linux_mipsx.go | 2 - .../x/sys/unix/syscall_linux_ppc.go | 1 - .../x/sys/unix/syscall_linux_ppc64x.go | 2 - .../x/sys/unix/syscall_linux_riscv64.go | 1 - .../x/sys/unix/syscall_linux_s390x.go | 1 - .../x/sys/unix/syscall_linux_sparc64.go | 1 - .../x/sys/unix/syscall_netbsd_386.go | 1 - .../x/sys/unix/syscall_netbsd_amd64.go | 1 - .../x/sys/unix/syscall_netbsd_arm.go | 1 - .../x/sys/unix/syscall_netbsd_arm64.go | 1 - .../golang.org/x/sys/unix/syscall_openbsd.go | 28 +- .../x/sys/unix/syscall_openbsd_386.go | 1 - .../x/sys/unix/syscall_openbsd_amd64.go | 1 - .../x/sys/unix/syscall_openbsd_arm.go | 1 - .../x/sys/unix/syscall_openbsd_arm64.go | 1 - .../x/sys/unix/syscall_openbsd_libc.go | 1 - .../x/sys/unix/syscall_openbsd_ppc64.go | 1 - .../x/sys/unix/syscall_openbsd_riscv64.go | 1 - .../golang.org/x/sys/unix/syscall_solaris.go | 5 +- .../x/sys/unix/syscall_solaris_amd64.go | 1 - vendor/golang.org/x/sys/unix/syscall_unix.go | 1 - .../golang.org/x/sys/unix/syscall_unix_gc.go | 2 - .../x/sys/unix/syscall_unix_gc_ppc64x.go | 3 - .../x/sys/unix/syscall_zos_s390x.go | 3 +- vendor/golang.org/x/sys/unix/sysvshm_linux.go | 1 - vendor/golang.org/x/sys/unix/sysvshm_unix.go | 1 - .../x/sys/unix/sysvshm_unix_other.go | 1 - vendor/golang.org/x/sys/unix/timestruct.go | 1 - .../golang.org/x/sys/unix/unveil_openbsd.go | 41 +- vendor/golang.org/x/sys/unix/xattr_bsd.go | 1 - .../golang.org/x/sys/unix/zerrors_aix_ppc.go | 1 - .../x/sys/unix/zerrors_aix_ppc64.go | 1 - .../x/sys/unix/zerrors_darwin_amd64.go | 1 - .../x/sys/unix/zerrors_darwin_arm64.go | 1 - .../x/sys/unix/zerrors_dragonfly_amd64.go | 1 - .../x/sys/unix/zerrors_freebsd_386.go | 1 - .../x/sys/unix/zerrors_freebsd_amd64.go | 1 - .../x/sys/unix/zerrors_freebsd_arm.go | 1 - 
.../x/sys/unix/zerrors_freebsd_arm64.go | 1 - .../x/sys/unix/zerrors_freebsd_riscv64.go | 1 - vendor/golang.org/x/sys/unix/zerrors_linux.go | 68 +- .../x/sys/unix/zerrors_linux_386.go | 1 - .../x/sys/unix/zerrors_linux_amd64.go | 1 - .../x/sys/unix/zerrors_linux_arm.go | 1 - .../x/sys/unix/zerrors_linux_arm64.go | 1 - .../x/sys/unix/zerrors_linux_loong64.go | 2 +- .../x/sys/unix/zerrors_linux_mips.go | 1 - .../x/sys/unix/zerrors_linux_mips64.go | 1 - .../x/sys/unix/zerrors_linux_mips64le.go | 1 - .../x/sys/unix/zerrors_linux_mipsle.go | 1 - .../x/sys/unix/zerrors_linux_ppc.go | 1 - .../x/sys/unix/zerrors_linux_ppc64.go | 1 - .../x/sys/unix/zerrors_linux_ppc64le.go | 1 - .../x/sys/unix/zerrors_linux_riscv64.go | 4 +- .../x/sys/unix/zerrors_linux_s390x.go | 1 - .../x/sys/unix/zerrors_linux_sparc64.go | 1 - .../x/sys/unix/zerrors_netbsd_386.go | 1 - .../x/sys/unix/zerrors_netbsd_amd64.go | 1 - .../x/sys/unix/zerrors_netbsd_arm.go | 1 - .../x/sys/unix/zerrors_netbsd_arm64.go | 1 - .../x/sys/unix/zerrors_openbsd_386.go | 1 - .../x/sys/unix/zerrors_openbsd_amd64.go | 1 - .../x/sys/unix/zerrors_openbsd_arm.go | 1 - .../x/sys/unix/zerrors_openbsd_arm64.go | 1 - .../x/sys/unix/zerrors_openbsd_mips64.go | 1 - .../x/sys/unix/zerrors_openbsd_ppc64.go | 1 - .../x/sys/unix/zerrors_openbsd_riscv64.go | 1 - .../x/sys/unix/zerrors_solaris_amd64.go | 1 - .../x/sys/unix/zerrors_zos_s390x.go | 1 - .../x/sys/unix/zptrace_armnn_linux.go | 2 - .../x/sys/unix/zptrace_mipsnn_linux.go | 2 - .../x/sys/unix/zptrace_mipsnnle_linux.go | 2 - .../x/sys/unix/zptrace_x86_linux.go | 2 - .../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 1 - .../x/sys/unix/zsyscall_aix_ppc64.go | 1 - .../x/sys/unix/zsyscall_aix_ppc64_gc.go | 1 - .../x/sys/unix/zsyscall_aix_ppc64_gccgo.go | 1 - .../x/sys/unix/zsyscall_darwin_amd64.go | 1 - .../x/sys/unix/zsyscall_darwin_arm64.go | 1 - .../x/sys/unix/zsyscall_dragonfly_amd64.go | 1 - .../x/sys/unix/zsyscall_freebsd_386.go | 1 - .../x/sys/unix/zsyscall_freebsd_amd64.go | 1 - .../x/sys/unix/zsyscall_freebsd_arm.go | 1 - .../x/sys/unix/zsyscall_freebsd_arm64.go | 1 - .../x/sys/unix/zsyscall_freebsd_riscv64.go | 1 - .../x/sys/unix/zsyscall_illumos_amd64.go | 1 - .../golang.org/x/sys/unix/zsyscall_linux.go | 26 +- .../x/sys/unix/zsyscall_linux_386.go | 1 - .../x/sys/unix/zsyscall_linux_amd64.go | 1 - .../x/sys/unix/zsyscall_linux_arm.go | 1 - .../x/sys/unix/zsyscall_linux_arm64.go | 1 - .../x/sys/unix/zsyscall_linux_loong64.go | 1 - .../x/sys/unix/zsyscall_linux_mips.go | 1 - .../x/sys/unix/zsyscall_linux_mips64.go | 1 - .../x/sys/unix/zsyscall_linux_mips64le.go | 1 - .../x/sys/unix/zsyscall_linux_mipsle.go | 1 - .../x/sys/unix/zsyscall_linux_ppc.go | 1 - .../x/sys/unix/zsyscall_linux_ppc64.go | 1 - .../x/sys/unix/zsyscall_linux_ppc64le.go | 1 - .../x/sys/unix/zsyscall_linux_riscv64.go | 1 - .../x/sys/unix/zsyscall_linux_s390x.go | 1 - .../x/sys/unix/zsyscall_linux_sparc64.go | 1 - .../x/sys/unix/zsyscall_netbsd_386.go | 1 - .../x/sys/unix/zsyscall_netbsd_amd64.go | 1 - .../x/sys/unix/zsyscall_netbsd_arm.go | 1 - .../x/sys/unix/zsyscall_netbsd_arm64.go | 1 - .../x/sys/unix/zsyscall_openbsd_386.go | 70 +- .../x/sys/unix/zsyscall_openbsd_386.s | 20 + .../x/sys/unix/zsyscall_openbsd_amd64.go | 70 +- .../x/sys/unix/zsyscall_openbsd_amd64.s | 20 + .../x/sys/unix/zsyscall_openbsd_arm.go | 70 +- .../x/sys/unix/zsyscall_openbsd_arm.s | 20 + .../x/sys/unix/zsyscall_openbsd_arm64.go | 70 +- .../x/sys/unix/zsyscall_openbsd_arm64.s | 20 + .../x/sys/unix/zsyscall_openbsd_mips64.go | 70 +- 
.../x/sys/unix/zsyscall_openbsd_mips64.s | 20 + .../x/sys/unix/zsyscall_openbsd_ppc64.go | 70 +- .../x/sys/unix/zsyscall_openbsd_ppc64.s | 24 + .../x/sys/unix/zsyscall_openbsd_riscv64.go | 70 +- .../x/sys/unix/zsyscall_openbsd_riscv64.s | 20 + .../x/sys/unix/zsyscall_solaris_amd64.go | 1 - .../x/sys/unix/zsyscall_zos_s390x.go | 1 - .../x/sys/unix/zsysctl_openbsd_386.go | 1 - .../x/sys/unix/zsysctl_openbsd_amd64.go | 1 - .../x/sys/unix/zsysctl_openbsd_arm.go | 1 - .../x/sys/unix/zsysctl_openbsd_arm64.go | 1 - .../x/sys/unix/zsysctl_openbsd_mips64.go | 1 - .../x/sys/unix/zsysctl_openbsd_ppc64.go | 1 - .../x/sys/unix/zsysctl_openbsd_riscv64.go | 1 - .../x/sys/unix/zsysnum_darwin_amd64.go | 1 - .../x/sys/unix/zsysnum_darwin_arm64.go | 1 - .../x/sys/unix/zsysnum_dragonfly_amd64.go | 1 - .../x/sys/unix/zsysnum_freebsd_386.go | 1 - .../x/sys/unix/zsysnum_freebsd_amd64.go | 1 - .../x/sys/unix/zsysnum_freebsd_arm.go | 1 - .../x/sys/unix/zsysnum_freebsd_arm64.go | 1 - .../x/sys/unix/zsysnum_freebsd_riscv64.go | 1 - .../x/sys/unix/zsysnum_linux_386.go | 2 +- .../x/sys/unix/zsysnum_linux_amd64.go | 3 +- .../x/sys/unix/zsysnum_linux_arm.go | 2 +- .../x/sys/unix/zsysnum_linux_arm64.go | 2 +- .../x/sys/unix/zsysnum_linux_loong64.go | 2 +- .../x/sys/unix/zsysnum_linux_mips.go | 2 +- .../x/sys/unix/zsysnum_linux_mips64.go | 2 +- .../x/sys/unix/zsysnum_linux_mips64le.go | 2 +- .../x/sys/unix/zsysnum_linux_mipsle.go | 2 +- .../x/sys/unix/zsysnum_linux_ppc.go | 2 +- .../x/sys/unix/zsysnum_linux_ppc64.go | 2 +- .../x/sys/unix/zsysnum_linux_ppc64le.go | 2 +- .../x/sys/unix/zsysnum_linux_riscv64.go | 2 +- .../x/sys/unix/zsysnum_linux_s390x.go | 2 +- .../x/sys/unix/zsysnum_linux_sparc64.go | 2 +- .../x/sys/unix/zsysnum_netbsd_386.go | 1 - .../x/sys/unix/zsysnum_netbsd_amd64.go | 1 - .../x/sys/unix/zsysnum_netbsd_arm.go | 1 - .../x/sys/unix/zsysnum_netbsd_arm64.go | 1 - .../x/sys/unix/zsysnum_openbsd_386.go | 1 - .../x/sys/unix/zsysnum_openbsd_amd64.go | 1 - .../x/sys/unix/zsysnum_openbsd_arm.go | 1 - .../x/sys/unix/zsysnum_openbsd_arm64.go | 1 - .../x/sys/unix/zsysnum_openbsd_mips64.go | 1 - .../x/sys/unix/zsysnum_openbsd_ppc64.go | 1 - .../x/sys/unix/zsysnum_openbsd_riscv64.go | 1 - .../x/sys/unix/zsysnum_zos_s390x.go | 1 - .../golang.org/x/sys/unix/ztypes_aix_ppc.go | 1 - .../golang.org/x/sys/unix/ztypes_aix_ppc64.go | 1 - .../x/sys/unix/ztypes_darwin_amd64.go | 1 - .../x/sys/unix/ztypes_darwin_arm64.go | 1 - .../x/sys/unix/ztypes_dragonfly_amd64.go | 1 - .../x/sys/unix/ztypes_freebsd_386.go | 1 - .../x/sys/unix/ztypes_freebsd_amd64.go | 1 - .../x/sys/unix/ztypes_freebsd_arm.go | 1 - .../x/sys/unix/ztypes_freebsd_arm64.go | 1 - .../x/sys/unix/ztypes_freebsd_riscv64.go | 1 - vendor/golang.org/x/sys/unix/ztypes_linux.go | 45 +- .../golang.org/x/sys/unix/ztypes_linux_386.go | 1 - .../x/sys/unix/ztypes_linux_amd64.go | 1 - .../golang.org/x/sys/unix/ztypes_linux_arm.go | 1 - .../x/sys/unix/ztypes_linux_arm64.go | 1 - .../x/sys/unix/ztypes_linux_loong64.go | 1 - .../x/sys/unix/ztypes_linux_mips.go | 1 - .../x/sys/unix/ztypes_linux_mips64.go | 1 - .../x/sys/unix/ztypes_linux_mips64le.go | 1 - .../x/sys/unix/ztypes_linux_mipsle.go | 1 - .../golang.org/x/sys/unix/ztypes_linux_ppc.go | 1 - .../x/sys/unix/ztypes_linux_ppc64.go | 1 - .../x/sys/unix/ztypes_linux_ppc64le.go | 1 - .../x/sys/unix/ztypes_linux_riscv64.go | 1 - .../x/sys/unix/ztypes_linux_s390x.go | 1 - .../x/sys/unix/ztypes_linux_sparc64.go | 1 - .../x/sys/unix/ztypes_netbsd_386.go | 1 - .../x/sys/unix/ztypes_netbsd_amd64.go | 1 - 
.../x/sys/unix/ztypes_netbsd_arm.go | 1 - .../x/sys/unix/ztypes_netbsd_arm64.go | 1 - .../x/sys/unix/ztypes_openbsd_386.go | 1 - .../x/sys/unix/ztypes_openbsd_amd64.go | 1 - .../x/sys/unix/ztypes_openbsd_arm.go | 1 - .../x/sys/unix/ztypes_openbsd_arm64.go | 1 - .../x/sys/unix/ztypes_openbsd_mips64.go | 1 - .../x/sys/unix/ztypes_openbsd_ppc64.go | 1 - .../x/sys/unix/ztypes_openbsd_riscv64.go | 1 - .../x/sys/unix/ztypes_solaris_amd64.go | 1 - .../golang.org/x/sys/unix/ztypes_zos_s390x.go | 1 - vendor/golang.org/x/sys/windows/aliases.go | 1 - vendor/golang.org/x/sys/windows/empty.s | 1 - vendor/golang.org/x/sys/windows/eventlog.go | 1 - vendor/golang.org/x/sys/windows/mksyscall.go | 1 - vendor/golang.org/x/sys/windows/race.go | 1 - vendor/golang.org/x/sys/windows/race0.go | 1 - .../golang.org/x/sys/windows/registry/key.go | 1 - .../x/sys/windows/registry/mksyscall.go | 1 - .../x/sys/windows/registry/syscall.go | 1 - .../x/sys/windows/registry/value.go | 1 - vendor/golang.org/x/sys/windows/service.go | 1 - vendor/golang.org/x/sys/windows/str.go | 1 - vendor/golang.org/x/sys/windows/syscall.go | 1 - .../x/sys/windows/syscall_windows.go | 7 +- .../golang.org/x/sys/windows/types_windows.go | 28 +- .../x/sys/windows/zsyscall_windows.go | 37 + vendor/golang.org/x/term/term_unix.go | 1 - vendor/golang.org/x/term/term_unix_bsd.go | 1 - vendor/golang.org/x/term/term_unix_other.go | 1 - vendor/golang.org/x/term/term_unsupported.go | 1 - vendor/golang.org/x/text/cases/icu.go | 1 - .../golang.org/x/text/cases/tables10.0.0.go | 1 - .../golang.org/x/text/cases/tables11.0.0.go | 1 - .../golang.org/x/text/cases/tables12.0.0.go | 1 - .../golang.org/x/text/cases/tables13.0.0.go | 1 - .../golang.org/x/text/cases/tables15.0.0.go | 1 - vendor/golang.org/x/text/cases/tables9.0.0.go | 1 - .../x/text/secure/bidirule/bidirule10.0.0.go | 1 - .../x/text/secure/bidirule/bidirule9.0.0.go | 1 - .../x/text/secure/precis/tables10.0.0.go | 1 - .../x/text/secure/precis/tables11.0.0.go | 1 - .../x/text/secure/precis/tables12.0.0.go | 1 - .../x/text/secure/precis/tables13.0.0.go | 1 - .../x/text/secure/precis/tables15.0.0.go | 1 - .../x/text/secure/precis/tables9.0.0.go | 1 - .../x/text/unicode/bidi/tables10.0.0.go | 1 - .../x/text/unicode/bidi/tables11.0.0.go | 1 - .../x/text/unicode/bidi/tables12.0.0.go | 1 - .../x/text/unicode/bidi/tables13.0.0.go | 1 - .../x/text/unicode/bidi/tables15.0.0.go | 1 - .../x/text/unicode/bidi/tables9.0.0.go | 1 - .../x/text/unicode/norm/tables10.0.0.go | 1 - .../x/text/unicode/norm/tables11.0.0.go | 1 - .../x/text/unicode/norm/tables12.0.0.go | 1 - .../x/text/unicode/norm/tables13.0.0.go | 1 - .../x/text/unicode/norm/tables15.0.0.go | 1 - .../x/text/unicode/norm/tables9.0.0.go | 1 - .../golang.org/x/text/width/tables10.0.0.go | 1 - .../golang.org/x/text/width/tables11.0.0.go | 1 - .../golang.org/x/text/width/tables12.0.0.go | 1 - .../golang.org/x/text/width/tables13.0.0.go | 1 - .../golang.org/x/text/width/tables15.0.0.go | 1 - vendor/golang.org/x/text/width/tables9.0.0.go | 1 - vendor/golang.org/x/time/rate/rate.go | 2 + .../x/tools/cmd/goimports/goimports.go | 9 +- .../x/tools/cmd/goimports/goimports_gc.go | 2 +- .../x/tools/cmd/stringer/stringer.go | 5 +- .../x/tools/go/ast/astutil/enclosing.go | 8 +- .../x/tools/go/ast/astutil/rewrite.go | 8 +- .../x/tools/go/gcexportdata/gcexportdata.go | 11 +- .../tools/go/internal/packagesdriver/sizes.go | 24 +- vendor/golang.org/x/tools/go/packages/doc.go | 36 +- .../x/tools/go/packages/external.go | 2 +- .../golang.org/x/tools/go/packages/golist.go | 
118 +- .../x/tools/go/packages/golist_overlay.go | 492 - .../x/tools/go/packages/packages.go | 197 +- .../x/tools/go/types/objectpath/objectpath.go | 752 + .../x/tools/internal/event/keys/util.go | 21 + .../x/tools/internal/event/tag/tag.go | 59 + .../x/tools/internal/fastwalk/fastwalk.go | 196 - .../internal/fastwalk/fastwalk_darwin.go | 119 - .../fastwalk/fastwalk_dirent_fileno.go | 14 - .../internal/fastwalk/fastwalk_dirent_ino.go | 15 - .../fastwalk/fastwalk_dirent_namlen_bsd.go | 14 - .../fastwalk/fastwalk_dirent_namlen_linux.go | 29 - .../internal/fastwalk/fastwalk_portable.go | 38 - .../tools/internal/fastwalk/fastwalk_unix.go | 153 - .../x/tools/internal/gcimporter/bexport.go | 852 - .../x/tools/internal/gcimporter/bimport.go | 907 +- .../x/tools/internal/gcimporter/gcimporter.go | 30 +- .../x/tools/internal/gcimporter/iexport.go | 229 +- .../x/tools/internal/gcimporter/iimport.go | 228 +- .../tools/internal/gcimporter/ureader_yes.go | 50 +- .../x/tools/internal/gocommand/invoke.go | 173 +- .../x/tools/internal/gocommand/version.go | 18 +- .../x/tools/internal/gopathwalk/walk.go | 237 +- .../x/tools/internal/imports/fix.go | 103 +- .../x/tools/internal/imports/imports.go | 9 +- .../x/tools/internal/imports/mod.go | 17 +- .../x/tools/internal/imports/mod_cache.go | 2 +- .../x/tools/internal/imports/zstdlib.go | 230 + .../internal/packagesinternal/packages.go | 8 - .../internal/tokeninternal/tokeninternal.go | 92 + .../x/tools/internal/typeparams/common.go | 51 +- .../x/tools/internal/typeparams/coretype.go | 16 +- .../internal/typeparams/enabled_go117.go | 12 - .../internal/typeparams/enabled_go118.go | 15 - .../x/tools/internal/typeparams/normalize.go | 20 +- .../x/tools/internal/typeparams/termlist.go | 2 +- .../internal/typeparams/typeparams_go117.go | 197 - .../internal/typeparams/typeparams_go118.go | 151 - .../x/tools/internal/typeparams/typeterm.go | 9 +- .../x/tools/internal/versions/gover.go | 172 + .../x/tools/internal/versions/types.go | 19 + .../x/tools/internal/versions/types_go121.go | 20 + .../x/tools/internal/versions/types_go122.go | 24 + .../x/tools/internal/versions/versions.go | 52 + vendor/google.golang.org/api/internal/cba.go | 92 +- .../google.golang.org/api/internal/creds.go | 67 +- .../api/internal/gensupport/send.go | 21 + vendor/google.golang.org/api/internal/s2a.go | 2 +- .../api/internal/settings.go | 46 + .../google.golang.org/api/internal/version.go | 2 +- .../option/internaloption/internaloption.go | 56 + vendor/google.golang.org/api/option/option.go | 13 + .../api/sqladmin/v1beta4/sqladmin-api.json | 277 +- .../api/sqladmin/v1beta4/sqladmin-gen.go | 628 +- .../api/transport/http/dial.go | 39 +- .../api/transport/http/dial_appengine.go | 21 - .../google.golang.org/appengine/.travis.yml | 18 - .../appengine/CONTRIBUTING.md | 6 +- vendor/google.golang.org/appengine/README.md | 6 +- .../google.golang.org/appengine/appengine.go | 23 +- .../appengine/appengine_vm.go | 12 +- .../google.golang.org/appengine/identity.go | 3 +- .../appengine/internal/api.go | 347 +- .../appengine/internal/api_classic.go | 29 +- .../appengine/internal/api_common.go | 50 +- .../appengine/internal/identity.go | 7 +- .../appengine/internal/identity_classic.go | 23 +- .../appengine/internal/identity_flex.go | 1 + .../appengine/internal/identity_vm.go | 20 +- .../appengine/internal/main.go | 1 + .../appengine/internal/main_vm.go | 3 +- .../internal/socket/socket_service.pb.go | 2822 - .../internal/socket/socket_service.proto | 460 - .../appengine/internal/transaction.go | 10 +- 
.../google.golang.org/appengine/namespace.go | 3 +- .../google.golang.org/appengine/socket/doc.go | 10 - .../appengine/socket/socket_classic.go | 290 - .../appengine/socket/socket_vm.go | 64 - vendor/google.golang.org/appengine/timeout.go | 2 +- .../appengine/travis_install.sh | 18 - .../appengine/travis_test.sh | 12 - .../appengine/urlfetch/urlfetch.go | 9 +- vendor/google.golang.org/grpc/README.md | 60 +- .../grpc/attributes/attributes.go | 59 +- .../grpc/balancer/balancer.go | 62 +- .../grpc/balancer/base/balancer.go | 22 +- ...r_conn_wrappers.go => balancer_wrapper.go} | 337 +- .../grpc_binarylog_v1/binarylog.pb.go | 4 +- vendor/google.golang.org/grpc/call.go | 11 +- vendor/google.golang.org/grpc/clientconn.go | 696 +- vendor/google.golang.org/grpc/codec.go | 8 +- vendor/google.golang.org/grpc/codes/codes.go | 8 +- .../google.golang.org/grpc/credentials/tls.go | 75 +- vendor/google.golang.org/grpc/dialoptions.go | 78 +- .../grpc/encoding/encoding.go | 17 +- .../grpc/encoding/proto/proto.go | 4 +- .../grpc/grpclog/component.go | 40 +- .../google.golang.org/grpc/grpclog/grpclog.go | 30 +- .../google.golang.org/grpc/grpclog/logger.go | 30 +- .../grpc/grpclog/loggerv2.go | 56 +- vendor/google.golang.org/grpc/idle.go | 287 - vendor/google.golang.org/grpc/interceptor.go | 12 +- .../grpc/internal/backoff/backoff.go | 36 + .../balancer/gracefulswitch/gracefulswitch.go | 59 +- .../grpc/internal/balancerload/load.go | 4 +- .../grpc/internal/binarylog/method_logger.go | 4 +- .../grpc/internal/buffer/unbounded.go | 57 +- .../grpc/internal/channelz/funcs.go | 76 +- .../grpc/internal/channelz/logging.go | 12 +- .../grpc/internal/channelz/types.go | 5 + .../grpc/internal/channelz/util_linux.go | 2 +- .../grpc/internal/channelz/util_nonlinux.go | 2 +- .../grpc/internal/credentials/credentials.go | 8 +- .../grpc/internal/envconfig/envconfig.go | 11 +- .../grpc/internal/envconfig/xds.go | 39 - .../grpc/internal/experimental.go | 28 + .../grpc/internal/grpclog/grpclog.go | 40 +- .../grpc/internal/grpclog/prefixLogger.go | 8 +- .../grpc/internal/grpcrand/grpcrand.go | 7 + .../internal/grpcsync/callback_serializer.go | 75 +- .../grpc/internal/grpcsync/pubsub.go | 121 + .../grpc/internal/idle/idle.go | 278 + .../grpc/internal/internal.go | 74 +- .../grpc/internal/metadata/metadata.go | 2 +- .../grpc/internal/pretty/pretty.go | 2 +- .../grpc/internal/resolver/config_selector.go | 4 +- .../internal/resolver/dns/dns_resolver.go | 137 +- .../resolver/dns/internal/internal.go | 70 + .../grpc/internal/resolver/unix/unix.go | 4 + .../grpc/internal/status/status.go | 36 +- .../grpc/internal/tcp_keepalive_others.go | 29 + .../grpc/internal/tcp_keepalive_unix.go | 54 + .../grpc/internal/tcp_keepalive_windows.go | 54 + .../grpc/internal/transport/controlbuf.go | 16 +- .../grpc/internal/transport/handler_server.go | 76 +- .../grpc/internal/transport/http2_client.go | 78 +- .../grpc/internal/transport/http2_server.go | 141 +- .../grpc/internal/transport/http_util.go | 77 +- .../grpc/internal/transport/proxy.go | 14 +- .../grpc/internal/transport/transport.go | 39 +- .../grpc/metadata/metadata.go | 31 +- vendor/google.golang.org/grpc/peer/peer.go | 2 + .../google.golang.org/grpc/picker_wrapper.go | 53 +- vendor/google.golang.org/grpc/pickfirst.go | 76 +- vendor/google.golang.org/grpc/preloader.go | 2 +- .../grpc/resolver/dns/dns_resolver.go | 36 + vendor/google.golang.org/grpc/resolver/map.go | 123 +- .../grpc/resolver/resolver.go | 100 +- .../grpc/resolver_conn_wrapper.go | 239 - .../grpc/resolver_wrapper.go | 197 + 
vendor/google.golang.org/grpc/rpc_util.go | 52 +- vendor/google.golang.org/grpc/server.go | 538 +- .../grpc/shared_buffer_pool.go | 154 + vendor/google.golang.org/grpc/stats/stats.go | 14 +- .../google.golang.org/grpc/status/status.go | 39 +- vendor/google.golang.org/grpc/stream.go | 134 +- vendor/google.golang.org/grpc/tap/tap.go | 6 + vendor/google.golang.org/grpc/trace.go | 6 +- vendor/google.golang.org/grpc/version.go | 2 +- vendor/google.golang.org/grpc/vet.sh | 172 +- .../encoding/protodelim/protodelim.go | 160 + .../protobuf/encoding/protojson/decode.go | 38 +- .../protobuf/encoding/protojson/doc.go | 2 +- .../protobuf/encoding/protojson/encode.go | 39 +- .../encoding/protojson/well_known_types.go | 55 +- .../protobuf/encoding/prototext/decode.go | 8 +- .../protobuf/encoding/prototext/encode.go | 4 +- .../protobuf/encoding/protowire/wire.go | 28 +- .../protobuf/internal/descfmt/stringer.go | 183 +- .../protobuf/internal/filedesc/desc.go | 47 +- .../protobuf/internal/genid/descriptor_gen.go | 212 +- .../protobuf/internal/impl/codec_gen.go | 113 +- .../protobuf/internal/impl/legacy_message.go | 19 +- .../protobuf/internal/impl/message.go | 17 +- .../protobuf/internal/impl/pointer_reflect.go | 36 + .../protobuf/internal/impl/pointer_unsafe.go | 40 + ...ings_unsafe.go => strings_unsafe_go120.go} | 4 +- .../internal/strs/strings_unsafe_go121.go | 74 + .../protobuf/internal/version/version.go | 2 +- .../protobuf/proto/decode.go | 2 +- .../google.golang.org/protobuf/proto/doc.go | 58 +- .../protobuf/proto/encode.go | 2 +- .../protobuf/proto/extension.go | 2 +- .../google.golang.org/protobuf/proto/merge.go | 2 +- .../google.golang.org/protobuf/proto/proto.go | 18 +- .../protobuf/reflect/protodesc/desc.go | 29 +- .../protobuf/reflect/protodesc/desc_init.go | 24 + .../protobuf/reflect/protodesc/editions.go | 177 + .../reflect/protodesc/editions_defaults.binpb | 4 + .../protobuf/reflect/protodesc/proto.go | 18 +- .../protobuf/reflect/protoreflect/proto.go | 83 +- .../reflect/protoreflect/source_gen.go | 62 +- .../protobuf/reflect/protoreflect/type.go | 44 +- .../protobuf/reflect/protoreflect/value.go | 24 +- .../reflect/protoreflect/value_equal.go | 8 +- .../reflect/protoreflect/value_union.go | 44 +- ...{value_unsafe.go => value_unsafe_go120.go} | 4 +- .../protoreflect/value_unsafe_go121.go | 87 + .../reflect/protoregistry/registry.go | 24 +- .../types/descriptorpb/descriptor.pb.go | 2467 +- .../protobuf/types/known/anypb/any.pb.go | 3 +- vendor/howett.net/plist/.gitignore | 16 + vendor/howett.net/plist/README.md | 2 +- vendor/howett.net/plist/bplist_parser.go | 5 +- vendor/howett.net/plist/marshal.go | 18 +- vendor/howett.net/plist/plist.go | 2 - vendor/howett.net/plist/plist_types.go | 33 + vendor/howett.net/plist/text_generator.go | 2 + vendor/howett.net/plist/text_parser.go | 95 +- vendor/howett.net/plist/text_tables.go | 20 +- vendor/howett.net/plist/unmarshal.go | 20 +- vendor/howett.net/plist/xml_generator.go | 9 +- vendor/howett.net/plist/xml_parser.go | 9 +- vendor/modules.txt | 359 +- 2126 files changed, 149620 insertions(+), 87321 deletions(-) delete mode 100644 vendor/cloud.google.com/go/cloudsqlconn/connect_tls_117.go delete mode 100644 vendor/cloud.google.com/go/cloudsqlconn/connect_tls_other.go create mode 100644 vendor/cloud.google.com/go/cloudsqlconn/doc.go create mode 100644 vendor/cloud.google.com/go/cloudsqlconn/instance/conn_name.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go create mode 100644 
vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_trace.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_sas_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_other.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_wasm.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/batch_builder.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/blob_batch.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/constants.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/buffer_manager.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/challenge_policy.go rename vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/{blockblob => internal/shared}/mmf_unix.go (73%) rename vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/{blockblob => internal/shared}/mmf_windows.go (78%) create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/batch_builder.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_safe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_unsafe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_safe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_unsafe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_safe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_unsafe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_safe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_unsafe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_safe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_unsafe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_safe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_unsafe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_safe_gen.go create mode 100644 vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_unsafe_gen.go create mode 100644 vendor/github.com/ClickHouse/clickhouse-go/v2/context_watchdog.go create mode 100644 vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/array_gen.go create mode 100644 
vendor/github.com/ClickHouse/clickhouse-go/v2/lib/driver/options.go create mode 100644 vendor/github.com/ClickHouse/clickhouse-go/v2/main.tf create mode 100644 vendor/github.com/andybalholm/brotli/bitwriter.go create mode 100644 vendor/github.com/andybalholm/brotli/encoder.go create mode 100644 vendor/github.com/andybalholm/brotli/matchfinder/emitter.go create mode 100644 vendor/github.com/andybalholm/brotli/matchfinder/m0.go create mode 100644 vendor/github.com/andybalholm/brotli/matchfinder/m4.go create mode 100644 vendor/github.com/andybalholm/brotli/matchfinder/matchfinder.go create mode 100644 vendor/github.com/andybalholm/brotli/matchfinder/textencoder.go delete mode 100644 vendor/github.com/apache/arrow/go/v12/arrow/array/list.go delete mode 100644 vendor/github.com/apache/arrow/go/v12/parquet/.gitignore delete mode 100644 vendor/github.com/apache/arrow/go/v12/parquet/compress/brotli.go delete mode 100644 vendor/github.com/apache/arrow/go/v12/parquet/compress/compress.go delete mode 100644 vendor/github.com/apache/arrow/go/v12/parquet/compress/gzip.go delete mode 100644 vendor/github.com/apache/arrow/go/v12/parquet/compress/snappy.go delete mode 100644 vendor/github.com/apache/arrow/go/v12/parquet/compress/zstd.go delete mode 100644 vendor/github.com/apache/arrow/go/v12/parquet/doc.go delete mode 100644 vendor/github.com/apache/arrow/go/v12/parquet/encryption_properties.go delete mode 100644 vendor/github.com/apache/arrow/go/v12/parquet/internal/debug/assert_off.go delete mode 100644 vendor/github.com/apache/arrow/go/v12/parquet/internal/debug/doc.go delete mode 100644 vendor/github.com/apache/arrow/go/v12/parquet/internal/debug/log_off.go delete mode 100644 vendor/github.com/apache/arrow/go/v12/parquet/internal/gen-go/parquet/GoUnusedProtection__.go delete mode 100644 vendor/github.com/apache/arrow/go/v12/parquet/internal/gen-go/parquet/parquet-consts.go delete mode 100644 vendor/github.com/apache/arrow/go/v12/parquet/internal/gen-go/parquet/parquet.go delete mode 100644 vendor/github.com/apache/arrow/go/v12/parquet/internal/gen-go/parquet/staticcheck.conf delete mode 100644 vendor/github.com/apache/arrow/go/v12/parquet/reader_properties.go delete mode 100644 vendor/github.com/apache/arrow/go/v12/parquet/types.go delete mode 100644 vendor/github.com/apache/arrow/go/v12/parquet/version_string.go delete mode 100644 vendor/github.com/apache/arrow/go/v12/parquet/writer_properties.go rename vendor/github.com/apache/arrow/go/{v12 => v14}/LICENSE.txt (99%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/.editorconfig (100%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/.gitignore (100%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/Gopkg.lock (100%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/Gopkg.toml (100%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/Makefile (100%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/array.go (89%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/array/array.go (92%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/array/binary.go (93%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/array/binarybuilder.go (92%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/array/boolean.go (88%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/array/booleanbuilder.go (88%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/array/bufferbuilder.go (96%) rename vendor/github.com/apache/arrow/go/{v12 => 
v14}/arrow/array/bufferbuilder_byte.go (95%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/array/bufferbuilder_numeric.gen.go (97%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/array/bufferbuilder_numeric.gen.go.tmpl (94%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/array/builder.go (88%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/array/compare.go (81%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/array/concat.go (79%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/array/data.go (97%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/array/decimal128.go (89%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/array/decimal256.go (89%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/array/dictionary.go (87%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/array/diff.go (99%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/array/doc.go (100%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/array/encoded.go (80%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/array/extension.go (82%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/array/extension_builder.go (100%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/array/fixed_size_list.go (85%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/array/fixedsize_binary.go (90%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/array/fixedsize_binarybuilder.go (89%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/array/float16.go (89%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/array/float16_builder.go (89%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/array/interval.go (89%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/array/json_reader.go (96%) create mode 100644 vendor/github.com/apache/arrow/go/v14/arrow/array/list.go rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/array/map.go (91%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/array/null.go (86%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/array/numeric.gen.go (90%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/array/numeric.gen.go.tmpl (70%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/array/numericbuilder.gen.go (90%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/array/numericbuilder.gen.go.tmpl (80%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/array/numericbuilder.gen_test.go.tmpl (78%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/array/record.go (98%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/array/string.go (92%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/array/struct.go (85%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/array/table.go (87%) create mode 100644 vendor/github.com/apache/arrow/go/v14/arrow/array/timestamp.go rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/array/union.go (95%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/array/util.go (91%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/arrio/arrio.go (98%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/bitutil/Makefile (100%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/bitutil/bitmap_ops.go (100%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/bitutil/bitmap_ops_amd64.go (100%) rename 
vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/bitutil/bitmap_ops_arm64.go (100%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/bitutil/bitmap_ops_avx2_amd64.go (100%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/bitutil/bitmap_ops_avx2_amd64.s (100%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/bitutil/bitmap_ops_noasm.go (100%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/bitutil/bitmap_ops_ppc64le.go (100%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/bitutil/bitmap_ops_s390x.go (100%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/bitutil/bitmap_ops_sse4_amd64.go (100%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/bitutil/bitmap_ops_sse4_amd64.s (100%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/bitutil/bitmaps.go (99%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/bitutil/bitutil.go (96%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/bitutil/endian_default.go (100%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/bitutil/endian_s390x.go (100%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compare.go (80%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/arithmetic.go (98%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/cast.go (98%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/datum.go (98%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/datumkind_string.go (100%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/doc.go (100%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/exec.go (95%) rename vendor/github.com/apache/arrow/go/{v12/arrow/compute/internal => v14/arrow/compute}/exec/hash_util.go (100%) rename vendor/github.com/apache/arrow/go/{v12/arrow/compute/internal => v14/arrow/compute}/exec/kernel.go (99%) rename vendor/github.com/apache/arrow/go/{v12/arrow/compute/internal => v14/arrow/compute}/exec/span.go (98%) rename vendor/github.com/apache/arrow/go/{v12/arrow/compute/internal => v14/arrow/compute}/exec/utils.go (95%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/executor.go (97%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/expression.go (96%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/fieldref.go (99%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/funckind_string.go (100%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/functions.go (99%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/Makefile (100%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/base_arithmetic.go (98%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/base_arithmetic_amd64.go (97%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/base_arithmetic_avx2_amd64.go (98%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/base_arithmetic_avx2_amd64.s (100%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/base_arithmetic_sse4_amd64.go (98%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/base_arithmetic_sse4_amd64.s (100%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/basic_arithmetic_noasm.go (95%) rename 
vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/boolean_cast.go (95%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/cast.go (96%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/cast_numeric.go (98%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/cast_numeric_amd64.go (100%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/cast_numeric_avx2_amd64.go (96%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/cast_numeric_avx2_amd64.s (100%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/cast_numeric_neon_arm64.go (96%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/cast_numeric_neon_arm64.s (100%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/cast_numeric_sse4_amd64.go (96%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/cast_numeric_sse4_amd64.s (100%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/cast_temporal.go (98%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/compareoperator_string.go (100%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/constant_factor.go (100%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/constant_factor_amd64.go (100%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/constant_factor_avx2_amd64.go (100%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/constant_factor_avx2_amd64.s (100%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/constant_factor_sse4_amd64.go (100%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/constant_factor_sse4_amd64.s (100%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/doc.go (100%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/helpers.go (98%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/numeric_cast.go (98%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/rounding.go (98%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/roundmode_string.go (100%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/scalar_arithmetic.go (97%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/scalar_boolean.go (98%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/scalar_comparison_amd64.go (96%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/scalar_comparison_avx2_amd64.go (99%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/scalar_comparison_avx2_amd64.s (100%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/scalar_comparison_noasm.go (93%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/scalar_comparison_sse4_amd64.go (99%) rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/scalar_comparison_sse4_amd64.s (100%) rename 
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/scalar_comparisons.go (97%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/string_casts.go (97%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/types.go (94%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/vector_hash.go (98%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/vector_run_end_encode.go (98%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/internal/kernels/vector_selection.go (99%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/registry.go (98%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/scalar_bool.go (96%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/scalar_compare.go (95%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/selection.go (99%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/utils.go (96%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/vector_hash.go (94%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/compute/vector_run_ends.go (96%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/datatype.go (94%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/datatype_binary.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/datatype_encoded.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/datatype_extension.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/datatype_fixedwidth.go (95%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/datatype_nested.go (69%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/datatype_null.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/datatype_numeric.gen.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/datatype_numeric.gen.go.tmpl (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/datatype_numeric.gen.go.tmpldata (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/decimal128/decimal128.go (91%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/decimal256/decimal256.go (95%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/doc.go (91%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/encoded/ree_utils.go (99%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/endian/big.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/endian/endian.go (91%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/endian/little.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/errors.go (95%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/float16/float16.go (51%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/debug/assert_off.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/debug/assert_on.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/debug/doc.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/debug/log_off.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/debug/log_on.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/debug/util.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/dictutils/dict.go (98%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/Binary.go (100%)
 create mode 100644 vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/BinaryView.go
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/Block.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/BodyCompression.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/BodyCompressionMethod.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/Bool.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/Buffer.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/CompressionType.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/Date.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/DateUnit.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/Decimal.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/DictionaryBatch.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/DictionaryEncoding.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/DictionaryKind.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/Duration.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/Endianness.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/Feature.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/Field.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/FieldNode.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/FixedSizeBinary.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/FixedSizeList.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/FloatingPoint.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/Footer.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/Int.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/Interval.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/IntervalUnit.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/KeyValue.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/LargeBinary.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/LargeList.go (100%)
 create mode 100644 vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/LargeListView.go
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/LargeUtf8.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/List.go (100%)
 create mode 100644 vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/ListView.go
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/Map.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/Message.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/MessageHeader.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/MetadataVersion.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/Null.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/Precision.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/RecordBatch.go (65%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/RunEndEncoded.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/RunLengthEncoded.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/Schema.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/SparseMatrixCompressedAxis.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/SparseMatrixIndexCSR.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/SparseMatrixIndexCSX.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/SparseTensor.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/SparseTensorIndex.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/SparseTensorIndexCOO.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/SparseTensorIndexCSF.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/Struct_.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/Tensor.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/TensorDim.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/Time.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/TimeUnit.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/Timestamp.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/Type.go (89%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/Union.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/UnionMode.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/flatbuf/Utf8.go (100%)
 create mode 100644 vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Utf8View.go
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/internal/utils.go (94%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/ipc/compression.go (95%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/ipc/endian_swap.go (97%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/ipc/file_reader.go (94%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/ipc/file_writer.go (97%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/ipc/ipc.go (96%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/ipc/message.go (97%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/ipc/metadata.go (96%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/ipc/reader.go (95%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/ipc/writer.go (85%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/memory/Makefile (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/memory/allocator.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/memory/buffer.go (98%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/memory/cgo_allocator.go (98%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/memory/cgo_allocator_defaults.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/memory/cgo_allocator_logging.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/memory/checked_allocator.go (55%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/memory/default_allocator.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/memory/default_mallocator.go (94%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/memory/doc.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/memory/go_allocator.go (97%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/memory/internal/cgoalloc/allocator.cc (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/memory/internal/cgoalloc/allocator.go (98%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/memory/internal/cgoalloc/allocator.h (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/memory/internal/cgoalloc/helpers.h (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/memory/mallocator/doc.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/memory/mallocator/mallocator.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/memory/memory.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/memory/memory_amd64.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/memory/memory_arm64.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/memory/memory_avx2_amd64.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/memory/memory_avx2_amd64.s (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/memory/memory_js_wasm.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/memory/memory_neon_arm64.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/memory/memory_neon_arm64.s (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/memory/memory_noasm.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/memory/memory_sse4_amd64.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/memory/memory_sse4_amd64.s (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/memory/util.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/numeric.schema.json (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/numeric.tmpldata (90%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/record.go (96%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/scalar/append.go (96%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/scalar/binary.go (98%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/scalar/compare.go (98%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/scalar/nested.go (97%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/scalar/numeric.gen.go (99%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/scalar/numeric.gen.go.tmpl (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/scalar/numeric.gen.go.tmpldata (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/scalar/numeric.gen_test.go.tmpl (95%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/scalar/parse.go (98%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/scalar/scalar.go (98%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/scalar/temporal.go (99%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/schema.go (88%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/table.go (95%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/tools.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/type_string.go (89%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/type_traits_boolean.go (95%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/type_traits_decimal128.go (81%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/type_traits_decimal256.go (78%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/type_traits_float16.go (81%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/type_traits_interval.go (79%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/type_traits_numeric.gen.go (69%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/type_traits_numeric.gen.go.tmpl (86%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/type_traits_numeric.gen_test.go.tmpl (97%)
 create mode 100644 vendor/github.com/apache/arrow/go/v14/arrow/type_traits_timestamp.go
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/arrow/unionmode_string.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/internal/bitutils/bit_block_counter.go (99%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/internal/bitutils/bit_run_reader.go (97%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/internal/bitutils/bit_set_run_reader.go (99%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/internal/bitutils/bitmap_generate.go (98%)
 create mode 100644 vendor/github.com/apache/arrow/go/v14/internal/hashing/hash_funcs.go
 rename vendor/github.com/apache/arrow/go/{v12/parquet/internal/debug/log_on.go => v14/internal/hashing/hash_string.go} (81%)
 rename vendor/github.com/apache/arrow/go/{v12/parquet/tools.go => v14/internal/hashing/hash_string_go1.19.go} (57%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/internal/hashing/types.tmpldata (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/internal/hashing/xxh3_memo_table.gen.go (98%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/internal/hashing/xxh3_memo_table.gen.go.tmpl (97%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/internal/hashing/xxh3_memo_table.go (80%)
 create mode 100644 vendor/github.com/apache/arrow/go/v14/internal/json/json.go
 rename vendor/github.com/apache/arrow/go/{v12/parquet/internal/debug/assert_on.go => v14/internal/json/json_tinygo.go} (55%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/internal/utils/Makefile (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/internal/utils/buf_reader.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/internal/utils/endians_default.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/internal/utils/endians_s390x.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/internal/utils/math.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/internal/utils/min_max.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/internal/utils/min_max_amd64.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/internal/utils/min_max_arm64.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/internal/utils/min_max_avx2_amd64.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/internal/utils/min_max_avx2_amd64.s (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/internal/utils/min_max_neon_arm64.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/internal/utils/min_max_neon_arm64.s (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/internal/utils/min_max_noasm.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/internal/utils/min_max_ppc64le.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/internal/utils/min_max_s390x.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/internal/utils/min_max_sse4_amd64.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/internal/utils/min_max_sse4_amd64.s (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/internal/utils/transpose_ints.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/internal/utils/transpose_ints.go.tmpl (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/internal/utils/transpose_ints.tmpldata (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/internal/utils/transpose_ints_amd64.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/internal/utils/transpose_ints_amd64.go.tmpl (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/internal/utils/transpose_ints_arm64.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/internal/utils/transpose_ints_avx2_amd64.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/internal/utils/transpose_ints_avx2_amd64.s (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/internal/utils/transpose_ints_def.go (99%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/internal/utils/transpose_ints_noasm.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/internal/utils/transpose_ints_noasm.go.tmpl (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/internal/utils/transpose_ints_ppc64le.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/internal/utils/transpose_ints_s390x.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/internal/utils/transpose_ints_s390x.go.tmpl (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/internal/utils/transpose_ints_simd.go.tmpl (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/internal/utils/transpose_ints_sse4_amd64.go (100%)
 rename vendor/github.com/apache/arrow/go/{v12 => v14}/internal/utils/transpose_ints_sse4_amd64.s (100%)
 delete mode 100644 vendor/github.com/apache/thrift/NOTICE
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/application_exception.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/binary_protocol.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/buf_pool.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/buffered_transport.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/client.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/compact_protocol.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/configuration.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/context.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/debug_protocol.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/deserializer.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/exception.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/framed_transport.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/header_context.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/header_protocol.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/header_transport.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/http_client.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/http_transport.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/iostream_transport.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/json_protocol.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/logger.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/memory_buffer.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/messagetype.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/middleware.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/numeric.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/pointerize.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/processor_factory.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/protocol.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/protocol_exception.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/protocol_factory.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/response_helper.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/rich_transport.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/serializer.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/server.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/server_socket.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/server_transport.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/simple_json_protocol.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/simple_server.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/socket.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/socket_conn.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/socket_non_unix_conn.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/socket_unix_conn.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/ssl_server_socket.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/ssl_socket.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/staticcheck.conf
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/transport.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/transport_exception.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/transport_factory.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/type.go
 delete mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/zlib_transport.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/metrics.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/recursion_detection.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/auth/auth.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/auth/scheme.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_adapter.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_signer_adapter.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/credentials_adapter.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/smithy.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/v4signer_adapter.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/endpoints.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/context/context.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/arn.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/doc.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/generate.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/host.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/endpoints.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/smithy.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/arn_member.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateSession.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListDirectoryBuckets.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/s3/auth.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/s3/bucket_context.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/s3/bucketer.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/s3/create_mpu_checksum.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoint_auth_resolver.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/s3/express.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_default.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_resolve.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/s3/handwritten_paginators.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/context.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_config.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_default_checksum.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_properties.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_signer.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_signer_smithy.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/s3/options.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/service/s3/serialize_immutable_hostname_bucket.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/auth/bearer/token.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/awsinternal.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/sso_cached_token.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/token_provider.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home_go1.12.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go
 create mode 100644 vendor/github.com/aws/smithy-go/auth/auth.go
 create mode 100644 vendor/github.com/aws/smithy-go/auth/identity.go
 create mode 100644 vendor/github.com/aws/smithy-go/auth/option.go
 create mode 100644 vendor/github.com/aws/smithy-go/auth/scheme_id.go
 create mode 100644 vendor/github.com/aws/smithy-go/container/private/cache/cache.go
 create mode 100644 vendor/github.com/aws/smithy-go/container/private/cache/lru/lru.go
 create mode 100644 vendor/github.com/aws/smithy-go/endpoints/endpoint.go
 create mode 100644 vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/doc.go
 create mode 100644 vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/strings.go
 create mode 100644 vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/uri.go
 create mode 100644 vendor/github.com/aws/smithy-go/properties.go
 create mode 100644 vendor/github.com/aws/smithy-go/transport/http/auth.go
 create mode 100644 vendor/github.com/aws/smithy-go/transport/http/auth_schemes.go
 create mode 100644 vendor/github.com/aws/smithy-go/transport/http/middleware_header_comment.go
 create mode 100644 vendor/github.com/aws/smithy-go/transport/http/properties.go
 create mode 100644 vendor/github.com/elastic/go-sysinfo/.golangci.yml
 delete mode 100644 vendor/github.com/elastic/go-sysinfo/CHANGELOG.md
 create mode 100644 vendor/github.com/elastic/go-sysinfo/CONTRIBUTING.md
 create mode 100644 vendor/github.com/elastic/go-sysinfo/providers/darwin/load_average_darwin.go
 create mode 100644 vendor/github.com/elastic/go-sysinfo/providers/darwin/machineid_nocgo_darwin.go
 create mode 100644 vendor/github.com/elastic/go-sysinfo/providers/darwin/process_cgo_darwin.go
 create mode 100644 vendor/github.com/elastic/go-sysinfo/providers/darwin/process_nocgo_darwin.go
 create mode 100644 vendor/github.com/elastic/go-sysinfo/providers/darwin/syscall_cgo_darwin.go
 create mode 100644 vendor/github.com/elastic/go-sysinfo/providers/darwin/syscall_nocgo_darwin.go
 create mode 100644 vendor/github.com/elastic/go-sysinfo/providers/shared/fqdn.go
 create mode 100644 vendor/github.com/felixge/httpsnoop/.gitignore
 create mode 100644 vendor/github.com/felixge/httpsnoop/LICENSE.txt
 create mode 100644 vendor/github.com/felixge/httpsnoop/Makefile
 create mode 100644 vendor/github.com/felixge/httpsnoop/README.md
 create mode 100644 vendor/github.com/felixge/httpsnoop/capture_metrics.go
 create mode 100644 vendor/github.com/felixge/httpsnoop/docs.go
 create mode 100644 vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go
 create mode 100644 vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go
 create mode 100644 vendor/github.com/go-faster/errors/join_go120.go
 create mode 100644 vendor/github.com/go-logr/logr/.golangci.yaml
 create mode 100644 vendor/github.com/go-logr/logr/CHANGELOG.md
 create mode 100644 vendor/github.com/go-logr/logr/CONTRIBUTING.md
 rename vendor/github.com/{matttproud/golang_protobuf_extensions => go-logr/logr}/LICENSE (100%)
 create mode 100644 vendor/github.com/go-logr/logr/README.md
 create mode 100644 vendor/github.com/go-logr/logr/SECURITY.md
 create mode 100644 vendor/github.com/go-logr/logr/context.go
 create mode 100644 vendor/github.com/go-logr/logr/context_noslog.go
 create mode 100644 vendor/github.com/go-logr/logr/context_slog.go
 create mode 100644 vendor/github.com/go-logr/logr/discard.go
 create mode 100644 vendor/github.com/go-logr/logr/funcr/funcr.go
 create mode 100644 vendor/github.com/go-logr/logr/funcr/slogsink.go
 create mode 100644 vendor/github.com/go-logr/logr/logr.go
 create mode 100644 vendor/github.com/go-logr/logr/sloghandler.go
 create mode 100644 vendor/github.com/go-logr/logr/slogr.go
 create mode 100644 vendor/github.com/go-logr/logr/slogsink.go
 rename vendor/github.com/{minio/c2goasm => go-logr/stdr}/LICENSE (99%)
 create mode 100644 vendor/github.com/go-logr/stdr/README.md
 create mode 100644 vendor/github.com/go-logr/stdr/stdr.go
 delete mode 100644 vendor/github.com/golang/snappy/.gitignore
 delete mode 100644 vendor/github.com/golang/snappy/AUTHORS
 delete mode 100644 vendor/github.com/golang/snappy/CONTRIBUTORS
 delete mode 100644 vendor/github.com/golang/snappy/LICENSE
 delete mode 100644 vendor/github.com/golang/snappy/README
 delete mode 100644 vendor/github.com/golang/snappy/decode.go
 delete mode 100644 vendor/github.com/golang/snappy/decode_amd64.s
 delete mode 100644 vendor/github.com/golang/snappy/decode_arm64.s
 delete mode 100644 vendor/github.com/golang/snappy/decode_asm.go
 delete mode 100644 vendor/github.com/golang/snappy/decode_other.go
 delete mode 100644 vendor/github.com/golang/snappy/encode.go
 delete mode 100644 vendor/github.com/golang/snappy/encode_amd64.s
 delete mode 100644 vendor/github.com/golang/snappy/encode_arm64.s
 delete mode 100644 vendor/github.com/golang/snappy/encode_asm.go
 delete mode 100644 vendor/github.com/golang/snappy/encode_other.go
 delete mode 100644 vendor/github.com/golang/snappy/snappy.go
 rename vendor/github.com/{minio/asm2plan9s => google/flatbuffers}/LICENSE (100%)
 create mode 100644 vendor/github.com/google/s2a-go/retry/retry.go
 create mode 100644 vendor/github.com/google/s2a-go/testdata/mds_client_cert.pem
 create mode 100644 vendor/github.com/google/s2a-go/testdata/mds_client_key.pem
 create mode 100644 vendor/github.com/google/s2a-go/testdata/mds_root_cert.pem
 create mode 100644 vendor/github.com/google/s2a-go/testdata/mds_server_cert.pem
 create mode 100644 vendor/github.com/google/s2a-go/testdata/mds_server_key.pem
 create mode 100644 vendor/github.com/google/s2a-go/testdata/self_signed_cert.pem
 create mode 100644 vendor/github.com/google/s2a-go/testdata/self_signed_key.pem
 delete mode 100644 vendor/github.com/google/uuid/.travis.yml
 create mode 100644 vendor/github.com/google/uuid/CHANGELOG.md
 create mode 100644 vendor/github.com/google/uuid/version6.go
 create mode 100644 vendor/github.com/google/uuid/version7.go
 create mode 100644 vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go
 delete mode 100644 vendor/github.com/jackc/pgservicefile/.travis.yml
 delete mode 100644 vendor/github.com/klauspost/asmfmt/.gitignore
 delete mode 100644 vendor/github.com/klauspost/asmfmt/.goreleaser.yml
 delete mode 100644 vendor/github.com/klauspost/asmfmt/LICENSE
 delete mode 100644 vendor/github.com/klauspost/asmfmt/README.md
 delete mode 100644 vendor/github.com/klauspost/asmfmt/asmfmt.go
 delete mode 100644 vendor/github.com/klauspost/asmfmt/cmd/asmfmt/LICENSE
 delete mode 100644 vendor/github.com/klauspost/asmfmt/cmd/asmfmt/doc.go
 delete mode 100644 vendor/github.com/klauspost/asmfmt/cmd/asmfmt/main.go
 create mode 100644 vendor/github.com/klauspost/compress/SECURITY.md
 delete mode 100644 vendor/github.com/klauspost/compress/flate/deflate.go
 delete mode 100644 vendor/github.com/klauspost/compress/flate/dict_decoder.go
 delete mode 100644 vendor/github.com/klauspost/compress/flate/fast_encoder.go
 delete mode 100644 vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
 delete mode 100644 vendor/github.com/klauspost/compress/flate/huffman_code.go
 delete mode 100644 vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go
 delete mode 100644 vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go
 delete mode 100644 vendor/github.com/klauspost/compress/flate/inflate.go
 delete mode 100644 vendor/github.com/klauspost/compress/flate/inflate_gen.go
 delete mode 100644 vendor/github.com/klauspost/compress/flate/level1.go
 delete mode 100644 vendor/github.com/klauspost/compress/flate/level2.go
 delete mode 100644 vendor/github.com/klauspost/compress/flate/level3.go
 delete mode 100644 vendor/github.com/klauspost/compress/flate/level4.go
 delete mode 100644 vendor/github.com/klauspost/compress/flate/level5.go
 delete mode 100644 vendor/github.com/klauspost/compress/flate/level6.go
 delete mode 100644 vendor/github.com/klauspost/compress/flate/regmask_amd64.go
 delete mode 100644 vendor/github.com/klauspost/compress/flate/regmask_other.go
 delete mode 100644 vendor/github.com/klauspost/compress/flate/stateless.go
 delete mode 100644 vendor/github.com/klauspost/compress/flate/token.go
 delete mode 100644 vendor/github.com/klauspost/compress/gzip/gunzip.go
 delete mode 100644 vendor/github.com/klauspost/compress/gzip/gzip.go
 delete mode 100644 vendor/github.com/klauspost/compress/huff0/bytereader.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go
 create mode 100644 vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s
 create mode 100644 vendor/github.com/klauspost/compress/zstd/matchlen_generic.go
 delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE
 delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore
 delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile
 delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
 delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
 delete mode 100644 vendor/github.com/minio/asm2plan9s/.gitignore
 delete mode 100644 vendor/github.com/minio/asm2plan9s/README.md
 delete mode 100644 vendor/github.com/minio/asm2plan9s/asm2plan9s.go
 delete mode 100644 vendor/github.com/minio/asm2plan9s/asm2plan9s_amd64.go
 delete mode 100644 vendor/github.com/minio/asm2plan9s/asm2plan9s_arm64.go
 delete mode 100644 vendor/github.com/minio/asm2plan9s/example.s
 delete mode 100644 vendor/github.com/minio/asm2plan9s/neon.asm
 delete mode 100644 vendor/github.com/minio/asm2plan9s/yasm.go
 delete mode 100644 vendor/github.com/minio/c2goasm/README.md
 delete mode 100644 vendor/github.com/minio/c2goasm/arguments.go
 delete mode 100644 vendor/github.com/minio/c2goasm/assembly.go
 delete mode 100644 vendor/github.com/minio/c2goasm/c2goasm.go
 delete mode 100644 vendor/github.com/minio/c2goasm/constants.go
 delete mode 100644 vendor/github.com/minio/c2goasm/epilogue.go
 delete mode 100644 vendor/github.com/minio/c2goasm/subroutine.go
 create mode 100644 vendor/github.com/pierrec/lz4/v4/compressing_reader.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go
 create mode 100644 vendor/github.com/prometheus/common/model/metadata.go
 create mode 100644 vendor/github.com/snowflakedb/gosnowflake/client_configuration.go
 create mode 100644 vendor/github.com/snowflakedb/gosnowflake/codecov.yml
 create mode 100644 vendor/github.com/snowflakedb/gosnowflake/data1.txt.gz
 create mode 100644 vendor/github.com/snowflakedb/gosnowflake/easy_logging.go
 create mode 100644 vendor/github.com/snowflakedb/gosnowflake/htap.go
 rename vendor/{github.com/google/flatbuffers/LICENSE.txt => go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE} (99%)
 create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go
 create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go
 create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go
 create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go
 create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
 create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go
 create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go
 create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go
 create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go
 create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
 create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
 create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go
 create mode 100644 vendor/go.opentelemetry.io/otel/.codespellignore
 create mode 100644 vendor/go.opentelemetry.io/otel/.codespellrc
 create mode 100644 vendor/go.opentelemetry.io/otel/.gitattributes
 create mode 100644 vendor/go.opentelemetry.io/otel/.gitignore
 create mode 100644 vendor/go.opentelemetry.io/otel/.gitmodules
 create mode 100644 vendor/go.opentelemetry.io/otel/.golangci.yml
 create mode 100644 vendor/go.opentelemetry.io/otel/.lycheeignore
 create mode 100644 vendor/go.opentelemetry.io/otel/.markdownlint.yaml
 create mode 100644 vendor/go.opentelemetry.io/otel/CHANGELOG.md
 create mode 100644 vendor/go.opentelemetry.io/otel/CODEOWNERS
 create mode 100644 vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
 create mode 100644 vendor/go.opentelemetry.io/otel/Makefile
 create mode 100644 vendor/go.opentelemetry.io/otel/README.md
 create mode 100644 vendor/go.opentelemetry.io/otel/RELEASING.md
 create mode 100644 vendor/go.opentelemetry.io/otel/VERSIONING.md
 create mode 100644 vendor/go.opentelemetry.io/otel/attribute/filter.go
 create mode 100644 vendor/go.opentelemetry.io/otel/baggage/baggage.go
 create mode 100644 vendor/go.opentelemetry.io/otel/baggage/context.go
 create mode 100644 vendor/go.opentelemetry.io/otel/baggage/doc.go
 create mode 100644 vendor/go.opentelemetry.io/otel/doc.go
 create mode 100644 vendor/go.opentelemetry.io/otel/error_handler.go
 create mode 100644 vendor/go.opentelemetry.io/otel/get_main_pkgs.sh
 create mode 100644 vendor/go.opentelemetry.io/otel/handler.go
 create mode 100644 vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go
 create mode 100644 vendor/go.opentelemetry.io/otel/internal/baggage/context.go
 create mode 100644 vendor/go.opentelemetry.io/otel/internal/gen.go
 create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/handler.go
 create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/instruments.go
 create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
 create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/meter.go
 create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/propagator.go
 create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/state.go
 create mode 100644 vendor/go.opentelemetry.io/otel/internal/global/trace.go
 create mode 100644 vendor/go.opentelemetry.io/otel/internal_logging.go
 create mode 100644 vendor/go.opentelemetry.io/otel/metric.go
 rename vendor/{github.com/apache/thrift => go.opentelemetry.io/otel/metric}/LICENSE (73%)
 create mode 100644 vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go
 create mode 100644 vendor/go.opentelemetry.io/otel/metric/asyncint64.go
 create mode 100644 vendor/go.opentelemetry.io/otel/metric/config.go
 create mode 100644 vendor/go.opentelemetry.io/otel/metric/doc.go
 create mode 100644 vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go
 create mode 100644 vendor/go.opentelemetry.io/otel/metric/instrument.go
 create mode 100644 vendor/go.opentelemetry.io/otel/metric/meter.go
 create mode 100644 vendor/go.opentelemetry.io/otel/metric/syncfloat64.go
 create mode 100644 vendor/go.opentelemetry.io/otel/metric/syncint64.go
 create mode 100644 vendor/go.opentelemetry.io/otel/propagation.go
 create mode 100644 vendor/go.opentelemetry.io/otel/propagation/baggage.go
 create mode 100644 vendor/go.opentelemetry.io/otel/propagation/doc.go
 create mode 100644 vendor/go.opentelemetry.io/otel/propagation/propagation.go
 create mode 100644 vendor/go.opentelemetry.io/otel/propagation/trace_context.go
 create mode 100644 vendor/go.opentelemetry.io/otel/requirements.txt
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go
 create mode 100644 vendor/go.opentelemetry.io/otel/trace.go
 create mode 100644 vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go
 create mode 100644 vendor/go.opentelemetry.io/otel/verify_examples.sh
 rename vendor/{github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go => go.opentelemetry.io/otel/version.go} (73%)
 create mode 100644 vendor/go.opentelemetry.io/otel/versions.yaml
 delete mode 100644 vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go
 delete mode 100644 vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go
 create mode 100644 vendor/golang.org/x/exp/slices/cmp.go
 rename vendor/golang.org/x/exp/slices/{zsortfunc.go => zsortanyfunc.go} (64%)
 delete mode 100644 vendor/golang.org/x/net/context/context.go
 delete mode 100644 vendor/golang.org/x/net/context/go17.go
 delete mode 100644 vendor/golang.org/x/net/context/go19.go
 delete mode 100644 vendor/golang.org/x/net/context/pre_go17.go
 delete mode 100644 vendor/golang.org/x/net/context/pre_go19.go
 delete mode 100644 vendor/golang.org/x/net/http2/go111.go
 delete mode 100644 vendor/golang.org/x/net/http2/go115.go
 delete mode 100644 vendor/golang.org/x/net/http2/go118.go
 delete mode 100644 vendor/golang.org/x/net/http2/not_go111.go
 delete mode 100644 vendor/golang.org/x/net/http2/not_go115.go
 delete mode 100644 vendor/golang.org/x/net/http2/not_go118.go
 create mode 100644 vendor/golang.org/x/oauth2/deviceauth.go
 create mode 100644 vendor/golang.org/x/oauth2/google/internal/externalaccount/header.go
 create mode 100644 vendor/golang.org/x/oauth2/google/internal/externalaccountauthorizeduser/externalaccountauthorizeduser.go
 rename vendor/golang.org/x/oauth2/google/internal/{externalaccount => stsexchange}/clientauth.go (88%)
 rename vendor/golang.org/x/oauth2/google/internal/{externalaccount => stsexchange}/sts_exchange.go (68%)
 create mode 100644 vendor/golang.org/x/oauth2/pkce.go
 delete mode 100644 vendor/golang.org/x/sys/execabs/execabs.go
 delete mode 100644 vendor/golang.org/x/sys/execabs/execabs_go118.go
 delete mode 100644 vendor/golang.org/x/sys/execabs/execabs_go119.go
 create mode 100644 vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
 create mode 100644 vendor/golang.org/x/tools/internal/event/keys/util.go
 create mode 100644 vendor/golang.org/x/tools/internal/event/tag/tag.go
 delete mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go
 delete mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk_darwin.go
 delete mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go
 delete mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go
 delete mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go
 delete mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go
 delete mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go
 delete mode 100644 vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go
 delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/bexport.go
 delete mode 100644 vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go
 delete mode 100644 vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go
 delete mode 100644 vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go
 delete mode 100644 vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go
 create mode 100644 vendor/golang.org/x/tools/internal/versions/gover.go
 create mode 100644 vendor/golang.org/x/tools/internal/versions/types.go
 create mode 100644 vendor/golang.org/x/tools/internal/versions/types_go121.go
 create mode 100644 vendor/golang.org/x/tools/internal/versions/types_go122.go
 create mode 100644 vendor/golang.org/x/tools/internal/versions/versions.go
 delete mode 100644 vendor/google.golang.org/api/transport/http/dial_appengine.go
 delete mode 100644 vendor/google.golang.org/appengine/.travis.yml
 delete mode 100644 vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go
 delete mode 100644 vendor/google.golang.org/appengine/internal/socket/socket_service.proto
 delete mode 100644 vendor/google.golang.org/appengine/socket/doc.go
 delete mode 100644 vendor/google.golang.org/appengine/socket/socket_classic.go
 delete mode 100644 vendor/google.golang.org/appengine/socket/socket_vm.go
 delete mode 100644 vendor/google.golang.org/appengine/travis_install.sh
 delete mode 100644 vendor/google.golang.org/appengine/travis_test.sh
 rename vendor/google.golang.org/grpc/{balancer_conn_wrappers.go => balancer_wrapper.go} (52%)
 delete mode 100644 vendor/google.golang.org/grpc/idle.go
 create mode 100644 vendor/google.golang.org/grpc/internal/experimental.go
 create mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go
 create mode 100644 vendor/google.golang.org/grpc/internal/idle/idle.go
 create mode 100644 vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go
 create mode 100644 vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go
 create mode 100644 vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go
 create mode 100644 vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
 create mode 100644 vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
 delete mode 100644 vendor/google.golang.org/grpc/resolver_conn_wrapper.go
 create mode 100644 vendor/google.golang.org/grpc/resolver_wrapper.go
 create mode 100644 vendor/google.golang.org/grpc/shared_buffer_pool.go
 create mode 100644 vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go
 rename vendor/google.golang.org/protobuf/internal/strs/{strings_unsafe.go => strings_unsafe_go120.go} (96%)
 create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
 create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
 create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/editions_defaults.binpb
 rename vendor/google.golang.org/protobuf/reflect/protoreflect/{value_unsafe.go => value_unsafe_go120.go} (97%)
 create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
 create mode 100644 vendor/howett.net/plist/.gitignore

diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml
index f933f018..a624c176 100644
--- a/.github/workflows/go.yml
+++ b/.github/workflows/go.yml
@@ -14,7 +14,7 @@ jobs:
       - uses: actions/checkout@v3
       - uses: actions/setup-go@v4
         with:
-          go-version: '^1.20.5'
+          go-version: '^1.21.5'
 
       - name: Build
         run: go build -v ./...
@@ -23,4 +23,4 @@ jobs:
 
       - uses: dominikh/staticcheck-action@v1.3.0
         with:
-          version: "2022.1.3"
+          version: "2023.1"
diff --git a/go.mod b/go.mod
index 5019a390..acda7351 100644
--- a/go.mod
+++ b/go.mod
@@ -1,10 +1,12 @@
 module github.com/justwatchcom/sql_exporter
 
-go 1.20
+go 1.21
+
+toolchain go1.21.5
 
 require (
-	cloud.google.com/go/cloudsqlconn v1.4.0
-	github.com/ClickHouse/clickhouse-go/v2 v2.10.1
+	cloud.google.com/go/cloudsqlconn v1.6.0
+	github.com/ClickHouse/clickhouse-go/v2 v2.17.1
 	github.com/cenkalti/backoff v2.2.1+incompatible
 	github.com/denisenkom/go-mssqldb v0.12.3
 	github.com/go-kit/log v0.2.1
@@ -12,114 +14,112 @@ require (
 	github.com/gobwas/glob v0.2.3
 	github.com/jmoiron/sqlx v1.3.5
 	github.com/lib/pq v1.10.9
-	github.com/prometheus/client_golang v1.17.0
-	github.com/prometheus/common v0.44.0
+	github.com/prometheus/client_golang v1.18.0
+	github.com/prometheus/common v0.46.0
 	github.com/robfig/cron/v3 v3.0.1
-	github.com/segmentio/go-athena v0.0.0-20181208004937-dfa5f1818930
-	github.com/snowflakedb/gosnowflake v1.6.22
-	github.com/vertica/vertica-sql-go v1.3.2
-	google.golang.org/api v0.130.0
+	github.com/segmentio/go-athena v0.0.0-20230626212750-5fac08ed8dab
+	github.com/snowflakedb/gosnowflake v1.7.2
+	github.com/vertica/vertica-sql-go v1.3.3
+	google.golang.org/api v0.160.0
 	gopkg.in/yaml.v2 v2.4.0
 )
 
 require (
-	cloud.google.com/go/compute v1.20.1 // indirect
+	cloud.google.com/go/compute v1.23.3 // indirect
 	cloud.google.com/go/compute/metadata v0.2.3 // indirect
 	github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect
 	github.com/99designs/keyring v1.2.2 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 // indirect
-	github.com/ClickHouse/ch-go v0.52.1 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1 // indirect
+	github.com/ClickHouse/ch-go v0.61.2 // indirect
 	github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c // indirect
-	github.com/andybalholm/brotli v1.0.5 // indirect
-	github.com/apache/arrow/go/v12 v12.0.0 // indirect
-	github.com/apache/thrift v0.16.0 // indirect
-	github.com/aws/aws-sdk-go v1.40.45 // indirect
-	github.com/aws/aws-sdk-go-v2 v1.17.7 // indirect
-	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 // indirect
-	github.com/aws/aws-sdk-go-v2/credentials v1.13.18 // indirect
-	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.59 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.31 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.25 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.23 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.26 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.25 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.0 // indirect
-	github.com/aws/aws-sdk-go-v2/service/s3 v1.31.0 // indirect
-	github.com/aws/smithy-go v1.13.5 // indirect
+	github.com/andybalholm/brotli v1.1.0 // indirect
+	github.com/apache/arrow/go/v14 v14.0.2 // indirect
+	github.com/aws/aws-sdk-go v1.50.6 // indirect
+	github.com/aws/aws-sdk-go-v2 v1.24.1 // indirect
+	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 // indirect
+	github.com/aws/aws-sdk-go-v2/credentials v1.16.16 // indirect
+	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.15 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.10 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.10 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.10 // indirect
+	github.com/aws/aws-sdk-go-v2/service/s3 v1.48.1 // indirect
+	github.com/aws/smithy-go v1.19.0 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
-	github.com/danieljoos/wincred v1.1.2 // indirect
-	github.com/dvsekhvalnov/jose2go v1.5.0 // indirect
-	github.com/elastic/go-sysinfo v1.8.1 // indirect
-	github.com/elastic/go-windows v1.0.0 // indirect
+	github.com/danieljoos/wincred v1.2.1 // indirect
+	github.com/dvsekhvalnov/jose2go v1.6.0 // indirect
+	github.com/elastic/go-sysinfo v1.11.2 // indirect
+	github.com/elastic/go-windows v1.0.1 // indirect
+	github.com/felixge/httpsnoop v1.0.4 // indirect
 	github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect
-	github.com/gabriel-vasile/mimetype v1.4.2 // indirect
+	github.com/gabriel-vasile/mimetype v1.4.3 // indirect
 	github.com/go-faster/city v1.0.1 // indirect
-	github.com/go-faster/errors v0.6.1 // indirect
-	github.com/go-logfmt/logfmt v0.5.1 // indirect
-	github.com/goccy/go-json v0.10.0 // indirect
+	github.com/go-faster/errors v0.7.1 // indirect
+	github.com/go-logfmt/logfmt v0.6.0 // indirect
+	github.com/go-logr/logr v1.4.1 // indirect
+	github.com/go-logr/stdr v1.2.2 // indirect
+	github.com/goccy/go-json v0.10.2 // indirect
 	github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect
 	github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 // indirect
 	github.com/golang-sql/sqlexp v0.1.0 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.3 // indirect
-	github.com/golang/snappy v0.0.4 // indirect
-	github.com/google/flatbuffers v23.1.21+incompatible // indirect
-	github.com/google/s2a-go v0.1.4 // indirect
-	github.com/google/uuid v1.3.0 // indirect
-	github.com/googleapis/enterprise-certificate-proxy v0.2.5 // indirect
-	github.com/googleapis/gax-go/v2 v2.11.0 // indirect
+	github.com/google/flatbuffers v23.5.26+incompatible // indirect
+	github.com/google/s2a-go v0.1.7 // indirect
+	github.com/google/uuid v1.6.0 // indirect
+	github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
+	github.com/googleapis/gax-go/v2 v2.12.0 // indirect
 	github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect
 	github.com/jackc/chunkreader/v2 v2.0.1 // indirect
-	github.com/jackc/pgconn v1.14.0 // indirect
+	github.com/jackc/pgconn v1.14.1 // indirect
 	github.com/jackc/pgio v1.0.0 // indirect
 	github.com/jackc/pgpassfile v1.0.0 // indirect
 	github.com/jackc/pgproto3/v2 v2.3.2 // indirect
-	github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
-	github.com/jackc/pgtype v1.14.0 // indirect
+	github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 // indirect
github.com/jackc/pgtype v1.14.1 // indirect github.com/jackc/pgx/v4 v4.18.1 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 // indirect - github.com/klauspost/asmfmt v1.3.2 // indirect - github.com/klauspost/compress v1.15.15 // indirect - github.com/klauspost/cpuid/v2 v2.2.3 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect - github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 // indirect - github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 // indirect + github.com/klauspost/compress v1.17.5 // indirect + github.com/klauspost/cpuid/v2 v2.2.6 // indirect github.com/mtibben/percent v0.2.1 // indirect - github.com/paulmach/orb v0.9.0 // indirect - github.com/pierrec/lz4/v4 v4.1.17 // indirect - github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect + github.com/paulmach/orb v0.11.1 // indirect + github.com/pierrec/lz4/v4 v4.1.21 // indirect + github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 // indirect - github.com/prometheus/procfs v0.11.1 // indirect + github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect github.com/segmentio/asm v1.2.0 // indirect github.com/shopspring/decimal v1.3.1 // indirect - github.com/sirupsen/logrus v1.9.0 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect github.com/zeebo/xxh3 v1.0.2 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/otel v1.13.0 // indirect - go.opentelemetry.io/otel/trace v1.13.0 // indirect - golang.org/x/crypto v0.14.0 // indirect - golang.org/x/exp v0.0.0-20230206171751-46f607a40771 // indirect - golang.org/x/mod v0.8.0 // indirect - golang.org/x/net v0.17.0 // indirect - golang.org/x/oauth2 v0.10.0 // indirect - golang.org/x/sync v0.3.0 // indirect - golang.org/x/sys v0.13.0 // indirect - golang.org/x/term v0.13.0 // indirect - golang.org/x/text v0.13.0 // indirect - golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.6.0 // indirect - golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230629202037-9506855d4529 // indirect - google.golang.org/grpc v1.56.1 // indirect - google.golang.org/protobuf v1.31.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 // indirect + go.opentelemetry.io/otel v1.22.0 // indirect + go.opentelemetry.io/otel/metric v1.22.0 // indirect + go.opentelemetry.io/otel/trace v1.22.0 // indirect + golang.org/x/crypto v0.18.0 // indirect + golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect + golang.org/x/mod v0.14.0 // indirect + golang.org/x/net v0.20.0 // indirect + golang.org/x/oauth2 v0.16.0 // indirect + golang.org/x/sync v0.6.0 // indirect + golang.org/x/sys v0.16.0 // indirect + golang.org/x/term v0.16.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/time v0.5.0 // indirect + golang.org/x/tools v0.17.0 // indirect + golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe // indirect + google.golang.org/grpc v1.61.0 // indirect + google.golang.org/protobuf v1.32.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - howett.net/plist 
v0.0.0-20181124034731-591f970eefbb // indirect + howett.net/plist v1.0.1 // indirect ) diff --git a/go.sum b/go.sum index eb7408a5..79357708 100644 --- a/go.sum +++ b/go.sum @@ -1,9 +1,8 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go/cloudsqlconn v1.4.0 h1:E4vzb0yTU4r1lJyz8XRWw+7f+jOaKmF8dt4Utr6x8f4= -cloud.google.com/go/cloudsqlconn v1.4.0/go.mod h1:cFZ/+oLBaAJyH5FXL4k6RdFN9BVNaTnYnrrE29MDQbo= -cloud.google.com/go/compute v1.20.1 h1:6aKEtlUiwEpJzM001l0yFkpXmUVXaN8W+fbkb2AZNbg= -cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/cloudsqlconn v1.6.0 h1:47IKX+sngheiKyuHnvTA3a6+lQwNsmgSD4N2YnWaT5Q= +cloud.google.com/go/cloudsqlconn v1.6.0/go.mod h1:w66+POP84g5lIeiD8FhJ0pSAgK6o4+ixCDLDutF3Amg= +cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= +cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= @@ -11,95 +10,91 @@ github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN github.com/99designs/keyring v1.2.2 h1:pZd3neh/EmUzWONb35LxQfvuY7kiSXAq3HQd97+XBn0= github.com/99designs/keyring v1.2.2/go.mod h1:wes/FrByc8j7lFOAGLGSNEg8f/PaI3cgTBqhFkHUrPk= github.com/Azure/azure-sdk-for-go/sdk/azcore v0.19.0/go.mod h1:h6H6c8enJmmocHUbLiiGY6sx7f9i+X3m1CHdd5c6Rdw= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 h1:8kDqDngH+DmVBiCtIjCFTGa7MBnsIOkF9IccInFEbjk= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 h1:lGlwhPtrX6EVml1hO0ivjkUxsSyl4dsiw9qcA1k/3IQ= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1/go.mod h1:RKUqNu35KJYcVG/fqTRqmuXJZYNhYkBrnC/hX7yGbTA= github.com/Azure/azure-sdk-for-go/sdk/azidentity v0.11.0/go.mod h1:HcM1YX14R7CJcghJGOYCgdezslRSVzqwLf/q+4Y2r/0= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0/go.mod h1:OQeznEEkTZ9OrhHJoDD8ZDq51FHgXjqtP9z6bEwBq9U= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 h1:BMAjVKJM0U/CYF27gA0ZMmXGkOcvfFtD0oHVZ1TIPRI= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0/go.mod h1:1fXstnBMas5kzG+S3q8UoJcmyU6nUeunJcMDHcRYHhs= github.com/Azure/azure-sdk-for-go/sdk/internal v0.7.0/go.mod h1:yqy467j36fJxcRV2TzfVZ1pCb5vxm4BtZPUdYWe/Xo8= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 h1:u/LLAOFgsMv7HmNL4Qufg58y+qElGOt5qv0z1mURkRY= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0/go.mod h1:2e8rMJtl2+2j+HXbTBwnyGpm5Nou7KhvSfxOq8JpTag= -github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 h1:OBhqkivkhkMqLPymWEppkm7vgPQY2XsHoEkaMQ0AdZY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 h1:6oNBlSdi1QqM1PNW7FPA6xOGA5UNsXnkaYZz9vdPGhA= +github.com/Azure/azure-sdk-for-go/sdk/internal 
v1.5.1/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0/go.mod h1:T5RfihdXtBDxt1Ch2wobif3TvzTdumDy29kahv6AV9A= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1 h1:AMf7YbZOZIW5b66cXNHMWWT/zkjhz5+a+k/3x40EO7E= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1/go.mod h1:uwfk06ZBcvL/g4VHNjurPfVln9NMbsk2XIZxJ+hu81k= +github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 h1:WpB/QDNLpMw72xHJc34BNNykqSOeEJDAWkhf0u12/Jk= +github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/ClickHouse/ch-go v0.52.1 h1:nucdgfD1BDSHjbNaG3VNebonxJzD8fX8jbuBpfo5VY0= -github.com/ClickHouse/ch-go v0.52.1/go.mod h1:B9htMJ0hii/zrC2hljUKdnagRBuLqtRG/GrU3jqCwRk= -github.com/ClickHouse/clickhouse-go/v2 v2.10.1 h1:WCnusqEeCO/9sLFVIv57le/O1ydUb+x9+SYYhJ11fsY= -github.com/ClickHouse/clickhouse-go/v2 v2.10.1/go.mod h1:teXfZNM90iQ99Jnuht+dxQXCuhDZ8nvvMoTJOFrcmcg= +github.com/ClickHouse/ch-go v0.61.2 h1:8+8eKO2VgxoRa0yLJpWwkqJxi/jrtP5Z+J6eZdPfwdc= +github.com/ClickHouse/ch-go v0.61.2/go.mod h1:ZSVIE1A7mGJNcJeBvVF1v5bo12n0Wmnw30RhnPCpLzg= +github.com/ClickHouse/clickhouse-go/v2 v2.17.1 h1:ZCmAYWpu75IyEi7+Yrs/uaAjiCGY5wfW5kXo64exkX4= +github.com/ClickHouse/clickhouse-go/v2 v2.17.1/go.mod h1:rkGTvFDTLqLIm0ma+13xmcCfr/08Gvs7KmFt1tgiWHQ= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c h1:RGWPOewvKIROun94nF7v2cua9qP+thov/7M50KEoeSU= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/cCs= -github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/apache/arrow/go/v12 v12.0.0 h1:xtZE63VWl7qLdB0JObIXvvhGjoVNrQ9ciIHG2OK5cmc= -github.com/apache/arrow/go/v12 v12.0.0/go.mod h1:d+tV/eHZZ7Dz7RPrFKtPK02tpr+c9/PEd/zm8mDS9Vg= -github.com/apache/thrift v0.16.0 h1:qEy6UW60iVOlUy+b9ZR0d5WzUWYGOo4HfopoyBaNmoY= -github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= -github.com/aws/aws-sdk-go v1.40.45 h1:QN1nsY27ssD/JmW4s83qmSb+uL6DG4GmCDzjmJB4xUI= -github.com/aws/aws-sdk-go v1.40.45/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= -github.com/aws/aws-sdk-go-v2 v1.17.7 h1:CLSjnhJSTSogvqUGhIC6LqFKATMRexcxLZ0i/Nzk9Eg= -github.com/aws/aws-sdk-go-v2 v1.17.7/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 h1:dK82zF6kkPeCo8J1e+tGx4JdvDIQzj7ygIoLg8WMuGs= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10/go.mod h1:VeTZetY5KRJLuD/7fkQXMU6Mw7H5m/KP2J5Iy9osMno= -github.com/aws/aws-sdk-go-v2/config v1.18.19 h1:AqFK6zFNtq4i1EYu+eC7lcKHYnZagMn6SW171la0bGw= -github.com/aws/aws-sdk-go-v2/config v1.18.19/go.mod h1:XvTmGMY8d52ougvakOv1RpiTLPz9dlG/OQHsKU/cMmY= -github.com/aws/aws-sdk-go-v2/credentials v1.13.18 h1:EQMdtHwz0ILTW1hoP+EwuWhwCG1hD6l3+RWFQABET4c= 
-github.com/aws/aws-sdk-go-v2/credentials v1.13.18/go.mod h1:vnwlwjIe+3XJPBYKu1et30ZPABG3VaXJYr8ryohpIyM= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.1 h1:gt57MN3liKiyGopcqgNzJb2+d9MJaKT/q1OksHNXVE4= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.1/go.mod h1:lfUx8puBRdM5lVVMQlwt2v+ofiG/X6Ms+dy0UkG/kXw= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.59 h1:E3Y+OfzOK1+rmRo/K2G0ml8Vs+Xqk0kOnf4nS0kUtBc= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.59/go.mod h1:1M4PLSBUVfBI0aP+C9XI7SM6kZPCGYyI6izWz0TGprE= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.31 h1:sJLYcS+eZn5EeNINGHSCRAwUJMFVqklwkH36Vbyai7M= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.31/go.mod h1:QT0BqUvX1Bh2ABdTGnjqEjvjzrCfIniM9Sc8zn9Yndo= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.25 h1:1mnRASEKnkqsntcxHaysxwgVoUUp5dkiB+l3llKnqyg= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.25/go.mod h1:zBHOPwhBc3FlQjQJE/D3IfPWiWaQmT06Vq9aNukDo0k= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.32 h1:p5luUImdIqywn6JpQsW3tq5GNOxKmOnEpybzPx+d1lk= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.32/go.mod h1:XGhIBZDEgfqmFIugclZ6FU7v75nHhBDtzuB4xB/tEi4= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.23 h1:DWYZIsyqagnWL00f8M/SOr9fN063OEQWn9LLTbdYXsk= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.23/go.mod h1:uIiFgURZbACBEQJfqTZPb/jxO7R+9LeoHUFudtIdeQI= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 h1:y2+VQzC6Zh2ojtV2LoC0MNwHWc6qXv/j2vrQtlftkdA= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11/go.mod h1:iV4q2hsqtNECrfmlXyord9u4zyuFEJX9eLgLpSPzWA8= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.26 h1:CeuSeq/8FnYpPtnuIeLQEEvDv9zUjneuYi8EghMBdwQ= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.26/go.mod h1:2UqAAwMUXKeRkAHIlDJqvMVgOWkUi/AUXPk/YIe+Dg4= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.25 h1:5LHn8JQ0qvjD9L9JhMtylnkcw7j05GDZqM9Oin6hpr0= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.25/go.mod h1:/95IA+0lMnzW6XzqYJRpjjsAbKEORVeO0anQqjd2CNU= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.0 h1:e2ooMhpYGhDnBfSvIyusvAwX7KexuZaHbQY2Dyei7VU= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.0/go.mod h1:bh2E0CXKZsQN+faiKVqC40vfNMAWheoULBCnEgO9K+8= -github.com/aws/aws-sdk-go-v2/service/s3 v1.31.0 h1:B1G2pSPvbAtQjilPq+Y7jLIzCOwKzuVEl+aBBaNG0AQ= -github.com/aws/aws-sdk-go-v2/service/s3 v1.31.0/go.mod h1:ncltU6n4Nof5uJttDtcNQ537uNuwYqsZZQcpkd2/GUQ= -github.com/aws/aws-sdk-go-v2/service/sso v1.12.6 h1:5V7DWLBd7wTELVz5bPpwzYy/sikk0gsgZfj40X+l5OI= -github.com/aws/aws-sdk-go-v2/service/sso v1.12.6/go.mod h1:Y1VOmit/Fn6Tz1uFAeCO6Q7M2fmfXSCLeL5INVYsLuY= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.6 h1:B8cauxOH1W1v7rd8RdI/MWnoR4Ze0wIHWrb90qczxj4= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.6/go.mod h1:Lh/bc9XUf8CfOY6Jp5aIkQtN+j1mc+nExc+KXj9jx2s= -github.com/aws/aws-sdk-go-v2/service/sts v1.18.7 h1:bWNgNdRko2x6gqa0blfATqAZKZokPIeM1vfmQt2pnvM= -github.com/aws/aws-sdk-go-v2/service/sts v1.18.7/go.mod h1:JuTnSoeePXmMVe9G8NcjjwgOKEfZ4cOjMuT2IBT/2eI= -github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8= -github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/andybalholm/brotli v1.1.0 
h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M= +github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY= +github.com/apache/arrow/go/v14 v14.0.2 h1:N8OkaJEOfI3mEZt07BIkvo4sC6XDbL+48MBPWO5IONw= +github.com/apache/arrow/go/v14 v14.0.2/go.mod h1:u3fgh3EdgN/YQ8cVQRguVW3R+seMybFg8QBQ5LU+eBY= +github.com/aws/aws-sdk-go v1.50.6 h1:FaXvNwHG3Ri1paUEW16Ahk9zLVqSAdqa1M3phjZR35Q= +github.com/aws/aws-sdk-go v1.50.6/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go-v2 v1.24.1 h1:xAojnj+ktS95YZlDf0zxWBkbFtymPeDP+rvUQIH3uAU= +github.com/aws/aws-sdk-go-v2 v1.24.1/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 h1:OCs21ST2LrepDfD3lwlQiOqIGp6JiEUqG84GzTDoyJs= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4/go.mod h1:usURWEKSNNAcAZuzRn/9ZYPT8aZQkR7xcCtunK/LkJo= +github.com/aws/aws-sdk-go-v2/config v1.26.6 h1:Z/7w9bUqlRI0FFQpetVuFYEsjzE3h7fpU6HuGmfPL/o= +github.com/aws/aws-sdk-go-v2/config v1.26.6/go.mod h1:uKU6cnDmYCvJ+pxO9S4cWDb2yWWIH5hra+32hVh1MI4= +github.com/aws/aws-sdk-go-v2/credentials v1.16.16 h1:8q6Rliyv0aUFAVtzaldUEcS+T5gbadPbWdV1WcAddK8= +github.com/aws/aws-sdk-go-v2/credentials v1.16.16/go.mod h1:UHVZrdUsv63hPXFo1H7c5fEneoVo9UXiz36QG1GEPi0= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 h1:c5I5iH+DZcH3xOIMlz3/tCKJDaHFwYEmxvlh2fAcFo8= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11/go.mod h1:cRrYDYAMUohBJUtUnOhydaMHtiK/1NZ0Otc9lIb6O0Y= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.15 h1:2MUXyGW6dVaQz6aqycpbdLIH1NMcUI6kW6vQ0RabGYg= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.15/go.mod h1:aHbhbR6WEQgHAiRj41EQ2W47yOYwNtIkWTXmcAtYqj8= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 h1:vF+Zgd9s+H4vOXd5BMaPWykta2a6Ih0AKLq/X6NYKn4= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10/go.mod h1:6BkRjejp/GR4411UGqkX8+wFMbFbqsUIimfK4XjOKR4= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 h1:nYPe006ktcqUji8S2mqXf9c/7NdiKriOwMvWQHgYztw= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10/go.mod h1:6UV4SZkVvmODfXKql4LCbaZUpF7HO2BX38FgBf9ZOLw= +github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3 h1:n3GDfwqF2tzEkXlv5cuy4iy7LpKDtqDMcNLfZDu9rls= +github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.10 h1:5oE2WzJE56/mVveuDZPJESKlg/00AaS2pY2QZcnxg4M= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.10/go.mod h1:FHbKWQtRBYUz4vO5WBWjzMD2by126ny5y/1EoaWoLfI= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.10 h1:L0ai8WICYHozIKK+OtPzVJBugL7culcuM4E4JOpIEm8= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.10/go.mod h1:byqfyxJBshFk0fF9YmK0M0ugIO8OWjzH2T3bPG4eGuA= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 h1:DBYTXwIGQSGs9w4jKm60F5dmCQ3EEruxdc0MFh+3EY4= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10/go.mod h1:wohMUQiFdzo0NtxbBg0mSRGZ4vL3n0dKjLTINdcIino= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.10 h1:KOxnQeWy5sXyS37fdKEvAsGHOr9fa/qvwxfJurR/BzE= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.10/go.mod h1:jMx5INQFYFYB3lQD9W0D8Ohgq6Wnl7NYOJ2TQndbulI= 
+github.com/aws/aws-sdk-go-v2/service/s3 v1.48.1 h1:5XNlsBsEvBZBMO6p82y+sqpWg8j5aBCe+5C2GBFgqBQ= +github.com/aws/aws-sdk-go-v2/service/s3 v1.48.1/go.mod h1:4qXHrG1Ne3VGIMZPCB8OjH/pLFO94sKABIusjh0KWPU= +github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 h1:eajuO3nykDPdYicLlP3AGgOyVN3MOlFmZv7WGTuJPow= +github.com/aws/aws-sdk-go-v2/service/sso v1.18.7/go.mod h1:+mJNDdF+qiUlNKNC3fxn74WWNN+sOiGOEImje+3ScPM= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 h1:QPMJf+Jw8E1l7zqhZmMlFw6w1NmfkfiSK8mS4zOx3BA= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7/go.mod h1:ykf3COxYI0UJmxcfcxcVuz7b6uADi1FkiUz6Eb7AgM8= +github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 h1:NzO4Vrau795RkUdSHKEwiR01FaGzGOH1EETJ+5QHnm0= +github.com/aws/aws-sdk-go-v2/service/sts v1.26.7/go.mod h1:6h2YuIoxaMSCFf5fi1EgZAwdfkGMgDY+DVfa61uLe4U= +github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM= +github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= -github.com/danieljoos/wincred v1.1.2 h1:QLdCxFs1/Yl4zduvBdcHB8goaYk9RARS2SgLLRuAyr0= -github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0= +github.com/danieljoos/wincred v1.2.1 h1:dl9cBrupW8+r5250DYkYxocLeZ1Y4vB1kxgtjxw8GQs= +github.com/danieljoos/wincred v1.2.1/go.mod h1:uGaFL9fDn3OLTvzCGulzE+SzjEe5NGlh5FdCcyfPwps= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -107,48 +102,63 @@ github.com/denisenkom/go-mssqldb v0.12.3 h1:pBSGx9Tq67pBOTLmxNuirNTeB8Vjmf886Kx+ github.com/denisenkom/go-mssqldb v0.12.3/go.mod h1:k0mtMFOnU+AihqFxPMiF05rtiDrorD1Vrm1KEz5hxDo= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/dvsekhvalnov/jose2go v1.5.0 h1:3j8ya4Z4kMCwT5nXIKFSV84YS+HdqSSO0VsTQxaLAeM= -github.com/dvsekhvalnov/jose2go v1.5.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= -github.com/elastic/go-sysinfo v1.8.1 h1:4Yhj+HdV6WjbCRgGdZpPJ8lZQlXZLKDAeIkmQ/VRvi4= +github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= +github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM= +github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dvsekhvalnov/jose2go v1.6.0 h1:Y9gnSnP4qEI0+/uQkHvFXeD2PLPJeXEL+ySMEA2EjTY= +github.com/dvsekhvalnov/jose2go v1.6.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= github.com/elastic/go-sysinfo v1.8.1/go.mod h1:JfllUnzoQV/JRYymbH3dO1yggI3mV2oTKSXsDHM+uIM= -github.com/elastic/go-windows v1.0.0 h1:qLURgZFkkrYyTTkvYpsZIgf83AUsdIHfvlJaqaZ7aSY= +github.com/elastic/go-sysinfo v1.11.2 h1:mcm4OSYVMyws6+n2HIVMGkln5HOpo5Ie1ZmbbNn0jg4= +github.com/elastic/go-sysinfo v1.11.2/go.mod h1:GKqR8bbMK/1ITnez9NIsIfXQr25aLhRJa7AfT8HpBFQ= github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= +github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0= +github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/form3tech-oss/jwt-go v3.2.5+incompatible h1:/l4kBbb4/vGSsdtB5nUe8L7B9mImVMaBPw9L/0TBHU8= github.com/form3tech-oss/jwt-go v3.2.5+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= -github.com/gabriel-vasile/mimetype v1.4.2/go.mod 
h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0= +github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk= github.com/go-faster/city v1.0.1 h1:4WAxSZ3V2Ws4QRDrscLEDcibJY8uf41H6AhXDrNDcGw= github.com/go-faster/city v1.0.1/go.mod h1:jKcUJId49qdW3L1qKHH/3wPeUstCVpVSXTM6vO3VcTw= -github.com/go-faster/errors v0.6.1 h1:nNIPOBkprlKzkThvS/0YaX8Zs9KewLCOSFQS5BU06FI= -github.com/go-faster/errors v0.6.1/go.mod h1:5MGV2/2T9yvlrbhe9pD9LO5Z/2zCSq2T8j+Jpi2LAyY= +github.com/go-faster/errors v0.7.1 h1:MkJTnDoEdi9pDabt1dpWf7AA8/BaSYZqibYyhZ20AYg= +github.com/go-faster/errors v0.7.1/go.mod h1:5ySTjWFiphBs07IKuiL69nxdfd5+fzh1u7FPGZP2quo= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= -github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= +github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/goccy/go-json v0.10.0 h1:mXKd9Qw4NuzShiRlOXKews24ufknHO7gx30lsDyokKA= -github.com/goccy/go-json v0.10.0/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= +github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= -github.com/golang-jwt/jwt/v4 v4.5.0 
h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE= +github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA= github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= @@ -159,28 +169,22 @@ github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4er github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/flatbuffers v23.1.21+incompatible h1:bUqzx/MXCDxuS0hRJL2EfjyZL3uQrPbMocUa8zGqsTA= -github.com/google/flatbuffers v23.1.21+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/flatbuffers v23.5.26+incompatible h1:M9dgRyhJemaM4Sw8+66GHBu8ioaQmyPLg1b8VwK5WJg= +github.com/google/flatbuffers v23.5.26+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -189,19 +193,18 @@ 
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= -github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.2.5 h1:UR4rDjcgpgEnqpIEvkiqTYKBCKLNmlge2eVjoZfySzM= -github.com/googleapis/enterprise-certificate-proxy v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w= -github.com/googleapis/gax-go/v2 v2.11.0 h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4= -github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= +github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= +github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= @@ -214,8 +217,9 @@ github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsU github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= -github.com/jackc/pgconn v1.14.0 h1:vrbA9Ud87g6JdFWkHTJXppVce58qPIdP7N8y0Ml/A7Q= github.com/jackc/pgconn v1.14.0/go.mod h1:9mBNlny0UvkgJdCDvdVHYSjI+8tD2rnKK69Wz8ti++E= +github.com/jackc/pgconn v1.14.1 h1:smbxIaZA08n6YuxEX1sDyjV/qkbtUtkH20qLkR9MUR4= +github.com/jackc/pgconn v1.14.1/go.mod h1:9mBNlny0UvkgJdCDvdVHYSjI+8tD2rnKK69Wz8ti++E= github.com/jackc/pgio v1.0.0 
h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= @@ -234,24 +238,31 @@ github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwX github.com/jackc/pgproto3/v2 v2.3.2 h1:7eY55bdBeCz1F2fTzSz69QC+pG46jYq9/jtSPiJ5nn0= github.com/jackc/pgproto3/v2 v2.3.2/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= -github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 h1:L0QtFUgDarD7Fpv9jeVMgy/+Ec0mtnmYuImjTz6dtDA= +github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= -github.com/jackc/pgtype v1.14.0 h1:y+xUdabmyMkJLyApYuPj38mW+aAIqCe5uuBB51rH3Vw= github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= +github.com/jackc/pgtype v1.14.1 h1:LyDar7M2K0tShCWqzJ/ctzF1QC3Wzc9c8a6cHE0PFdc= +github.com/jackc/pgtype v1.14.1/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= github.com/jackc/pgx/v4 v4.18.1 h1:YP7G1KABtKpB5IHrO9vYwSrCOhs7p3uqhvhhQBptya0= github.com/jackc/pgx/v4 v4.18.1/go.mod h1:FydWkUyadDmdNH/mHnGob881GawxeEm7TcMCzkb+qQE= +github.com/jackc/pgx/v5 v5.5.2 h1:iLlpgp4Cp/gC9Xuscl7lFL1PhhW+ZLtXZcrfCt4C3tA= +github.com/jackc/pgx/v5 v5.5.2/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A= github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.3.0 h1:eHK/5clGOatcjX3oWGBO/MpxpbHzSwud5EWTSCI+MX0= github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk= +github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= 
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= @@ -263,22 +274,23 @@ github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8 github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/asmfmt v1.3.2 h1:4Ri7ox3EwapiOjCki+hw14RyKk201CN4rzyCJRFLpK4= -github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw= -github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4= -github.com/klauspost/cpuid/v2 v2.2.3 h1:sxCkb+qR91z4vsqw4vGGZlDgPz3G7gjaLyK3V8y70BU= -github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/klauspost/compress v1.17.5 h1:d4vBd+7CHydUqpFBgUEKkSdtSugf9YFmSkvUYPquI5E= +github.com/klauspost/compress v1.17.5/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= +github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= @@ -292,46 +304,45 @@ github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-sqlite3 v1.14.6 h1:dNPt6NO46WmLVt2DLNpwczCmdV5boIZ6g/tlDrlRUbg= github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/microsoft/go-mssqldb v1.3.0 h1:JcPVl+acL8Z/cQcJc9zP0OkjQ+l20bco/cCDpMbmGJk= -github.com/minio/asm2plan9s 
v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpspGNG7Z948v4n35fFGB3RR3G/ry4FWs= -github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= -github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 h1:+n/aFZefKZp7spd8DFdX7uMikMLXX4oubIzJF4kv/wI= -github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= +github.com/microsoft/go-mssqldb v1.6.0 h1:mM3gYdVwEPFrlg/Dvr2DNVEgYFG7L42l+dGc67NNNpc= +github.com/microsoft/go-mssqldb v1.6.0/go.mod h1:00mDtPbeQCRGC1HwOOR5K/gr30P1NcEG0vx6Kbv2aJU= github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs= github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/paulmach/orb v0.9.0 h1:MwA1DqOKtvCgm7u9RZ/pnYejTeDJPnr0+0oFajBbJqk= -github.com/paulmach/orb v0.9.0/go.mod h1:SudmOk85SXtmXAB3sLGyJ6tZy/8pdfrV0o6ef98Xc30= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= +github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= +github.com/paulmach/orb v0.11.1 h1:3koVegMC4X/WeiXYz9iswopaTwMem53NzTJuTF20JzU= +github.com/paulmach/orb v0.11.1/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU= github.com/paulmach/protoscan v0.2.1/go.mod h1:SpcSwydNLrxUGSDvXvO0P7g7AuhJ7lcKfDlhJCDw2gY= -github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc= -github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= +github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= -github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= +github.com/prometheus/client_golang v1.18.0 
h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= +github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 h1:v7DLqVdK4VrYkVD5diGdl4sxJurKJEMnODWRJlxV9oM= -github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= -github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/common v0.46.0 h1:doXzt5ybi1HBKpsZOL0sSkaNHJJqkyfEWZGGqqScV0Y= +github.com/prometheus/common v0.46.0/go.mod h1:Tp0qkxpb9Jsg54QMe+EAmqXkSV7Evdy1BTn+g2pa/hQ= github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= -github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= @@ -339,18 +350,18 @@ github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= -github.com/segmentio/go-athena v0.0.0-20181208004937-dfa5f1818930 h1:Tn2Ryh7e9oN9TK19Y0vP/1Rfr1/DqBxXC8qrWKS4ez8= -github.com/segmentio/go-athena v0.0.0-20181208004937-dfa5f1818930/go.mod h1:umGD11uSGUY8Vd0lbo1jJUEAk4FxVV3YE5wRSXx1Lbk= +github.com/segmentio/go-athena v0.0.0-20230626212750-5fac08ed8dab h1:C56AWluoKnwfg31uBj91htttC26hyp3D4lDpnt2vJco= +github.com/segmentio/go-athena v0.0.0-20230626212750-5fac08ed8dab/go.mod h1:umGD11uSGUY8Vd0lbo1jJUEAk4FxVV3YE5wRSXx1Lbk= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= github.com/shopspring/decimal v1.3.1/go.mod 
h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/snowflakedb/gosnowflake v1.6.22 h1:2crLpqmFVyV03NPAxxAtzQBMFn6wUPqOJ1uRl4ruOJ4= -github.com/snowflakedb/gosnowflake v1.6.22/go.mod h1:P2fE/xiD2kQXpr48OdgnazkzPsKD6aVtnHD3WP8yD9c= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/snowflakedb/gosnowflake v1.7.2 h1:HRSwva8YXC64WUppfmHcMNVVzSE1+EwXXaJxgS0EkTo= +github.com/snowflakedb/gosnowflake v1.7.2/go.mod h1:03tW856vc3ceM4rJuj7KO4dzqN7qoezTm+xw7aPIIFo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= @@ -366,10 +377,11 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/vertica/vertica-sql-go v1.3.2 h1:QclPnkuozQyNl6lbrTdeuFSx2/lcSRZc1XL8zWNSjdA= -github.com/vertica/vertica-sql-go v1.3.2/go.mod h1:jnn2GFuv+O2Jcjktb7zyc4Utlbu9YVqpHH/lx63+1M4= +github.com/vertica/vertica-sql-go v1.3.3 h1:fL+FKEAEy5ONmsvya2WH5T8bhkvY27y/Ik3ReR2T+Qw= +github.com/vertica/vertica-sql-go v1.3.3/go.mod h1:jnn2GFuv+O2Jcjktb7zyc4Utlbu9YVqpHH/lx63+1M4= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= @@ -378,17 +390,21 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ= +github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= -go.mongodb.org/mongo-driver v1.11.1/go.mod h1:s7p5vEtfbeR1gYi6pnj3c3/urpbLv2T5Sfd6Rp2HBB8= +go.mongodb.org/mongo-driver v1.11.4/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod 
h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/otel v1.13.0 h1:1ZAKnNQKwBBxFtww/GwxNUyTf0AxkZzrukO8MeXqe4Y= -go.opentelemetry.io/otel v1.13.0/go.mod h1:FH3RtdZCzRkJYFTCsAKDy9l/XYjMdNv6QrkFFB8DvVg= -go.opentelemetry.io/otel/trace v1.13.0 h1:CBgRZ6ntv+Amuj1jDsMhZtlAPT6gbyIRdaIzFhfBSdY= -go.opentelemetry.io/otel/trace v1.13.0/go.mod h1:muCvmmO9KKpvuXSf3KKAXXB2ygNYHQ+ZfI5X08d3tds= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 h1:sv9kVfal0MK0wBMCOGr+HeJm9v803BkJxGrk2au7j08= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0/go.mod h1:SK2UL73Zy1quvRPonmOmRDiWk1KBV3LyIeeIxcEApWw= +go.opentelemetry.io/otel v1.22.0 h1:xS7Ku+7yTFvDfDraDIJVpw7XPyuHlB9MCiqqX5mcJ6Y= +go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI= +go.opentelemetry.io/otel/metric v1.22.0 h1:lypMQnGyJYeuYPhOM/bgjbFM6WE44W1/T45er4d8Hhg= +go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY= +go.opentelemetry.io/otel/trace v1.22.0 h1:Hg6pPujv0XG9QaVbGOBVHunyuLcCC3jN7WEhPx83XD0= +go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= @@ -411,14 +427,13 @@ golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWP golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= -golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= +golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20230206171751-46f607a40771 h1:xP7rWLUr1e1n2xkK5YB4LI0hPEy3LJC6Wk+D4pGlOJg= -golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= +golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -428,33 +443,28 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod 
v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= +golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= -golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= +golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= +golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -463,8 +473,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= -golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -476,26 +486,23 @@ golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= 
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= -golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= +golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -504,10 +511,10 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -523,41 +530,40 @@ golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= +golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= -golang.org/x/xerrors 
v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -gonum.org/v1/gonum v0.11.0 h1:f1IJhK4Km5tBJmaiJXtk/PkL4cdVX6J+tGiM187uT5E= -google.golang.org/api v0.130.0 h1:A50ujooa1h9iizvfzA4rrJr2B7uRmWexwbekQ2+5FPQ= -google.golang.org/api v0.130.0/go.mod h1:J/LCJMYSDFvAVREGCbrESb53n4++NMBDetSHGL5I5RY= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +gonum.org/v1/gonum v0.12.0 h1:xKuo6hzt+gMav00meVPUlXwSdoEJP46BR+wdxQEFK2o= +gonum.org/v1/gonum v0.12.0/go.mod h1:73TDxJfAAHeA8Mk9mf8NlIppyhQNo5GLTcYeqgo2lvY= +google.golang.org/api v0.160.0 h1:SEspjXHVqE1m5a1fRy8JFB+5jSu+V0GEDKDghF3ttO4= +google.golang.org/api v0.160.0/go.mod h1:0mu0TpK33qnydLvWqbImq2b1eQ5FHRSDCBzAxX9ZHyw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc h1:8DyZCyvI8mE1IdLy/60bS+52xfymkE72wv1asokgtao= -google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc h1:kVKPf/IiYSBWEWtkIn6wZXwWGCnLKcC8oWfZvXjsGnM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230629202037-9506855d4529 h1:DEH99RbiLZhMxrpEJCZ0A+wdTe0EOgou/poSLx9vWf4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230629202037-9506855d4529/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac h1:ZL/Teoy/ZGnzyrqK/Optxxp2pmVh+fmJ97slxSRyzUg= +google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:+Rvu7ElI+aLzyDQhpHMFMMltsD6m7nqpuWDd2CwJw3k= +google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917 h1:rcS6EyEaoCO52hQDupoSfrxI3R6C2Tq741is7X8OvnM= +google.golang.org/genproto/googleapis/api v0.0.0-20240102182953-50ed04b92917/go.mod h1:CmlNWB9lSezaYELKS5Ym1r44VrrbPUa7JTvw+6MbpJ0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe h1:bQnxqljG/wqi4NTXu2+DJ3n7APcEA882QZ1JvhQAq9o= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= 
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.56.1 h1:z0dNfjIl0VpaZ9iSVjA6daGatAYwPGstTjt5vkRMFkQ= -google.golang.org/grpc v1.56.1/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/grpc v1.61.0 h1:TOvOcuXn30kRao+gfcvsebNEa5iZIiLkisYEkf7R7o0= +google.golang.org/grpc v1.61.0/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -570,17 +576,18 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= +google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= +gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= @@ -591,5 +598,6 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M= howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod 
h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= +howett.net/plist v1.0.1 h1:37GdZ8tP09Q35o9ych3ehygcsL+HqKSwzctveSlarvM= +howett.net/plist v1.0.1/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= diff --git a/vendor/cloud.google.com/go/cloudsqlconn/CHANGELOG.md b/vendor/cloud.google.com/go/cloudsqlconn/CHANGELOG.md index e6b6513d..7803fb8d 100644 --- a/vendor/cloud.google.com/go/cloudsqlconn/CHANGELOG.md +++ b/vendor/cloud.google.com/go/cloudsqlconn/CHANGELOG.md @@ -1,5 +1,75 @@ # Changelog +## [1.6.0](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.5.2...v1.6.0) (2024-01-17) + + +### Features + +* add connection name to public API ([#698](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/698)) ([84f3b6e](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/84f3b6eedcf13402bcbf7da720924cf242893beb)) + +## [1.5.2](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.5.1...v1.5.2) (2023-12-12) + + +### Bug Fixes + +* ensure cert refresh recovers from sleep ([#686](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/686)) ([95671ad](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/95671ada40905cf14209b5c54058463689ce6b20)) + +## [1.5.1](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.5.0...v1.5.1) (2023-11-14) + + +### Bug Fixes + +* bump dependencies to latest ([#667](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/667)) ([86544f5](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/86544f5a477f694c8ceb862b13c3b83d19d72d5d)) + +## [1.5.0](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.4.5...v1.5.0) (2023-10-24) + + +### Features + +* add pgx v5 support ([#639](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/639)) ([#642](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/642)) ([8d86d92](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/8d86d92147d06ca10d754439638d6fd1b2154182)) + + +### Bug Fixes + +* use different driver names for v4 and v5 testing ([#639](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/639)) ([#654](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/654)) ([fa73c41](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/fa73c4184a9887e6e9217e5b50db97aa3fdc0d28)) +* use HandshakeContext by default ([#656](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/656)) ([49aad1f](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/49aad1f30bf560e6cf1e2ff52da46f3ff2cd2312)) + +## [1.4.5](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.4.4...v1.4.5) (2023-10-11) + + +### Bug Fixes + +* bump dependencies to latest ([#649](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/649)) ([0ddac9f](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/0ddac9fa7de17f740021408ed25ffbb0b0133d9e)) +* bump minimum supported Go version to 1.19 ([#637](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/637)) ([4a28a78](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/4a28a788a94d64e1ce6ddd76fa3a041c82c8f2b1)) + +## [1.4.4](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.4.3...v1.4.4) (2023-09-12) + + +### Bug Fixes + +* update dependencies to latest versions 
([#621](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/621)) ([32f1e27](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/32f1e2762b8ced0a3332e4928fdc61ad5d731530)) + +## [1.4.3](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.4.2...v1.4.3) (2023-08-18) + + +### Bug Fixes + +* update ForceRefresh to block if invalid ([#605](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/605)) ([61c72e3](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/61c72e3e76d04863b6971aeb86726c3b1252e5ed)) + +## [1.4.2](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.4.1...v1.4.2) (2023-08-15) + + +### Bug Fixes + +* re-use existing connection info on force refresh ([#602](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/602)) ([d049851](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/d049851361fc48bb339232c6609a2f2932d2d684)) + +## [1.4.1](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.4.0...v1.4.1) (2023-08-07) + + +### Bug Fixes + +* avoid holding lock over IO ([#576](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/576)) ([1e4560f](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/1e4560f7b41547882a2e9f7ef3ece94bb1bb48be)) + ## [1.4.0](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.3.0...v1.4.0) (2023-07-06) diff --git a/vendor/cloud.google.com/go/cloudsqlconn/README.md b/vendor/cloud.google.com/go/cloudsqlconn/README.md index 4ba52925..d8205839 100644 --- a/vendor/cloud.google.com/go/cloudsqlconn/README.md +++ b/vendor/cloud.google.com/go/cloudsqlconn/README.md @@ -18,7 +18,8 @@ [codelab]: https://codelabs.developers.google.com/codelabs/cloud-sql-go-connector The _Cloud SQL Go Connector_ is a Cloud SQL connector designed for use with the -Go language. Using a Cloud SQL connector provides the following benefits: +Go language. Using a Cloud SQL connector provides a native alternative to the +[Cloud SQL Auth Proxy][] while providing the following benefits: * **IAM Authorization:** uses IAM permissions to control who/what can connect to your Cloud SQL instances @@ -31,6 +32,7 @@ Go language. Using a Cloud SQL connector provides the following benefits: [Cloud SQL’s automatic IAM DB AuthN][iam-db-authn] feature. [iam-db-authn]: https://cloud.google.com/sql/docs/postgres/authentication +[Cloud SQL Auth Proxy]: https://cloud.google.com/sql/docs/postgres/sql-proxy For users migrating from the Cloud SQL Proxy drivers, see the [migration guide](./migration-guide.md). @@ -277,6 +279,53 @@ d, err := cloudsqlconn.NewDialer( ) ``` +### Automatic IAM Database Authentication + +Connections using [Automatic IAM database authentication][] are supported when +using Postgres or MySQL drivers. + +Make sure to [configure your Cloud SQL Instance to allow IAM authentication][configure-iam-authn] +and [add an IAM database user][add-iam-user]. + +A `Dialer` can be configured to connect to a Cloud SQL instance using +automatic IAM database authentication with the `WithIAMAuthN` Option +(recommended) or the `WithDialIAMAuthN` DialOption. + +```go +d, err := cloudsqlconn.NewDialer(ctx, cloudsqlconn.WithIAMAuthN()) +``` + +When configuring the DSN for IAM authentication, the `password` field can be +omitted and the `user` field should be formatted as follows: +> Postgres: For an IAM user account, this is the user's email address. 
+> For a service account, it is the service account's email without the +> `.gserviceaccount.com` domain suffix. +> +> MySQL: For an IAM user account, this is the user's email address, without +> the `@` or domain name. For example, for `test-user@gmail.com`, set the +> `user` field to `test-user`. For a service account, this is the service +> account's email address without the `@project-id.iam.gserviceaccount.com` +> suffix. + +Example DSNs using the `test-sa@test-project.iam.gserviceaccount.com` +service account to connect can be found below. + +**Postgres**: + +```go +dsn := "user=test-sa@test-project.iam dbname=mydb sslmode=disable" +``` + +**MySQL**: + +```go +dsn := "user=test-sa dbname=mydb sslmode=disable" +``` + +[Automatic IAM database authentication]: https://cloud.google.com/sql/docs/postgres/authentication#automatic +[configure-iam-authn]: https://cloud.google.com/sql/docs/postgres/create-edit-iam-instances#configure-iam-db-instance +[add-iam-user]: https://cloud.google.com/sql/docs/postgres/create-manage-iam-users#creating-a-database-user + ### Enabling Metrics and Tracing This library includes support for metrics and tracing using [OpenCensus][]. @@ -330,6 +379,50 @@ func main() { // ... } ``` + +As OpenTelemetry has now reached feature parity with OpenCensus, the migration +from OpenCensus to OpenTelemetry is strongly encouraged. +[OpenTelemetry bridge](https://github.com/open-telemetry/opentelemetry-go/tree/main/bridge/opencensus) +can be leveraged to migrate to OpenTelemetry without the need of replacing the +OpenCensus APIs in this library. Example code is shown below for migrating an +application using the OpenTelemetry bridge for traces. + +```golang +import ( + texporter "github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/trace" + "go.opencensus.io/trace" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/bridge/opencensus" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + "google.golang.org/api/option" +) + +func main() { + // trace.AlwaysSample() is expensive. Replacing it with your own + // sampler for production environments is recommended. + trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) + + exporter, err := texporter.New( + texporter.WithTraceClientOptions([]option.ClientOption{option.WithTelemetryDisabled()}), + texporter.WithProjectID("mycoolproject"), + ) + if err != nil { + // Handle error + } + + tp := sdktrace.NewTracerProvider(sdktrace.WithSyncer(exporter)) + otel.SetTracerProvider(tp) + tracer := tp.Tracer("Cloud SQL Go Connector Trace") + trace.DefaultTracer = opencensus.NewTracer(tracer) + + // Use cloudsqlconn as usual. + // ... +} +``` + +A known OpenTelemetry issue has been reported [here](https://github.com/googleapis/google-cloud-go/issues/7100). +It shouldn't impact database operations. + [OpenCensus]: https://opencensus.io/ [exporter]: https://opencensus.io/exporters/ [Cloud Monitoring]: https://cloud.google.com/monitoring @@ -353,7 +446,7 @@ supported for 1 year. **Unsupported** - Any major version that has been deprecated for >=1 year is considered unsupported. -## Supported Go Versions +### Supported Go Versions We follow the [Go Version Support Policy][go-policy] used by Google Cloud Libraries for Go. 
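The README section above shows the Dialer-wide `WithIAMAuthN` option but stops short of a per-connection example for the `WithDialIAMAuthN` DialOption it mentions. As a minimal sketch — the instance connection name is a placeholder and error handling is reduced to `log.Fatal` — enabling IAM AuthN for a single `Dial` call might look like this:

```go
package main

import (
	"context"
	"log"

	"cloud.google.com/go/cloudsqlconn"
)

func main() {
	ctx := context.Background()

	// Dialer-level default: automatic IAM database authentication off.
	d, err := cloudsqlconn.NewDialer(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// Closing the dialer stops its background refresh goroutines.
	defer d.Close()

	// Enable IAM AuthN for this one connection only; the instance
	// connection name below is a placeholder.
	conn, err := d.Dial(ctx, "project:region:instance",
		cloudsqlconn.WithDialIAMAuthN(true),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```

Per the `WithDialIAMAuthN` documentation in `options.go` (later in this patch), toggling the option between Dial calls can trigger extra refresh operations, so the Dialer-level option is preferable when every connection uses IAM AuthN.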
diff --git a/vendor/cloud.google.com/go/cloudsqlconn/connect_tls_117.go b/vendor/cloud.google.com/go/cloudsqlconn/connect_tls_117.go deleted file mode 100644 index 1ffa193f..00000000 --- a/vendor/cloud.google.com/go/cloudsqlconn/connect_tls_117.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build go1.17 -// +build go1.17 - -package cloudsqlconn - -import ( - "context" - "crypto/tls" - "net" - - "cloud.google.com/go/cloudsqlconn/errtype" - "cloud.google.com/go/cloudsqlconn/internal/cloudsql" -) - -// connectTLS returns a new TLS client side connection -// using conn as the underlying transport. -// -// The returned connection has already completed its TLS handshake. -func connectTLS(ctx context.Context, conn net.Conn, c *tls.Config, i *cloudsql.Instance) (net.Conn, error) { - tlsConn := tls.Client(conn, c) - // HandshakeContext was introduced in Go 1.17, hence - // this file is conditionally compiled on only Go versions >= 1.17. - if err := tlsConn.HandshakeContext(ctx); err != nil { - // refresh the instance info in case it caused the handshake failure - i.ForceRefresh() - _ = tlsConn.Close() // best effort close attempt - return nil, errtype.NewDialError("handshake failed", i.String(), err) - } - return tlsConn, nil -} diff --git a/vendor/cloud.google.com/go/cloudsqlconn/connect_tls_other.go b/vendor/cloud.google.com/go/cloudsqlconn/connect_tls_other.go deleted file mode 100644 index 56f117b6..00000000 --- a/vendor/cloud.google.com/go/cloudsqlconn/connect_tls_other.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2023 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !go1.17 -// +build !go1.17 - -package cloudsqlconn - -import ( - "context" - "crypto/tls" - "net" - - "cloud.google.com/go/cloudsqlconn/errtype" - "cloud.google.com/go/cloudsqlconn/internal/cloudsql" -) - -// connectTLS returns a new TLS client side connection -// using conn as the underlying transport. -// -// The returned connection has already completed its TLS handshake. 
-func connectTLS(_ context.Context, conn net.Conn, c *tls.Config, i *cloudsql.Instance) (net.Conn, error) { - tlsConn := tls.Client(conn, c) - if err := tlsConn.Handshake(); err != nil { - // refresh the instance info in case it caused the handshake failure - i.ForceRefresh() - _ = tlsConn.Close() // best effort close attempt - return nil, errtype.NewDialError("handshake failed", i.String(), err) - } - return tlsConn, nil -} diff --git a/vendor/cloud.google.com/go/cloudsqlconn/dialer.go b/vendor/cloud.google.com/go/cloudsqlconn/dialer.go index 6f4c7be1..e3418742 100644 --- a/vendor/cloud.google.com/go/cloudsqlconn/dialer.go +++ b/vendor/cloud.google.com/go/cloudsqlconn/dialer.go @@ -12,17 +12,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package cloudsqlconn contains methods for creating secure, authorized -// connections to a Cloud SQL instance. package cloudsqlconn import ( "context" "crypto/rand" "crypto/rsa" + "crypto/tls" _ "embed" "errors" "fmt" + "io" "net" "strings" "sync" @@ -30,6 +30,7 @@ import ( "time" "cloud.google.com/go/cloudsqlconn/errtype" + "cloud.google.com/go/cloudsqlconn/instance" "cloud.google.com/go/cloudsqlconn/internal/cloudsql" "cloud.google.com/go/cloudsqlconn/internal/trace" "github.com/google/uuid" @@ -69,6 +70,16 @@ func getDefaultKeys() (*rsa.PrivateKey, error) { return defaultKey, defaultKeyErr } +type connectionInfoCache interface { + OpenConns() *uint64 + + ConnectInfo(context.Context, string) (string, *tls.Config, error) + InstanceEngineVersion(context.Context) (string, error) + UpdateRefresh(*bool) + ForceRefresh() + io.Closer +} + // A Dialer is used to create connections to Cloud SQL instances. // // Use NewDialer to initialize a Dialer. @@ -76,15 +87,15 @@ type Dialer struct { lock sync.RWMutex // instances map connection names (e.g., my-project:us-central1:my-instance) // to *cloudsql.Instance types. - instances map[cloudsql.ConnName]*cloudsql.Instance + instances map[instance.ConnName]connectionInfoCache key *rsa.PrivateKey refreshTimeout time.Duration sqladmin *sqladmin.Service - // defaultDialCfg holds the constructor level DialOptions, so that it can - // be copied and mutated by the Dial function. - defaultDialCfg dialCfg + // defaultDialConfig holds the constructor level DialOptions, so that it + // can be copied and mutated by the Dial function. + defaultDialConfig dialConfig // dialerID uniquely identifies a Dialer. Used for monitoring purposes, // *only* when a client has configured OpenCensus exporters. 
@@ -158,12 +169,10 @@ func NewDialer(ctx context.Context, opts ...Option) (*Dialer, error) { return nil, fmt.Errorf("failed to create sqladmin client: %v", err) } - dc := dialCfg{ + dc := dialConfig{ ipType: cloudsql.PublicIP, tcpKeepAlive: defaultTCPKeepAlive, - refreshCfg: cloudsql.RefreshCfg{ - UseIAMAuthN: cfg.useIAMAuthN, - }, + useIAMAuthN: cfg.useIAMAuthN, } for _, opt := range cfg.dialOpts { opt(&dc) @@ -173,52 +182,76 @@ func NewDialer(ctx context.Context, opts ...Option) (*Dialer, error) { return nil, err } d := &Dialer{ - instances: make(map[cloudsql.ConnName]*cloudsql.Instance), - key: cfg.rsaKey, - refreshTimeout: cfg.refreshTimeout, - sqladmin: client, - defaultDialCfg: dc, - dialerID: uuid.New().String(), - iamTokenSource: cfg.iamLoginTokenSource, - dialFunc: cfg.dialFunc, + instances: make(map[instance.ConnName]connectionInfoCache), + key: cfg.rsaKey, + refreshTimeout: cfg.refreshTimeout, + sqladmin: client, + defaultDialConfig: dc, + dialerID: uuid.New().String(), + iamTokenSource: cfg.iamLoginTokenSource, + dialFunc: cfg.dialFunc, } return d, nil } -// Dial returns a net.Conn connected to the specified Cloud SQL instance. The instance argument must be the -// instance's connection name, which is in the format "project-name:region:instance-name". -func (d *Dialer) Dial(ctx context.Context, instance string, opts ...DialOption) (conn net.Conn, err error) { +// Dial returns a net.Conn connected to the specified Cloud SQL instance. The +// icn argument must be the instance's connection name, which is in the format +// "project-name:region:instance-name". +func (d *Dialer) Dial(ctx context.Context, icn string, opts ...DialOption) (conn net.Conn, err error) { startTime := time.Now() var endDial trace.EndSpanFunc ctx, endDial = trace.StartSpan(ctx, "cloud.google.com/go/cloudsqlconn.Dial", - trace.AddInstanceName(instance), + trace.AddInstanceName(icn), trace.AddDialerID(d.dialerID), ) defer func() { - go trace.RecordDialError(context.Background(), instance, d.dialerID, err) + go trace.RecordDialError(context.Background(), icn, d.dialerID, err) endDial(err) }() - cn, err := cloudsql.ParseConnName(instance) + cn, err := instance.ParseConnName(icn) if err != nil { return nil, err } - cfg := d.defaultDialCfg + cfg := d.defaultDialConfig for _, opt := range opts { opt(&cfg) } var endInfo trace.EndSpanFunc ctx, endInfo = trace.StartSpan(ctx, "cloud.google.com/go/cloudsqlconn/internal.InstanceInfo") - i := d.instance(cn, &cfg.refreshCfg) - addr, tlsCfg, err := i.ConnectInfo(ctx, cfg.ipType) + i := d.instance(cn, &cfg.useIAMAuthN) + addr, tlsConfig, err := i.ConnectInfo(ctx, cfg.ipType) if err != nil { - d.removeInstance(i) + d.lock.Lock() + defer d.lock.Unlock() + // Stop all background refreshes + i.Close() + delete(d.instances, cn) endInfo(err) return nil, err } endInfo(err) + // If the client certificate has expired (as when the computer goes to + // sleep, and the refresh cycle cannot run), force a refresh immediately. + // The TLS handshake will not fail on an expired client certificate. It's + // not until the first read where the client cert error will be surfaced. + // So check that the certificate is valid before proceeding. 
+ if invalidClientCert(tlsConfig) { + i.ForceRefresh() + // Block on refreshed connection info + addr, tlsConfig, err = i.ConnectInfo(ctx, cfg.ipType) + if err != nil { + d.lock.Lock() + defer d.lock.Unlock() + // Stop all background refreshes + i.Close() + delete(d.instances, cn) + return nil, err + } + } + var connectEnd trace.EndSpanFunc ctx, connectEnd = trace.StartSpan(ctx, "cloud.google.com/go/cloudsqlconn/internal.Connect") defer func() { connectEnd(err) }() @@ -231,38 +264,57 @@ func (d *Dialer) Dial(ctx context.Context, instance string, opts ...DialOption) if err != nil { // refresh the instance info in case it caused the connection failure i.ForceRefresh() - return nil, errtype.NewDialError("failed to dial", i.String(), err) + return nil, errtype.NewDialError("failed to dial", cn.String(), err) } if c, ok := conn.(*net.TCPConn); ok { if err := c.SetKeepAlive(true); err != nil { - return nil, errtype.NewDialError("failed to set keep-alive", i.String(), err) + return nil, errtype.NewDialError("failed to set keep-alive", cn.String(), err) } if err := c.SetKeepAlivePeriod(cfg.tcpKeepAlive); err != nil { - return nil, errtype.NewDialError("failed to set keep-alive period", i.String(), err) + return nil, errtype.NewDialError("failed to set keep-alive period", cn.String(), err) } } - tlsConn, err := connectTLS(ctx, conn, tlsCfg, i) - if err != nil { - return nil, err + + tlsConn := tls.Client(conn, tlsConfig) + if err := tlsConn.HandshakeContext(ctx); err != nil { + // refresh the instance info in case it caused the handshake failure + i.ForceRefresh() + _ = tlsConn.Close() // best effort close attempt + return nil, errtype.NewDialError("handshake failed", cn.String(), err) } + latency := time.Since(startTime).Milliseconds() go func() { - n := atomic.AddUint64(&i.OpenConns, 1) - trace.RecordOpenConnections(ctx, int64(n), d.dialerID, i.String()) - trace.RecordDialLatency(ctx, instance, d.dialerID, latency) + n := atomic.AddUint64(i.OpenConns(), 1) + trace.RecordOpenConnections(ctx, int64(n), d.dialerID, cn.String()) + trace.RecordDialLatency(ctx, icn, d.dialerID, latency) }() return newInstrumentedConn(tlsConn, func() { - n := atomic.AddUint64(&i.OpenConns, ^uint64(0)) - trace.RecordOpenConnections(context.Background(), int64(n), d.dialerID, i.String()) + n := atomic.AddUint64(i.OpenConns(), ^uint64(0)) + trace.RecordOpenConnections(context.Background(), int64(n), d.dialerID, cn.String()) }), nil } -// EngineVersion returns the engine type and version for the instance. The value will -// correspond to one of the following types for the instance: +func invalidClientCert(c *tls.Config) bool { + // The following conditions should be impossible (no certs, nil leaf), but + // just in case there's an unknown edge case, check assumptions before + // proceeding. + if len(c.Certificates) == 0 { + return true + } + if c.Certificates[0].Leaf == nil { + return true + } + return time.Now().After(c.Certificates[0].Leaf.NotAfter) +} + +// EngineVersion returns the engine type and version for the instance +// connection name. 
The value will correspond to one of the following types for +// the instance: // https://cloud.google.com/sql/docs/mysql/admin-api/rest/v1beta4/SqlDatabaseVersion -func (d *Dialer) EngineVersion(ctx context.Context, instance string) (string, error) { - cn, err := cloudsql.ParseConnName(instance) +func (d *Dialer) EngineVersion(ctx context.Context, icn string) (string, error) { + cn, err := instance.ParseConnName(icn) if err != nil { return "", err } @@ -274,18 +326,19 @@ func (d *Dialer) EngineVersion(ctx context.Context, instance string) (string, er return e, nil } -// Warmup starts the background refresh necessary to connect to the instance. Use Warmup -// to start the refresh process early if you don't know when you'll need to call "Dial". -func (d *Dialer) Warmup(_ context.Context, instance string, opts ...DialOption) error { - cn, err := cloudsql.ParseConnName(instance) +// Warmup starts the background refresh necessary to connect to the instance. +// Use Warmup to start the refresh process early if you don't know when you'll +// need to call "Dial". +func (d *Dialer) Warmup(_ context.Context, icn string, opts ...DialOption) error { + cn, err := instance.ParseConnName(icn) if err != nil { return err } - cfg := d.defaultDialCfg + cfg := d.defaultDialConfig for _, opt := range opts { opt(&cfg) } - _ = d.instance(cn, &cfg.refreshCfg) + _ = d.instance(cn, &cfg.useIAMAuthN) return nil } @@ -328,39 +381,30 @@ func (d *Dialer) Close() error { return nil } -// instance is a helper function for returning the appropriate instance object in a threadsafe way. -// It will create a new instance object, modify the existing one, or leave it unchanged as needed. -func (d *Dialer) instance(cn cloudsql.ConnName, r *cloudsql.RefreshCfg) *cloudsql.Instance { - // Check instance cache +// instance is a helper function for returning the appropriate instance object +// in a threadsafe way. It will create a new instance object, modify the +// existing one, or leave it unchanged as needed. 
+func (d *Dialer) instance(cn instance.ConnName, useIAMAuthN *bool) connectionInfoCache { d.lock.RLock() i, ok := d.instances[cn] d.lock.RUnlock() - // If the instance hasn't been created yet or if the refreshCfg has changed - if !ok || (r != nil && *r != i.RefreshCfg) { + if !ok { d.lock.Lock() + defer d.lock.Unlock() // Recheck to ensure instance wasn't created or changed between locks i, ok = d.instances[cn] if !ok { - // Create a new instance - if r == nil { - r = &d.defaultDialCfg.refreshCfg + var useIAMAuthNDial bool + if useIAMAuthN != nil { + useIAMAuthNDial = *useIAMAuthN } i = cloudsql.NewInstance(cn, d.sqladmin, d.key, - d.refreshTimeout, d.iamTokenSource, d.dialerID, *r) + d.refreshTimeout, d.iamTokenSource, d.dialerID, useIAMAuthNDial) d.instances[cn] = i - } else if r != nil && *r != i.RefreshCfg { - // Update the instance with the new refresh cfg - i.UpdateRefresh(*r) } - d.lock.Unlock() } - return i -} -func (d *Dialer) removeInstance(i *cloudsql.Instance) { - d.lock.Lock() - defer d.lock.Unlock() - // Stop all background refreshes - i.Close() - delete(d.instances, i.ConnName) + i.UpdateRefresh(useIAMAuthN) + + return i } diff --git a/vendor/cloud.google.com/go/cloudsqlconn/doc.go b/vendor/cloud.google.com/go/cloudsqlconn/doc.go new file mode 100644 index 00000000..da602cc7 --- /dev/null +++ b/vendor/cloud.google.com/go/cloudsqlconn/doc.go @@ -0,0 +1,167 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package cloudsqlconn provides functions for authorizing and encrypting +// connections. These functions can be used with a database driver to +// connect to a Cloud SQL instance. +// +// The instance connection name for a Cloud SQL instance is always in the +// format "project:region:instance". +// +// # Creating a Dialer +// +// To start working with this package, create a Dialer. There are two ways of +// creating a Dialer, which one you use depends on your database driver. +// +// # Postgres +// +// Postgres users have the option of using the [database/sql] interface or using [pgx] directly. +// +// To use a dialer with [pgx], we recommend using connection pooling with +// [pgxpool]. To create the dialer use the NewDialer func. 
+// +// import ( +// "context" +// "net" +// +// "cloud.google.com/go/cloudsqlconn" +// "github.com/jackc/pgx/v4/pgxpool" +// ) +// +// func connect() { +// // Configure the driver to connect to the database +// dsn := "user=myuser password=mypass dbname=mydb sslmode=disable" +// config, err := pgxpool.ParseConfig(dsn) +// if err != nil { +// // handle error +// } +// +// // Create a new dialer with any options +// d, err := cloudsqlconn.NewDialer(context.Background()) +// if err != nil { +// // handle error +// } +// +// // Tell the driver to use the Cloud SQL Go Connector to create connections +// config.ConnConfig.DialFunc = func(ctx context.Context, _ string, instance string) (net.Conn, error) { +// return d.Dial(ctx, "project:region:instance") +// } +// +// // Interact with the driver directly as you normally would +// conn, err := pgxpool.ConnectConfig(context.Background(), config) +// if err != nil { +// // handle error +// } +// +// // call cleanup when you're done with the database connection +// cleanup := func() error { return d.Close() } +// // ... etc +// } +// +// To use [database/sql], call pgxv4.RegisterDriver with any necessary Dialer +// configuration. +// +// Note: the connection string must use the keyword/value format +// with host set to the instance connection name. The returned cleanup func +// will stop the dialer's background refresh goroutine and so should only be +// called when you're done with the Dialer. +// +// import ( +// "database/sql" +// +// "cloud.google.com/go/cloudsqlconn" +// "cloud.google.com/go/cloudsqlconn/postgres/pgxv4" +// ) +// +// func connect() { +// // adjust options as needed +// cleanup, err := pgxv4.RegisterDriver("cloudsql-postgres", cloudsqlconn.WithIAMAuthN()) +// if err != nil { +// // ... handle error +// } +// // call cleanup when you're done with the database connection +// defer cleanup() +// +// db, err := sql.Open( +// "cloudsql-postgres", +// "host=project:region:instance user=myuser password=mypass dbname=mydb sslmode=disable", +// ) +// // ... etc +// } +// +// # MySQL +// +// MySQL users should use [database/sql]. Use mysql.RegisterDriver with any +// necessary Dialer configuration. +// +// Note: The returned cleanup func will stop the dialer's background refresh +// goroutine and should only be called when you're done with the Dialer. +// +// import ( +// "database/sql" +// +// "cloud.google.com/go/cloudsqlconn" +// "cloud.google.com/go/cloudsqlconn/mysql/mysql" +// ) +// +// func connect() { +// // adjust options as needed +// cleanup, err := mysql.RegisterDriver("cloudsql-mysql", cloudsqlconn.WithIAMAuthN()) +// if err != nil { +// // ... handle error +// } +// // call cleanup when you're done with the database connection +// defer cleanup() +// +// db, err := sql.Open( +// "cloudsql-mysql", +// "myuser:mypass@cloudsql-mysql(project:region:instance)/mydb", +// ) +// // ... etc +// } +// +// # SQL Server +// +// SQL Server users should use [database/sql]. Use mssql.RegisterDriver with any +// necessary Dialer configuration. +// +// Note: The returned cleanup func will stop the dialer's background refresh +// goroutine and should only be called when you're done with the Dialer. +// +// import ( +// "database/sql" +// +// "cloud.google.com/go/cloudsqlconn" +// "cloud.google.com/go/cloudsqlconn/sqlserver/mssql" +// ) +// +// func connect() { +// cleanup, err := mssql.RegisterDriver("cloudsql-sqlserver") +// if err != nil { +// // ... 
handle error +// } +// // call cleanup when you're done with the database connection +// defer cleanup() +// +// db, err := sql.Open( +// "cloudsql-sqlserver", +// "sqlserver://user:password@localhost?database=mydb&cloudsql=project:region:instance", +// ) +// // ... etc +// } +// +// [database/sql]: https://pkg.go.dev/database/sql +// [pgx]: https://github.com/jackc/pgx +// [pgxpool]: https://pkg.go.dev/github.com/jackc/pgx/v4/pgxpool +package cloudsqlconn diff --git a/vendor/cloud.google.com/go/cloudsqlconn/instance/conn_name.go b/vendor/cloud.google.com/go/cloudsqlconn/instance/conn_name.go new file mode 100644 index 00000000..d4f33601 --- /dev/null +++ b/vendor/cloud.google.com/go/cloudsqlconn/instance/conn_name.go @@ -0,0 +1,76 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package instance + +import ( + "fmt" + "regexp" + + "cloud.google.com/go/cloudsqlconn/errtype" +) + +var ( + // Instance connection name is the format <PROJECT>:<REGION>:<INSTANCE> + // Additionally, we have to support legacy "domain-scoped" projects + // (e.g. "google.com:PROJECT") + connNameRegex = regexp.MustCompile("([^:]+(:[^:]+)?):([^:]+):([^:]+)") +) + +// ConnName represents the "instance connection name", in the format +// "project:region:name". +type ConnName struct { + project string + region string + name string +} + +func (c *ConnName) String() string { + return fmt.Sprintf("%s:%s:%s", c.project, c.region, c.name) +} + +// Project returns the project within which the Cloud SQL instance runs. +func (c *ConnName) Project() string { + return c.project +} + +// Region returns the region where the Cloud SQL instance runs. +func (c *ConnName) Region() string { + return c.region +} + +// Name returns the Cloud SQL instance name +func (c *ConnName) Name() string { + return c.name +} + +// ParseConnName initializes a new ConnName struct. +func ParseConnName(cn string) (ConnName, error) { + b := []byte(cn) + m := connNameRegex.FindSubmatch(b) + if m == nil { + err := errtype.NewConfigError( + "invalid instance connection name, expected PROJECT:REGION:INSTANCE", + cn, + ) + return ConnName{}, err + } + + c := ConnName{ + project: string(m[1]), + region: string(m[3]), + name: string(m[4]), + } + return c, nil +} diff --git a/vendor/cloud.google.com/go/cloudsqlconn/internal/cloudsql/instance.go b/vendor/cloud.google.com/go/cloudsqlconn/internal/cloudsql/instance.go index cbbb79a7..57e05543 100644 --- a/vendor/cloud.google.com/go/cloudsqlconn/internal/cloudsql/instance.go +++ b/vendor/cloud.google.com/go/cloudsqlconn/internal/cloudsql/instance.go @@ -19,11 +19,11 @@ import ( "crypto/rsa" "crypto/tls" "fmt" - "regexp" "sync" "time" "cloud.google.com/go/cloudsqlconn/errtype" + "cloud.google.com/go/cloudsqlconn/instance" "golang.org/x/oauth2" "golang.org/x/time/rate" sqladmin "google.golang.org/api/sqladmin/v1beta4" @@ -47,45 +47,6 @@ const ( refreshBurst = 2 ) -var ( - // Instance connection name is the format <PROJECT>:<REGION>:<INSTANCE> - // Additionally, we have to support legacy "domain-scoped" projects - // (e.g.
"google.com:PROJECT") - connNameRegex = regexp.MustCompile("([^:]+(:[^:]+)?):([^:]+):([^:]+)") -) - -// ConnName represents the "instance connection name", in the format -// "project:region:name". -type ConnName struct { - project string - region string - name string -} - -func (c *ConnName) String() string { - return fmt.Sprintf("%s:%s:%s", c.project, c.region, c.name) -} - -// ParseConnName initializes a new ConnName struct. -func ParseConnName(cn string) (ConnName, error) { - b := []byte(cn) - m := connNameRegex.FindSubmatch(b) - if m == nil { - err := errtype.NewConfigError( - "invalid instance connection name, expected PROJECT:REGION:INSTANCE", - cn, - ) - return ConnName{}, err - } - - c := ConnName{ - project: string(m[1]), - region: string(m[3]), - name: string(m[4]), - } - return c, nil -} - // refreshOperation is a pending result of a refresh operation of data used to // connect securely. It should only be initialized by the Instance struct as // part of a refresh cycle. @@ -120,21 +81,16 @@ func (r *refreshOperation) isValid() bool { } } -// RefreshCfg is a collection of attributes that trigger new refresh operations. -type RefreshCfg struct { - UseIAMAuthN bool -} - // Instance manages the information used to connect to the Cloud SQL instance // by periodically calling the Cloud SQL Admin API. It automatically refreshes // the required information approximately 4 minutes before the previous // certificate expires (every ~56 minutes). type Instance struct { - // OpenConns is the number of open connections to the instance. - OpenConns uint64 + // openConns is the number of open connections to the instance. + openConns uint64 - ConnName - key *rsa.PrivateKey + connName instance.ConnName + key *rsa.PrivateKey // refreshTimeout sets the maximum duration a refresh cycle can run // for. @@ -143,8 +99,8 @@ type Instance struct { l *rate.Limiter r refresher - refreshLock sync.RWMutex - RefreshCfg RefreshCfg + mu sync.RWMutex + useIAMAuthNDial bool // cur represents the current refreshOperation that will be used to // create connections. If a valid complete refreshOperation isn't // available it's possible for cur to be equal to next. @@ -161,17 +117,17 @@ type Instance struct { // NewInstance initializes a new Instance given an instance connection name func NewInstance( - cn ConnName, + cn instance.ConnName, client *sqladmin.Service, key *rsa.PrivateKey, refreshTimeout time.Duration, ts oauth2.TokenSource, dialerID string, - r RefreshCfg, + useIAMAuthNDial bool, ) *Instance { ctx, cancel := context.WithCancel(context.Background()) i := &Instance{ - ConnName: cn, + connName: cn, key: key, l: rate.NewLimiter(rate.Every(refreshInterval), refreshBurst), r: newRefresher( @@ -179,24 +135,31 @@ func NewInstance( ts, dialerID, ), - refreshTimeout: refreshTimeout, - RefreshCfg: r, - ctx: ctx, - cancel: cancel, + refreshTimeout: refreshTimeout, + useIAMAuthNDial: useIAMAuthNDial, + ctx: ctx, + cancel: cancel, } // For the initial refresh operation, set cur = next so that connection // requests block until the first refresh is complete. - i.refreshLock.Lock() + i.mu.Lock() i.cur = i.scheduleRefresh(0) i.next = i.cur - i.refreshLock.Unlock() + i.mu.Unlock() return i } +// OpenConns returns a pointer to the number of open connections to +// faciliate changing the value using atomics. +func (i *Instance) OpenConns() *uint64 { + return &i.openConns +} + // Close closes the instance; it stops the refresh cycle and prevents it from // making additional calls to the Cloud SQL Admin API. 
-func (i *Instance) Close() { +func (i *Instance) Close() error { i.cancel() + return nil } // ConnectInfo returns an IP address specified by ipType (i.e., public or @@ -225,7 +188,7 @@ func (i *Instance) ConnectInfo(ctx context.Context, ipType string) (string, *tls if !ok { err := errtype.NewConfigError( fmt.Sprintf("instance does not have IP of type %q", ipType), - i.String(), + i.connName.String(), ) return "", nil, err } @@ -244,40 +207,47 @@ func (i *Instance) InstanceEngineVersion(ctx context.Context) (string, error) { } // UpdateRefresh cancels all existing refresh attempts and schedules new -// attempts with the provided config. -func (i *Instance) UpdateRefresh(cfg RefreshCfg) { - i.refreshLock.Lock() - defer i.refreshLock.Unlock() - // Cancel any pending refreshes - i.cur.cancel() - i.next.cancel() - // update the refresh config as needed - i.RefreshCfg = cfg - // reschedule a new refresh immediately - i.cur = i.scheduleRefresh(0) - i.next = i.cur +// attempts with the provided config only if it differs from the current +// configuration. +func (i *Instance) UpdateRefresh(useIAMAuthNDial *bool) { + i.mu.Lock() + defer i.mu.Unlock() + if useIAMAuthNDial != nil && *useIAMAuthNDial != i.useIAMAuthNDial { + // Cancel any pending refreshes + i.cur.cancel() + i.next.cancel() + + i.useIAMAuthNDial = *useIAMAuthNDial + // reschedule a new refresh immediately + i.cur = i.scheduleRefresh(0) + i.next = i.cur + } } // ForceRefresh triggers an immediate refresh operation to be scheduled and -// used for future connection attempts. +// used for future connection attempts. Until the refresh completes, the +// existing connection info will be available for use if valid. func (i *Instance) ForceRefresh() { - i.refreshLock.Lock() - defer i.refreshLock.Unlock() - // If the next refresh hasn't started yet, we can cancel it and start - // an immediate one + i.mu.Lock() + defer i.mu.Unlock() + // If the next refresh hasn't started yet, we can cancel it and start an + // immediate one if i.next.cancel() { i.next = i.scheduleRefresh(0) } // block all sequential connection attempts on the next refresh operation - i.cur = i.next + // if current is invalid + if !i.cur.isValid() { + i.cur = i.next + } } // refreshOperation returns the most recent refresh operation // waiting for it to complete if necessary func (i *Instance) refreshOperation(ctx context.Context) (*refreshOperation, error) { - i.refreshLock.RLock() + i.mu.RLock() cur := i.cur - i.refreshLock.RUnlock() + i.mu.RUnlock() var err error select { case <-cur.ready: @@ -324,12 +294,12 @@ func (i *Instance) scheduleRefresh(d time.Duration) *refreshOperation { if err != nil { r.err = errtype.NewDialError( "context was canceled or expired before refresh completed", - i.ConnName.String(), + i.connName.String(), nil, ) } else { r.result, r.err = i.r.performRefresh( - ctx, i.ConnName, i.key, i.RefreshCfg.UseIAMAuthN) + ctx, i.connName, i.key, i.useIAMAuthNDial) } close(r.ready) @@ -343,8 +313,8 @@ func (i *Instance) scheduleRefresh(d time.Duration) *refreshOperation { // Once the refresh is complete, update "current" with working // refreshOperation and schedule a new refresh - i.refreshLock.Lock() - defer i.refreshLock.Unlock() + i.mu.Lock() + defer i.mu.Unlock() // if failed, scheduled the next refresh immediately if r.err != nil { @@ -369,8 +339,3 @@ func (i *Instance) scheduleRefresh(d time.Duration) *refreshOperation { }) return r } - -// String returns the instance's connection name. 
-func (i *Instance) String() string { - return i.ConnName.String() -} diff --git a/vendor/cloud.google.com/go/cloudsqlconn/internal/cloudsql/refresh.go b/vendor/cloud.google.com/go/cloudsqlconn/internal/cloudsql/refresh.go index 581bd14b..38e3a453 100644 --- a/vendor/cloud.google.com/go/cloudsqlconn/internal/cloudsql/refresh.go +++ b/vendor/cloud.google.com/go/cloudsqlconn/internal/cloudsql/refresh.go @@ -25,6 +25,7 @@ import ( "time" "cloud.google.com/go/cloudsqlconn/errtype" + "cloud.google.com/go/cloudsqlconn/instance" "cloud.google.com/go/cloudsqlconn/internal/trace" "golang.org/x/oauth2" sqladmin "google.golang.org/api/sqladmin/v1beta4" @@ -53,17 +54,23 @@ type metadata struct { // fetchMetadata uses the Cloud SQL Admin APIs get method to retrieve the // information about a Cloud SQL instance that is used to create secure // connections. -func fetchMetadata(ctx context.Context, client *sqladmin.Service, inst ConnName) (m metadata, err error) { +func fetchMetadata( + ctx context.Context, client *sqladmin.Service, inst instance.ConnName, +) (m metadata, err error) { + var end trace.EndSpanFunc ctx, end = trace.StartSpan(ctx, "cloud.google.com/go/cloudsqlconn/internal.FetchMetadata") defer func() { end(err) }() - db, err := client.Connect.Get(inst.project, inst.name).Context(ctx).Do() + db, err := client.Connect.Get(inst.Project(), inst.Name()).Context(ctx).Do() if err != nil { return metadata{}, errtype.NewRefreshError("failed to get instance metadata", inst.String(), err) } // validate the instance is supported for authenticated connections - if db.Region != inst.region { - msg := fmt.Sprintf("provided region was mismatched - got %s, want %s", inst.region, db.Region) + if db.Region != inst.Region() { + msg := fmt.Sprintf( + "provided region was mismatched - got %s, want %s", + inst.Region(), db.Region, + ) return metadata{}, errtype.NewConfigError(msg, inst.String()) } if db.BackendType != "SECOND_GEN" { @@ -135,7 +142,7 @@ func refreshToken(ts oauth2.TokenSource, tok *oauth2.Token) (*oauth2.Token, erro func fetchEphemeralCert( ctx context.Context, client *sqladmin.Service, - inst ConnName, + inst instance.ConnName, key *rsa.PrivateKey, ts oauth2.TokenSource, ) (c tls.Certificate, err error) { @@ -173,7 +180,9 @@ func fetchEphemeralCert( } req.AccessToken = tok.AccessToken } - resp, err := client.Connect.GenerateEphemeralCert(inst.project, inst.name, &req).Context(ctx).Do() + resp, err := client.Connect.GenerateEphemeralCert( + inst.Project(), inst.Name(), &req, + ).Context(ctx).Do() if err != nil { return tls.Certificate{}, errtype.NewRefreshError( "create ephemeral cert failed", @@ -216,7 +225,7 @@ func fetchEphemeralCert( } // createTLSConfig returns a *tls.Config for connecting securely to the Cloud SQL instance. -func createTLSConfig(inst ConnName, m metadata, cert tls.Certificate) *tls.Config { +func createTLSConfig(inst instance.ConnName, m metadata, cert tls.Certificate) *tls.Config { certs := x509.NewCertPool() certs.AddCert(m.serverCaCert) @@ -243,7 +252,7 @@ func createTLSConfig(inst ConnName, m metadata, cert tls.Certificate) *tls.Confi // our own because CloudSQL instances use the instance name (e.g., // my-project:my-instance) instead of a valid domain name for the certificate's // Common Name. 
-func genVerifyPeerCertificateFunc(cn ConnName, pool *x509.CertPool) func(rawCerts [][]byte, _ [][]*x509.Certificate) error { +func genVerifyPeerCertificateFunc(cn instance.ConnName, pool *x509.CertPool) func(rawCerts [][]byte, _ [][]*x509.Certificate) error { return func(rawCerts [][]byte, _ [][]*x509.Certificate) error { if len(rawCerts) == 0 { return errtype.NewDialError("no certificate to verify", cn.String(), nil) @@ -259,7 +268,7 @@ func genVerifyPeerCertificateFunc(cn ConnName, pool *x509.CertPool) func(rawCert return errtype.NewDialError("failed to verify certificate", cn.String(), err) } - certInstanceName := fmt.Sprintf("%s:%s", cn.project, cn.name) + certInstanceName := fmt.Sprintf("%s:%s", cn.Project(), cn.Name()) if cert.Subject.CommonName != certInstanceName { return errtype.NewDialError( fmt.Sprintf("certificate had CN %q, expected %q", @@ -306,7 +315,10 @@ type refresher struct { // performRefresh immediately performs a full refresh operation using the Cloud // SQL Admin API. -func (r refresher) performRefresh(ctx context.Context, cn ConnName, k *rsa.PrivateKey, iamAuthN bool) (rr refreshResult, err error) { +func (r refresher) performRefresh( + ctx context.Context, cn instance.ConnName, k *rsa.PrivateKey, iamAuthNDial bool, +) (rr refreshResult, err error) { + var refreshEnd trace.EndSpanFunc ctx, refreshEnd = trace.StartSpan(ctx, "cloud.google.com/go/cloudsqlconn/internal.RefreshConnection", trace.AddInstanceName(cn.String()), @@ -337,7 +349,7 @@ func (r refresher) performRefresh(ctx context.Context, cn ConnName, k *rsa.Priva go func() { defer close(ecC) var iamTS oauth2.TokenSource - if iamAuthN { + if iamAuthNDial { iamTS = r.ts } ec, err := fetchEphemeralCert(ctx, r.client, cn, k, iamTS) @@ -355,7 +367,7 @@ func (r refresher) performRefresh(ctx context.Context, cn ConnName, k *rsa.Priva case <-ctx.Done(): return rr, fmt.Errorf("refresh failed: %w", ctx.Err()) } - if iamAuthN { + if iamAuthNDial { if vErr := supportsAutoIAMAuthN(md.version); vErr != nil { return refreshResult{}, vErr } diff --git a/vendor/cloud.google.com/go/cloudsqlconn/internal/trace/trace.go b/vendor/cloud.google.com/go/cloudsqlconn/internal/trace/trace.go index dc88918d..1be78ddc 100644 --- a/vendor/cloud.google.com/go/cloudsqlconn/internal/trace/trace.go +++ b/vendor/cloud.google.com/go/cloudsqlconn/internal/trace/trace.go @@ -73,11 +73,11 @@ func StartSpan(ctx context.Context, name string, attrs ...Attribute) (context.Co func toStatus(err error) trace.Status { if err2, ok := err.(*googleapi.Error); ok { return trace.Status{Code: httpStatusCodeToOCCode(err2.Code), Message: err2.Message} - } else if s, ok := status.FromError(err); ok { + } + if s, ok := status.FromError(err); ok { return trace.Status{Code: int32(s.Code()), Message: s.Message()} - } else { - return trace.Status{Code: int32(code.Code_UNKNOWN), Message: err.Error()} } + return trace.Status{Code: int32(code.Code_UNKNOWN), Message: err.Error()} } // Reference: https://github.com/googleapis/googleapis/blob/26b634d2724ac5dd30ae0b0cbfb01f07f2e4050e/google/rpc/code.proto diff --git a/vendor/cloud.google.com/go/cloudsqlconn/options.go b/vendor/cloud.google.com/go/cloudsqlconn/options.go index 332241a5..c5645403 100644 --- a/vendor/cloud.google.com/go/cloudsqlconn/options.go +++ b/vendor/cloud.google.com/go/cloudsqlconn/options.go @@ -210,18 +210,18 @@ func WithIAMAuthN() Option { } // A DialOption is an option for configuring how a Dialer's Dial call is executed. 
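For context, a minimal usage sketch of these per-dial options against the package's public API (the instance connection name is a placeholder):

```go
package main

import (
	"context"
	"log"

	"cloud.google.com/go/cloudsqlconn"
)

func main() {
	ctx := context.Background()
	d, err := cloudsqlconn.NewDialer(ctx, cloudsqlconn.WithIAMAuthN())
	if err != nil {
		log.Fatal(err)
	}
	defer d.Close()

	// Per-dial options override the Dialer-level configuration.
	conn, err := d.Dial(ctx, "my-project:us-central1:my-instance",
		cloudsqlconn.WithPrivateIP(),
		cloudsqlconn.WithDialIAMAuthN(true),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```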
-type DialOption func(d *dialCfg) +type DialOption func(d *dialConfig) -type dialCfg struct { +type dialConfig struct { dialFunc func(ctx context.Context, network, addr string) (net.Conn, error) ipType string tcpKeepAlive time.Duration - refreshCfg cloudsql.RefreshCfg + useIAMAuthN bool } // DialOptions turns a list of DialOption instances into an DialOption. func DialOptions(opts ...DialOption) DialOption { - return func(cfg *dialCfg) { + return func(cfg *dialConfig) { for _, opt := range opts { opt(cfg) } @@ -232,35 +232,35 @@ func DialOptions(opts ...DialOption) DialOption { // individual call to Dial. To configure a dial function across all invocations // of Dial, use WithDialFunc. func WithOneOffDialFunc(dial func(ctx context.Context, network, addr string) (net.Conn, error)) DialOption { - return func(c *dialCfg) { + return func(c *dialConfig) { c.dialFunc = dial } } // WithTCPKeepAlive returns a DialOption that specifies the tcp keep alive period for the connection returned by Dial. func WithTCPKeepAlive(d time.Duration) DialOption { - return func(cfg *dialCfg) { + return func(cfg *dialConfig) { cfg.tcpKeepAlive = d } } // WithPublicIP returns a DialOption that specifies a public IP will be used to connect. func WithPublicIP() DialOption { - return func(cfg *dialCfg) { + return func(cfg *dialConfig) { cfg.ipType = cloudsql.PublicIP } } // WithPrivateIP returns a DialOption that specifies a private IP (VPC) will be used to connect. func WithPrivateIP() DialOption { - return func(cfg *dialCfg) { + return func(cfg *dialConfig) { cfg.ipType = cloudsql.PrivateIP } } // WithPSC returns a DialOption that specifies a PSC endpoint will be used to connect. func WithPSC() DialOption { - return func(cfg *dialCfg) { + return func(cfg *dialConfig) { cfg.ipType = cloudsql.PSC } } @@ -269,20 +269,20 @@ func WithPSC() DialOption { // otherwise falls back to private IP. This option is present for backwards // compatibility only and is not recommended for use in production. func WithAutoIP() DialOption { - return func(cfg *dialCfg) { + return func(cfg *dialConfig) { cfg.ipType = cloudsql.AutoIP } } // WithDialIAMAuthN allows you to enable or disable IAM Authentication for this -// instance as descibed in the documentation for WithIAMAuthN. This value will -// overide the Dialer-level configuration set with WithIAMAuthN. +// instance as described in the documentation for WithIAMAuthN. This value will +// override the Dialer-level configuration set with WithIAMAuthN. // // WARNING: This DialOption can cause a new Refresh operation to be triggered. // Toggling this option on or off between Dials may cause increased API usage // and/or delayed connection attempts. 
func WithDialIAMAuthN(b bool) DialOption { - return func(cfg *dialCfg) { - cfg.refreshCfg.UseIAMAuthN = b + return func(cfg *dialConfig) { + cfg.useIAMAuthN = b } } diff --git a/vendor/cloud.google.com/go/cloudsqlconn/postgres/pgxv4/postgres.go b/vendor/cloud.google.com/go/cloudsqlconn/postgres/pgxv4/postgres.go index 4c9f4b3f..5b27ad9a 100644 --- a/vendor/cloud.google.com/go/cloudsqlconn/postgres/pgxv4/postgres.go +++ b/vendor/cloud.google.com/go/cloudsqlconn/postgres/pgxv4/postgres.go @@ -59,30 +59,27 @@ type pgDriver struct { // // "host=my-project:us-central1:my-db-instance user=myuser password=mypass" func (p *pgDriver) Open(name string) (driver.Conn, error) { - var ( - dbURI string - ok bool - ) - - p.mu.RLock() - dbURI, ok = p.dbURIs[name] - p.mu.RUnlock() - - if ok { - return stdlib.GetDefaultDriver().Open(dbURI) + dbURI, err := p.dbURI(name) + if err != nil { + return nil, err } + return stdlib.GetDefaultDriver().Open(dbURI) + +} +// dbURI registers a driver using the provided DSN. If the name has already +// been registered, dbURI returns the existing registration. +func (p *pgDriver) dbURI(name string) (string, error) { p.mu.Lock() defer p.mu.Unlock() - // Recheck to ensure dbURI wasn't created between locks - dbURI, ok = p.dbURIs[name] + dbURI, ok := p.dbURIs[name] if ok { - return stdlib.GetDefaultDriver().Open(dbURI) + return dbURI, nil } config, err := pgx.ParseConfig(name) if err != nil { - return nil, err + return "", err } instConnName := config.Config.Host // Extract instance connection name config.Config.Host = "localhost" // Replace it with a default value @@ -93,5 +90,5 @@ func (p *pgDriver) Open(name string) (driver.Conn, error) { dbURI = stdlib.RegisterConnConfig(config) p.dbURIs[name] = dbURI - return stdlib.GetDefaultDriver().Open(dbURI) + return dbURI, nil } diff --git a/vendor/cloud.google.com/go/cloudsqlconn/version.txt b/vendor/cloud.google.com/go/cloudsqlconn/version.txt index 88c5fb89..dc1e644a 100644 --- a/vendor/cloud.google.com/go/cloudsqlconn/version.txt +++ b/vendor/cloud.google.com/go/cloudsqlconn/version.txt @@ -1 +1 @@ -1.4.0 +1.6.0 diff --git a/vendor/cloud.google.com/go/compute/internal/version.go b/vendor/cloud.google.com/go/compute/internal/version.go index e939b9f5..540ad16a 100644 --- a/vendor/cloud.google.com/go/compute/internal/version.go +++ b/vendor/cloud.google.com/go/compute/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. -const Version = "1.20.1" +const Version = "1.23.3" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md index 7ecc8f2a..5c8411cb 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md @@ -1,5 +1,175 @@ # Release History +## 1.9.1 (2023-12-11) + +### Bugs Fixed + +* The `retry-after-ms` and `x-ms-retry-after-ms` headers weren't being checked during retries. + +### Other Changes + +* Update dependencies. + +## 1.9.0 (2023-11-06) + +### Breaking Changes +> These changes affect only code written against previous beta versions of `v1.7.0` and `v1.8.0` +* The function `NewTokenCredential` has been removed from the `fake` package. Use a literal `&fake.TokenCredential{}` instead. +* The field `TracingNamespace` in `runtime.PipelineOptions` has been replaced by `TracingOptions`. 
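A hedged migration sketch for the two 1.9.0 breaking changes listed above (the namespace string is a placeholder):

```go
package main

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/fake"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

func main() {
	// Was: cred := fake.NewTokenCredential() -- removed in v1.9.0.
	cred := &fake.TokenCredential{}

	// Was: runtime.PipelineOptions{TracingNamespace: "Microsoft.Example"}.
	plOpts := runtime.PipelineOptions{
		Tracing: runtime.TracingOptions{Namespace: "Microsoft.Example"},
	}
	_, _ = cred, plOpts
}
```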
+ +### Bugs Fixed + +* Fixed an issue that could cause some allowed HTTP header values to not show up in logs. +* Include error text instead of error type in traces when the transport returns an error. +* Fixed an issue that could cause an HTTP/2 request to hang when the TCP connection becomes unresponsive. +* Block key and SAS authentication for non TLS protected endpoints. +* Passing a `nil` credential value will no longer cause a panic. Instead, the authentication is skipped. +* Calling `Error` on a zero-value `azcore.ResponseError` will no longer panic. +* Fixed an issue in `fake.PagerResponder[T]` that would cause a trailing error to be omitted when iterating over pages. +* Context values created by `azcore` will no longer flow across disjoint HTTP requests. + +### Other Changes + +* Skip generating trace info for no-op tracers. +* The `clientName` paramater in client constructors has been renamed to `moduleName`. + +## 1.9.0-beta.1 (2023-10-05) + +### Other Changes + +* The beta features for tracing and fakes have been reinstated. + +## 1.8.0 (2023-10-05) + +### Features Added + +* This includes the following features from `v1.8.0-beta.N` releases. + * Claims and CAE for authentication. + * New `messaging` package. + * Various helpers in the `runtime` package. + * Deprecation of `runtime.With*` funcs and their replacements in the `policy` package. +* Added types `KeyCredential` and `SASCredential` to the `azcore` package. + * Includes their respective constructor functions. +* Added types `KeyCredentialPolicy` and `SASCredentialPolicy` to the `azcore/runtime` package. + * Includes their respective constructor functions and options types. + +### Breaking Changes +> These changes affect only code written against beta versions of `v1.8.0` +* The beta features for tracing and fakes have been omitted for this release. + +### Bugs Fixed + +* Fixed an issue that could cause some ARM RPs to not be automatically registered. +* Block bearer token authentication for non TLS protected endpoints. + +### Other Changes + +* Updated dependencies. + +## 1.8.0-beta.3 (2023-09-07) + +### Features Added + +* Added function `FetcherForNextLink` and `FetcherForNextLinkOptions` to the `runtime` package to centralize creation of `Pager[T].Fetcher` from a next link URL. + +### Bugs Fixed + +* Suppress creating spans for nested SDK API calls. The HTTP span will be a child of the outer API span. + +### Other Changes + +* The following functions in the `runtime` package are now exposed from the `policy` package, and the `runtime` versions have been deprecated. + * `WithCaptureResponse` + * `WithHTTPHeader` + * `WithRetryOptions` + +## 1.7.2 (2023-09-06) + +### Bugs Fixed + +* Fix default HTTP transport to work in WASM modules. + +## 1.8.0-beta.2 (2023-08-14) + +### Features Added + +* Added function `SanitizePagerPollerPath` to the `server` package to centralize sanitization and formalize the contract. +* Added `TokenRequestOptions.EnableCAE` to indicate whether to request a CAE token. + +### Breaking Changes + +> This change affects only code written against beta version `v1.8.0-beta.1`. +* `messaging.CloudEvent` deserializes JSON objects as `[]byte`, instead of `json.RawMessage`. See the documentation for CloudEvent.Data for more information. + +> This change affects only code written against beta versions `v1.7.0-beta.2` and `v1.8.0-beta.1`. +* Removed parameter from method `Span.End()` and its type `tracing.SpanEndOptions`. This API GA'ed in `v1.2.0` so we cannot change it. 
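The `FetcherForNextLink` helper added in 1.8.0-beta.3 above removes most hand-written paging boilerplate. A sketch under assumed names (`widgetPage` and its JSON shape are hypothetical; the helper's actual signature appears later in this patch):

```go
package main

import (
	"context"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

// widgetPage is a hypothetical paged-response shape.
type widgetPage struct {
	Value    []string `json:"value"`
	NextLink *string  `json:"nextLink"`
}

func newWidgetPager(pl runtime.Pipeline, endpoint string) *runtime.Pager[widgetPage] {
	firstReq := func(ctx context.Context) (*policy.Request, error) {
		return runtime.NewRequest(ctx, http.MethodGet, endpoint)
	}
	return runtime.NewPager(runtime.PagingHandler[widgetPage]{
		More: func(p widgetPage) bool {
			return p.NextLink != nil && *p.NextLink != ""
		},
		Fetcher: func(ctx context.Context, cur *widgetPage) (widgetPage, error) {
			nextLink := ""
			if cur != nil {
				nextLink = *cur.NextLink
			}
			// An empty nextLink tells the helper to issue the first-page request.
			resp, err := runtime.FetcherForNextLink(ctx, pl, nextLink, firstReq, nil)
			if err != nil {
				return widgetPage{}, err
			}
			var page widgetPage
			err = runtime.UnmarshalAsJSON(resp, &page)
			return page, err
		},
	})
}

func main() {}
```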
+ +### Bugs Fixed + +* Propagate any query parameters when constructing a fake poller and/or injecting next links. + +## 1.7.1 (2023-08-14) + +## Bugs Fixed + +* Enable TLS renegotiation in the default transport policy. + +## 1.8.0-beta.1 (2023-07-12) + +### Features Added + +- `messaging/CloudEvent` allows you to serialize/deserialize CloudEvents, as described in the CloudEvents 1.0 specification: [link](https://github.com/cloudevents/spec) + +### Other Changes + +* The beta features for CAE, tracing, and fakes have been reinstated. + +## 1.7.0 (2023-07-12) + +### Features Added +* Added method `WithClientName()` to type `azcore.Client` to support shallow cloning of a client with a new name used for tracing. + +### Breaking Changes +> These changes affect only code written against beta versions v1.7.0-beta.1 or v1.7.0-beta.2 +* The beta features for CAE, tracing, and fakes have been omitted for this release. + +## 1.7.0-beta.2 (2023-06-06) + +### Breaking Changes +> These changes affect only code written against beta version v1.7.0-beta.1 +* Method `SpanFromContext()` on type `tracing.Tracer` had the `bool` return value removed. + * This includes the field `SpanFromContext` in supporting type `tracing.TracerOptions`. +* Method `AddError()` has been removed from type `tracing.Span`. +* Method `Span.End()` now requires an argument of type `*tracing.SpanEndOptions`. + +## 1.6.1 (2023-06-06) + +### Bugs Fixed +* Fixed an issue in `azcore.NewClient()` and `arm.NewClient()` that could cause an incorrect module name to be used in telemetry. + +### Other Changes +* This version contains all bug fixes from `v1.7.0-beta.1` + +## 1.7.0-beta.1 (2023-05-24) + +### Features Added +* Restored CAE support for ARM clients. +* Added supporting features to enable distributed tracing. + * Added func `runtime.StartSpan()` for use by SDKs to start spans. + * Added method `WithContext()` to `runtime.Request` to support shallow cloning with a new context. + * Added field `TracingNamespace` to `runtime.PipelineOptions`. + * Added field `Tracer` to `runtime.NewPollerOptions` and `runtime.NewPollerFromResumeTokenOptions` types. + * Added field `SpanFromContext` to `tracing.TracerOptions`. + * Added methods `Enabled()`, `SetAttributes()`, and `SpanFromContext()` to `tracing.Tracer`. + * Added supporting pipeline policies to include HTTP spans when creating clients. +* Added package `fake` to support generated fakes packages in SDKs. + * The package contains public surface area exposed by fake servers and supporting APIs intended only for use by the fake server implementations. + * Added an internal fake poller implementation. + +### Bugs Fixed +* Retry policy always clones the underlying `*http.Request` before invoking the next policy. +* Added some non-standard error codes to the list of error codes for unregistered resource providers. + ## 1.6.0 (2023-05-04) ### Features Added diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go index 72c2cf21..8eef8633 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go @@ -22,6 +22,24 @@ type AccessToken = exported.AccessToken // TokenCredential represents a credential capable of providing an OAuth token. type TokenCredential = exported.TokenCredential +// KeyCredential contains an authentication key used to authenticate to an Azure service. 
+type KeyCredential = exported.KeyCredential + +// NewKeyCredential creates a new instance of [KeyCredential] with the specified values. +// - key is the authentication key +func NewKeyCredential(key string) *KeyCredential { + return exported.NewKeyCredential(key) +} + +// SASCredential contains a shared access signature used to authenticate to an Azure service. +type SASCredential = exported.SASCredential + +// NewSASCredential creates a new instance of [SASCredential] with the specified values. +// - sas is the shared access signature +func NewSASCredential(sas string) *SASCredential { + return exported.NewSASCredential(sas) +} + // holds sentinel values used to send nulls var nullables map[reflect.Type]interface{} = map[reflect.Type]interface{}{} @@ -66,26 +84,28 @@ func IsNullValue[T any](v T) bool { return false } -// ClientOptions contains configuration settings for a client's pipeline. +// ClientOptions contains optional settings for a client's pipeline. +// Instances can be shared across calls to SDK client constructors when uniform configuration is desired. +// Zero-value fields will have their specified default values applied during use. type ClientOptions = policy.ClientOptions // Client is a basic HTTP client. It consists of a pipeline and tracing provider. type Client struct { pl runtime.Pipeline tr tracing.Tracer + + // cached on the client to support shallow copying with new values + tp tracing.Provider + modVer string + namespace string } // NewClient creates a new Client instance with the provided values. -// - clientName - the fully qualified name of the client ("package.Client"); this is used by the tracing provider when creating spans -// - moduleVersion - the semantic version of the containing module; used by the telemetry policy +// - moduleName - the fully qualified name of the module where the client is defined; used by the telemetry policy and tracing provider. +// - moduleVersion - the semantic version of the module; used by the telemetry policy and tracing provider. // - plOpts - pipeline configuration options; can be the zero-value // - options - optional client configurations; pass nil to accept the default values -func NewClient(clientName, moduleVersion string, plOpts runtime.PipelineOptions, options *ClientOptions) (*Client, error) { - pkg, err := shared.ExtractPackageName(clientName) - if err != nil { - return nil, err - } - +func NewClient(moduleName, moduleVersion string, plOpts runtime.PipelineOptions, options *ClientOptions) (*Client, error) { if options == nil { options = &ClientOptions{} } @@ -96,10 +116,20 @@ func NewClient(clientName, moduleVersion string, plOpts runtime.PipelineOptions, } } - pl := runtime.NewPipeline(pkg, moduleVersion, plOpts, options) + pl := runtime.NewPipeline(moduleName, moduleVersion, plOpts, options) + + tr := options.TracingProvider.NewTracer(moduleName, moduleVersion) + if tr.Enabled() && plOpts.Tracing.Namespace != "" { + tr.SetAttributes(tracing.Attribute{Key: shared.TracingNamespaceAttrName, Value: plOpts.Tracing.Namespace}) + } - tr := options.TracingProvider.NewTracer(clientName, moduleVersion) - return &Client{pl: pl, tr: tr}, nil + return &Client{ + pl: pl, + tr: tr, + tp: options.TracingProvider, + modVer: moduleVersion, + namespace: plOpts.Tracing.Namespace, + }, nil } // Pipeline returns the pipeline for this client. 
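A minimal sketch of the new constructor contract: `moduleName` is now a module path rather than the old `"package.Client"` form, and the tracing namespace arrives through `PipelineOptions.Tracing`. The module path and namespace below are placeholders:

```go
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

func main() {
	client, err := azcore.NewClient(
		"github.com/example/azexample", // moduleName
		"v0.1.0",                       // moduleVersion
		runtime.PipelineOptions{
			Tracing: runtime.TracingOptions{Namespace: "Microsoft.Example"},
		},
		nil, // accept default ClientOptions
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = client.Pipeline()
}
```

The removal of `ExtractPackageName` later in this patch is the other half of this rename: the constructor no longer needs to split a `"package.Client"` string.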
@@ -111,3 +141,14 @@ func (c *Client) Pipeline() runtime.Pipeline { func (c *Client) Tracer() tracing.Tracer { return c.tr } + +// WithClientName returns a shallow copy of the Client with its tracing client name changed to clientName. +// Note that the values for module name and version will be preserved from the source Client. +// - clientName - the fully qualified name of the client ("package.Client"); this is used by the tracing provider when creating spans +func (c *Client) WithClientName(clientName string) *Client { + tr := c.tp.NewTracer(clientName, c.modVer) + if tr.Enabled() && c.namespace != "" { + tr.SetAttributes(tracing.Attribute{Key: shared.TracingNamespaceAttrName, Value: c.namespace}) + } + return &Client{pl: c.pl, tr: tr, tp: c.tp, modVer: c.modVer, namespace: c.namespace} +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go index 28c64678..654a5f40 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go @@ -253,5 +253,12 @@ When resuming a poller, no IO is performed, and zero-value arguments can be used Resume tokens are unique per service client and operation. Attempting to resume a poller for LRO BeginB() with a token from LRO BeginA() will result in an error. + +# Fakes + +The fake package contains types used for constructing in-memory fake servers used in unit tests. +This allows writing tests to cover various success/error conditions without the need for connecting to a live service. + +Please see https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/samples/fakes for details and examples on how to use fakes. */ package azcore diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go index a1236b36..f2b296b6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go @@ -8,8 +8,11 @@ package exported import ( "context" + "encoding/base64" + "fmt" "io" "net/http" + "sync/atomic" "time" ) @@ -51,6 +54,17 @@ type AccessToken struct { // TokenRequestOptions contain specific parameter that may be used by credentials types when attempting to get a token. // Exported as policy.TokenRequestOptions. type TokenRequestOptions struct { + // Claims are any additional claims required for the token to satisfy a conditional access policy, such as a + // service may return in a claims challenge following an authorization failure. If a service returned the + // claims value base64 encoded, it must be decoded before setting this field. + Claims string + + // EnableCAE indicates whether to enable Continuous Access Evaluation (CAE) for the requested token. When true, + // azidentity credentials request CAE tokens for resource APIs supporting CAE. Clients are responsible for + // handling CAE challenges. If a client that doesn't handle CAE challenges receives a CAE token, it may end up + // in a loop retrying an API call with a token that has been revoked due to CAE. + EnableCAE bool + // Scopes contains the list of permission scopes required for the token. Scopes []string @@ -65,3 +79,97 @@ type TokenCredential interface { // GetToken requests an access token for the specified set of scopes. 
GetToken(ctx context.Context, options TokenRequestOptions) (AccessToken, error) } + +// DecodeByteArray will base-64 decode the provided string into v. +// Exported as runtime.DecodeByteArray() +func DecodeByteArray(s string, v *[]byte, format Base64Encoding) error { + if len(s) == 0 { + return nil + } + payload := string(s) + if payload[0] == '"' { + // remove surrounding quotes + payload = payload[1 : len(payload)-1] + } + switch format { + case Base64StdFormat: + decoded, err := base64.StdEncoding.DecodeString(payload) + if err == nil { + *v = decoded + return nil + } + return err + case Base64URLFormat: + // use raw encoding as URL format should not contain any '=' characters + decoded, err := base64.RawURLEncoding.DecodeString(payload) + if err == nil { + *v = decoded + return nil + } + return err + default: + return fmt.Errorf("unrecognized byte array format: %d", format) + } +} + +// KeyCredential contains an authentication key used to authenticate to an Azure service. +// Exported as azcore.KeyCredential. +type KeyCredential struct { + cred *keyCredential +} + +// NewKeyCredential creates a new instance of [KeyCredential] with the specified values. +// - key is the authentication key +func NewKeyCredential(key string) *KeyCredential { + return &KeyCredential{cred: newKeyCredential(key)} +} + +// Update replaces the existing key with the specified value. +func (k *KeyCredential) Update(key string) { + k.cred.Update(key) +} + +// SASCredential contains a shared access signature used to authenticate to an Azure service. +// Exported as azcore.SASCredential. +type SASCredential struct { + cred *keyCredential +} + +// NewSASCredential creates a new instance of [SASCredential] with the specified values. +// - sas is the shared access signature +func NewSASCredential(sas string) *SASCredential { + return &SASCredential{cred: newKeyCredential(sas)} +} + +// Update replaces the existing shared access signature with the specified value. +func (k *SASCredential) Update(sas string) { + k.cred.Update(sas) +} + +// KeyCredentialGet returns the key for cred. +func KeyCredentialGet(cred *KeyCredential) string { + return cred.cred.Get() +} + +// SASCredentialGet returns the shared access sig for cred. 
+func SASCredentialGet(cred *SASCredential) string { + return cred.cred.Get() +} + +type keyCredential struct { + key atomic.Value // string +} + +func newKeyCredential(key string) *keyCredential { + keyCred := keyCredential{} + keyCred.key.Store(key) + return &keyCred +} + +func (k *keyCredential) Get() string { + return k.key.Load().(string) +} + +func (k *keyCredential) Update(key string) { + k.key.Store(key) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go index c44efd6e..e45f831e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go @@ -8,10 +8,7 @@ package exported import ( "errors" - "fmt" "net/http" - - "golang.org/x/net/http/httpguts" ) // Policy represents an extensibility point for the Pipeline that can mutate the specified @@ -75,23 +72,6 @@ func (p Pipeline) Do(req *Request) (*http.Response, error) { if req == nil { return nil, errors.New("request cannot be nil") } - // check copied from Transport.roundTrip() - for k, vv := range req.Raw().Header { - if !httpguts.ValidHeaderFieldName(k) { - if req.Raw().Body != nil { - req.Raw().Body.Close() - } - return nil, fmt.Errorf("invalid header field name %q", k) - } - for _, v := range vv { - if !httpguts.ValidHeaderFieldValue(v) { - if req.Raw().Body != nil { - req.Raw().Body.Close() - } - return nil, fmt.Errorf("invalid header field value %q for key %v", v, k) - } - } - } req.policies = p.policies return req.Next() } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go index fa99d1b7..659f2a7d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go @@ -8,6 +8,7 @@ package exported import ( "context" + "encoding/base64" "errors" "fmt" "io" @@ -18,6 +19,28 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" ) +// Base64Encoding is usesd to specify which base-64 encoder/decoder to use when +// encoding/decoding a slice of bytes to/from a string. +// Exported as runtime.Base64Encoding +type Base64Encoding int + +const ( + // Base64StdFormat uses base64.StdEncoding for encoding and decoding payloads. + Base64StdFormat Base64Encoding = 0 + + // Base64URLFormat uses base64.RawURLEncoding for encoding and decoding payloads. + Base64URLFormat Base64Encoding = 1 +) + +// EncodeByteArray will base-64 encode the byte slice v. +// Exported as runtime.EncodeByteArray() +func EncodeByteArray(v []byte, format Base64Encoding) string { + if format == Base64URLFormat { + return base64.RawURLEncoding.EncodeToString(v) + } + return base64.StdEncoding.EncodeToString(v) +} + // Request is an abstraction over the creation of an HTTP request as it passes through the pipeline. // Don't use this type directly, use NewRequest() instead. // Exported as policy.Request. @@ -170,6 +193,14 @@ func (req *Request) Clone(ctx context.Context) *Request { return &r2 } +// WithContext returns a shallow copy of the request with its context changed to ctx. 
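For illustration, rotating one of the new credential types in place (the environment variable and rotated value are placeholders):

```go
package main

import (
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
)

func main() {
	cred := azcore.NewKeyCredential(os.Getenv("SERVICE_KEY"))
	// ...hand cred to a client constructor accepting *azcore.KeyCredential...

	// Later: rotate without rebuilding the client. Update is safe for
	// concurrent use because the key lives in an atomic.Value.
	cred.Update("rotated-key")
}
```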
+func (req *Request) WithContext(ctx context.Context) *Request { + r2 := new(Request) + *r2 = *req + r2.req = r2.req.WithContext(ctx) + return r2 +} + // not exported but dependent on Request // PolicyFunc is a type that implements the Policy interface. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go index 7df2f88c..f2435528 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go @@ -13,6 +13,7 @@ import ( "net/http" "regexp" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" "github.com/Azure/azure-sdk-for-go/sdk/internal/exported" ) @@ -25,7 +26,7 @@ func NewResponseError(resp *http.Response) error { } // prefer the error code in the response header - if ec := resp.Header.Get("x-ms-error-code"); ec != "" { + if ec := resp.Header.Get(shared.HeaderXMSErrorCode); ec != "" { respErr.ErrorCode = ec return respErr } @@ -112,33 +113,45 @@ type ResponseError struct { // Error implements the error interface for type ResponseError. // Note that the message contents are not contractual and can change over time. func (e *ResponseError) Error() string { + const separator = "--------------------------------------------------------------------------------" // write the request method and URL with response status code msg := &bytes.Buffer{} - fmt.Fprintf(msg, "%s %s://%s%s\n", e.RawResponse.Request.Method, e.RawResponse.Request.URL.Scheme, e.RawResponse.Request.URL.Host, e.RawResponse.Request.URL.Path) - fmt.Fprintln(msg, "--------------------------------------------------------------------------------") - fmt.Fprintf(msg, "RESPONSE %d: %s\n", e.RawResponse.StatusCode, e.RawResponse.Status) + if e.RawResponse != nil { + if e.RawResponse.Request != nil { + fmt.Fprintf(msg, "%s %s://%s%s\n", e.RawResponse.Request.Method, e.RawResponse.Request.URL.Scheme, e.RawResponse.Request.URL.Host, e.RawResponse.Request.URL.Path) + } else { + fmt.Fprintln(msg, "Request information not available") + } + fmt.Fprintln(msg, separator) + fmt.Fprintf(msg, "RESPONSE %d: %s\n", e.RawResponse.StatusCode, e.RawResponse.Status) + } else { + fmt.Fprintln(msg, "Missing RawResponse") + fmt.Fprintln(msg, separator) + } if e.ErrorCode != "" { fmt.Fprintf(msg, "ERROR CODE: %s\n", e.ErrorCode) } else { fmt.Fprintln(msg, "ERROR CODE UNAVAILABLE") } - fmt.Fprintln(msg, "--------------------------------------------------------------------------------") - body, err := exported.Payload(e.RawResponse, nil) - if err != nil { - // this really shouldn't fail at this point as the response - // body is already cached (it was read in NewResponseError) - fmt.Fprintf(msg, "Error reading response body: %v", err) - } else if len(body) > 0 { - if err := json.Indent(msg, body, "", " "); err != nil { - // failed to pretty-print so just dump it verbatim - fmt.Fprint(msg, string(body)) + if e.RawResponse != nil { + fmt.Fprintln(msg, separator) + body, err := exported.Payload(e.RawResponse, nil) + if err != nil { + // this really shouldn't fail at this point as the response + // body is already cached (it was read in NewResponseError) + fmt.Fprintf(msg, "Error reading response body: %v", err) + } else if len(body) > 0 { + if err := json.Indent(msg, body, "", " "); err != nil { + // failed to pretty-print so just dump it verbatim + fmt.Fprint(msg, string(body)) + } + 
// the standard library doesn't have a pretty-printer for XML + fmt.Fprintln(msg) + } else { + fmt.Fprintln(msg, "Response contained no body") } - // the standard library doesn't have a pretty-printer for XML - fmt.Fprintln(msg) - } else { - fmt.Fprintln(msg, "Response contained no body") } - fmt.Fprintln(msg, "--------------------------------------------------------------------------------") + fmt.Fprintln(msg, separator) return msg.String() } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go new file mode 100644 index 00000000..25983471 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go @@ -0,0 +1,133 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package fake + +import ( + "context" + "errors" + "fmt" + "net/http" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" +) + +// Applicable returns true if the LRO is a fake. +func Applicable(resp *http.Response) bool { + return resp.Header.Get(shared.HeaderFakePollerStatus) != "" +} + +// CanResume returns true if the token can rehydrate this poller type. +func CanResume(token map[string]interface{}) bool { + _, ok := token["fakeURL"] + return ok +} + +// Poller is an LRO poller that uses the Core-Fake-Poller pattern. +type Poller[T any] struct { + pl exported.Pipeline + + resp *http.Response + + // The API name from CtxAPINameKey + APIName string `json:"apiName"` + + // The URL from Core-Fake-Poller header. + FakeURL string `json:"fakeURL"` + + // The LRO's current state. + FakeStatus string `json:"status"` +} + +// lroStatusURLSuffix is the URL path suffix for a faked LRO. +const lroStatusURLSuffix = "/get/fake/status" + +// New creates a new Poller from the provided initial response. +// Pass nil for response to create an empty Poller for rehydration. +func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) { + if resp == nil { + log.Write(log.EventLRO, "Resuming Core-Fake-Poller poller.") + return &Poller[T]{pl: pl}, nil + } + + log.Write(log.EventLRO, "Using Core-Fake-Poller poller.") + fakeStatus := resp.Header.Get(shared.HeaderFakePollerStatus) + if fakeStatus == "" { + return nil, errors.New("response is missing Fake-Poller-Status header") + } + + ctxVal := resp.Request.Context().Value(shared.CtxAPINameKey{}) + if ctxVal == nil { + return nil, errors.New("missing value for CtxAPINameKey") + } + + apiName, ok := ctxVal.(string) + if !ok { + return nil, fmt.Errorf("expected string for CtxAPINameKey, the type was %T", ctxVal) + } + + qp := "" + if resp.Request.URL.RawQuery != "" { + qp = "?" + resp.Request.URL.RawQuery + } + + p := &Poller[T]{ + pl: pl, + resp: resp, + APIName: apiName, + // NOTE: any changes to this path format MUST be reflected in SanitizePollerPath() + FakeURL: fmt.Sprintf("%s://%s%s%s%s", resp.Request.URL.Scheme, resp.Request.URL.Host, resp.Request.URL.Path, lroStatusURLSuffix, qp), + FakeStatus: fakeStatus, + } + return p, nil +} + +// Done returns true if the LRO is in a terminal state. 
+func (p *Poller[T]) Done() bool { + return poller.IsTerminalState(p.FakeStatus) +} + +// Poll retrieves the current state of the LRO. +func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + ctx = context.WithValue(ctx, shared.CtxAPINameKey{}, p.APIName) + err := pollers.PollHelper(ctx, p.FakeURL, p.pl, func(resp *http.Response) (string, error) { + if !poller.StatusCodeValid(resp) { + p.resp = resp + return "", exported.NewResponseError(resp) + } + fakeStatus := resp.Header.Get(shared.HeaderFakePollerStatus) + if fakeStatus == "" { + return "", errors.New("response is missing Fake-Poller-Status header") + } + p.resp = resp + p.FakeStatus = fakeStatus + return p.FakeStatus, nil + }) + if err != nil { + return nil, err + } + return p.resp, nil +} + +func (p *Poller[T]) Result(ctx context.Context, out *T) error { + if p.resp.StatusCode == http.StatusNoContent { + return nil + } else if poller.Failed(p.FakeStatus) { + return exported.NewResponseError(p.resp) + } + + return pollers.ResultHelper(p.resp, poller.Failed(p.FakeStatus), out) +} + +// SanitizePollerPath removes any fake-appended suffix from a URL's path. +func SanitizePollerPath(path string) string { + return strings.TrimSuffix(path, lroStatusURLSuffix) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go index 681167bc..bb93daee 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go @@ -7,8 +7,9 @@ package shared const ( - ContentTypeAppJSON = "application/json" - ContentTypeAppXML = "application/xml" + ContentTypeAppJSON = "application/json" + ContentTypeAppXML = "application/xml" + ContentTypeTextPlain = "text/plain" ) const ( @@ -17,20 +18,27 @@ const ( HeaderAzureAsync = "Azure-AsyncOperation" HeaderContentLength = "Content-Length" HeaderContentType = "Content-Type" + HeaderFakePollerStatus = "Fake-Poller-Status" HeaderLocation = "Location" HeaderOperationLocation = "Operation-Location" HeaderRetryAfter = "Retry-After" + HeaderRetryAfterMS = "Retry-After-Ms" HeaderUserAgent = "User-Agent" HeaderWWWAuthenticate = "WWW-Authenticate" HeaderXMSClientRequestID = "x-ms-client-request-id" + HeaderXMSRequestID = "x-ms-request-id" + HeaderXMSErrorCode = "x-ms-error-code" + HeaderXMSRetryAfterMS = "x-ms-retry-after-ms" ) const BearerTokenPrefix = "Bearer " +const TracingNamespaceAttrName = "az.namespace" + const ( // Module is the name of the calling module used in telemetry data. Module = "azcore" // Version is the semantic version (see http://semver.org) of this module. - Version = "v1.6.0" + Version = "v1.9.1" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go index 930ab8c8..d3da2c5f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go @@ -13,18 +13,26 @@ import ( "reflect" "regexp" "strconv" - "strings" "time" ) +// NOTE: when adding a new context key type, it likely needs to be +// added to the deny-list of key types in ContextWithDeniedValues + // CtxWithHTTPHeaderKey is used as a context key for adding/retrieving http.Header. 
type CtxWithHTTPHeaderKey struct{} // CtxWithRetryOptionsKey is used as a context key for adding/retrieving RetryOptions. type CtxWithRetryOptionsKey struct{} -// CtxIncludeResponseKey is used as a context key for retrieving the raw response. -type CtxIncludeResponseKey struct{} +// CtxWithCaptureResponse is used as a context key for retrieving the raw response. +type CtxWithCaptureResponse struct{} + +// CtxWithTracingTracer is used as a context key for adding/retrieving tracing.Tracer. +type CtxWithTracingTracer struct{} + +// CtxAPINameKey is used as a context key for adding/retrieving the API name. +type CtxAPINameKey struct{} // Delay waits for the duration to elapse or the context to be cancelled. func Delay(ctx context.Context, delay time.Duration) error { @@ -36,22 +44,64 @@ func Delay(ctx context.Context, delay time.Duration) error { } } -// RetryAfter returns non-zero if the response contains a Retry-After header value. +// RetryAfter returns non-zero if the response contains one of the headers with a "retry after" value. +// Headers are checked in the following order: retry-after-ms, x-ms-retry-after-ms, retry-after func RetryAfter(resp *http.Response) time.Duration { if resp == nil { return 0 } - ra := resp.Header.Get(HeaderRetryAfter) - if ra == "" { - return 0 + + type retryData struct { + header string + units time.Duration + + // custom is used when the regular algorithm failed and is optional. + // the returned duration is used verbatim (units is not applied). + custom func(string) time.Duration } - // retry-after values are expressed in either number of - // seconds or an HTTP-date indicating when to try again - if retryAfter, _ := strconv.Atoi(ra); retryAfter > 0 { - return time.Duration(retryAfter) * time.Second - } else if t, err := time.Parse(time.RFC1123, ra); err == nil { - return time.Until(t) + + nop := func(string) time.Duration { return 0 } + + // the headers are listed in order of preference + retries := []retryData{ + { + header: HeaderRetryAfterMS, + units: time.Millisecond, + custom: nop, + }, + { + header: HeaderXMSRetryAfterMS, + units: time.Millisecond, + custom: nop, + }, + { + header: HeaderRetryAfter, + units: time.Second, + + // retry-after values are expressed in either number of + // seconds or an HTTP-date indicating when to try again + custom: func(ra string) time.Duration { + t, err := time.Parse(time.RFC1123, ra) + if err != nil { + return 0 + } + return time.Until(t) + }, + }, + } + + for _, retry := range retries { + v := resp.Header.Get(retry.header) + if v == "" { + continue + } + if retryAfter, _ := strconv.Atoi(v); retryAfter > 0 { + return time.Duration(retryAfter) * retry.units + } else if d := retry.custom(v); d > 0 { + return d + } } + return 0 } @@ -79,14 +129,21 @@ func ValidateModVer(moduleVersion string) error { return nil } -// ExtractPackageName returns "package" from "package.Client". -// If clientName is malformed, an error is returned. -func ExtractPackageName(clientName string) (string, error) { - pkg, client, ok := strings.Cut(clientName, ".") - if !ok { - return "", fmt.Errorf("missing . in clientName %s", clientName) - } else if pkg == "" || client == "" { - return "", fmt.Errorf("malformed clientName %s", clientName) +// ContextWithDeniedValues wraps an existing [context.Context], denying access to certain context values. +// Pipeline policies that create new requests to be sent down their own pipeline MUST wrap the caller's +// context with an instance of this type. 
This is to prevent context values from flowing across disjoint +// requests which can have unintended side-effects. +type ContextWithDeniedValues struct { + context.Context +} + +// Value implements part of the [context.Context] interface. +// It acts as a deny-list for certain context keys. +func (c *ContextWithDeniedValues) Value(key any) any { + switch key.(type) { + case CtxAPINameKey, CtxWithCaptureResponse, CtxWithHTTPHeaderKey, CtxWithRetryOptionsKey, CtxWithTracingTracer: + return nil + default: + return c.Context.Value(key) } - return pkg, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go index b2000478..d934f1dc 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go @@ -7,11 +7,13 @@ package policy import ( + "context" "net/http" "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" ) @@ -27,9 +29,11 @@ type Transporter = exported.Transporter type Request = exported.Request // ClientOptions contains optional settings for a client's pipeline. -// All zero-value fields will be initialized with default values. +// Instances can be shared across calls to SDK client constructors when uniform configuration is desired. +// Zero-value fields will have their specified default values applied during use. type ClientOptions struct { - // APIVersion overrides the default version requested of the service. Set with caution as this package version has not been tested with arbitrary service versions. + // APIVersion overrides the default version requested of the service. + // Set with caution as this package version has not been tested with arbitrary service versions. APIVersion string // Cloud specifies a cloud for the client. The default is Azure Public Cloud. @@ -162,3 +166,22 @@ type AuthorizationHandler struct { // the policy will return any 401 response to the client. OnChallenge func(*Request, *http.Response, func(TokenRequestOptions) error) error } + +// WithCaptureResponse applies the HTTP response retrieval annotation to the parent context. +// The resp parameter will contain the HTTP response after the request has completed. +func WithCaptureResponse(parent context.Context, resp **http.Response) context.Context { + return context.WithValue(parent, shared.CtxWithCaptureResponse{}, resp) +} + +// WithHTTPHeader adds the specified http.Header to the parent context. +// Use this to specify custom HTTP headers at the API-call level. +// Any overlapping headers will have their values replaced with the values specified here. +func WithHTTPHeader(parent context.Context, header http.Header) context.Context { + return context.WithValue(parent, shared.CtxWithHTTPHeaderKey{}, header) +} + +// WithRetryOptions adds the specified RetryOptions to the parent context. +// Use this to specify custom RetryOptions at the API-call level. 
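Taken together, the relocated per-call helpers compose on a single context; the deprecated `runtime.With*` functions now simply forward here. A small sketch (header name and retry count are arbitrary):

```go
package main

import (
	"context"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)

func main() {
	var raw *http.Response
	ctx := policy.WithCaptureResponse(context.Background(), &raw)
	ctx = policy.WithHTTPHeader(ctx, http.Header{"X-Example": []string{"1"}})
	ctx = policy.WithRetryOptions(ctx, policy.RetryOptions{MaxRetries: 2})
	_ = ctx // pass ctx into any client method; raw then holds the last response
}
```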
+func WithRetryOptions(parent context.Context, options RetryOptions) context.Context { + return context.WithValue(parent, shared.CtxWithRetryOptionsKey{}, options) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go index 5507665d..cffe692d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go @@ -10,6 +10,12 @@ import ( "context" "encoding/json" "errors" + "fmt" + "net/http" + "reflect" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" ) // PagingHandler contains the required data for constructing a Pager. @@ -20,12 +26,16 @@ type PagingHandler[T any] struct { // Fetcher fetches the first and subsequent pages. Fetcher func(context.Context, *T) (T, error) + + // Tracer contains the Tracer from the client that's creating the Pager. + Tracer tracing.Tracer } // Pager provides operations for iterating over paged responses. type Pager[T any] struct { current *T handler PagingHandler[T] + tracer tracing.Tracer firstPage bool } @@ -34,6 +44,7 @@ type Pager[T any] struct { func NewPager[T any](handler PagingHandler[T]) *Pager[T] { return &Pager[T]{ handler: handler, + tracer: handler.Tracer, firstPage: true, } } @@ -48,8 +59,6 @@ func (p *Pager[T]) More() bool { // NextPage advances the pager to the next page. func (p *Pager[T]) NextPage(ctx context.Context) (T, error) { - var resp T - var err error if p.current != nil { if p.firstPage { // we get here if it's an LRO-pager, we already have the first page @@ -58,12 +67,16 @@ func (p *Pager[T]) NextPage(ctx context.Context) (T, error) { } else if !p.handler.More(*p.current) { return *new(T), errors.New("no more pages") } - resp, err = p.handler.Fetcher(ctx, p.current) } else { // non-LRO case, first page p.firstPage = false - resp, err = p.handler.Fetcher(ctx, nil) } + + var err error + ctx, endSpan := StartSpan(ctx, fmt.Sprintf("%s.NextPage", shortenTypeName(reflect.TypeOf(*p).Name())), p.tracer, nil) + defer func() { endSpan(err) }() + + resp, err := p.handler.Fetcher(ctx, p.current) if err != nil { return *new(T), err } @@ -75,3 +88,41 @@ func (p *Pager[T]) NextPage(ctx context.Context) (T, error) { func (p *Pager[T]) UnmarshalJSON(data []byte) error { return json.Unmarshal(data, &p.current) } + +// FetcherForNextLinkOptions contains the optional values for [FetcherForNextLink]. +type FetcherForNextLinkOptions struct { + // NextReq is the func to be called when requesting subsequent pages. + // Used for paged operations that have a custom next link operation. + NextReq func(context.Context, string) (*policy.Request, error) +} + +// FetcherForNextLink is a helper containing boilerplate code to simplify creating a PagingHandler[T].Fetcher from a next link URL. +// - ctx is the [context.Context] controlling the lifetime of the HTTP operation +// - pl is the [Pipeline] used to dispatch the HTTP request +// - nextLink is the URL used to fetch the next page. 
the empty string indicates the first page is to be requested +// - firstReq is the func to be called when creating the request for the first page +// - options contains any optional parameters, pass nil to accept the default values +func FetcherForNextLink(ctx context.Context, pl Pipeline, nextLink string, firstReq func(context.Context) (*policy.Request, error), options *FetcherForNextLinkOptions) (*http.Response, error) { + var req *policy.Request + var err error + if nextLink == "" { + req, err = firstReq(ctx) + } else if nextLink, err = EncodeQueryParams(nextLink); err == nil { + if options != nil && options.NextReq != nil { + req, err = options.NextReq(ctx, nextLink) + } else { + req, err = NewRequest(ctx, http.MethodGet, nextLink) + } + } + if err != nil { + return nil, err + } + resp, err := pl.Do(req) + if err != nil { + return nil, err + } + if !HasStatusCode(resp, http.StatusOK) { + return nil, NewResponseError(resp) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go index 9d9288f5..6b1f5c08 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go @@ -13,9 +13,35 @@ import ( // PipelineOptions contains Pipeline options for SDK developers type PipelineOptions struct { - AllowedHeaders, AllowedQueryParameters []string - APIVersion APIVersionOptions - PerCall, PerRetry []policy.Policy + // AllowedHeaders is the slice of headers to log with their values intact. + // All headers not in the slice will have their values REDACTED. + // Applies to request and response headers. + AllowedHeaders []string + + // AllowedQueryParameters is the slice of query parameters to log with their values intact. + // All query parameters not in the slice will have their values REDACTED. + AllowedQueryParameters []string + + // APIVersion overrides the default version requested of the service. + // Set with caution as this package version has not been tested with arbitrary service versions. + APIVersion APIVersionOptions + + // PerCall contains custom policies to inject into the pipeline. + // Each policy is executed once per request. + PerCall []policy.Policy + + // PerRetry contains custom policies to inject into the pipeline. + // Each policy is executed once per request, and for each retry of that request. + PerRetry []policy.Policy + + // Tracing contains options used to configure distributed tracing. + Tracing TracingOptions +} + +// TracingOptions contains tracing options for SDK developers. +type TracingOptions struct { + // Namespace contains the value to use for the az.namespace span attribute. + Namespace string } // Pipeline represents a primitive for sending HTTP requests and receiving responses. @@ -56,8 +82,10 @@ func NewPipeline(module, version string, plOpts PipelineOptions, options *policy policies = append(policies, NewRetryPolicy(&cp.Retry)) policies = append(policies, plOpts.PerRetry...) policies = append(policies, cp.PerRetryPolicies...) 
+ policies = append(policies, exported.PolicyFunc(httpHeaderPolicy)) + policies = append(policies, newHTTPTracePolicy(cp.Logging.AllowedQueryParams)) policies = append(policies, NewLogPolicy(&cp.Logging)) - policies = append(policies, exported.PolicyFunc(httpHeaderPolicy), exported.PolicyFunc(bodyDownloadPolicy)) + policies = append(policies, exported.PolicyFunc(bodyDownloadPolicy)) transport := cp.Transport if transport == nil { transport = defaultHTTPClient diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go index b61e4c12..f0f28035 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go @@ -6,6 +6,7 @@ package runtime import ( "errors" "net/http" + "strings" "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" @@ -34,7 +35,7 @@ type acquiringResourceState struct { // acquire acquires or updates the resource; only one // thread/goroutine at a time ever calls this function func acquire(state acquiringResourceState) (newResource exported.AccessToken, newExpiration time.Time, err error) { - tk, err := state.p.cred.GetToken(state.req.Raw().Context(), state.tro) + tk, err := state.p.cred.GetToken(&shared.ContextWithDeniedValues{Context: state.req.Raw().Context()}, state.tro) if err != nil { return exported.AccessToken{}, time.Time{}, err } @@ -72,6 +73,17 @@ func (b *BearerTokenPolicy) authenticateAndAuthorize(req *policy.Request) func(p // Do authorizes a request with a bearer token func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) { + // skip adding the authorization header if no TokenCredential was provided. + // this prevents a panic that might be hard to diagnose and allows testing + // against http endpoints that don't require authentication. 
+ if b.cred == nil { + return req.Next() + } + + if err := checkHTTPSForAuth(req); err != nil { + return nil, err + } + var err error if b.authzHandler.OnRequest != nil { err = b.authzHandler.OnRequest(req, b.authenticateAndAuthorize(req)) @@ -79,7 +91,7 @@ func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) { err = b.authenticateAndAuthorize(req)(policy.TokenRequestOptions{Scopes: b.scopes}) } if err != nil { - return nil, ensureNonRetriable(err) + return nil, errorinfo.NonRetriableError(err) } res, err := req.Next() @@ -95,22 +107,15 @@ func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) { } } } - return res, ensureNonRetriable(err) -} - -func ensureNonRetriable(err error) error { - var nre errorinfo.NonRetriable - if err != nil && !errors.As(err, &nre) { - err = btpError{err} + if err != nil { + err = errorinfo.NonRetriableError(err) } - return err + return res, err } -// btpError is a wrapper that ensures RetryPolicy doesn't retry requests BearerTokenPolicy couldn't authorize -type btpError struct { - error +func checkHTTPSForAuth(req *policy.Request) error { + if strings.ToLower(req.Raw().URL.Scheme) != "https" { + return errorinfo.NonRetriableError(errors.New("authenticated requests are not permitted for non TLS protected (https) endpoints")) + } + return nil } - -func (btpError) NonRetriable() {} - -var _ errorinfo.NonRetriable = (*btpError)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go index 770e0a2b..c230af0a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go @@ -34,6 +34,7 @@ func httpHeaderPolicy(req *policy.Request) (*http.Response, error) { // WithHTTPHeader adds the specified http.Header to the parent context. // Use this to specify custom HTTP headers at the API-call level. // Any overlapping headers will have their values replaced with the values specified here. +// Deprecated: use [policy.WithHTTPHeader] instead. func WithHTTPHeader(parent context.Context, header http.Header) context.Context { - return context.WithValue(parent, shared.CtxWithHTTPHeaderKey{}, header) + return policy.WithHTTPHeader(parent, header) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_trace.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_trace.go new file mode 100644 index 00000000..3df1c121 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_trace.go @@ -0,0 +1,143 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package runtime + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" +) + +const ( + attrHTTPMethod = "http.method" + attrHTTPURL = "http.url" + attrHTTPUserAgent = "http.user_agent" + attrHTTPStatusCode = "http.status_code" + + attrAZClientReqID = "az.client_request_id" + attrAZServiceReqID = "az.service_request_id" + + attrNetPeerName = "net.peer.name" +) + +// newHTTPTracePolicy creates a new instance of the httpTracePolicy. +// - allowedQueryParams contains the user-specified query parameters that don't need to be redacted from the trace +func newHTTPTracePolicy(allowedQueryParams []string) exported.Policy { + return &httpTracePolicy{allowedQP: getAllowedQueryParams(allowedQueryParams)} +} + +// httpTracePolicy is a policy that creates a trace for the HTTP request and its response +type httpTracePolicy struct { + allowedQP map[string]struct{} +} + +// Do implements the pipeline.Policy interfaces for the httpTracePolicy type. +func (h *httpTracePolicy) Do(req *policy.Request) (resp *http.Response, err error) { + rawTracer := req.Raw().Context().Value(shared.CtxWithTracingTracer{}) + if tracer, ok := rawTracer.(tracing.Tracer); ok && tracer.Enabled() { + attributes := []tracing.Attribute{ + {Key: attrHTTPMethod, Value: req.Raw().Method}, + {Key: attrHTTPURL, Value: getSanitizedURL(*req.Raw().URL, h.allowedQP)}, + {Key: attrNetPeerName, Value: req.Raw().URL.Host}, + } + + if ua := req.Raw().Header.Get(shared.HeaderUserAgent); ua != "" { + attributes = append(attributes, tracing.Attribute{Key: attrHTTPUserAgent, Value: ua}) + } + if reqID := req.Raw().Header.Get(shared.HeaderXMSClientRequestID); reqID != "" { + attributes = append(attributes, tracing.Attribute{Key: attrAZClientReqID, Value: reqID}) + } + + ctx := req.Raw().Context() + ctx, span := tracer.Start(ctx, "HTTP "+req.Raw().Method, &tracing.SpanOptions{ + Kind: tracing.SpanKindClient, + Attributes: attributes, + }) + + defer func() { + if resp != nil { + span.SetAttributes(tracing.Attribute{Key: attrHTTPStatusCode, Value: resp.StatusCode}) + if resp.StatusCode > 399 { + span.SetStatus(tracing.SpanStatusError, resp.Status) + } + if reqID := resp.Header.Get(shared.HeaderXMSRequestID); reqID != "" { + span.SetAttributes(tracing.Attribute{Key: attrAZServiceReqID, Value: reqID}) + } + } else if err != nil { + var urlErr *url.Error + if errors.As(err, &urlErr) { + // calling *url.Error.Error() will include the unsanitized URL + // which we don't want. in addition, we already have the HTTP verb + // and sanitized URL in the trace so we aren't losing any info + err = urlErr.Err + } + span.SetStatus(tracing.SpanStatusError, err.Error()) + } + span.End() + }() + + req = req.WithContext(ctx) + } + resp, err = req.Next() + return +} + +// StartSpanOptions contains the optional values for StartSpan. +type StartSpanOptions struct { + // for future expansion +} + +// StartSpan starts a new tracing span. +// You must call the returned func to terminate the span. Pass the applicable error +// if the span will exit with an error condition. +// - ctx is the parent context of the newly created context +// - name is the name of the span. 
this is typically the fully qualified name of an API ("Client.Method")
+// - tracer is the client's Tracer for creating spans
+// - options contains optional values. pass nil to accept any default values
+func StartSpan(ctx context.Context, name string, tracer tracing.Tracer, options *StartSpanOptions) (context.Context, func(error)) {
+	if !tracer.Enabled() {
+		return ctx, func(err error) {}
+	}
+
+	// we MUST propagate the active tracer before returning so that the trace policy can access it
+	ctx = context.WithValue(ctx, shared.CtxWithTracingTracer{}, tracer)
+
+	const newSpanKind = tracing.SpanKindInternal
+	if activeSpan := ctx.Value(ctxActiveSpan{}); activeSpan != nil {
+		// per the design guidelines, if an SDK method Foo() calls SDK method Bar(),
+		// then the span for Bar() must be suppressed. however, if Bar() makes a REST
+		// call, then Bar's HTTP span must be a child of Foo's span.
+		// however, there is an exception to this rule. if the SDK method Foo() is a
+		// messaging producer/consumer, and it takes a callback that's an SDK method
+		// Bar(), then the span for Bar() must _not_ be suppressed.
+		if kind := activeSpan.(tracing.SpanKind); kind == tracing.SpanKindClient || kind == tracing.SpanKindInternal {
+			return ctx, func(err error) {}
+		}
+	}
+	ctx, span := tracer.Start(ctx, name, &tracing.SpanOptions{
+		Kind: newSpanKind,
+	})
+	ctx = context.WithValue(ctx, ctxActiveSpan{}, newSpanKind)
+	return ctx, func(err error) {
+		if err != nil {
+			errType := strings.Replace(fmt.Sprintf("%T", err), "*exported.", "*azcore.", 1)
+			span.SetStatus(tracing.SpanStatusError, fmt.Sprintf("%s:\n%s", errType, err.Error()))
+		}
+		span.End()
+	}
+}
+
+// ctxActiveSpan is used as a context key for indicating an SDK client span is in progress.
+type ctxActiveSpan struct{}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go
index 4714baa3..bb00f6c2 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go
@@ -20,7 +20,7 @@ func includeResponsePolicy(req *policy.Request) (*http.Response, error) {
 	if resp == nil {
 		return resp, err
 	}
-	if httpOutRaw := req.Raw().Context().Value(shared.CtxIncludeResponseKey{}); httpOutRaw != nil {
+	if httpOutRaw := req.Raw().Context().Value(shared.CtxWithCaptureResponse{}); httpOutRaw != nil {
 		httpOut := httpOutRaw.(**http.Response)
 		*httpOut = resp
 	}
@@ -29,6 +29,7 @@
 // WithCaptureResponse applies the HTTP response retrieval annotation to the parent context.
 // The resp parameter will contain the HTTP response after the request has completed.
+// Deprecated: use [policy.WithCaptureResponse] instead.
 func WithCaptureResponse(parent context.Context, resp **http.Response) context.Context {
-	return context.WithValue(parent, shared.CtxIncludeResponseKey{}, resp)
+	return policy.WithCaptureResponse(parent, resp)
 }
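As an aside, the capture pattern this deprecation points to is worth seeing from the call site: the caller threads a **http.Response through the context and reads it after the call returns. A minimal sketch, assuming some azcore-based client method is invoked where indicated (nothing below is part of the patch):

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)

func main() {
	// rawResp will point at the last HTTP response received by any
	// azcore-based client method invoked with this context.
	var rawResp *http.Response
	ctx := policy.WithCaptureResponse(context.Background(), &rawResp)
	_ = ctx // placeholder: pass ctx to a client method here

	if rawResp != nil {
		fmt.Println("status:", rawResp.Status)
	}
}
```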
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go
new file mode 100644
index 00000000..6f577fa7
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go
@@ -0,0 +1,57 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package runtime
+
+import (
+	"net/http"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+)
+
+// KeyCredentialPolicy authorizes requests with a [azcore.KeyCredential].
+type KeyCredentialPolicy struct {
+	cred   *exported.KeyCredential
+	header string
+	prefix string
+}
+
+// KeyCredentialPolicyOptions contains the optional values configuring [KeyCredentialPolicy].
+type KeyCredentialPolicyOptions struct {
+	// Prefix is used if the key requires a prefix before it's inserted into the HTTP request.
+	Prefix string
+}
+
+// NewKeyCredentialPolicy creates a new instance of [KeyCredentialPolicy].
+// - cred is the [azcore.KeyCredential] used to authenticate with the service
+// - header is the name of the HTTP request header in which the key is placed
+// - options contains optional configuration, pass nil to accept the default values
+func NewKeyCredentialPolicy(cred *exported.KeyCredential, header string, options *KeyCredentialPolicyOptions) *KeyCredentialPolicy {
+	if options == nil {
+		options = &KeyCredentialPolicyOptions{}
+	}
+	return &KeyCredentialPolicy{
+		cred:   cred,
+		header: header,
+		prefix: options.Prefix,
+	}
+}
+
+// Do implements the Do method on the [policy.Policy] interface.
+func (k *KeyCredentialPolicy) Do(req *policy.Request) (*http.Response, error) {
+	// skip adding the authorization header if no KeyCredential was provided.
+	// this prevents a panic that might be hard to diagnose and allows testing
+	// against http endpoints that don't require authentication.
+	if k.cred != nil {
+		if err := checkHTTPSForAuth(req); err != nil {
+			return nil, err
+		}
+		val := exported.KeyCredentialGet(k.cred)
+		if k.prefix != "" {
+			val = k.prefix + val
+		}
+		req.Raw().Header.Add(k.header, val)
+	}
+	return req.Next()
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go
index 8514f57d..f048d7fb 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go
@@ -191,7 +191,8 @@ func (p *logPolicy) writeHeader(b *bytes.Buffer, header http.Header) {
 	}
 	sort.Strings(keys)
 	for _, k := range keys {
-		value := header.Get(k)
+		// don't use Get() as it will canonicalize k which might cause a mismatch
+		value := header[k][0]
 		// redact all header values not in the allow-list
 		if _, ok := p.allowedHeaders[strings.ToLower(k)]; !ok {
 			value = redactedValue
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go
index 5f52ba75..04d7bb4e 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go
@@ -59,15 +59,7 @@ func setDefaults(o *policy.RetryOptions) {
 }
 func calcDelay(o policy.RetryOptions, try int32) time.Duration { // try is >=1; never 0
-	pow := func(number int64, exponent int32) int64 { // pow is nested helper function
-		var result int64 = 1
-		for n := int32(0); n < exponent; n++ {
-			result *= number
-		}
-		return result
-	}
-
-	delay := time.Duration(pow(2, try)-1) * o.RetryDelay
+	delay := time.Duration((1<<try)-1) * o.RetryDelay
 
 	// Introduce some jitter:  [0.0, 1.0) / 2 = [0.0, 0.5) + 0.8 = [0.8, 1.3)
 	delay = time.Duration(delay.Seconds() * (rand.Float64()/2 + 0.8) * float64(time.Second)) // NOTE: We want math/rand; not crypto/rand
 	if delay > o.MaxRetryDelay {
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_sas_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_sas_credential.go
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_sas_credential.go
@@ -0,0 +1,47 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package runtime
+
+import (
+	"net/http"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+)
+
+// SASCredentialPolicy authorizes requests with a [azcore.SASCredential].
+type SASCredentialPolicy struct {
+	cred   *exported.SASCredential
+	header string
+}
+
+// SASCredentialPolicyOptions contains the optional values configuring [SASCredentialPolicy].
+type SASCredentialPolicyOptions struct {
+	// placeholder for future optional values
+}
+
+// NewSASCredentialPolicy creates a new instance of [SASCredentialPolicy].
+// - cred is the [azcore.SASCredential] used to authenticate with the service
+// - header is the name of the HTTP request header in which the shared access signature is placed
+// - options contains optional configuration, pass nil to accept the default values
+func NewSASCredentialPolicy(cred *exported.SASCredential, header string, options *SASCredentialPolicyOptions) *SASCredentialPolicy {
+	return &SASCredentialPolicy{
+		cred:   cred,
+		header: header,
+	}
+}
+
+// Do implements the Do method on the [policy.Policy] interface.
+func (k *SASCredentialPolicy) Do(req *policy.Request) (*http.Response, error) {
+	// skip adding the authorization header if no SASCredential was provided.
+	// this prevents a panic that might be hard to diagnose and allows testing
+	// against http endpoints that don't require authentication.
+	if k.cred != nil {
+		if err := checkHTTPSForAuth(req); err != nil {
+			return nil, err
+		}
+		req.Raw().Header.Add(k.header, exported.SASCredentialGet(k.cred))
+	}
+	return req.Next()
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go
@@ -38,6 +38,10 @@ func NewTelemetryPolicy(mod, ver string, o *policy.TelemetryOptions) policy.Policy {
 		b.WriteString(appID)
 		b.WriteRune(' ')
 	}
+	// mod might be the fully qualified name. in that case, we just want the package name
+	if i := strings.LastIndex(mod, "/"); i > -1 {
+		mod = mod[i+1:]
+	}
 	b.WriteString(formatTelemetry(mod, ver))
 	b.WriteRune(' ')
 	b.WriteString(platformInfo)
diff --git 
a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go index 3d029a3d..c373f689 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go @@ -13,6 +13,8 @@ import ( "flag" "fmt" "net/http" + "reflect" + "strings" "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" @@ -20,9 +22,11 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" "github.com/Azure/azure-sdk-for-go/sdk/internal/poller" ) @@ -54,6 +58,9 @@ type NewPollerOptions[T any] struct { // Handler[T] contains a custom polling implementation. Handler PollingHandler[T] + + // Tracer contains the Tracer from the client that's creating the Poller. + Tracer tracing.Tracer } // NewPoller creates a Poller based on the provided initial response. @@ -70,6 +77,7 @@ func NewPoller[T any](resp *http.Response, pl exported.Pipeline, options *NewPol op: options.Handler, resp: resp, result: result, + tracer: options.Tracer, }, nil } @@ -83,7 +91,9 @@ func NewPoller[T any](resp *http.Response, pl exported.Pipeline, options *NewPol // determine the polling method var opr PollingHandler[T] var err error - if async.Applicable(resp) { + if fake.Applicable(resp) { + opr, err = fake.New[T](pl, resp) + } else if async.Applicable(resp) { // async poller must be checked first as it can also have a location header opr, err = async.New[T](pl, resp, options.FinalStateVia) } else if op.Applicable(resp) { @@ -110,6 +120,7 @@ func NewPoller[T any](resp *http.Response, pl exported.Pipeline, options *NewPol op: opr, resp: resp, result: result, + tracer: options.Tracer, }, nil } @@ -121,6 +132,9 @@ type NewPollerFromResumeTokenOptions[T any] struct { // Handler[T] contains a custom polling implementation. Handler PollingHandler[T] + + // Tracer contains the Tracer from the client that's creating the Poller. + Tracer tracing.Tracer } // NewPollerFromResumeToken creates a Poller from a resume token string. @@ -147,7 +161,9 @@ func NewPollerFromResumeToken[T any](token string, pl exported.Pipeline, options opr := options.Handler // now rehydrate the poller based on the encoded poller type - if opr != nil { + if fake.CanResume(asJSON) { + opr, _ = fake.New[T](pl, nil) + } else if opr != nil { log.Writef(log.EventLRO, "Resuming custom poller %T.", opr) } else if async.CanResume(asJSON) { opr, _ = async.New[T](pl, nil, "") @@ -166,6 +182,7 @@ func NewPollerFromResumeToken[T any](token string, pl exported.Pipeline, options return &Poller[T]{ op: opr, result: result, + tracer: options.Tracer, }, nil } @@ -188,6 +205,7 @@ type Poller[T any] struct { resp *http.Response err error result *T + tracer tracing.Tracer done bool } @@ -203,7 +221,7 @@ type PollUntilDoneOptions struct { // options: pass nil to accept the default values. // NOTE: the default polling frequency is 30 seconds which works well for most operations. 
However, some operations might // benefit from a shorter or longer duration. -func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOptions) (T, error) { +func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOptions) (res T, err error) { if options == nil { options = &PollUntilDoneOptions{} } @@ -212,9 +230,13 @@ func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOpt cp.Frequency = 30 * time.Second } + ctx, endSpan := StartSpan(ctx, fmt.Sprintf("%s.PollUntilDone", shortenTypeName(reflect.TypeOf(*p).Name())), p.tracer, nil) + defer func() { endSpan(err) }() + // skip the floor check when executing tests so they don't take so long if isTest := flag.Lookup("test.v"); isTest == nil && cp.Frequency < time.Second { - return *new(T), errors.New("polling frequency minimum is one second") + err = errors.New("polling frequency minimum is one second") + return } start := time.Now() @@ -226,22 +248,24 @@ func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOpt // initial check for a retry-after header existing on the initial response if retryAfter := shared.RetryAfter(p.resp); retryAfter > 0 { log.Writef(log.EventLRO, "initial Retry-After delay for %s", retryAfter.String()) - if err := shared.Delay(ctx, retryAfter); err != nil { + if err = shared.Delay(ctx, retryAfter); err != nil { logPollUntilDoneExit(err) - return *new(T), err + return } } } // begin polling the endpoint until a terminal state is reached for { - resp, err := p.Poll(ctx) + var resp *http.Response + resp, err = p.Poll(ctx) if err != nil { logPollUntilDoneExit(err) - return *new(T), err + return } if p.Done() { logPollUntilDoneExit("succeeded") - return p.Result(ctx) + res, err = p.Result(ctx) + return } d := cp.Frequency if retryAfter := shared.RetryAfter(resp); retryAfter > 0 { @@ -252,7 +276,7 @@ func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOpt } if err = shared.Delay(ctx, d); err != nil { logPollUntilDoneExit(err) - return *new(T), err + return } } } @@ -261,17 +285,22 @@ func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOpt // If Poll succeeds, the poller's state is updated and the HTTP response is returned. // If Poll fails, the poller's state is unmodified and the error is returned. // Calling Poll on an LRO that has reached a terminal state will return the last HTTP response. -func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { +func (p *Poller[T]) Poll(ctx context.Context) (resp *http.Response, err error) { if p.Done() { // the LRO has reached a terminal state, don't poll again - return p.resp, nil + resp = p.resp + return } - resp, err := p.op.Poll(ctx) + + ctx, endSpan := StartSpan(ctx, fmt.Sprintf("%s.Poll", shortenTypeName(reflect.TypeOf(*p).Name())), p.tracer, nil) + defer func() { endSpan(err) }() + + resp, err = p.op.Poll(ctx) if err != nil { - return nil, err + return } p.resp = resp - return p.resp, nil + return } // Done returns true if the LRO has reached a terminal state. @@ -284,31 +313,40 @@ func (p *Poller[T]) Done() bool { // If the LRO completed successfully, a populated instance of T is returned. // If the LRO failed or was canceled, an *azcore.ResponseError error is returned. // Calling this on an LRO in a non-terminal state will return an error. 
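
As an aside on the signature changes in this file: nothing changes for consumers, who still drive a poller the same way. A minimal sketch of the call pattern (the generic helper below is illustrative only; `T` stands in for an operation's response type):

```go
package main

import (
	"context"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

// driveLRO shows the consumer-side call pattern for runtime.Poller.
func driveLRO[T any](ctx context.Context, poller *runtime.Poller[T]) (T, error) {
	// PollUntilDone polls until a terminal state is reached, honoring any
	// Retry-After headers; Frequency is the fallback delay between polls
	// (minimum one second outside of tests).
	return poller.PollUntilDone(ctx, &runtime.PollUntilDoneOptions{
		Frequency: 5 * time.Second,
	})
}

func main() {} // wiring omitted; a client's Begin* method supplies the poller
```
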
-func (p *Poller[T]) Result(ctx context.Context) (T, error) {
+func (p *Poller[T]) Result(ctx context.Context) (res T, err error) {
 	if !p.Done() {
-		return *new(T), errors.New("poller is in a non-terminal state")
+		err = errors.New("poller is in a non-terminal state")
+		return
 	}
 	if p.done {
 		// the result has already been retrieved, return the cached value
 		if p.err != nil {
-			return *new(T), p.err
+			err = p.err
+			return
 		}
-		return *p.result, nil
+		res = *p.result
+		return
 	}
-	err := p.op.Result(ctx, p.result)
+
+	ctx, endSpan := StartSpan(ctx, fmt.Sprintf("%s.Result", shortenTypeName(reflect.TypeOf(*p).Name())), p.tracer, nil)
+	defer func() { endSpan(err) }()
+
+	err = p.op.Result(ctx, p.result)
 	var respErr *exported.ResponseError
 	if errors.As(err, &respErr) {
 		// the LRO failed. record the error
 		p.err = err
 	} else if err != nil {
 		// the call to Result failed, don't cache anything in this case
-		return *new(T), err
+		return
 	}
 	p.done = true
 	if p.err != nil {
-		return *new(T), p.err
+		err = p.err
+		return
 	}
-	return *p.result, nil
+	res = *p.result
+	return
 }
 
 // ResumeToken returns a value representing the poller that can be used to resume
@@ -325,3 +363,22 @@ func (p *Poller[T]) ResumeToken() (string, error) {
 	}
 	return tk, err
 }
+
+// extracts the type name from the string returned from reflect.Value.Name()
+func shortenTypeName(s string) string {
+	// the value is formatted as follows
+	// Poller[module/Package.Type].Method
+	// we want to shorten the generic type parameter string to Type
+	// anything we don't recognize will be left as-is
+	begin := strings.Index(s, "[")
+	end := strings.Index(s, "]")
+	if begin == -1 || end == -1 {
+		return s
+	}
+
+	typeName := s[begin+1 : end]
+	if i := strings.LastIndex(typeName, "."); i > -1 {
+		typeName = typeName[i+1:]
+	}
+	return s[:begin+1] + typeName + s[end:]
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go
index 98e00718..e97223da 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go
@@ -9,17 +9,14 @@ package runtime
 import (
 	"bytes"
 	"context"
-	"encoding/base64"
 	"encoding/json"
 	"encoding/xml"
 	"fmt"
 	"io"
 	"mime/multipart"
-	"os"
+	"net/url"
 	"path"
-	"reflect"
 	"strings"
-	"time"
 
 	"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
 	"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
@@ -28,14 +25,14 @@ import (
 // Base64Encoding is used to specify which base-64 encoder/decoder to use when
 // encoding/decoding a slice of bytes to/from a string.
-type Base64Encoding int
+type Base64Encoding = exported.Base64Encoding
 
 const (
 	// Base64StdFormat uses base64.StdEncoding for encoding and decoding payloads.
-	Base64StdFormat Base64Encoding = 0
+	Base64StdFormat Base64Encoding = exported.Base64StdFormat
 	// Base64URLFormat uses base64.RawURLEncoding for encoding and decoding payloads.
-	Base64URLFormat Base64Encoding = 1
+	Base64URLFormat Base64Encoding = exported.Base64URLFormat
 )
 
 // NewRequest creates a new policy.Request with the specified input.
@@ -44,6 +41,19 @@ func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*policy.Request, error) {
 	return exported.NewRequest(ctx, httpMethod, endpoint)
 }
 
+// EncodeQueryParams will parse and encode any query parameters in the specified URL.
+func EncodeQueryParams(u string) (string, error) { + before, after, found := strings.Cut(u, "?") + if !found { + return u, nil + } + qp, err := url.ParseQuery(after) + if err != nil { + return "", err + } + return before + "?" + qp.Encode(), nil +} + // JoinPaths concatenates multiple URL path segments into one path, // inserting path separation characters as required. JoinPaths will preserve // query parameters in the root path @@ -79,10 +89,7 @@ func JoinPaths(root string, paths ...string) string { // EncodeByteArray will base-64 encode the byte slice v. func EncodeByteArray(v []byte, format Base64Encoding) string { - if format == Base64URLFormat { - return base64.RawURLEncoding.EncodeToString(v) - } - return base64.StdEncoding.EncodeToString(v) + return exported.EncodeByteArray(v, format) } // MarshalAsByteArray will base-64 encode the byte slice v, then calls SetBody. @@ -95,9 +102,6 @@ func MarshalAsByteArray(req *policy.Request, v []byte, format Base64Encoding) er // MarshalAsJSON calls json.Marshal() to get the JSON encoding of v then calls SetBody. func MarshalAsJSON(req *policy.Request, v interface{}) error { - if omit := os.Getenv("AZURE_SDK_GO_OMIT_READONLY"); omit == "true" { - v = cloneWithoutReadOnlyFields(v) - } b, err := json.Marshal(v) if err != nil { return fmt.Errorf("error marshalling type %T: %s", v, err) @@ -169,80 +173,5 @@ func SkipBodyDownload(req *policy.Request) { req.SetOperationValue(bodyDownloadPolicyOpValues{Skip: true}) } -// returns a clone of the object graph pointed to by v, omitting values of all read-only -// fields. if there are no read-only fields in the object graph, no clone is created. -func cloneWithoutReadOnlyFields(v interface{}) interface{} { - val := reflect.Indirect(reflect.ValueOf(v)) - if val.Kind() != reflect.Struct { - // not a struct, skip - return v - } - // first walk the graph to find any R/O fields. - // if there aren't any, skip cloning the graph. - if !recursiveFindReadOnlyField(val) { - return v - } - return recursiveCloneWithoutReadOnlyFields(val) -} - -// returns true if any field in the object graph of val contains the `azure:"ro"` tag value -func recursiveFindReadOnlyField(val reflect.Value) bool { - t := val.Type() - // iterate over the fields, looking for the "azure" tag. - for i := 0; i < t.NumField(); i++ { - field := t.Field(i) - aztag := field.Tag.Get("azure") - if azureTagIsReadOnly(aztag) { - return true - } else if reflect.Indirect(val.Field(i)).Kind() == reflect.Struct && recursiveFindReadOnlyField(reflect.Indirect(val.Field(i))) { - return true - } - } - return false -} - -// clones the object graph of val. all non-R/O properties are copied to the clone -func recursiveCloneWithoutReadOnlyFields(val reflect.Value) interface{} { - t := val.Type() - clone := reflect.New(t) - // iterate over the fields, looking for the "azure" tag. - for i := 0; i < t.NumField(); i++ { - field := t.Field(i) - aztag := field.Tag.Get("azure") - if azureTagIsReadOnly(aztag) { - // omit from payload - continue - } - // clone field will receive the same value as the source field... - value := val.Field(i) - v := reflect.Indirect(value) - if v.IsValid() && v.Type() != reflect.TypeOf(time.Time{}) && v.Kind() == reflect.Struct { - // ...unless the source value is a struct, in which case we recurse to clone that struct. - // (We can't recursively clone time.Time because it contains unexported fields.) 
- c := recursiveCloneWithoutReadOnlyFields(v) - if field.Anonymous { - // NOTE: this does not handle the case of embedded fields of unexported struct types. - // this should be ok as we don't generate any code like this at present - value = reflect.Indirect(reflect.ValueOf(c)) - } else { - value = reflect.ValueOf(c) - } - } - reflect.Indirect(clone).Field(i).Set(value) - } - return clone.Interface() -} - -// returns true if the "azure" tag contains the option "ro" -func azureTagIsReadOnly(tag string) bool { - if tag == "" { - return false - } - parts := strings.Split(tag, ",") - for _, part := range parts { - if part == "ro" { - return true - } - } - return false -} +// CtxAPINameKey is used as a context key for adding/retrieving the API name. +type CtxAPINameKey = shared.CtxAPINameKey diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go index d1f58e9e..003c875b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go @@ -8,13 +8,13 @@ package runtime import ( "bytes" - "encoding/base64" "encoding/json" "encoding/xml" "fmt" "io" "net/http" + azexported "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/internal/exported" ) @@ -105,31 +105,5 @@ func removeBOM(resp *http.Response) error { // DecodeByteArray will base-64 decode the provided string into v. func DecodeByteArray(s string, v *[]byte, format Base64Encoding) error { - if len(s) == 0 { - return nil - } - payload := string(s) - if payload[0] == '"' { - // remove surrounding quotes - payload = payload[1 : len(payload)-1] - } - switch format { - case Base64StdFormat: - decoded, err := base64.StdEncoding.DecodeString(payload) - if err == nil { - *v = decoded - return nil - } - return err - case Base64URLFormat: - // use raw encoding as URL format should not contain any '=' characters - decoded, err := base64.RawURLEncoding.DecodeString(payload) - if err == nil { - *v = decoded - return nil - } - return err - default: - return fmt.Errorf("unrecognized byte array format: %d", format) - } + return azexported.DecodeByteArray(s, v, format) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_other.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_other.go new file mode 100644 index 00000000..1c75d771 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_other.go @@ -0,0 +1,15 @@ +//go:build !wasm + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "net" +) + +func defaultTransportDialContext(dialer *net.Dialer) func(context.Context, string, string) (net.Conn, error) { + return dialer.DialContext +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_wasm.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_wasm.go new file mode 100644 index 00000000..3dc9eeec --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_wasm.go @@ -0,0 +1,15 @@ +//go:build (js && wasm) || wasip1 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package runtime + +import ( + "context" + "net" +) + +func defaultTransportDialContext(dialer *net.Dialer) func(context.Context, string, string) (net.Conn, error) { + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go index 869bed51..2124c1d4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go @@ -11,6 +11,8 @@ import ( "net" "net/http" "time" + + "golang.org/x/net/http2" ) var defaultHTTPClient *http.Client @@ -18,19 +20,28 @@ var defaultHTTPClient *http.Client func init() { defaultTransport := &http.Transport{ Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ + DialContext: defaultTransportDialContext(&net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, - }).DialContext, + }), ForceAttemptHTTP2: true, MaxIdleConns: 100, + MaxIdleConnsPerHost: 10, IdleConnTimeout: 90 * time.Second, TLSHandshakeTimeout: 10 * time.Second, ExpectContinueTimeout: 1 * time.Second, TLSClientConfig: &tls.Config{ - MinVersion: tls.VersionTLS12, + MinVersion: tls.VersionTLS12, + Renegotiation: tls.RenegotiateFreelyAsClient, }, } + // TODO: evaluate removing this once https://github.com/golang/go/issues/59690 has been fixed + if http2Transport, err := http2.ConfigureTransports(defaultTransport); err == nil { + // if the connection has been idle for 10 seconds, send a ping frame for a health check + http2Transport.ReadIdleTimeout = 10 * time.Second + // if there's no response to the ping within the timeout, the connection will be closed + http2Transport.PingTimeout = 5 * time.Second + } defaultHTTPClient = &http.Client{ Transport: defaultTransport, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go index 75f757ce..1ade7c56 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go @@ -31,12 +31,12 @@ type Provider struct { newTracerFn func(name, version string) Tracer } -// NewTracer creates a new Tracer for the specified name and version. -// - name - the name of the tracer object, typically the fully qualified name of the service client -// - version - the version of the module in which the service client resides -func (p Provider) NewTracer(name, version string) (tracer Tracer) { +// NewTracer creates a new Tracer for the specified module name and version. +// - module - the fully qualified name of the module +// - version - the version of the module +func (p Provider) NewTracer(module, version string) (tracer Tracer) { if p.newTracerFn != nil { - tracer = p.newTracerFn(name, version) + tracer = p.newTracerFn(module, version) } return } @@ -45,21 +45,28 @@ func (p Provider) NewTracer(name, version string) (tracer Tracer) { // TracerOptions contains the optional values when creating a Tracer. type TracerOptions struct { - // for future expansion + // SpanFromContext contains the implementation for the Tracer.SpanFromContext method. + SpanFromContext func(context.Context) Span } // NewTracer creates a Tracer with the specified values. 
// - newSpanFn is the underlying implementation for creating Span instances
// - options contains optional values; pass nil to accept the default value
 func NewTracer(newSpanFn func(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span), options *TracerOptions) Tracer {
+	if options == nil {
+		options = &TracerOptions{}
+	}
 	return Tracer{
-		newSpanFn: newSpanFn,
+		newSpanFn:         newSpanFn,
+		spanFromContextFn: options.SpanFromContext,
 	}
 }
 
 // Tracer is the factory that creates Span instances.
 type Tracer struct {
-	newSpanFn func(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span)
+	attrs             []Attribute
+	newSpanFn         func(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span)
+	spanFromContextFn func(ctx context.Context) Span
 }
 
 // Start creates a new span and a context.Context that contains it.
@@ -68,11 +75,37 @@ type Tracer struct {
 // - options contains optional values for the span, pass nil to accept any defaults
 func (t Tracer) Start(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span) {
 	if t.newSpanFn != nil {
-		return t.newSpanFn(ctx, spanName, options)
+		opts := SpanOptions{}
+		if options != nil {
+			opts = *options
+		}
+		opts.Attributes = append(opts.Attributes, t.attrs...)
+		return t.newSpanFn(ctx, spanName, &opts)
 	}
 	return ctx, Span{}
 }
 
+// SetAttributes sets attrs to be applied to each Span. If a key from attrs
+// already exists for an attribute of the Span it will be overwritten with
+// the value contained in attrs.
+func (t *Tracer) SetAttributes(attrs ...Attribute) {
+	t.attrs = append(t.attrs, attrs...)
+}
+
+// Enabled returns true if this Tracer is capable of creating Spans.
+func (t Tracer) Enabled() bool {
+	return t.newSpanFn != nil
+}
+
+// SpanFromContext returns the Span associated with the current context.
+// If the provided context has no Span, a zero-value Span is returned.
+func (t Tracer) SpanFromContext(ctx context.Context) Span {
+	if t.spanFromContextFn != nil {
+		return t.spanFromContextFn(ctx)
+	}
+	return Span{}
+}
+
 // SpanOptions contains optional settings for creating a span.
 type SpanOptions struct {
 	// Kind indicates the kind of Span.
@@ -97,9 +130,6 @@ type SpanImpl struct {
 	// AddEvent contains the implementation for the Span.AddEvent method.
 	AddEvent func(string, ...Attribute)
 
-	// AddError contains the implementation for the Span.AddError method.
-	AddError func(err error)
-
 	// SetStatus contains the implementation for the Span.SetStatus method.
 	SetStatus func(SpanStatus, string)
 }
@@ -140,13 +170,6 @@ func (s Span) AddEvent(name string, attrs ...Attribute) {
 	}
 }
 
-// AddError adds the specified error event to the span.
-func (s Span) AddError(err error) {
-	if s.impl.AddError != nil {
-		s.impl.AddError(err)
-	}
-}
-
 // SetStatus sets the status on the span along with a description.
 func (s Span) SetStatus(code SpanStatus, desc string) {
 	if s.impl.SetStatus != nil {
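
To make the new tracing surface concrete, here is a hedged sketch of wiring a Tracer with the options added above; the no-op span factory and the attribute value are stand-ins for a real adapter (for example around OpenTelemetry), not part of the patch:

```go
package main

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing"
)

func main() {
	// a no-op span factory; a real implementation would create spans
	// via the tracing backend of choice
	newSpan := func(ctx context.Context, spanName string, options *tracing.SpanOptions) (context.Context, tracing.Span) {
		return ctx, tracing.Span{}
	}
	tracer := tracing.NewTracer(newSpan, &tracing.TracerOptions{
		// SpanFromContext lets Tracer.SpanFromContext delegate to the implementation
		SpanFromContext: func(ctx context.Context) tracing.Span { return tracing.Span{} },
	})
	// attributes set here are appended to every span this tracer starts
	tracer.SetAttributes(tracing.Attribute{Key: "az.namespace", Value: "Microsoft.Example"})
	_ = tracer.Enabled() // true, because a span factory was supplied
}
```
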
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go
index ade7b348..8ee66b52 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go
@@ -14,3 +14,33 @@ type NonRetriable interface {
 	error
 	NonRetriable()
 }
+
+// NonRetriableError marks the specified error as non-retriable.
+// This function takes an error as input and returns a new error that is marked as non-retriable.
+func NonRetriableError(err error) error {
+	return &nonRetriableError{err}
+}
+
+// nonRetriableError is a struct that embeds the error interface.
+// It is used to represent errors that should not be retried.
+type nonRetriableError struct {
+	error
+}
+
+// Error method for nonRetriableError struct.
+// It returns the error message of the embedded error.
+func (p *nonRetriableError) Error() string {
+	return p.error.Error()
+}
+
+// NonRetriable is a marker method for nonRetriableError struct.
+// Non-functional and indicates that the error is non-retriable.
+func (*nonRetriableError) NonRetriable() {
+	// marker method
+}
+
+// Unwrap method for nonRetriableError struct.
+// It returns the original error that was marked as non-retriable.
+func (p *nonRetriableError) Unwrap() error {
+	return p.error
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md
index 832491b3..284ea54e 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md
@@ -1,5 +1,86 @@
 # Release History
 
+## 1.2.1 (2023-12-13)
+
+### Features Added
+
+* Exposed GetSASURL from specialized clients
+
+### Bugs Fixed
+
+* Fixed case in Blob Batch API when blob path has / in it. Fixes [#21649](https://github.com/Azure/azure-sdk-for-go/issues/21649).
+* Fixed SharedKeyMissingError when using client.BlobClient().GetSASURL() method
+* Fixed an issue that would cause metadata keys with empty values to be omitted when enumerating blobs.
+* Fixed an issue where passing empty map to set blob tags API was causing panic. Fixes [#21869](https://github.com/Azure/azure-sdk-for-go/issues/21869).
+* Fixed an issue where downloaded file has incorrect size when not a multiple of block size. Fixes [#21995](https://github.com/Azure/azure-sdk-for-go/issues/21995).
+* Fixed case where `io.ErrUnexpectedEOF` was treated as expected error in `UploadStream`. Fixes [#21837](https://github.com/Azure/azure-sdk-for-go/issues/21837).
+
+### Other Changes
+
+* Updated the version of `azcore` to `1.9.1` and `azidentity` to `1.4.0`.
+
+## 1.2.0 (2023-10-11)
+
+### Bugs Fixed
+* Fixed null pointer exception when `SetImmutabilityPolicyOptions` is passed as `nil`.
+
+## 1.2.0-beta.1 (2023-09-18)
+
+### Features Added
+* Added support for service version 2020-12-06, 2021-02-12, 2021-04-10, 2021-06-08, 2021-08-06, 2021-10-04, 2021-12-02, 2022-11-02, 2023-01-03, 2023-05-03, and 2023-08-03
+* Added support for [Cold Tier](https://learn.microsoft.com/azure/storage/blobs/access-tiers-overview?tabs=azure-portal).
+* Added `CopySourceTag` option for `UploadBlobFromURLOptions`
+* Added [FilterBlobs by Tags](https://learn.microsoft.com/rest/api/storageservices/find-blobs-by-tags-container) API for container client.
+* Added `System` option to `ListContainersInclude` to allow listing of system containers (i.e., $web).
+* Updated the SAS Version to `2021-12-02` and added `Encryption Scope` to Account SAS, Service SAS, and User Delegation SAS
+* Added `ArchiveStatusRehydratePendingToCold` value to `ArchiveStatus` enum.
+* Content length limit for `AppendBlob.AppendBlock()` and `AppendBlob.AppendBlockFromURL()` raised from 4 MB to 100 MB.
+
+### Bugs Fixed
+* Fixed issue where some requests fail with mismatch in string to sign.
+* Fixed service SAS creation where expiry time or permissions can be omitted when stored access policy is used. Fixes [#21229](https://github.com/Azure/azure-sdk-for-go/issues/21229). + +### Other Changes +* Updating version of azcore to 1.6.0. + +## 1.1.0 (2023-07-13) + +### Features Added + +* Added [Blob Batch API](https://learn.microsoft.com/rest/api/storageservices/blob-batch). +* Added support for bearer challenge for identity based managed disks. +* Added support for GetAccountInfo to container and blob level clients. +* Added [UploadBlobFromURL API](https://learn.microsoft.com/rest/api/storageservices/put-blob-from-url). +* Added support for CopySourceAuthorization to appendblob.AppendBlockFromURL +* Added support for tag permission in Container SAS. + +### Bugs Fixed + +* Fixed time formatting for the conditional request headers. Fixes [#20475](https://github.com/Azure/azure-sdk-for-go/issues/20475). +* Fixed an issue where passing a blob tags map of length 0 would result in the x-ms-tags header to be sent to the service with an empty string as value. +* Fixed block size and number of blocks calculation in `UploadBuffer` and `UploadFile`. Fixes [#20735](https://github.com/Azure/azure-sdk-for-go/issues/20735). + +### Other Changes + +* Add `dragonfly` to the list of build constraints for `blockblob`. +* Updating version of azcore to 1.6.0 and azidentity to 1.3.0 + +## 1.1.0-beta.1 (2023-05-09) + +### Features Added + +* Added [Blob Batch API](https://learn.microsoft.com/rest/api/storageservices/blob-batch). +* Added support for bearer challenge for identity based managed disks. +* Added support for GetAccountInfo to container and blob level clients. +* Added [UploadBlobFromURL API](https://learn.microsoft.com/rest/api/storageservices/put-blob-from-url). +* Added support for CopySourceAuthorization to appendblob.AppendBlockFromURL +* Added support for tag permission in Container SAS. + +### Bugs Fixed + +* Fixed time formatting for the conditional request headers. Fixes [#20475](https://github.com/Azure/azure-sdk-for-go/issues/20475). +* Fixed an issue where passing a blob tags map of length 0 would result in the x-ms-tags header to be sent to the service with an empty string as value. + ## 1.0.0 (2023-02-07) ### Features Added diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md index 467fe36c..905fb267 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md @@ -1,47 +1,51 @@ -# Azure Blob Storage SDK for Go +# Azure Blob Storage module for Go -> Server Version: 2020-10-02 +> Service Version: 2023-08-03 -Azure Blob storage is Microsoft's object storage solution for the cloud. Blob -storage is optimized for storing massive amounts of unstructured data. -Unstructured data is data that does not adhere to a particular data model or -definition, such as text or binary data. +Azure Blob Storage is Microsoft's object storage solution for the cloud. Blob +Storage is optimized for storing massive amounts of unstructured data - data that does not adhere to a particular data model or +definition, such as text or binary data. For more information, see [Introduction to Azure Blob Storage](https://learn.microsoft.com/azure/storage/blobs/storage-blobs-introduction). 
-[Source code][source] | [API reference documentation][docs] | [REST API documentation][rest_docs] | [Product documentation][product_docs] +Use the Azure Blob Storage client module `github.com/Azure/azure-sdk-for-go/sdk/storage/azblob` to: -## Getting started - -### Install the package +* Authenticate clients with Azure Blob Storage +* Manipulate containers and blobs in an Azure storage account -Install the Azure Blob Storage SDK for Go with [go get][goget]: +Key links: -```Powershell -go get github.com/Azure/azure-sdk-for-go/sdk/storage/azblob -``` +[Source code][source] | [API reference documentation][docs] | [REST API documentation][rest_docs] | [Product documentation][product_docs] | [Samples][go_samples] -If you're going to authenticate with Azure Active Directory (recommended), install the [azidentity][azidentity] module. -```Powershell -go get github.com/Azure/azure-sdk-for-go/sdk/azidentity -``` +## Getting started ### Prerequisites -A supported [Go][godevdl] version (the Azure SDK supports the two most recent Go releases). - -You need an [Azure subscription][azure_sub] and a -[Storage Account][storage_account_docs] to use this package. - -To create a new Storage Account, you can use the [Azure Portal][storage_account_create_portal], +- Go, version 1.18 or higher - [Install Go](https://go.dev/doc/install) +- Azure subscription - [Create a free account](https://azure.microsoft.com/free/) +- Azure storage account - To create a storage account, use tools including the [Azure portal][storage_account_create_portal], [Azure PowerShell][storage_account_create_ps], or the [Azure CLI][storage_account_create_cli]. Here's an example using the Azure CLI: -```Powershell +```bash az storage account create --name MyStorageAccount --resource-group MyResourceGroup --location westus --sku Standard_LRS ``` +### Install the package + +Install the Azure Blob Storage client module for Go with [go get][goget]: + +```bash +go get github.com/Azure/azure-sdk-for-go/sdk/storage/azblob +``` + +If you plan to authenticate with Azure Active Directory (recommended), also install the [azidentity][azidentity] module. + +```bash +go get github.com/Azure/azure-sdk-for-go/sdk/azidentity +``` + ### Authenticate the client -In order to interact with the Azure Blob Storage service, you'll need to create an instance of the `azblob.Client` type. The [azidentity][azidentity] module makes it easy to add Azure Active Directory support for authenticating Azure SDK clients with their corresponding Azure services. +To interact with the Azure Blob Storage service, you'll need to create an instance of the `azblob.Client` type. The [azidentity][azidentity] module makes it easy to add Azure Active Directory support for authenticating Azure SDK clients with their corresponding Azure services. ```go // create a credential for authenticating with Azure Active Directory @@ -53,11 +57,17 @@ client, err := azblob.NewClient("https://MYSTORAGEACCOUNT.blob.core.windows.net/ // TODO: handle err ``` -Learn more about enabling Azure Active Directory for authentication with Azure Storage in [our documentation][storage_ad] and [our samples](#next-steps). +Learn more about enabling Azure Active Directory for authentication with Azure Storage: + +* [Authorize access to blobs using Azure Active Directory][storage_ad] + +Other options for authentication include connection strings, shared key, shared access signatures (SAS), and anonymous public access. Use the appropriate client constructor function for the authentication mechanism you wish to use. 
For examples, see: + +* [Blob samples][samples] ## Key concepts -Blob storage is designed for: +Blob Storage is designed for: - Serving images or documents directly to a browser. - Storing files for distributed access. @@ -66,23 +76,41 @@ Blob storage is designed for: - Storing data for backup and restore, disaster recovery, and archiving. - Storing data for analysis by an on-premises or Azure-hosted service. -Blob storage offers three types of resources: +Blob Storage offers three types of resources: - The _storage account_ - One or more _containers_ in a storage account -- One ore more _blobs_ in a container +- One or more _blobs_ in a container Instances of the `azblob.Client` type provide methods for manipulating containers and blobs within a storage account. The storage account is specified when the `azblob.Client` is constructed. -Use the appropriate client constructor function for the authentication mechanism you wish to use. -Learn more about options for authentication _(including Connection Strings, Shared Key, Shared Access Signatures (SAS), Azure Active Directory (AAD), and anonymous public access)_ [in our examples.](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/storage/azblob/examples_test.go) +### Specialized clients + +The Azure Blob Storage client module for Go also provides specialized clients in various subpackages. Use these clients when you need to interact with a specific kind of blob. Learn more about [block blobs, append blobs, and page blobs](https://learn.microsoft.com/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs). + +- [appendblob][append_blob] +- [blockblob][block_blob] +- [pageblob][page_blob] + +The [blob][blob] package contains APIs common to all blob types. This includes APIs for deleting and undeleting a blob, setting metadata, and more. + +The [lease][lease] package contains clients for managing leases on blobs and containers. See the [REST API reference](https://learn.microsoft.com/rest/api/storageservices/lease-blob#remarks) for general information on leases. + +The [container][container] package contains APIs specific to containers. This includes APIs for setting access policies or properties, and more. + +The [service][service] package contains APIs specific to the Blob service. This includes APIs for manipulating containers, retrieving account information, and more. + +The [sas][sas] package contains utilities to aid in the creation and manipulation of shared access signature (SAS) tokens. +See the package's documentation for more information. ### Goroutine safety -We guarantee that all client instance methods are goroutine-safe and independent of each other ([guideline](https://azure.github.io/azure-sdk/golang_introduction.html#thread-safety)). This ensures that the recommendation of reusing client instances is always safe, even across goroutines. -### About blob metadata -Blob metadata name/value pairs are valid HTTP headers and should adhere to all restrictions governing HTTP headers. Metadata names must be valid HTTP header names, may contain only ASCII characters, and should be treated as case-insensitive. Base64-encode or URL-encode metadata values containing non-ASCII characters. +We guarantee that all client instance methods are goroutine-safe and independent of each other (see [guideline](https://azure.github.io/azure-sdk/golang_introduction.html#thread-safety)). This ensures that the recommendation to reuse client instances is always safe, even across goroutines. 
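
An illustrative sketch of that guarantee, sharing one client across goroutines (the account URL, container name, and errgroup usage are assumptions for the example, not part of the README diff):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
	"golang.org/x/sync/errgroup"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	// one client, reused everywhere; the account URL is a placeholder
	client, err := azblob.NewClient("https://MYSTORAGEACCOUNT.blob.core.windows.net/", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	g, ctx := errgroup.WithContext(context.TODO())
	for i := 0; i < 3; i++ {
		i := i
		g.Go(func() error {
			// concurrent calls on the shared client are safe
			_, err := client.UploadBuffer(ctx, "samples", fmt.Sprintf("blob-%d.txt", i), []byte("hello"), nil)
			return err
		})
	}
	if err := g.Wait(); err != nil {
		log.Fatal(err)
	}
}
```
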
+ +### Blob metadata + +Blob metadata name-value pairs are valid HTTP headers and should adhere to all restrictions governing HTTP headers. Metadata names must be valid HTTP header names, may contain only ASCII characters, and should be treated as case-insensitive. Base64-encode or URL-encode metadata values containing non-ASCII characters. ### Additional concepts @@ -94,7 +122,7 @@ Blob metadata name/value pairs are valid HTTP headers and should adhere to all r ## Examples -### Uploading a blob +### Upload a blob ```go const ( @@ -122,7 +150,7 @@ _, err = client.UploadFile(context.TODO(), containerName, blobName, file, nil) // TODO: handle error ``` -### Downloading a blob +### Download a blob ```go // this example accesses a public blob via anonymous access, so no credentials are required @@ -139,7 +167,7 @@ _, err = client.DownloadFile(context.TODO(), "samples", "cloud.jpg", file, nil) // TODO: handle error ``` -### Enumerating blobs +### Enumerate blobs ```go const ( @@ -177,7 +205,7 @@ All Blob service operations will return an [*azcore.ResponseError][azcore_response_error] on failure with a populated `ErrorCode` field. Many of these errors are recoverable. The [bloberror][blob_error] package provides the possible Storage error codes -along with various helper facilities for error handling. +along with helper facilities for error handling. ```go const ( @@ -201,28 +229,7 @@ if bloberror.HasCode(err, bloberror.ContainerBeingDeleted, bloberror.ContainerNo ## Next steps -Get started with our [Blob samples][samples]. They contain complete examples of the above snippets and more. - -### Specialized clients - -The Azure Blob Storage SDK for Go also provides specialized clients in various subpackages. -Use these clients when you need to interact with a specific kind of blob. -Learn more about the various types of blobs from the following links. - -- [appendblob][append_blob] - [REST docs](https://docs.microsoft.com/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs#about-append-blobs) -- [blockblob][block_blob] - [REST docs](https://docs.microsoft.com/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs#about-block-blobs) -- [pageblob][page_blob] - [REST docs](https://docs.microsoft.com/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs#about-page-blobs) - -The [blob][blob] package contains APIs common to all blob types. This includes APIs for deleting and undeleting a blob, setting metadata, and more. - -The [lease][lease] package contains clients for managing leases on blobs and containers. Please see the [reference docs](https://docs.microsoft.com/rest/api/storageservices/lease-blob#remarks) for general information on leases. - -The [container][container] package contains APIs specific to containers. This includes APIs setting access policies or properties, and more. - -The [service][service] package contains APIs specific to blob service. This includes APIs for manipulating containers, retrieving account information, and more. - -The [sas][sas] package contains utilities to aid in the creation and manipulation of Shared Access Signature tokens. -See the package's documentation for more information. +Get started with our [Blob samples][samples]. They contain complete examples of the above snippets and more. ## Contributing @@ -243,19 +250,20 @@ additional questions or comments. 
[source]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob -[docs]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob -[rest_docs]: https://docs.microsoft.com/rest/api/storageservices/blob-service-rest-api -[product_docs]: https://docs.microsoft.com/azure/storage/blobs/storage-blobs-overview +[docs]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob#section_documentation +[rest_docs]: https://learn.microsoft.com/rest/api/storageservices/blob-service-rest-api +[product_docs]: https://learn.microsoft.com/azure/storage/blobs/storage-blobs-overview [godevdl]: https://go.dev/dl/ [goget]: https://pkg.go.dev/cmd/go#hdr-Add_dependencies_to_current_module_and_install_them -[storage_account_docs]: https://docs.microsoft.com/azure/storage/common/storage-account-overview -[storage_account_create_ps]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-powershell -[storage_account_create_cli]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-cli -[storage_account_create_portal]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-portal -[azure_cli]: https://docs.microsoft.com/cli/azure +[go_samples]: https://github.com/Azure-Samples/azure-sdk-for-go-samples/tree/main +[storage_account_docs]: https://learn.microsoft.com/azure/storage/common/storage-account-overview +[storage_account_create_ps]: https://learn.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-powershell +[storage_account_create_cli]: https://learn.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-cli +[storage_account_create_portal]: https://learn.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-portal +[azure_cli]: https://learn.microsoft.com/cli/azure [azure_sub]: https://azure.microsoft.com/free/ [azidentity]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity -[storage_ad]: https://docs.microsoft.com/azure/storage/common/storage-auth-aad +[storage_ad]: https://learn.microsoft.com/azure/storage/common/storage-auth-aad [azcore_response_error]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore#ResponseError [samples]: https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/storage/azblob/examples_test.go [append_blob]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/appendblob/client.go diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go index fcb2a349..2229b7d8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go @@ -8,6 +8,9 @@ package appendblob import ( "context" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" "io" "os" "time" @@ -22,9 +25,7 @@ import ( ) // ClientOptions contains the optional parameters when creating a Client. 
-type ClientOptions struct { - azcore.ClientOptions -} +type ClientOptions base.ClientOptions // Client represents a client to an Azure Storage append blob; type Client base.CompositeClient[generated.BlobClient, generated.AppendBlobClient] @@ -34,14 +35,16 @@ type Client base.CompositeClient[generated.BlobClient, generated.AppendBlobClien // - cred - an Azure AD credential, typically obtained via the azidentity module // - options - client options; pass nil to accept the default values func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil) + authPolicy := shared.NewStorageChallengePolicy(cred) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, - exported.ModuleVersion, runtime.PipelineOptions{}, - &conOptions.ClientOptions) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} + + azClient, err := azcore.NewClient(shared.AppendBlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } - return (*Client)(base.NewAppendBlobClient(blobURL, pl, nil)), nil + return (*Client)(base.NewAppendBlobClient(blobURL, azClient, nil)), nil } // NewClientWithNoCredential creates an instance of Client with the specified values. @@ -50,12 +53,13 @@ func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptio // - options - client options; pass nil to accept the default values func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) { conOptions := shared.GetClientOptions(options) - pl := runtime.NewPipeline(exported.ModuleName, - exported.ModuleVersion, - runtime.PipelineOptions{}, - &conOptions.ClientOptions) - return (*Client)(base.NewAppendBlobClient(blobURL, pl, nil)), nil + azClient, err := azcore.NewClient(shared.AppendBlobClient, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + + return (*Client)(base.NewAppendBlobClient(blobURL, azClient, nil)), nil } // NewClientWithSharedKeyCredential creates an instance of Client with the specified values. @@ -65,13 +69,14 @@ func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCredential, options *ClientOptions) (*Client, error) { authPolicy := exported.NewSharedKeyCredPolicy(cred) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, - exported.ModuleVersion, - runtime.PipelineOptions{}, - &conOptions.ClientOptions) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewAppendBlobClient(blobURL, pl, cred)), nil + azClient, err := azcore.NewClient(shared.AppendBlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + + return (*Client)(base.NewAppendBlobClient(blobURL, azClient, cred)), nil } // NewClientFromConnectionString creates an instance of Client with the specified values. 
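
For review context, the constructor rework above leaves the public call pattern untouched. A minimal sketch of creating an append blob and appending a block (the blob URL and contents are placeholders):

```go
package main

import (
	"context"
	"log"
	"strings"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	client, err := appendblob.NewClient("https://MYSTORAGEACCOUNT.blob.core.windows.net/mycontainer/log.txt", cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.TODO()
	// an append blob must exist before blocks can be appended to it
	if _, err = client.Create(ctx, nil); err != nil {
		log.Fatal(err)
	}
	// AppendBlock takes an io.ReadSeekCloser; streaming.NopCloser adapts a reader
	body := streaming.NopCloser(strings.NewReader("hello append blob\n"))
	if _, err = client.AppendBlock(ctx, body, nil); err != nil {
		log.Fatal(err)
	}
}
```
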
@@ -131,7 +136,7 @@ func (ab *Client) WithSnapshot(snapshot string) (*Client, error) { } p.Snapshot = snapshot - return (*Client)(base.NewAppendBlobClient(p.String(), ab.generated().Pipeline(), ab.sharedKey())), nil + return (*Client)(base.NewAppendBlobClient(p.String(), ab.generated().InternalClient(), ab.sharedKey())), nil } // WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id. @@ -143,7 +148,7 @@ func (ab *Client) WithVersionID(versionID string) (*Client, error) { } p.VersionID = versionID - return (*Client)(base.NewAppendBlobClient(p.String(), ab.generated().Pipeline(), ab.sharedKey())), nil + return (*Client)(base.NewAppendBlobClient(p.String(), ab.generated().InternalClient(), ab.sharedKey())), nil } // Create creates a 0-size append blob. Call AppendBlock to append data to an append blob. @@ -255,14 +260,10 @@ func (ab *Client) SetLegalHold(ctx context.Context, legalHold bool, options *blo return ab.BlobClient().SetLegalHold(ctx, legalHold, options) } -// SetTier operation sets the tier on a blob. The operation is allowed on a page -// blob in a premium storage account and on a block blob in a blob storage account (locally -// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and -// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation -// does not update the blob's ETag. -// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers. +// SetTier +// Deprecated: SetTier only works for page blob in premium storage account and block blob in blob storage account. func (ab *Client) SetTier(ctx context.Context, tier blob.AccessTier, o *blob.SetTierOptions) (blob.SetTierResponse, error) { - return ab.BlobClient().SetTier(ctx, tier, o) + return blob.SetTierResponse{}, errors.New("operation will not work on this blob type. SetTier only works for page blob in premium storage account and block blob in blob storage account") } // SetExpiry operation sets an expiry time on an existing blob. This operation is only allowed on Hierarchical Namespace enabled accounts. @@ -282,6 +283,12 @@ func (ab *Client) GetProperties(ctx context.Context, o *blob.GetPropertiesOption return ab.BlobClient().GetProperties(ctx, o) } +// GetAccountInfo provides account level information +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-account-information?tabs=shared-access-signatures. +func (ab *Client) GetAccountInfo(ctx context.Context, o *blob.GetAccountInfoOptions) (blob.GetAccountInfoResponse, error) { + return ab.BlobClient().GetAccountInfo(ctx, o) +} + // SetHTTPHeaders changes a blob's HTTP headers. // For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. func (ab *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) { @@ -326,10 +333,16 @@ func (ab *Client) GetTags(ctx context.Context, o *blob.GetTagsOptions) (blob.Get return ab.BlobClient().GetTags(ctx, o) } -// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB. -// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url. 
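SetTier on an append blob is deprecated by this change and now fails fast on the client instead of round-tripping to the service. A short sketch of the observable behavior, assuming an existing appendblob.Client:

package examples

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

func demoSetTier(ctx context.Context, ab *appendblob.Client) {
	// No request is sent; the error is returned immediately because tiering
	// only applies to page blobs (premium) and block blobs (blob storage).
	if _, err := ab.SetTier(ctx, blob.AccessTierCool, nil); err != nil {
		fmt.Println("expected error:", err)
	}
}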
+// CopyFromURL +// Deprecated: CopyFromURL works only with block blob func (ab *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.CopyFromURLOptions) (blob.CopyFromURLResponse, error) { - return ab.BlobClient().CopyFromURL(ctx, copySource, o) + return blob.CopyFromURLResponse{}, errors.New("operation will not work on this blob type. CopyFromURL works only with block blob") +} + +// GetSASURL is a convenience method for generating a SAS token for the currently pointed at append blob. +// It can only be used if the credential supplied during creation was a SharedKeyCredential. +func (ab *Client) GetSASURL(permissions sas.BlobPermissions, expiry time.Time, o *blob.GetSASURLOptions) (string, error) { + return ab.BlobClient().GetSASURL(permissions, expiry, o) } // Concurrent Download Functions ----------------------------------------------------------------------------------------- diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/models.go index d805283f..0834743f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/models.go @@ -100,6 +100,9 @@ func (o *AppendBlockOptions) format() (*generated.AppendBlobClientAppendBlockOpt // AppendBlockFromURLOptions contains the optional parameters for the Client.AppendBlockFromURL method. type AppendBlockFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + // SourceContentValidation contains the validation mechanism used on the range of bytes read from the source. 
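GetSASURL is newly surfaced on the append blob client. A sketch, assuming the client was built with NewClientWithSharedKeyCredential (the method fails for other credential types):

package examples

import (
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
)

func readOnlySASURL(ab *appendblob.Client) (string, error) {
	// One-hour, read-only SAS for the blob this client points at.
	return ab.GetSASURL(sas.BlobPermissions{Read: true}, time.Now().UTC().Add(time.Hour), nil)
}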
SourceContentValidation blob.SourceContentValidationType @@ -125,7 +128,8 @@ func (o *AppendBlockFromURLOptions) format() (*generated.AppendBlobClientAppendB } options := &generated.AppendBlobClientAppendBlockFromURLOptions{ - SourceRange: exported.FormatHTTPRange(o.Range), + SourceRange: exported.FormatHTTPRange(o.Range), + CopySourceAuthorization: o.CopySourceAuthorization, } if o.SourceContentValidation != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json index 3ac1b0af..80d6183c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "go", "TagPrefix": "go/storage/azblob", - "Tag": "go/storage/azblob_46e572d43a" + "Tag": "go/storage/azblob_0040e8284c" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go index 674dc285..d2421ddd 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go @@ -8,15 +8,16 @@ package blob import ( "context" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" "io" "os" "sync" "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" @@ -25,9 +26,7 @@ import ( ) // ClientOptions contains the optional parameters when creating a Client. -type ClientOptions struct { - azcore.ClientOptions -} +type ClientOptions base.ClientOptions // Client represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob. type Client base.Client[generated.BlobClient] @@ -37,12 +36,15 @@ type Client base.Client[generated.BlobClient] // - cred - an Azure AD credential, typically obtained via the azidentity module // - options - client options; pass nil to accept the default values func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil) + authPolicy := shared.NewStorageChallengePolicy(cred) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewBlobClient(blobURL, pl, nil)), nil + azClient, err := azcore.NewClient(shared.BlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewBlobClient(blobURL, azClient, &cred)), nil } // NewClientWithNoCredential creates an instance of Client with the specified values. 
@@ -51,9 +53,12 @@ func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptio // - options - client options; pass nil to accept the default values func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) { conOptions := shared.GetClientOptions(options) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) - return (*Client)(base.NewBlobClient(blobURL, pl, nil)), nil + azClient, err := azcore.NewClient(shared.BlobClient, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewBlobClient(blobURL, azClient, nil)), nil } // NewClientWithSharedKeyCredential creates an instance of Client with the specified values. @@ -63,10 +68,13 @@ func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, func NewClientWithSharedKeyCredential(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) { authPolicy := exported.NewSharedKeyCredPolicy(cred) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewBlobClient(blobURL, pl, cred)), nil + azClient, err := azcore.NewClient(shared.BlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewBlobClient(blobURL, azClient, cred)), nil } // NewClientFromConnectionString creates an instance of Client with the specified values. @@ -100,6 +108,10 @@ func (b *Client) sharedKey() *SharedKeyCredential { return base.SharedKey((*base.Client[generated.BlobClient])(b)) } +func (b *Client) credential() any { + return base.Credential((*base.Client[generated.BlobClient])(b)) +} + // URL returns the URL endpoint used by the Client object. func (b *Client) URL() string { return b.generated().Endpoint() @@ -114,7 +126,7 @@ func (b *Client) WithSnapshot(snapshot string) (*Client, error) { } p.Snapshot = snapshot - return (*Client)(base.NewBlobClient(p.String(), b.generated().Pipeline(), b.sharedKey())), nil + return (*Client)(base.NewBlobClient(p.String(), b.generated().InternalClient(), b.credential())), nil } // WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id. @@ -126,7 +138,7 @@ func (b *Client) WithVersionID(versionID string) (*Client, error) { } p.VersionID = versionID - return (*Client)(base.NewBlobClient(p.String(), b.generated().Pipeline(), b.sharedKey())), nil + return (*Client)(base.NewBlobClient(p.String(), b.generated().InternalClient(), b.credential())), nil } // Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection. @@ -259,8 +271,16 @@ func (b *Client) SetLegalHold(ctx context.Context, legalHold bool, options *SetL // CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB. // For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url. 
func (b *Client) CopyFromURL(ctx context.Context, copySource string, options *CopyFromURLOptions) (CopyFromURLResponse, error) { - copyOptions, smac, mac, lac := options.format() - resp, err := b.generated().CopyFromURL(ctx, copySource, copyOptions, smac, mac, lac) + copyOptions, smac, mac, lac, cpkScopeInfo := options.format() + resp, err := b.generated().CopyFromURL(ctx, copySource, copyOptions, smac, mac, lac, cpkScopeInfo) + return resp, err +} + +// GetAccountInfo provides account level information +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-account-information?tabs=shared-access-signatures. +func (b *Client) GetAccountInfo(ctx context.Context, o *GetAccountInfoOptions) (GetAccountInfoResponse, error) { + getAccountInfoOptions := o.format() + resp, err := b.generated().GetAccountInfo(ctx, getAccountInfoOptions) return resp, err } @@ -304,8 +324,8 @@ func (b *Client) GetSASURL(permissions sas.BlobPermissions, expiry time.Time, o // Concurrent Download Functions ----------------------------------------------------------------------------------------- -// download downloads an Azure blob to a WriterAt in parallel. -func (b *Client) download(ctx context.Context, writer io.WriterAt, o downloadOptions) (int64, error) { +// downloadBuffer downloads an Azure blob to a WriterAt in parallel. +func (b *Client) downloadBuffer(ctx context.Context, writer io.WriterAt, o downloadOptions) (int64, error) { if o.BlockSize == 0 { o.BlockSize = DefaultDownloadBlockSize } @@ -313,12 +333,11 @@ func (b *Client) download(ctx context.Context, writer io.WriterAt, o downloadOpt count := o.Range.Count if count == CountToEnd { // If size not specified, calculate it // If we don't have the length at all, get it - downloadBlobOptions := o.getDownloadBlobOptions(HTTPRange{}, nil) - dr, err := b.DownloadStream(ctx, downloadBlobOptions) + gr, err := b.GetProperties(ctx, o.getBlobPropertiesOptions()) if err != nil { return 0, err } - count = *dr.ContentLength - o.Range.Offset + count = *gr.ContentLength - o.Range.Offset } if count <= 0 { @@ -334,6 +353,7 @@ func (b *Client) download(ctx context.Context, writer io.WriterAt, o downloadOpt OperationName: "downloadBlobToWriterAt", TransferSize: count, ChunkSize: o.BlockSize, + NumChunks: uint16(((count - 1) / o.BlockSize) + 1), Concurrency: o.Concurrency, Operation: func(ctx context.Context, chunkStart int64, count int64) error { downloadBlobOptions := o.getDownloadBlobOptions(HTTPRange{ @@ -372,6 +392,170 @@ func (b *Client) download(ctx context.Context, writer io.WriterAt, o downloadOpt return count, nil } +// downloadFile downloads an Azure blob to a Writer. The blocks are downloaded parallely, +// but written to file serially +func (b *Client) downloadFile(ctx context.Context, writer io.Writer, o downloadOptions) (int64, error) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if o.BlockSize == 0 { + o.BlockSize = DefaultDownloadBlockSize + } + + if o.Concurrency == 0 { + o.Concurrency = DefaultConcurrency + } + + count := o.Range.Count + if count == CountToEnd { //Calculate size if not specified + gr, err := b.GetProperties(ctx, o.getBlobPropertiesOptions()) + if err != nil { + return 0, err + } + count = *gr.ContentLength - o.Range.Offset + } + + if count <= 0 { + // The file is empty, there is nothing to download. 
+ return 0, nil + } + + progress := int64(0) + progressLock := &sync.Mutex{} + + // helper routine to get body + getBodyForRange := func(ctx context.Context, chunkStart, size int64) (io.ReadCloser, error) { + downloadBlobOptions := o.getDownloadBlobOptions(HTTPRange{ + Offset: chunkStart + o.Range.Offset, + Count: size, + }, nil) + dr, err := b.DownloadStream(ctx, downloadBlobOptions) + if err != nil { + return nil, err + } + + var body io.ReadCloser = dr.NewRetryReader(ctx, &o.RetryReaderOptionsPerBlock) + if o.Progress != nil { + rangeProgress := int64(0) + body = streaming.NewResponseProgress( + body, + func(bytesTransferred int64) { + diff := bytesTransferred - rangeProgress + rangeProgress = bytesTransferred + progressLock.Lock() + progress += diff + o.Progress(progress) + progressLock.Unlock() + }) + } + + return body, nil + } + + // if file fits in a single buffer, we'll download here. + if count <= o.BlockSize { + body, err := getBodyForRange(ctx, int64(0), count) + if err != nil { + return 0, err + } + defer body.Close() + + return io.Copy(writer, body) + } + + buffers := shared.NewMMBPool(int(o.Concurrency), o.BlockSize) + defer buffers.Free() + acquireBuffer := func() ([]byte, error) { + select { + case b := <-buffers.Acquire(): + // got a buffer + return b, nil + default: + // no buffer available; allocate a new buffer if possible + if _, err := buffers.Grow(); err != nil { + return nil, err + } + + // either grab the newly allocated buffer or wait for one to become available + return <-buffers.Acquire(), nil + } + } + + numChunks := uint16((count-1)/o.BlockSize) + 1 + blocks := make([]chan []byte, numChunks) + for b := range blocks { + blocks[b] = make(chan []byte) + } + + /* + * We have created as many channels as the number of chunks we have. + * Each downloaded block will be sent to the channel matching its + * sequence number, i.e. 0th block is sent to 0th channel, 1st block + * to 1st channel and likewise. The blocks are then read and written + * to the file serially by below goroutine. Do note that the blocks + * are still downloaded parallelly from n/w, only serialized + * and written to file here. + */ + writerError := make(chan error) + writeSize := int64(0) + go func(ch chan error) { + for _, block := range blocks { + select { + case <-ctx.Done(): + return + case block := <-block: + n, err := writer.Write(block) + writeSize += int64(n) + buffers.Release(block[:cap(block)]) + if err != nil { + ch <- err + return + } + } + } + ch <- nil + }(writerError) + + // Prepare and do parallel download. + err := shared.DoBatchTransfer(ctx, &shared.BatchTransferOptions{ + OperationName: "downloadBlobToWriterAt", + TransferSize: count, + ChunkSize: o.BlockSize, + NumChunks: numChunks, + Concurrency: o.Concurrency, + Operation: func(ctx context.Context, chunkStart int64, count int64) error { + buff, err := acquireBuffer() + if err != nil { + return err + } + + body, err := getBodyForRange(ctx, chunkStart, count) + if err != nil { + buffers.Release(buff) + return nil + } + + _, err = io.ReadFull(body, buff[:count]) + body.Close() + if err != nil { + return err + } + + blockIndex := chunkStart / o.BlockSize + blocks[blockIndex] <- buff[:count] + return nil + }, + }) + + if err != nil { + return 0, err + } + // error from writer thread. + if err = <-writerError; err != nil { + return 0, err + } + return writeSize, nil +} + // DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata. 
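downloadFile fans block downloads out across goroutines while serializing writes through per-block channels, which is why a plain io.Writer (not io.WriterAt) suffices. A caller-side sketch via the public DownloadFile; the BlockSize/Concurrency option names in blob.DownloadFileOptions are assumed from this release's surface:

package examples

import (
	"context"
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

func downloadToFile(ctx context.Context, b *blob.Client, path string) (int64, error) {
	f, err := os.Create(path)
	if err != nil {
		return 0, err
	}
	defer f.Close()
	// Blocks are fetched concurrently but written to the file in order.
	return b.DownloadFile(ctx, f, &blob.DownloadFileOptions{
		BlockSize:   4 * 1024 * 1024, // 4 MiB chunks
		Concurrency: 8,               // up to 8 parallel range reads
	})
}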
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob. func (b *Client) DownloadStream(ctx context.Context, o *DownloadStreamOptions) (DownloadStreamResponse, error) { @@ -400,7 +584,7 @@ func (b *Client) DownloadBuffer(ctx context.Context, buffer []byte, o *DownloadB if o == nil { o = &DownloadBufferOptions{} } - return b.download(ctx, shared.NewBytesWriter(buffer), (downloadOptions)(*o)) + return b.downloadBuffer(ctx, shared.NewBytesWriter(buffer), (downloadOptions)(*o)) } // DownloadFile downloads an Azure blob to a local file. @@ -439,7 +623,7 @@ func (b *Client) DownloadFile(ctx context.Context, file *os.File, o *DownloadFil } if size > 0 { - return b.download(ctx, file, *do) + return b.downloadFile(ctx, file, *do) } else { // if the blob's size is 0, there is no need in downloading it return 0, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/constants.go index c1563544..daef800e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/constants.go @@ -9,6 +9,7 @@ package blob import ( "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" ) const ( @@ -18,6 +19,9 @@ const ( // DefaultDownloadBlockSize is default block size DefaultDownloadBlockSize = int64(4 * 1024 * 1024) // 4MB + + // DefaultConcurrency is the default number of blocks downloaded or uploaded in parallel + DefaultConcurrency = shared.DefaultConcurrency ) // BlobType defines values for BlobType @@ -53,6 +57,7 @@ type AccessTier = generated.AccessTier const ( AccessTierArchive AccessTier = generated.AccessTierArchive AccessTierCool AccessTier = generated.AccessTierCool + AccessTierCold AccessTier = generated.AccessTierCold AccessTierHot AccessTier = generated.AccessTierHot AccessTierP10 AccessTier = generated.AccessTierP10 AccessTierP15 AccessTier = generated.AccessTierP15 @@ -148,6 +153,7 @@ type ArchiveStatus = generated.ArchiveStatus const ( ArchiveStatusRehydratePendingToCool ArchiveStatus = generated.ArchiveStatusRehydratePendingToCool ArchiveStatusRehydratePendingToHot ArchiveStatus = generated.ArchiveStatusRehydratePendingToHot + ArchiveStatusRehydratePendingToCold ArchiveStatus = generated.ArchiveStatusRehydratePendingToCold ) // PossibleArchiveStatusValues returns the possible values for the ArchiveStatus const type. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go index c73435cc..d7334688 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go @@ -51,7 +51,7 @@ type Tags = generated.BlobTag // HTTPRange defines a range of bytes within an HTTP resource, starting at offset and // ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange -// which has an offset but no zero value count indicates from the offset to the resource's end. +// which has an offset and zero value count indicates from the offset to the resource's end. 
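The corrected HTTPRange doc comment matters in practice: an offset with a zero Count means "to the end", not "empty". A small helper illustrating the semantics with the package's CountToEnd constant:

package examples

import "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"

// rangeFromOffset selects from offset to the resource's end:
// Count == blob.CountToEnd (0) is the "rest of the blob" sentinel,
// while a fully zero-value HTTPRange selects the entire resource.
func rangeFromOffset(offset int64) blob.HTTPRange {
	return blob.HTTPRange{Offset: offset, Count: blob.CountToEnd}
}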
type HTTPRange = exported.HTTPRange // Request Model Declaration ------------------------------------------------------------------------------------------- @@ -458,7 +458,7 @@ type SetImmutabilityPolicyOptions struct { func (o *SetImmutabilityPolicyOptions) format() (*generated.BlobClientSetImmutabilityPolicyOptions, *ModifiedAccessConditions) { if o == nil { - return nil, nil + return &generated.BlobClientSetImmutabilityPolicyOptions{}, nil } ac := &exported.BlobAccessConditions{ ModifiedAccessConditions: o.ModifiedAccessConditions, @@ -544,11 +544,13 @@ type CopyFromURLOptions struct { SourceModifiedAccessConditions *SourceModifiedAccessConditions BlobAccessConditions *AccessConditions + + CPKScopeInfo *CPKScopeInfo } -func (o *CopyFromURLOptions) format() (*generated.BlobClientCopyFromURLOptions, *generated.SourceModifiedAccessConditions, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) { +func (o *CopyFromURLOptions) format() (*generated.BlobClientCopyFromURLOptions, *generated.SourceModifiedAccessConditions, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions, *generated.CPKScopeInfo) { if o == nil { - return nil, nil, nil, nil + return nil, nil, nil, nil, nil } options := &generated.BlobClientCopyFromURLOptions{ @@ -563,5 +565,16 @@ func (o *CopyFromURLOptions) format() (*generated.BlobClientCopyFromURLOptions, } leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.BlobAccessConditions) - return options, o.SourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions + return options, o.SourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions, o.CPKScopeInfo +} + +// --------------------------------------------------------------------------------------------------------------------- + +// GetAccountInfoOptions provides set of options for Client.GetAccountInfo +type GetAccountInfoOptions struct { + // placeholder for future options +} + +func (o *GetAccountInfoOptions) format() *generated.BlobClientGetAccountInfoOptions { + return nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/responses.go index 0e9e5ea4..352d9752 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/responses.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/responses.go @@ -100,6 +100,9 @@ type SetLegalHoldResponse = generated.BlobClientSetLegalHoldResponse // CopyFromURLResponse contains the response from method BlobClient.CopyFromURL. type CopyFromURLResponse = generated.BlobClientCopyFromURLResponse +// GetAccountInfoResponse contains the response from method BlobClient.GetAccountInfo. +type GetAccountInfoResponse = generated.BlobClientGetAccountInfoResponse + // AcquireLeaseResponse contains the response from method BlobClient.AcquireLease. 
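GetAccountInfo is exposed across the blob-level clients in this update. A sketch of reading the response; the SKUName and AccountKind field names are inferred from the generated response model and should be treated as assumptions:

package examples

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

func printAccountInfo(ctx context.Context, b *blob.Client) error {
	resp, err := b.GetAccountInfo(ctx, nil)
	if err != nil {
		return err
	}
	// Assumed field names; both are pointers in the generated model.
	if resp.SKUName != nil && resp.AccountKind != nil {
		fmt.Printf("sku=%s kind=%s\n", *resp.SKUName, *resp.AccountKind)
	}
	return nil
}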
type AcquireLeaseResponse = generated.BlobClientAcquireLeaseResponse diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go index ad653c1c..07fad606 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go @@ -69,6 +69,7 @@ const ( CopyIDMismatch Code = "CopyIdMismatch" EmptyMetadataKey Code = "EmptyMetadataKey" FeatureVersionMismatch Code = "FeatureVersionMismatch" + ImmutabilityPolicyDeleteOnLockedPolicy Code = "ImmutabilityPolicyDeleteOnLockedPolicy" IncrementalCopyBlobMismatch Code = "IncrementalCopyBlobMismatch" IncrementalCopyOfEralierVersionSnapshotNotAllowed Code = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" IncrementalCopySourceMustBeSnapshot Code = "IncrementalCopySourceMustBeSnapshot" @@ -122,6 +123,7 @@ const ( NoAuthenticationInformation Code = "NoAuthenticationInformation" NoPendingCopyOperation Code = "NoPendingCopyOperation" OperationNotAllowedOnIncrementalCopyBlob Code = "OperationNotAllowedOnIncrementalCopyBlob" + OperationNotAllowedOnRootBlob Code = "OperationNotAllowedOnRootBlob" OperationTimedOut Code = "OperationTimedOut" OutOfRangeInput Code = "OutOfRangeInput" OutOfRangeQueryParameterValue Code = "OutOfRangeQueryParameterValue" @@ -153,4 +155,5 @@ const ( var ( // MissingSharedKeyCredential - Error is returned when SAS URL is being created without SharedKeyCredential. MissingSharedKeyCredential = errors.New("SAS can only be signed with a SharedKeyCredential") + UnsupportedChecksum = errors.New("for multi-part uploads, user generated checksums cannot be validated") ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go index 340d4bc7..24df42c7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go @@ -18,6 +18,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" ) // blockWriter provides methods to upload blocks that represent a file to a server and commit them. @@ -28,27 +29,8 @@ type blockWriter interface { CommitBlockList(context.Context, []string, *CommitBlockListOptions) (CommitBlockListResponse, error) } -// bufferManager provides an abstraction for the management of buffers. -// this is mostly for testing purposes, but does allow for different implementations without changing the algorithm. -type bufferManager[T ~[]byte] interface { - // Acquire returns the channel that contains the pool of buffers. - Acquire() <-chan T - - // Release releases the buffer back to the pool for reuse/cleanup. - Release(T) - - // Grow grows the number of buffers, up to the predefined max. - // It returns the total number of buffers or an error. - // No error is returned if the number of buffers has reached max. - // This is called only from the reading goroutine. - Grow() (int, error) - - // Free cleans up all buffers. - Free() -} - // copyFromReader copies a source io.Reader to blob storage using concurrent uploads. 
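Both the new service error codes and the UnsupportedChecksum sentinel are matchable by callers. A sketch, assuming bloberror.HasCode (this module's helper for matching service error codes) and errors.Is for the sentinel:

package examples

import (
	"errors"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
)

func classify(err error) string {
	switch {
	case errors.Is(err, bloberror.UnsupportedChecksum):
		return "caller-supplied checksum on a multi-part upload"
	case bloberror.HasCode(err, bloberror.OperationNotAllowedOnRootBlob,
		bloberror.ImmutabilityPolicyDeleteOnLockedPolicy):
		return "service rejected the operation"
	default:
		return "other"
	}
}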
-func copyFromReader[T ~[]byte](ctx context.Context, src io.Reader, dst blockWriter, options UploadStreamOptions, getBufferManager func(maxBuffers int, bufferSize int64) bufferManager[T]) (CommitBlockListResponse, error) { +func copyFromReader[T ~[]byte](ctx context.Context, src io.Reader, dst blockWriter, options UploadStreamOptions, getBufferManager func(maxBuffers int, bufferSize int64) shared.BufferManager[T]) (CommitBlockListResponse, error) { options.setDefaults() wg := sync.WaitGroup{} // Used to know when all outgoing blocks have finished processing @@ -93,7 +75,7 @@ func copyFromReader[T ~[]byte](ctx context.Context, src io.Reader, dst blockWrit } var n int - n, err = io.ReadFull(src, buffer) + n, err = shared.ReadAtLeast(src, buffer, len(buffer)) if n > 0 { // some data was read, upload it @@ -126,7 +108,7 @@ func copyFromReader[T ~[]byte](ctx context.Context, src io.Reader, dst blockWrit } if err != nil { // The reader is done, no more outgoing buffers - if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) { + if errors.Is(err, io.EOF) { // these are expected errors, we don't surface those err = nil } else { @@ -265,49 +247,3 @@ func (ubi uuidBlockID) WithBlockNumber(blockNumber uint32) uuidBlockID { func (ubi uuidBlockID) ToBase64() string { return blockID(ubi).ToBase64() } - -// mmbPool implements the bufferManager interface. -// it uses anonymous memory mapped files for buffers. -// don't use this type directly, use newMMBPool() instead. -type mmbPool struct { - buffers chan mmb - count int - max int - size int64 -} - -func newMMBPool(maxBuffers int, bufferSize int64) bufferManager[mmb] { - return &mmbPool{ - buffers: make(chan mmb, maxBuffers), - max: maxBuffers, - size: bufferSize, - } -} - -func (pool *mmbPool) Acquire() <-chan mmb { - return pool.buffers -} - -func (pool *mmbPool) Grow() (int, error) { - if pool.count < pool.max { - buffer, err := newMMB(pool.size) - if err != nil { - return 0, err - } - pool.buffers <- buffer - pool.count++ - } - return pool.count, nil -} - -func (pool *mmbPool) Release(buffer mmb) { - pool.buffers <- buffer -} - -func (pool *mmbPool) Free() { - for i := 0; i < pool.count; i++ { - buffer := <-pool.buffers - buffer.delete() - } - pool.count = 0 -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go index 64a86659..e3167b77 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go @@ -11,8 +11,13 @@ import ( "context" "encoding/base64" "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" "io" + "math" "os" + "reflect" "sync" "time" @@ -30,9 +35,7 @@ import ( ) // ClientOptions contains the optional parameters when creating a Client. -type ClientOptions struct { - azcore.ClientOptions -} +type ClientOptions base.ClientOptions // Client defines a set of operations applicable to block blobs. 
type Client base.CompositeClient[generated.BlobClient, generated.BlockBlobClient] @@ -42,12 +45,15 @@ type Client base.CompositeClient[generated.BlobClient, generated.BlockBlobClient // - cred - an Azure AD credential, typically obtained via the azidentity module // - options - client options; pass nil to accept the default values func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil) + authPolicy := shared.NewStorageChallengePolicy(cred) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewBlockBlobClient(blobURL, pl, nil)), nil + azClient, err := azcore.NewClient(shared.BlockBlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewBlockBlobClient(blobURL, azClient, nil)), nil } // NewClientWithNoCredential creates an instance of Client with the specified values. @@ -56,9 +62,13 @@ func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptio // - options - client options; pass nil to accept the default values func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) { conOptions := shared.GetClientOptions(options) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) - return (*Client)(base.NewBlockBlobClient(blobURL, pl, nil)), nil + azClient, err := azcore.NewClient(shared.BlockBlobClient, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + + return (*Client)(base.NewBlockBlobClient(blobURL, azClient, nil)), nil } // NewClientWithSharedKeyCredential creates an instance of Client with the specified values. @@ -68,10 +78,14 @@ func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCredential, options *ClientOptions) (*Client, error) { authPolicy := exported.NewSharedKeyCredPolicy(cred) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewBlockBlobClient(blobURL, pl, cred)), nil + azClient, err := azcore.NewClient(shared.BlockBlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + + return (*Client)(base.NewBlockBlobClient(blobURL, azClient, cred)), nil } // NewClientFromConnectionString creates an instance of Client with the specified values. @@ -116,7 +130,7 @@ func (bb *Client) URL() string { return bb.generated().Endpoint() } -// BlobClient returns the embedded blob client for this AppendBlob client. +// BlobClient returns the embedded blob client for this BlockBlob client. 
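UploadBlobFromURL wraps Put Blob from URL: one synchronous call that creates or overwrites the destination from a readable source URL. A sketch, assuming the source URL is independently authorized (for example by SAS) or paired with CopySourceAuthorization in the options:

package examples

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
)

func putFromURL(ctx context.Context, bb *blockblob.Client, srcURL string) error {
	// Partial updates are not supported; the destination's content is replaced.
	_, err := bb.UploadBlobFromURL(ctx, srcURL, nil)
	return err
}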
func (bb *Client) BlobClient() *blob.Client { blobClient, _ := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.BlockBlobClient])(bb)) return (*blob.Client)(blobClient) @@ -131,7 +145,7 @@ func (bb *Client) WithSnapshot(snapshot string) (*Client, error) { } p.Snapshot = snapshot - return (*Client)(base.NewBlockBlobClient(p.String(), bb.generated().Pipeline(), bb.sharedKey())), nil + return (*Client)(base.NewBlockBlobClient(p.String(), bb.generated().Internal(), bb.sharedKey())), nil } // WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id. @@ -143,7 +157,7 @@ func (bb *Client) WithVersionID(versionID string) (*Client, error) { } p.VersionID = versionID - return (*Client)(base.NewBlockBlobClient(p.String(), bb.generated().Pipeline(), bb.sharedKey())), nil + return (*Client)(base.NewBlockBlobClient(p.String(), bb.generated().Internal(), bb.sharedKey())), nil } // Upload creates a new block blob or overwrites an existing block blob. @@ -161,10 +175,30 @@ func (bb *Client) Upload(ctx context.Context, body io.ReadSeekCloser, options *U opts, httpHeaders, leaseInfo, cpkV, cpkN, accessConditions := options.format() + if options != nil && options.TransactionalValidation != nil { + body, err = options.TransactionalValidation.Apply(body, opts) + if err != nil { + return UploadResponse{}, err + } + } + resp, err := bb.generated().Upload(ctx, count, body, opts, httpHeaders, leaseInfo, cpkV, cpkN, accessConditions) return resp, err } +// UploadBlobFromURL - The Put Blob from URL operation creates a new Block Blob where the contents of the blob are read from +// a given URL. Partial updates are not supported with Put Blob from URL; the content of an existing blob is overwritten +// with the content of the new blob. To perform partial updates to a block blob’s contents using a source URL, use the Put +// Block from URL API in conjunction with Put Block List. +// For more information, see https://learn.microsoft.com/rest/api/storageservices/put-blob-from-url +func (bb *Client) UploadBlobFromURL(ctx context.Context, copySource string, options *UploadBlobFromURLOptions) (UploadBlobFromURLResponse, error) { + opts, httpHeaders, leaseAccessConditions, cpkInfo, cpkSourceInfo, modifiedAccessConditions, sourceModifiedConditions := options.format() + + resp, err := bb.generated().PutBlobFromURL(ctx, int64(0), copySource, opts, httpHeaders, leaseAccessConditions, cpkInfo, cpkSourceInfo, modifiedAccessConditions, sourceModifiedConditions) + + return resp, err +} + // StageBlock uploads the specified block to the block blob's "staging area" to be later committed by a call to CommitBlockList. // Note that the http client closes the body stream after the request is sent to the service. // For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block. @@ -236,6 +270,11 @@ func (bb *Client) CommitBlockList(ctx context.Context, base64BlockIDs []string, ImmutabilityPolicyExpiry: options.ImmutabilityPolicyExpiryTime, } + // If user attempts to pass in their own checksum, errors out. 
+ if options.TransactionalContentMD5 != nil || options.TransactionalContentCRC64 != nil { + return CommitBlockListResponse{}, bloberror.UnsupportedChecksum + } + headers = options.HTTPHeaders leaseAccess, modifiedAccess = exported.FormatBlobAccessConditions(options.AccessConditions) cpkInfo = options.CPKInfo @@ -316,6 +355,12 @@ func (bb *Client) GetProperties(ctx context.Context, o *blob.GetPropertiesOption return bb.BlobClient().GetProperties(ctx, o) } +// GetAccountInfo provides account level information +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-account-information?tabs=shared-access-signatures. +func (bb *Client) GetAccountInfo(ctx context.Context, o *blob.GetAccountInfoOptions) (blob.GetAccountInfoResponse, error) { + return bb.BlobClient().GetAccountInfo(ctx, o) +} + // SetHTTPHeaders changes a blob's HTTP headers. // For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. func (bb *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) { @@ -366,35 +411,36 @@ func (bb *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.Co return bb.BlobClient().CopyFromURL(ctx, copySource, o) } +// GetSASURL is a convenience method for generating a SAS token for the currently pointed at block blob. +// It can only be used if the credential supplied during creation was a SharedKeyCredential. +func (bb *Client) GetSASURL(permissions sas.BlobPermissions, expiry time.Time, o *blob.GetSASURLOptions) (string, error) { + return bb.BlobClient().GetSASURL(permissions, expiry, o) +} + // Concurrent Upload Functions ----------------------------------------------------------------------------------------- // uploadFromReader uploads a buffer in blocks to a block blob. func (bb *Client) uploadFromReader(ctx context.Context, reader io.ReaderAt, actualSize int64, o *uploadFromReaderOptions) (uploadFromReaderResponse, error) { - readerSize := actualSize if o.BlockSize == 0 { // If bufferSize > (MaxStageBlockBytes * MaxBlocks), then error - if readerSize > MaxStageBlockBytes*MaxBlocks { + if actualSize > MaxStageBlockBytes*MaxBlocks { return uploadFromReaderResponse{}, errors.New("buffer is too large to upload to a block blob") } // If bufferSize <= MaxUploadBlobBytes, then Upload should be used with just 1 I/O request - if readerSize <= MaxUploadBlobBytes { + if actualSize <= MaxUploadBlobBytes { o.BlockSize = MaxUploadBlobBytes // Default if unspecified } else { - if remainder := readerSize % MaxBlocks; remainder > 0 { - // ensure readerSize is a multiple of MaxBlocks - readerSize += (MaxBlocks - remainder) - } - o.BlockSize = readerSize / MaxBlocks // buffer / max blocks = block size to use all 50,000 blocks - if o.BlockSize < blob.DefaultDownloadBlockSize { // If the block size is smaller than 4MB, round up to 4MB + o.BlockSize = int64(math.Ceil(float64(actualSize) / MaxBlocks)) // ceil(buffer / max blocks) = block size to use all 50,000 blocks + if o.BlockSize < blob.DefaultDownloadBlockSize { // If the block size is smaller than 4MB, round up to 4MB o.BlockSize = blob.DefaultDownloadBlockSize } // StageBlock will be called with blockSize blocks and a Concurrency of (BufferSize / BlockSize). 
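The sizing rule above replaces remainder-based rounding with a ceiling division. A mirror of the logic under the documented constants (50,000 max blocks, 4 MiB floor); for example, a 300 GiB source yields ceil(300 GiB / 50,000) ≈ 6.1 MiB blocks:

package examples

import "math"

// blockSizeFor mirrors the auto-sizing in uploadFromReader, assuming
// MaxBlocks = 50,000 and a 4 MiB minimum block size.
func blockSizeFor(actualSize int64) int64 {
	const (
		maxBlocks = 50_000
		floor     = 4 * 1024 * 1024
	)
	bs := int64(math.Ceil(float64(actualSize) / maxBlocks)) // use all 50,000 blocks
	if bs < floor {
		bs = floor // round small results up to 4 MiB
	}
	return bs
}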
} } - if readerSize <= MaxUploadBlobBytes { + if actualSize <= MaxUploadBlobBytes { // If the size can fit in 1 Upload call, do it this way - var body io.ReadSeeker = io.NewSectionReader(reader, 0, readerSize) + var body io.ReadSeeker = io.NewSectionReader(reader, 0, actualSize) if o.Progress != nil { body = streaming.NewRequestProgress(shared.NopCloser(body), o.Progress) } @@ -405,7 +451,7 @@ func (bb *Client) uploadFromReader(ctx context.Context, reader io.ReaderAt, actu return toUploadReaderAtResponseFromUploadResponse(resp), err } - var numBlocks = uint16(((readerSize - 1) / o.BlockSize) + 1) + var numBlocks = uint16(((actualSize - 1) / o.BlockSize) + 1) if numBlocks > MaxBlocks { // prevent any math bugs from attempting to upload too many blocks which will always fail return uploadFromReaderResponse{}, errors.New("block limit exceeded") @@ -425,8 +471,9 @@ func (bb *Client) uploadFromReader(ctx context.Context, reader io.ReaderAt, actu err := shared.DoBatchTransfer(ctx, &shared.BatchTransferOptions{ OperationName: "uploadFromReader", - TransferSize: readerSize, + TransferSize: actualSize, ChunkSize: o.BlockSize, + NumChunks: uint16(((actualSize - 1) / o.BlockSize) + 1), Concurrency: o.Concurrency, Operation: func(ctx context.Context, offset int64, chunkSize int64) error { // This function is called once per block. @@ -481,6 +528,12 @@ func (bb *Client) UploadBuffer(ctx context.Context, buffer []byte, o *UploadBuff if o != nil { uploadOptions = *o } + + // If user attempts to pass in their own checksum, errors out. + if uploadOptions.TransactionalValidation != nil && reflect.TypeOf(uploadOptions.TransactionalValidation).Kind() != reflect.Func { + return UploadBufferResponse{}, bloberror.UnsupportedChecksum + } + return bb.uploadFromReader(ctx, bytes.NewReader(buffer), int64(len(buffer)), &uploadOptions) } @@ -494,6 +547,12 @@ func (bb *Client) UploadFile(ctx context.Context, file *os.File, o *UploadFileOp if o != nil { uploadOptions = *o } + + // If user attempts to pass in their own checksum, errors out. + if uploadOptions.TransactionalValidation != nil && reflect.TypeOf(uploadOptions.TransactionalValidation).Kind() != reflect.Func { + return UploadFileResponse{}, bloberror.UnsupportedChecksum + } + return bb.uploadFromReader(ctx, file, stat.Size(), &uploadOptions) } @@ -504,7 +563,12 @@ func (bb *Client) UploadStream(ctx context.Context, body io.Reader, o *UploadStr o = &UploadStreamOptions{} } - result, err := copyFromReader(ctx, body, bb, *o, newMMBPool) + // If user attempts to pass in their own checksum, errors out. 
+ if o.TransactionalValidation != nil && reflect.TypeOf(o.TransactionalValidation).Kind() != reflect.Func { + return UploadStreamResponse{}, bloberror.UnsupportedChecksum + } + + result, err := copyFromReader(ctx, body, bb, *o, shared.NewMMBPool) if err != nil { return CommitBlockListResponse{}, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/constants.go index cb116264..ce3a5d8d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/constants.go @@ -37,3 +37,16 @@ const ( func PossibleBlockListTypeValues() []BlockListType { return generated.PossibleBlockListTypeValues() } + +// BlobCopySourceTags - can be 'COPY' or 'REPLACE' +type BlobCopySourceTags = generated.BlobCopySourceTags + +const ( + BlobCopySourceTagsCopy = generated.BlobCopySourceTagsCOPY + BlobCopySourceTagsReplace = generated.BlobCopySourceTagsREPLACE +) + +// PossibleBlobCopySourceTagsValues returns the possible values for the BlobCopySourceTags const type. +func PossibleBlobCopySourceTagsValues() []BlobCopySourceTags { + return generated.PossibleBlobCopySourceTagsValues() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/models.go index 3da15aab..453d569e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/models.go @@ -36,8 +36,9 @@ type UploadOptions struct { // Optional. Indicates the tier to be set on the blob. Tier *blob.AccessTier - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte + // TransactionalValidation specifies the transfer validation type to use. + // The default is nil (no transfer validation). + TransactionalValidation blob.TransferValidationType HTTPHeaders *blob.HTTPHeaders CPKInfo *blob.CPKInfo @@ -46,6 +47,9 @@ type UploadOptions struct { LegalHold *bool ImmutabilityPolicyMode *blob.ImmutabilityPolicySetting ImmutabilityPolicyExpiryTime *time.Time + + // Deprecated: TransactionalContentMD5 can be set by using TransactionalValidation instead + TransactionalContentMD5 []byte } func (o *UploadOptions) format() (*generated.BlockBlobClientUploadOptions, *generated.BlobHTTPHeaders, *generated.LeaseAccessConditions, @@ -70,6 +74,60 @@ func (o *UploadOptions) format() (*generated.BlockBlobClientUploadOptions, *gene // --------------------------------------------------------------------------------------------------------------------- +// UploadBlobFromURLOptions contains the optional parameters for the Client.UploadBlobFromURL method. +type UploadBlobFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. + Tags map[string]string + + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + + // Optional, default is true. Indicates if properties from the source blob should be copied. + CopySourceBlobProperties *bool + + // Optional, default 'replace'. Indicates if source tags should be copied or replaced with the tags specified by x-ms-tags. + CopySourceTags *BlobCopySourceTags + + // Optional. Specifies a user-defined name-value pair associated with the blob. 
+ Metadata map[string]*string + + // Optional. Specifies the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + + // Optional. Indicates the tier to be set on the blob. + Tier *blob.AccessTier + + // Additional optional headers + HTTPHeaders *blob.HTTPHeaders + AccessConditions *blob.AccessConditions + CPKInfo *blob.CPKInfo + CPKScopeInfo *blob.CPKScopeInfo + SourceModifiedAccessConditions *blob.SourceModifiedAccessConditions +} + +func (o *UploadBlobFromURLOptions) format() (*generated.BlockBlobClientPutBlobFromURLOptions, *generated.BlobHTTPHeaders, + *generated.LeaseAccessConditions, *generated.CPKInfo, *generated.CPKScopeInfo, *generated.ModifiedAccessConditions, + *generated.SourceModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil, nil, nil + } + + options := generated.BlockBlobClientPutBlobFromURLOptions{ + BlobTagsString: shared.SerializeBlobTagsToStrPtr(o.Tags), + CopySourceAuthorization: o.CopySourceAuthorization, + CopySourceBlobProperties: o.CopySourceBlobProperties, + CopySourceTags: o.CopySourceTags, + Metadata: o.Metadata, + SourceContentMD5: o.SourceContentMD5, + Tier: o.Tier, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &options, o.HTTPHeaders, leaseAccessConditions, o.CPKInfo, o.CPKScopeInfo, modifiedAccessConditions, o.SourceModifiedAccessConditions +} + +// --------------------------------------------------------------------------------------------------------------------- + // StageBlockOptions contains the optional parameters for the Client.StageBlock method. type StageBlockOptions struct { CPKInfo *blob.CPKInfo @@ -140,8 +198,6 @@ type CommitBlockListOptions struct { RequestID *string Tier *blob.AccessTier Timeout *int32 - TransactionalContentCRC64 []byte - TransactionalContentMD5 []byte HTTPHeaders *blob.HTTPHeaders CPKInfo *blob.CPKInfo CPKScopeInfo *blob.CPKScopeInfo @@ -149,6 +205,12 @@ type CommitBlockListOptions struct { LegalHold *bool ImmutabilityPolicyMode *blob.ImmutabilityPolicySetting ImmutabilityPolicyExpiryTime *time.Time + + // Deprecated: TransactionalContentCRC64 cannot be generated + TransactionalContentCRC64 []byte + + // Deprecated: TransactionalContentMD5 cannot be generated + TransactionalContentMD5 []byte } // --------------------------------------------------------------------------------------------------------------------- @@ -203,9 +265,10 @@ type uploadFromReaderOptions struct { TransactionalValidation blob.TransferValidationType - // Optional header, Specifies the transactional crc64 for the body, to be validated by the service. + // Deprecated: TransactionalContentCRC64 cannot be generated at block level TransactionalContentCRC64 uint64 - // Specify the transactional md5 for the body, to be validated by the service. + + // Deprecated: TransactionalContentMD5 cannot be generated at block level TransactionalContentMD5 []byte } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/responses.go index 00093ec1..917f7180 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/responses.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/responses.go @@ -16,6 +16,9 @@ import ( // UploadResponse contains the response from method Client.Upload. 
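All three multi-part upload paths (UploadBuffer, UploadFile, UploadStream) now reject caller-supplied checksums up front, since a precomputed digest cannot describe individually staged blocks. A sketch of the failure mode, assuming blob.TransferValidationTypeMD5 as the precomputed-MD5 wrapper in this release:

package examples

import (
	"context"
	"errors"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
)

func demoChecksumRejected(ctx context.Context, bb *blockblob.Client, data, md5sum []byte) {
	_, err := bb.UploadBuffer(ctx, data, &blockblob.UploadBufferOptions{
		// A concrete (non-function) validation type is rejected for multi-part uploads.
		TransactionalValidation: blob.TransferValidationTypeMD5(md5sum),
	})
	if errors.Is(err, bloberror.UnsupportedChecksum) {
		fmt.Println("precomputed checksums cannot be validated on multi-part uploads")
	}
}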
type UploadResponse = generated.BlockBlobClientUploadResponse +// UploadBlobFromURLResponse contains the response from the method Client.UploadBlobFromURL +type UploadBlobFromURLResponse = generated.BlockBlobClientPutBlobFromURLResponse + // StageBlockResponse contains the response from method Client.StageBlock. type StageBlockResponse = generated.BlockBlobClientStageBlockResponse diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml index e0623f50..03035033 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml @@ -26,3 +26,9 @@ stages: parameters: ServiceDirectory: 'storage/azblob' RunLiveTests: true + UsePipelineProxy: false + EnvVars: + AZURE_CLIENT_ID: $(AZBLOB_CLIENT_ID) + AZURE_TENANT_ID: $(AZBLOB_TENANT_ID) + AZURE_CLIENT_SECRET: $(AZBLOB_CLIENT_SECRET) + AZURE_SUBSCRIPTION_ID: $(AZBLOB_SUBSCRIPTION_ID) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go index 59299acb..5c4b719c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go @@ -13,14 +13,13 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service" ) // ClientOptions contains the optional parameters when creating a Client. -type ClientOptions struct { - azcore.ClientOptions -} +type ClientOptions base.ClientOptions // Client represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob. type Client struct { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/common.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/common.go index 560e151d..48771e8c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/common.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/common.go @@ -32,5 +32,5 @@ func ParseURL(u string) (URLParts, error) { // HTTPRange defines a range of bytes within an HTTP resource, starting at offset and // ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange -// which has an offset but no zero value count indicates from the offset to the resource's end. +// which has an offset and zero value count indicates from the offset to the resource's end. type HTTPRange = exported.HTTPRange diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/batch_builder.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/batch_builder.go new file mode 100644 index 00000000..83edea72 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/batch_builder.go @@ -0,0 +1,94 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package container + +import ( + "context" + "fmt" + "net/url" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" +) + +// BatchBuilder is used for creating the batch operations list. It contains the list of either delete or set tier sub-requests. +// NOTE: All sub-requests in the batch must be of the same type, either delete or set tier. +type BatchBuilder struct { + endpoint string + authPolicy policy.Policy + subRequests []*policy.Request + operationType *exported.BlobBatchOperationType +} + +func (bb *BatchBuilder) checkOperationType(operationType exported.BlobBatchOperationType) error { + if bb.operationType == nil { + bb.operationType = &operationType + return nil + } + if *bb.operationType != operationType { + return fmt.Errorf("BlobBatch only supports one operation type per batch and is already being used for %s operations", *bb.operationType) + } + return nil +} + +// Delete operation is used to add delete sub-request to the batch builder. +func (bb *BatchBuilder) Delete(blobName string, options *BatchDeleteOptions) error { + err := bb.checkOperationType(exported.BatchDeleteOperationType) + if err != nil { + return err + } + + blobName = url.PathEscape(blobName) + blobURL := runtime.JoinPaths(bb.endpoint, blobName) + + blobClient, err := blob.NewClientWithNoCredential(blobURL, nil) + if err != nil { + return err + } + + deleteOptions, leaseInfo, accessConditions := options.format() + req, err := getGeneratedBlobClient(blobClient).DeleteCreateRequest(context.TODO(), deleteOptions, leaseInfo, accessConditions) + if err != nil { + return err + } + + // remove x-ms-version header + exported.UpdateSubRequestHeaders(req) + + bb.subRequests = append(bb.subRequests, req) + return nil +} + +// SetTier operation is used to add set tier sub-request to the batch builder. +func (bb *BatchBuilder) SetTier(blobName string, accessTier blob.AccessTier, options *BatchSetTierOptions) error { + err := bb.checkOperationType(exported.BatchSetTierOperationType) + if err != nil { + return err + } + + blobName = url.PathEscape(blobName) + blobURL := runtime.JoinPaths(bb.endpoint, blobName) + + blobClient, err := blob.NewClientWithNoCredential(blobURL, nil) + if err != nil { + return err + } + + setTierOptions, leaseInfo, accessConditions := options.format() + req, err := getGeneratedBlobClient(blobClient).SetTierCreateRequest(context.TODO(), accessTier, setTierOptions, leaseInfo, accessConditions) + if err != nil { + return err + } + + // remove x-ms-version header + exported.UpdateSubRequestHeaders(req) + + bb.subRequests = append(bb.subRequests, req) + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go index 5de51e0d..3058b5d4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go @@ -7,7 +7,11 @@ package container import ( + "bytes" "context" + "errors" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" "net/http" "net/url" @@ -28,9 +32,7 @@ import ( ) // ClientOptions contains the optional parameters when creating a Client. 
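batch_builder.go underpins the new blob batch support: a builder collects homogeneous sub-requests (all Delete or all SetTier) that are submitted as one request. A caller-side sketch; NewBatchBuilder and SubmitBatch are assumed to be the container client's public entry points in this release:

package examples

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
)

func batchDelete(ctx context.Context, c *container.Client, names []string) error {
	bb, err := c.NewBatchBuilder()
	if err != nil {
		return err
	}
	for _, name := range names {
		// Mixing Delete and SetTier in one builder returns an error.
		if err := bb.Delete(name, nil); err != nil {
			return err
		}
	}
	_, err = c.SubmitBatch(ctx, bb, nil)
	return err
}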
-type ClientOptions struct { - azcore.ClientOptions -} +type ClientOptions base.ClientOptions // Client represents a URL to the Azure Storage container allowing you to manipulate its blobs. type Client base.Client[generated.ContainerClient] @@ -40,12 +42,15 @@ type Client base.Client[generated.ContainerClient] // - cred - an Azure AD credential, typically obtained via the azidentity module // - options - client options; pass nil to accept the default values func NewClient(containerURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil) + authPolicy := shared.NewStorageChallengePolicy(cred) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewContainerClient(containerURL, pl, nil)), nil + azClient, err := azcore.NewClient(shared.ContainerClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewContainerClient(containerURL, azClient, &cred)), nil } // NewClientWithNoCredential creates an instance of Client with the specified values. @@ -54,9 +59,12 @@ func NewClient(containerURL string, cred azcore.TokenCredential, options *Client // - options - client options; pass nil to accept the default values func NewClientWithNoCredential(containerURL string, options *ClientOptions) (*Client, error) { conOptions := shared.GetClientOptions(options) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) - return (*Client)(base.NewContainerClient(containerURL, pl, nil)), nil + azClient, err := azcore.NewClient(shared.ContainerClient, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewContainerClient(containerURL, azClient, nil)), nil } // NewClientWithSharedKeyCredential creates an instance of Client with the specified values. @@ -66,10 +74,13 @@ func NewClientWithNoCredential(containerURL string, options *ClientOptions) (*Cl func NewClientWithSharedKeyCredential(containerURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) { authPolicy := exported.NewSharedKeyCredPolicy(cred) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewContainerClient(containerURL, pl, cred)), nil + azClient, err := azcore.NewClient(shared.ContainerClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewContainerClient(containerURL, azClient, cred)), nil } // NewClientFromConnectionString creates an instance of Client with the specified values. 
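The constructors above now route everything through `azcore.NewClient`, with Azure AD credentials wrapped in the storage challenge policy instead of a plain bearer-token policy. A minimal caller-side sketch of the token-credential path; it assumes the azidentity module for the credential, and the account and container names are placeholders:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
)

func main() {
	// Any azcore.TokenCredential works; DefaultAzureCredential is the usual choice.
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		panic(err)
	}
	// NewClient attaches the challenge policy shown above to the pipeline.
	client, err := container.NewClient("https://<account>.blob.core.windows.net/<container>", cred, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(client.URL())
}
```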
@@ -102,6 +113,15 @@ func (c *Client) sharedKey() *SharedKeyCredential { return base.SharedKey((*base.Client[generated.ContainerClient])(c)) } +func (c *Client) credential() any { + return base.Credential((*base.Client[generated.ContainerClient])(c)) +} + +// helper method to return the generated.BlobClient which is used for creating the sub-requests +func getGeneratedBlobClient(b *blob.Client) *generated.BlobClient { + return base.InnerClient((*base.Client[generated.BlobClient])(b)) +} + // URL returns the URL endpoint used by the Client object. func (c *Client) URL() string { return c.generated().Endpoint() @@ -113,7 +133,7 @@ func (c *Client) URL() string { func (c *Client) NewBlobClient(blobName string) *blob.Client { blobName = url.PathEscape(blobName) blobURL := runtime.JoinPaths(c.URL(), blobName) - return (*blob.Client)(base.NewBlobClient(blobURL, c.generated().Pipeline(), c.sharedKey())) + return (*blob.Client)(base.NewBlobClient(blobURL, c.generated().InternalClient().WithClientName(shared.BlobClient), c.credential())) } // NewAppendBlobClient creates a new appendblob.Client object by concatenating blobName to the end of @@ -122,7 +142,7 @@ func (c *Client) NewBlobClient(blobName string) *blob.Client { func (c *Client) NewAppendBlobClient(blobName string) *appendblob.Client { blobName = url.PathEscape(blobName) blobURL := runtime.JoinPaths(c.URL(), blobName) - return (*appendblob.Client)(base.NewAppendBlobClient(blobURL, c.generated().Pipeline(), c.sharedKey())) + return (*appendblob.Client)(base.NewAppendBlobClient(blobURL, c.generated().InternalClient().WithClientName(shared.AppendBlobClient), c.sharedKey())) } // NewBlockBlobClient creates a new blockblob.Client object by concatenating blobName to the end of @@ -131,7 +151,7 @@ func (c *Client) NewAppendBlobClient(blobName string) *appendblob.Client { func (c *Client) NewBlockBlobClient(blobName string) *blockblob.Client { blobName = url.PathEscape(blobName) blobURL := runtime.JoinPaths(c.URL(), blobName) - return (*blockblob.Client)(base.NewBlockBlobClient(blobURL, c.generated().Pipeline(), c.sharedKey())) + return (*blockblob.Client)(base.NewBlockBlobClient(blobURL, c.generated().InternalClient().WithClientName(shared.BlockBlobClient), c.sharedKey())) } // NewPageBlobClient creates a new pageblob.Client object by concatenating blobName to the end of @@ -140,7 +160,7 @@ func (c *Client) NewBlockBlobClient(blobName string) *blockblob.Client { func (c *Client) NewPageBlobClient(blobName string) *pageblob.Client { blobName = url.PathEscape(blobName) blobURL := runtime.JoinPaths(c.URL(), blobName) - return (*pageblob.Client)(base.NewPageBlobClient(blobURL, c.generated().Pipeline(), c.sharedKey())) + return (*pageblob.Client)(base.NewPageBlobClient(blobURL, c.generated().InternalClient().WithClientName(shared.PageBlobClient), c.sharedKey())) } // Create creates a new container within a storage account. If a container with the same name already exists, the operation fails. @@ -190,7 +210,7 @@ func (c *Client) Restore(ctx context.Context, deletedContainerVersion string, op // For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-metadata. func (c *Client) GetProperties(ctx context.Context, o *GetPropertiesOptions) (GetPropertiesResponse, error) { // NOTE: GetMetadata actually calls GetProperties internally because GetProperties returns the metadata AND the properties. - // This allows us to not expose a GetProperties method at all simplifying the API. 
+ // This allows us to not expose a GetMetadata method at all simplifying the API. // The optionals are nil, like they were in track 1.5 opts, leaseAccessConditions := o.format() @@ -226,6 +246,14 @@ func (c *Client) SetAccessPolicy(ctx context.Context, o *SetAccessPolicyOptions) return resp, err } +// GetAccountInfo provides account level information +// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-account-information?tabs=shared-access-signatures. +func (c *Client) GetAccountInfo(ctx context.Context, o *GetAccountInfoOptions) (GetAccountInfoResponse, error) { + getAccountInfoOptions := o.format() + resp, err := c.generated().GetAccountInfo(ctx, getAccountInfoOptions) + return resp, err +} + // NewListBlobsFlatPager returns a pager for blobs starting from the specified Marker. Use an empty // Marker to start enumeration from the beginning. Blob names are returned in lexicographic order. // For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs. @@ -253,7 +281,7 @@ func (c *Client) NewListBlobsFlatPager(o *ListBlobsFlatOptions) *runtime.Pager[L if err != nil { return ListBlobsFlatResponse{}, err } - resp, err := c.generated().Pipeline().Do(req) + resp, err := c.generated().InternalClient().Pipeline().Do(req) if err != nil { return ListBlobsFlatResponse{}, err } @@ -289,7 +317,7 @@ func (c *Client) NewListBlobsHierarchyPager(delimiter string, o *ListBlobsHierar if err != nil { return ListBlobsHierarchyResponse{}, err } - resp, err := c.generated().Pipeline().Do(req) + resp, err := c.generated().InternalClient().Pipeline().Do(req) if err != nil { return ListBlobsHierarchyResponse{}, err } @@ -329,3 +357,76 @@ func (c *Client) GetSASURL(permissions sas.ContainerPermissions, expiry time.Tim return endpoint, nil } + +// NewBatchBuilder creates an instance of BatchBuilder using the same auth policy as the client. +// BatchBuilder is used to build the batch consisting of either delete or set tier sub-requests. +// All sub-requests in the batch must be of the same type, either delete or set tier. +func (c *Client) NewBatchBuilder() (*BatchBuilder, error) { + var authPolicy policy.Policy + + switch cred := c.credential().(type) { + case *azcore.TokenCredential: + authPolicy = shared.NewStorageChallengePolicy(*cred) + case *SharedKeyCredential: + authPolicy = exported.NewSharedKeyCredPolicy(cred) + case nil: + // for authentication using SAS + authPolicy = nil + default: + return nil, fmt.Errorf("unrecognised authentication type %T", cred) + } + + return &BatchBuilder{ + endpoint: c.URL(), + authPolicy: authPolicy, + }, nil +} + +// SubmitBatch operation allows multiple API calls to be embedded into a single HTTP request. +// It builds the request body using the BatchBuilder object passed. +// BatchBuilder contains the list of operations to be submitted. It supports up to 256 sub-requests in a single batch. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/blob-batch. 
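A sketch of the batch flow these additions enable, before the SubmitBatch implementation that follows below. It assumes an authenticated *container.Client; the blob names are illustrative:

```go
package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
)

// DeleteBlobsBatch deletes a fixed set of blobs in a single round trip.
func DeleteBlobsBatch(ctx context.Context, containerClient *container.Client) error {
	bb, err := containerClient.NewBatchBuilder()
	if err != nil {
		return err
	}
	// A batch carries a single operation type; these sub-requests are all deletes.
	for _, name := range []string{"logs/old-1.txt", "logs/old-2.txt"} {
		if err := bb.Delete(name, nil); err != nil {
			return err
		}
	}
	resp, err := containerClient.SubmitBatch(ctx, bb, nil)
	if err != nil {
		return err
	}
	// Sub-requests succeed or fail independently; a nil Error means success.
	for _, item := range resp.Responses {
		if item.Error != nil && item.BlobName != nil {
			fmt.Printf("delete of %s failed: %v\n", *item.BlobName, item.Error)
		}
	}
	return nil
}
```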
+func (c *Client) SubmitBatch(ctx context.Context, bb *BatchBuilder, options *SubmitBatchOptions) (SubmitBatchResponse, error) {
+	if bb == nil || len(bb.subRequests) == 0 {
+		return SubmitBatchResponse{}, errors.New("batch builder is empty")
+	}
+
+	// create the request body
+	batchReq, batchID, err := exported.CreateBatchRequest(&exported.BlobBatchBuilder{
+		AuthPolicy:  bb.authPolicy,
+		SubRequests: bb.subRequests,
+	})
+	if err != nil {
+		return SubmitBatchResponse{}, err
+	}
+
+	reader := bytes.NewReader(batchReq)
+	rsc := streaming.NopCloser(reader)
+	multipartContentType := "multipart/mixed; boundary=" + batchID
+
+	resp, err := c.generated().SubmitBatch(ctx, int64(len(batchReq)), multipartContentType, rsc, options.format())
+	if err != nil {
+		return SubmitBatchResponse{}, err
+	}
+
+	batchResponses, err := exported.ParseBlobBatchResponse(resp.Body, resp.ContentType, bb.subRequests)
+	if err != nil {
+		return SubmitBatchResponse{}, err
+	}
+
+	return SubmitBatchResponse{
+		Responses:   batchResponses,
+		ContentType: resp.ContentType,
+		RequestID:   resp.RequestID,
+		Version:     resp.Version,
+	}, nil
+}
+
+// FilterBlobs finds all blobs in the container whose tags match a given search expression.
+// https://docs.microsoft.com/en-us/rest/api/storageservices/find-blobs-by-tags-container
+// e.g. "dog='germanshepherd' and penguin='emperorpenguin'"
+func (c *Client) FilterBlobs(ctx context.Context, where string, o *FilterBlobsOptions) (FilterBlobsResponse, error) {
+	containerClientFilterBlobsOptions := o.format()
+	resp, err := c.generated().FilterBlobs(ctx, where, containerClientFilterBlobsOptions)
+	return resp, err
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go
index d819ccb4..61d936ab 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go
@@ -7,6 +7,7 @@
 package container
 
 import (
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
 	"reflect"
 	"time"
 
@@ -329,3 +330,98 @@ func formatTime(c *SignedIdentifier) error {
 
 	return nil
 }
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// GetAccountInfoOptions provides a set of options for Client.GetAccountInfo.
+type GetAccountInfoOptions struct {
+	// placeholder for future options
+}
+
+func (o *GetAccountInfoOptions) format() *generated.ContainerClientGetAccountInfoOptions {
+	return nil
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
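To round out the FilterBlobs helper above, a short usage sketch; the tag expression is the one from its doc comment, and `containerClient` is again assumed to be an existing client:

```go
package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
)

// FindTaggedBlobs runs the sample tag query against the container.
func FindTaggedBlobs(ctx context.Context, containerClient *container.Client) error {
	where := "dog='germanshepherd' and penguin='emperorpenguin'"
	resp, err := containerClient.FilterBlobs(ctx, where, nil)
	if err != nil {
		return err
	}
	// FilterBlobsResponse aliases the generated response type, which carries
	// the segment of matching blobs returned by the service.
	_ = resp
	return nil
}
```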
+// BatchDeleteOptions contains the optional parameters for the BatchBuilder.Delete method.
+type BatchDeleteOptions struct {
+	blob.DeleteOptions
+	VersionID *string
+	Snapshot  *string
+}
+
+func (o *BatchDeleteOptions) format() (*generated.BlobClientDeleteOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) {
+	if o == nil {
+		return nil, nil, nil
+	}
+
+	basics := generated.BlobClientDeleteOptions{
+		DeleteSnapshots: o.DeleteSnapshots,
+		DeleteType:      o.BlobDeleteType, // None by default
+		Snapshot:        o.Snapshot,
+		VersionID:       o.VersionID,
+	}
+
+	leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions)
+	return &basics, leaseAccessConditions, modifiedAccessConditions
+}
+
+// BatchSetTierOptions contains the optional parameters for the BatchBuilder.SetTier method.
+type BatchSetTierOptions struct {
+	blob.SetTierOptions
+	VersionID *string
+	Snapshot  *string
+}
+
+func (o *BatchSetTierOptions) format() (*generated.BlobClientSetTierOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) {
+	if o == nil {
+		return nil, nil, nil
+	}
+
+	basics := generated.BlobClientSetTierOptions{
+		RehydratePriority: o.RehydratePriority,
+		Snapshot:          o.Snapshot,
+		VersionID:         o.VersionID,
+	}
+
+	leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions)
+	return &basics, leaseAccessConditions, modifiedAccessConditions
+}
+
+// SubmitBatchOptions contains the optional parameters for the Client.SubmitBatch method.
+type SubmitBatchOptions struct {
+	// placeholder for future options
+}
+
+func (o *SubmitBatchOptions) format() *generated.ContainerClientSubmitBatchOptions {
+	return nil
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// FilterBlobsOptions provides a set of options for Client.FilterBlobs.
+type FilterBlobsOptions struct {
+	// A string value that identifies the portion of the result set to be returned with the next filtering operation. The
+	// operation returns the NextMarker value within the response body if the
+	// operation did not return all results remaining to be listed with the current page. The NextMarker value can be used
+	// as the value for the marker parameter in a subsequent call to request the next
+	// page of results. The marker value is opaque to the client.
+	Marker *string
+	// Specifies the maximum number of blobs to return. If the request does not specify maxresults, or specifies a value
+	// greater than 5000, the server will return up to 5000 items. Note that if the
+	// filtering operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
+	// of the results. For this reason, it is possible that the service will
+	// return fewer results than specified by maxresults, or than the default of 5000.
+ MaxResults *int32 +} + +func (o *FilterBlobsOptions) format() *generated.ContainerClientFilterBlobsOptions { + if o == nil { + return nil + } + return &generated.ContainerClientFilterBlobsOptions{ + Marker: o.Marker, + Maxresults: o.MaxResults, + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/responses.go index 19023430..9aaefe27 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/responses.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/responses.go @@ -7,6 +7,7 @@ package container import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" ) @@ -42,3 +43,27 @@ type GetAccessPolicyResponse = generated.ContainerClientGetAccessPolicyResponse // SetAccessPolicyResponse contains the response from method Client.SetAccessPolicy. type SetAccessPolicyResponse = generated.ContainerClientSetAccessPolicyResponse + +// GetAccountInfoResponse contains the response from method Client.GetAccountInfo. +type GetAccountInfoResponse = generated.ContainerClientGetAccountInfoResponse + +// SubmitBatchResponse contains the response from method Client.SubmitBatch. +type SubmitBatchResponse struct { + // Responses contains the responses of the sub-requests in the batch + Responses []*BatchResponseItem + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BatchResponseItem contains the response for the individual sub-requests. +type BatchResponseItem = exported.BatchResponseItem + +// FilterBlobsResponse contains the response from method Client.FilterBlobs. +type FilterBlobsResponse = generated.ContainerClientFilterBlobsResponse diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/doc.go index d5b6ed6a..9a4806c5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/doc.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/doc.go @@ -51,7 +51,7 @@ Use the key as the credential parameter to authenticate the client: cred, err := azblob.NewSharedKeyCredential(accountName, accountKey) handle(err) - serviceClient, err := azblob.NewServiceClientWithSharedKey(serviceURL, cred, nil) + serviceClient, err := azblob.NewClientWithSharedKeyCredential(serviceURL, cred, nil) handle(err) fmt.Println(serviceClient.URL()) @@ -59,11 +59,12 @@ Use the key as the credential parameter to authenticate the client: Using a Connection String Depending on your use case and authorization method, you may prefer to initialize a client instance with a connection string instead of providing the account URL and credential separately. -To do this, pass the connection string to the service client's `NewServiceClientFromConnectionString` method. +To do this, pass the connection string to the service client's `NewClientFromConnectionString` method. The connection string can be found in your storage account in the Azure Portal under the "Access Keys" section. 
connStr := "DefaultEndpointsProtocol=https;AccountName=;AccountKey=;EndpointSuffix=core.windows.net" - serviceClient, err := azblob.NewServiceClientFromConnectionString(connStr, nil) + serviceClient, err := azblob.NewClientFromConnectionString(connStr, nil) + handle(err) Using a Shared Access Signature (SAS) Token @@ -82,20 +83,20 @@ You can generate a SAS token from the Azure Portal under Shared Access Signature cred, err := azblob.NewSharedKeyCredential(accountName, accountKey) handle(err) - serviceClient, err := azblob.NewServiceClientWithSharedKey(serviceURL, cred, nil) + serviceClient, err := azblob.NewClientWithSharedKeyCredential(serviceURL, cred, nil) handle(err) fmt.Println(serviceClient.URL()) // Alternatively, you can create SAS on the fly - resources := azblob.AccountSASResourceTypes{Service: true} - permission := azblob.AccountSASPermissions{Read: true} + resources := sas.AccountResourceTypes{Service: true} + permission := sas.AccountPermissions{Read: true} start := time.Now() expiry := start.AddDate(0, 0, 1) - serviceURLWithSAS, err := serviceClient.GetSASURL(resources, permission, start, expiry) + serviceURLWithSAS, err := serviceClient.ServiceClient().GetSASURL(resources, permission, expiry, &service.GetSASURLOptions{StartTime: &start}) handle(err) - serviceClientWithSAS, err := azblob.NewServiceClientWithNoCredential(serviceURLWithSAS, nil) + serviceClientWithSAS, err := azblob.NewClientWithNoCredential(serviceURLWithSAS, nil) handle(err) fmt.Println(serviceClientWithSAS.URL()) @@ -135,13 +136,13 @@ Examples handle(err) // The service URL for blob endpoints is usually in the form: http(s)://.blob.core.windows.net/ - serviceClient, err := azblob.NewServiceClientWithSharedKey(fmt.Sprintf("https://%s.blob.core.windows.net/", accountName), cred, nil) + serviceClient, err := azblob.NewClientWithSharedKeyCredential(fmt.Sprintf("https://%s.blob.core.windows.net/", accountName), cred, nil) handle(err) // ===== 1. Create a container ===== // First, create a container client, and use the Create method to create a new container in your account - containerClient, err := serviceClient.NewContainerClient("testcontainer") + containerClient := serviceClient.ServiceClient().NewContainerClient("testcontainer") handle(err) // All APIs have an options' bag struct as a parameter. @@ -154,13 +155,13 @@ Examples uploadData := "Hello world!" // Create a new blockBlobClient from the containerClient - blockBlobClient, err := containerClient.NewBlockBlobClient("HelloWorld.txt") + blockBlobClient := containerClient.NewBlockBlobClient("HelloWorld.txt") handle(err) // Upload data to the block blob - blockBlobUploadOptions := azblob.BlockBlobUploadOptions{ - Metadata: map[string]string{"Foo": "Bar"}, - TagsMap: map[string]string{"Year": "2022"}, + blockBlobUploadOptions := blockblob.UploadOptions{ + Metadata: map[string]*string{"Foo": to.Ptr("Bar")}, + Tags: map[string]string{"Year": "2022"}, } _, err = blockBlobClient.Upload(context.TODO(), streaming.NopCloser(strings.NewReader(uploadData)), &blockBlobUploadOptions) handle(err) @@ -175,10 +176,9 @@ Examples downloadData, err := io.ReadAll(reader) handle(err) if string(downloadData) != uploadData { - handle(errors.New("Uploaded data should be same as downloaded data")) + handle(errors.New("uploaded data should be same as downloaded data")) } - if err = reader.Close(); err != nil { handle(err) return @@ -189,18 +189,15 @@ Examples // To iterate over a page use the NextPage(context.Context) to fetch the next page of results. 
// PageResponse() can be used to iterate over the results of the specific page. // Always check the Err() method after paging to see if an error was returned by the pager. A pager will return either an error or the page of results. - pager := containerClient.ListBlobsFlat(nil) - for pager.NextPage(context.TODO()) { - resp := pager.PageResponse() + pager := containerClient.NewListBlobsFlatPager(nil) + for pager.More() { + resp, err := pager.NextPage(context.TODO()) + handle(err) for _, v := range resp.Segment.BlobItems { fmt.Println(*v.Name) } } - if err = pager.Err(); err != nil { - handle(err) - } - // Delete the blob. _, err = blockBlobClient.Delete(context.TODO(), nil) handle(err) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go index 16e6cac0..c95f1925 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go @@ -7,14 +7,19 @@ package base import ( - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" ) +// ClientOptions contains the optional parameters when creating a Client. +type ClientOptions struct { + azcore.ClientOptions +} + type Client[T any] struct { - inner *T - sharedKey *exported.SharedKeyCredential + inner *T + credential any } func InnerClient[T any](client *Client[T]) *T { @@ -22,31 +27,40 @@ func InnerClient[T any](client *Client[T]) *T { } func SharedKey[T any](client *Client[T]) *exported.SharedKeyCredential { - return client.sharedKey + switch cred := client.credential.(type) { + case *exported.SharedKeyCredential: + return cred + default: + return nil + } +} + +func Credential[T any](client *Client[T]) any { + return client.credential } func NewClient[T any](inner *T) *Client[T] { return &Client[T]{inner: inner} } -func NewServiceClient(containerURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *Client[generated.ServiceClient] { +func NewServiceClient(containerURL string, azClient *azcore.Client, credential any) *Client[generated.ServiceClient] { return &Client[generated.ServiceClient]{ - inner: generated.NewServiceClient(containerURL, pipeline), - sharedKey: sharedKey, + inner: generated.NewServiceClient(containerURL, azClient), + credential: credential, } } -func NewContainerClient(containerURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *Client[generated.ContainerClient] { +func NewContainerClient(containerURL string, azClient *azcore.Client, credential any) *Client[generated.ContainerClient] { return &Client[generated.ContainerClient]{ - inner: generated.NewContainerClient(containerURL, pipeline), - sharedKey: sharedKey, + inner: generated.NewContainerClient(containerURL, azClient), + credential: credential, } } -func NewBlobClient(blobURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *Client[generated.BlobClient] { +func NewBlobClient(blobURL string, azClient *azcore.Client, credential any) *Client[generated.BlobClient] { return &Client[generated.BlobClient]{ - inner: generated.NewBlobClient(blobURL, pipeline), - sharedKey: sharedKey, + inner: generated.NewBlobClient(blobURL, azClient), + credential: credential, 
} } @@ -57,29 +71,32 @@ type CompositeClient[T, U any] struct { } func InnerClients[T, U any](client *CompositeClient[T, U]) (*Client[T], *U) { - return &Client[T]{inner: client.innerT}, client.innerU + return &Client[T]{ + inner: client.innerT, + credential: client.sharedKey, + }, client.innerU } -func NewAppendBlobClient(blobURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.AppendBlobClient] { +func NewAppendBlobClient(blobURL string, azClient *azcore.Client, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.AppendBlobClient] { return &CompositeClient[generated.BlobClient, generated.AppendBlobClient]{ - innerT: generated.NewBlobClient(blobURL, pipeline), - innerU: generated.NewAppendBlobClient(blobURL, pipeline), + innerT: generated.NewBlobClient(blobURL, azClient), + innerU: generated.NewAppendBlobClient(blobURL, azClient), sharedKey: sharedKey, } } -func NewBlockBlobClient(blobURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.BlockBlobClient] { +func NewBlockBlobClient(blobURL string, azClient *azcore.Client, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.BlockBlobClient] { return &CompositeClient[generated.BlobClient, generated.BlockBlobClient]{ - innerT: generated.NewBlobClient(blobURL, pipeline), - innerU: generated.NewBlockBlobClient(blobURL, pipeline), + innerT: generated.NewBlobClient(blobURL, azClient), + innerU: generated.NewBlockBlobClient(blobURL, azClient), sharedKey: sharedKey, } } -func NewPageBlobClient(blobURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.PageBlobClient] { +func NewPageBlobClient(blobURL string, azClient *azcore.Client, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.PageBlobClient] { return &CompositeClient[generated.BlobClient, generated.PageBlobClient]{ - innerT: generated.NewBlobClient(blobURL, pipeline), - innerU: generated.NewPageBlobClient(blobURL, pipeline), + innerT: generated.NewBlobClient(blobURL, azClient), + innerU: generated.NewPageBlobClient(blobURL, azClient), sharedKey: sharedKey, } } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/blob_batch.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/blob_batch.go new file mode 100644 index 00000000..02966ee3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/blob_batch.go @@ -0,0 +1,279 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package exported + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" + "io" + "mime" + "mime/multipart" + "net/http" + "net/textproto" + "strconv" + "strings" +) + +const ( + batchIdPrefix = "batch_" + httpVersion = "HTTP/1.1" + httpNewline = "\r\n" +) + +// createBatchID is used for creating a new batch id which is used as batch boundary in the request body +func createBatchID() (string, error) { + batchID, err := uuid.New() + if err != nil { + return "", err + } + + return batchIdPrefix + batchID.String(), nil +} + +// buildSubRequest is used for building the sub-request. Example: +// DELETE /container0/blob0 HTTP/1.1 +// x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT +// Authorization: SharedKey account:G4jjBXA7LI/RnWKIOQ8i9xH4p76pAQ+4Fs4R1VxasaE= +// Content-Length: 0 +func buildSubRequest(req *policy.Request) []byte { + var batchSubRequest strings.Builder + blobPath := req.Raw().URL.EscapedPath() + if len(req.Raw().URL.RawQuery) > 0 { + blobPath += "?" + req.Raw().URL.RawQuery + } + + batchSubRequest.WriteString(fmt.Sprintf("%s %s %s%s", req.Raw().Method, blobPath, httpVersion, httpNewline)) + + for k, v := range req.Raw().Header { + if strings.EqualFold(k, shared.HeaderXmsVersion) { + continue + } + if len(v) > 0 { + batchSubRequest.WriteString(fmt.Sprintf("%v: %v%v", k, v[0], httpNewline)) + } + } + + batchSubRequest.WriteString(httpNewline) + return []byte(batchSubRequest.String()) +} + +// CreateBatchRequest creates a new batch request using the sub-requests present in the BlobBatchBuilder. 
+// +// Example of a sub-request in the batch request body: +// +// --batch_357de4f7-6d0b-4e02-8cd2-6361411a9525 +// Content-Type: application/http +// Content-Transfer-Encoding: binary +// Content-ID: 0 +// +// DELETE /container0/blob0 HTTP/1.1 +// x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT +// Authorization: SharedKey account:G4jjBXA7LI/RnWKIOQ8i9xH4p76pAQ+4Fs4R1VxasaE= +// Content-Length: 0 +func CreateBatchRequest(bb *BlobBatchBuilder) ([]byte, string, error) { + batchID, err := createBatchID() + if err != nil { + return nil, "", err + } + + // Create a new multipart buffer + reqBody := &bytes.Buffer{} + writer := multipart.NewWriter(reqBody) + + // Set the boundary + err = writer.SetBoundary(batchID) + if err != nil { + return nil, "", err + } + + partHeaders := make(textproto.MIMEHeader) + partHeaders["Content-Type"] = []string{"application/http"} + partHeaders["Content-Transfer-Encoding"] = []string{"binary"} + var partWriter io.Writer + + for i, req := range bb.SubRequests { + if bb.AuthPolicy != nil { + _, err := bb.AuthPolicy.Do(req) + if err != nil && !strings.EqualFold(err.Error(), "no more policies") { + if log.Should(EventSubmitBatch) { + log.Writef(EventSubmitBatch, "failed to authorize sub-request for %v.\nError: %v", req.Raw().URL.Path, err.Error()) + } + return nil, "", err + } + } + + partHeaders["Content-ID"] = []string{fmt.Sprintf("%v", i)} + partWriter, err = writer.CreatePart(partHeaders) + if err != nil { + return nil, "", err + } + + _, err = partWriter.Write(buildSubRequest(req)) + if err != nil { + return nil, "", err + } + } + + // Close the multipart writer + err = writer.Close() + if err != nil { + return nil, "", err + } + + return reqBody.Bytes(), batchID, nil +} + +// UpdateSubRequestHeaders updates the sub-request headers. +// Removes x-ms-version header. +func UpdateSubRequestHeaders(req *policy.Request) { + // remove x-ms-version header from the request header + for k := range req.Raw().Header { + if strings.EqualFold(k, shared.HeaderXmsVersion) { + delete(req.Raw().Header, k) + } + } +} + +// BatchResponseItem contains the response for the individual sub-requests. +type BatchResponseItem struct { + ContentID *int + ContainerName *string + BlobName *string + RequestID *string + Version *string + Error error // nil error indicates that the batch sub-request operation is successful +} + +func getResponseBoundary(contentType *string) (string, error) { + if contentType == nil { + return "", fmt.Errorf("Content-Type returned in SubmitBatch response is nil") + } + + _, params, err := mime.ParseMediaType(*contentType) + if err != nil { + return "", err + } + + if val, ok := params["boundary"]; ok { + return val, nil + } else { + return "", fmt.Errorf("batch boundary not present in Content-Type header of the SubmitBatch response.\nContent-Type: %v", *contentType) + } +} + +func getContentID(part *multipart.Part) (*int, error) { + contentID := part.Header.Get("Content-ID") + if contentID == "" { + return nil, nil + } + + val, err := strconv.Atoi(strings.TrimSpace(contentID)) + if err != nil { + return nil, err + } + return &val, nil +} + +func getResponseHeader(key string, resp *http.Response) *string { + val := resp.Header.Get(key) + if val == "" { + return nil + } + return &val +} + +// ParseBlobBatchResponse is used for parsing the batch response body into individual sub-responses for each item in the batch. 
+func ParseBlobBatchResponse(respBody io.ReadCloser, contentType *string, subRequests []*policy.Request) ([]*BatchResponseItem, error) { + boundary, err := getResponseBoundary(contentType) + if err != nil { + return nil, err + } + + respReader := multipart.NewReader(respBody, boundary) + var responses []*BatchResponseItem + + for { + part, err := respReader.NextPart() + if errors.Is(err, io.EOF) { + break + } else if err != nil { + return nil, err + } + + batchSubResponse := &BatchResponseItem{} + batchSubResponse.ContentID, err = getContentID(part) + if err != nil { + return nil, err + } + + if batchSubResponse.ContentID != nil { + path := strings.Trim(subRequests[*batchSubResponse.ContentID].Raw().URL.Path, "/") + p := strings.Split(path, "/") + batchSubResponse.ContainerName = to.Ptr(p[0]) + batchSubResponse.BlobName = to.Ptr(strings.Join(p[1:], "/")) + } + + respBytes, err := io.ReadAll(part) + if err != nil { + return nil, err + } + respBytes = append(respBytes, byte('\n')) + buf := bytes.NewBuffer(respBytes) + resp, err := http.ReadResponse(bufio.NewReader(buf), nil) + // sub-response parsing error + if err != nil { + return nil, err + } + + batchSubResponse.RequestID = getResponseHeader(shared.HeaderXmsRequestID, resp) + batchSubResponse.Version = getResponseHeader(shared.HeaderXmsVersion, resp) + + // sub-response failure + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + if len(responses) == 0 && batchSubResponse.ContentID == nil { + // this case can happen when the parent request fails. + // For example, batch request having more than 256 sub-requests. + return nil, fmt.Errorf("%v", string(respBytes)) + } + + resp.Request = subRequests[*batchSubResponse.ContentID].Raw() + batchSubResponse.Error = runtime.NewResponseError(resp) + } + + responses = append(responses, batchSubResponse) + } + + if len(responses) != len(subRequests) { + return nil, fmt.Errorf("expected %v responses, got %v for the batch ID: %v", len(subRequests), len(responses), boundary) + } + + return responses, nil +} + +// not exported but used for batch request creation + +// BlobBatchBuilder is used for creating the blob batch request +type BlobBatchBuilder struct { + AuthPolicy policy.Policy + SubRequests []*policy.Request +} + +// BlobBatchOperationType defines the operation of the blob batch sub-requests. +type BlobBatchOperationType string + +const ( + BatchDeleteOperationType BlobBatchOperationType = "delete" + BatchSetTierOperationType BlobBatchOperationType = "set tier" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go index 9bc1ca47..d0355727 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go @@ -13,7 +13,7 @@ import ( // HTTPRange defines a range of bytes within an HTTP resource, starting at offset and // ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange -// which has an offset but no zero value count indicates from the offset to the resource's end. +// which has an offset and zero value count indicates from the offset to the resource's end. 
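The three cases in the corrected HTTPRange comment, spelled out against the public `azblob.HTTPRange` alias (the struct definition follows below):

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
)

func main() {
	// Zero value: the entire resource.
	all := azblob.HTTPRange{}
	// Offset plus a non-zero Count: bytes 512 through 1535.
	window := azblob.HTTPRange{Offset: 512, Count: 1024}
	// Offset with a zero-value Count: byte 512 through the end of the resource.
	tail := azblob.HTTPRange{Offset: 512}
	fmt.Println(all, window, tail)
}
```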
type HTTPRange struct { Offset int64 Count int64 diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/log_events.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/log_events.go index 9a368d0c..d775fb5c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/log_events.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/log_events.go @@ -1,5 +1,8 @@ +//go:build go1.18 +// +build go1.18 + // Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. +// Licensed under the MIT License. See License.txt in the project root for license information. package exported @@ -11,4 +14,7 @@ import ( const ( // EventUpload is used when we compute number of blocks to upload and size of each block. EventUpload log.Event = "azblob.Upload" + + // EventSubmitBatch is used for logging events related to submit blob batch operation. + EventSubmitBatch log.Event = "azblob.SubmitBatch" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go index d1563105..bd0bd5e2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go @@ -172,7 +172,7 @@ func (c *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) (string, er // Join the sorted key values separated by ',' // Then prepend "keyName:"; then add this string to the buffer - cr.WriteString("\n" + paramName + ":" + strings.Join(paramValues, ",")) + cr.WriteString("\n" + strings.ToLower(paramName) + ":" + strings.Join(paramValues, ",")) } } return cr.String(), nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go index 784310d7..c8be74c2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go @@ -8,5 +8,5 @@ package exported const ( ModuleName = "azblob" - ModuleVersion = "v1.0.0" + ModuleVersion = "v1.2.1" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/appendblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/appendblob_client.go index 3b6184fe..288df7ed 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/appendblob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/appendblob_client.go @@ -8,12 +8,25 @@ package generated -import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" +) func (client *AppendBlobClient) Endpoint() string { return client.endpoint } -func (client *AppendBlobClient) Pipeline() runtime.Pipeline { - return client.pl +func (client *AppendBlobClient) InternalClient() *azcore.Client { + return client.internal +} + +// NewAppendBlobClient creates a new instance of AppendBlobClient with the specified values. +// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. 
+// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider.
+func NewAppendBlobClient(endpoint string, azClient *azcore.Client) *AppendBlobClient {
+	client := &AppendBlobClient{
+		internal: azClient,
+		endpoint: endpoint,
+	}
+	return client
 }
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md
index 6b3e03c7..25deeec3 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md
@@ -7,7 +7,7 @@ go: true
 clear-output-folder: false
 version: "^3.0.0"
 license-header: MICROSOFT_MIT_NO_VERSION
-input-file: "https://raw.githubusercontent.com/Azure/azure-rest-api-specs/e515b6251fdc21015282d2e84b85beec7c091763/specification/storage/data-plane/Microsoft.BlobStorage/preview/2020-10-02/blob.json"
+input-file: "https://raw.githubusercontent.com/Azure/azure-rest-api-specs/a32d0b2423d19835246bb2ef92941503bfd5e734/specification/storage/data-plane/Microsoft.BlobStorage/preview/2021-12-02/blob.json"
 credential-scope: "https://storage.azure.com/.default"
 output-folder: ../generated
 file-prefix: "zz_"
@@ -19,7 +19,43 @@ modelerfour:
   seal-single-value-enum-by-default: true
 lenient-model-deduplication: true
 export-clients: true
-use: "@autorest/go@4.0.0-preview.45"
+use: "@autorest/go@4.0.0-preview.61"
+```
+
+### Updating service version to 2023-08-03
+```yaml
+directive:
+- from:
+  - zz_appendblob_client.go
+  - zz_blob_client.go
+  - zz_blockblob_client.go
+  - zz_container_client.go
+  - zz_pageblob_client.go
+  - zz_service_client.go
+  where: $
+  transform: >-
+    return $.
+      replaceAll(`[]string{"2021-12-02"}`, `[]string{ServiceVersion}`).
+      replaceAll(`2021-12-02`, `2023-08-03`);
+```
+
+### Undo breaking change with BlobName
+``` yaml
+directive:
+- from: zz_models.go
+  where: $
+  transform: >-
+    return $.
+      replace(/Name\s+\*BlobName/g, `Name *string`);
+```
+
+### Removing UnmarshalXML for BlobItems to create custom UnmarshalXML function
+```yaml
+directive:
+- from: swagger-document
+  where: $.definitions
+  transform: >
+    $.BlobItemInternal["x-ms-go-omit-serde-methods"] = true;
 ```
 
 ### Remove pager methods and export various generated methods in container client
@@ -30,7 +66,7 @@ directive:
   where: $
   transform: >-
     return $.
-      replace(/func \(client \*ContainerClient\) NewListBlobFlatSegmentPager\(.+\/\/ listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request/s, `// listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request`).
+      replace(/func \(client \*ContainerClient\) NewListBlobFlatSegmentPager\(.+\/\/ listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request/s, `//\n// listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request`).
       replace(/\(client \*ContainerClient\) listBlobFlatSegmentCreateRequest\(/, `(client *ContainerClient) ListBlobFlatSegmentCreateRequest(`).
       replace(/\(client \*ContainerClient\) listBlobFlatSegmentHandleResponse\(/, `(client *ContainerClient) ListBlobFlatSegmentHandleResponse(`);
 ```
@@ -43,7 +79,7 @@ directive:
   where: $
   transform: >-
     return $.
-      replace(/func \(client \*ServiceClient\) NewListContainersSegmentPager\(.+\/\/ listContainersSegmentCreateRequest creates the ListContainersSegment request/s, `// listContainersSegmentCreateRequest creates the ListContainersSegment request`).
+ replace(/func \(client \*ServiceClient\) NewListContainersSegmentPager\(.+\/\/ listContainersSegmentCreateRequest creates the ListContainersSegment request/s, `//\n// listContainersSegmentCreateRequest creates the ListContainersSegment request`). replace(/\(client \*ServiceClient\) listContainersSegmentCreateRequest\(/, `(client *ServiceClient) ListContainersSegmentCreateRequest(`). replace(/\(client \*ServiceClient\) listContainersSegmentHandleResponse\(/, `(client *ServiceClient) ListContainersSegmentHandleResponse(`); ``` @@ -244,7 +280,9 @@ directive: ``` yaml directive: -- from: zz_models.go +- from: + - zz_models.go + - zz_options.go where: $ transform: >- return $. @@ -384,4 +422,54 @@ directive: transform: >- return $. replace(/xml:"CORS>CORSRule"/g, "xml:\"Cors>CorsRule\""); -``` \ No newline at end of file +``` + +### Fix Content-Type header in submit batch request + +``` yaml +directive: +- from: + - zz_container_client.go + - zz_service_client.go + where: $ + transform: >- + return $. + replace (/req.SetBody\(body\,\s+\"application\/xml\"\)/g, `req.SetBody(body, multipartContentType)`); +``` + +### Fix response status code check in submit batch request + +``` yaml +directive: +- from: zz_service_client.go + where: $ + transform: >- + return $. + replace(/if\s+!runtime\.HasStatusCode\(httpResp,\s+http\.StatusOK\)\s+\{\s+err\s+=\s+runtime\.NewResponseError\(httpResp\)\s+return ServiceClientSubmitBatchResponse\{\}\,\s+err\s+}/g, + `if !runtime.HasStatusCode(httpResp, http.StatusAccepted) {\n\t\terr = runtime.NewResponseError(httpResp)\n\t\treturn ServiceClientSubmitBatchResponse{}, err\n\t}`); +``` + +### Convert time to GMT for If-Modified-Since and If-Unmodified-Since request headers + +``` yaml +directive: +- from: + - zz_container_client.go + - zz_blob_client.go + - zz_appendblob_client.go + - zz_blockblob_client.go + - zz_pageblob_client.go + where: $ + transform: >- + return $. + replace (/req\.Raw\(\)\.Header\[\"If-Modified-Since\"\]\s+=\s+\[\]string\{modifiedAccessConditions\.IfModifiedSince\.Format\(time\.RFC1123\)\}/g, + `req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}`). + replace (/req\.Raw\(\)\.Header\[\"If-Unmodified-Since\"\]\s+=\s+\[\]string\{modifiedAccessConditions\.IfUnmodifiedSince\.Format\(time\.RFC1123\)\}/g, + `req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}`). + replace (/req\.Raw\(\)\.Header\[\"x-ms-source-if-modified-since\"\]\s+=\s+\[\]string\{sourceModifiedAccessConditions\.SourceIfModifiedSince\.Format\(time\.RFC1123\)\}/g, + `req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)}`). + replace (/req\.Raw\(\)\.Header\[\"x-ms-source-if-unmodified-since\"\]\s+=\s+\[\]string\{sourceModifiedAccessConditions\.SourceIfUnmodifiedSince\.Format\(time\.RFC1123\)\}/g, + `req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)}`). 
+      replace (/req\.Raw\(\)\.Header\[\"x-ms-immutability-policy-until-date\"\]\s+=\s+\[\]string\{options\.ImmutabilityPolicyExpiry\.Format\(time\.RFC1123\)\}/g,
+        `req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)}`);
+```
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/blob_client.go
index c3d3c260..343073b2 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/blob_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/blob_client.go
@@ -6,12 +6,39 @@
 package generated
 
-import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+import (
+	"context"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+	"time"
+)
+
+// used to convert times from UTC to GMT before sending across the wire
+var gmt = time.FixedZone("GMT", 0)
 
 func (client *BlobClient) Endpoint() string {
 	return client.endpoint
 }
 
-func (client *BlobClient) Pipeline() runtime.Pipeline {
-	return client.pl
+func (client *BlobClient) InternalClient() *azcore.Client {
+	return client.internal
+}
+
+func (client *BlobClient) DeleteCreateRequest(ctx context.Context, options *BlobClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+	return client.deleteCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions)
+}
+
+func (client *BlobClient) SetTierCreateRequest(ctx context.Context, tier AccessTier, options *BlobClientSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+	return client.setTierCreateRequest(ctx, tier, options, leaseAccessConditions, modifiedAccessConditions)
+}
+
+// NewBlobClient creates a new instance of BlobClient with the specified values.
+// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation.
+// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider.
+func NewBlobClient(endpoint string, azClient *azcore.Client) *BlobClient {
+	client := &BlobClient{
+		internal: azClient,
+		endpoint: endpoint,
+	}
+	return client
 }
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/block_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/block_blob_client.go
index a43e327e..873d9a41 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/block_blob_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/block_blob_client.go
@@ -8,12 +8,25 @@
 package generated
 
-import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+import (
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+)
 
 func (client *BlockBlobClient) Endpoint() string {
 	return client.endpoint
 }
 
-func (client *BlockBlobClient) Pipeline() runtime.Pipeline {
-	return client.pl
+func (client *BlockBlobClient) Internal() *azcore.Client {
+	return client.internal
+}
+
+// NewBlockBlobClient creates a new instance of BlockBlobClient with the specified values.
+// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation.
+// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider.
+func NewBlockBlobClient(endpoint string, azClient *azcore.Client) *BlockBlobClient {
+	client := &BlockBlobClient{
+		internal: azClient,
+		endpoint: endpoint,
+	}
+	return client
 }
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/constants.go
new file mode 100644
index 00000000..8c13c441
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/constants.go
@@ -0,0 +1,9 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+
+package generated
+
+const ServiceVersion = "2023-08-03"
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/container_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/container_client.go
index bbbf828a..d43b2c78 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/container_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/container_client.go
@@ -6,12 +6,25 @@
 package generated
 
-import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+import (
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+)
 
 func (client *ContainerClient) Endpoint() string {
 	return client.endpoint
 }
 
-func (client *ContainerClient) Pipeline() runtime.Pipeline {
-	return client.pl
+func (client *ContainerClient) InternalClient() *azcore.Client {
+	return client.internal
+}
+
+// NewContainerClient creates a new instance of ContainerClient with the specified values.
+// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation.
+// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider.
+func NewContainerClient(endpoint string, azClient *azcore.Client) *ContainerClient { + client := &ContainerClient{ + internal: azClient, + endpoint: endpoint, + } + return client } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/models.go index 759d9263..aaef9f53 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/models.go @@ -6,6 +6,12 @@ package generated +import ( + "encoding/xml" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "net/url" +) + type TransactionalContentSetter interface { SetCRC64([]byte) SetMD5([]byte) @@ -35,6 +41,14 @@ func (p *PageBlobClientUploadPagesOptions) SetMD5(v []byte) { p.TransactionalContentMD5 = v } +func (b *BlockBlobClientUploadOptions) SetCRC64(v []byte) { + b.TransactionalContentCRC64 = v +} + +func (b *BlockBlobClientUploadOptions) SetMD5(v []byte) { + b.TransactionalContentMD5 = v +} + type SourceContentSetter interface { SetSourceContentCRC64(v []byte) SetSourceContentMD5(v []byte) @@ -63,3 +77,65 @@ func (p *PageBlobClientUploadPagesFromURLOptions) SetSourceContentCRC64(v []byte func (p *PageBlobClientUploadPagesFromURLOptions) SetSourceContentMD5(v []byte) { p.SourceContentMD5 = v } + +// Custom UnmarshalXML functions for types that need special handling. + +// UnmarshalXML implements the xml.Unmarshaller interface for type BlobPrefix. +func (b *BlobPrefix) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias BlobPrefix + aux := &struct { + *alias + BlobName *BlobName `xml:"Name"` + }{ + alias: (*alias)(b), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + if aux.BlobName != nil { + if aux.BlobName.Encoded != nil && *aux.BlobName.Encoded { + name, err := url.QueryUnescape(*aux.BlobName.Content) + + // name, err := base64.StdEncoding.DecodeString(*aux.BlobName.Content) + if err != nil { + return err + } + b.Name = to.Ptr(string(name)) + } else { + b.Name = aux.BlobName.Content + } + } + return nil +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type BlobItem. 
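The custom unmarshallers above (for BlobPrefix) and below (for BlobItem) exist to reverse the percent-encoding the service applies when a listing returns `<Name Encoded="true">`. The decoding step they rely on is just url.QueryUnescape; a standalone sketch with a made-up name:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// A Name element flagged Encoded="true" carries a percent-encoded payload.
	name, err := url.QueryUnescape("dir%2Ffile%20one.txt")
	if err != nil {
		panic(err)
	}
	fmt.Println(name) // dir/file one.txt
}
```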
+func (b *BlobItem) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias BlobItem + aux := &struct { + *alias + BlobName *BlobName `xml:"Name"` + Metadata additionalProperties `xml:"Metadata"` + OrMetadata additionalProperties `xml:"OrMetadata"` + }{ + alias: (*alias)(b), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + b.Metadata = (map[string]*string)(aux.Metadata) + b.OrMetadata = (map[string]*string)(aux.OrMetadata) + if aux.BlobName != nil { + if aux.BlobName.Encoded != nil && *aux.BlobName.Encoded { + name, err := url.QueryUnescape(*aux.BlobName.Content) + + // name, err := base64.StdEncoding.DecodeString(*aux.BlobName.Content) + if err != nil { + return err + } + b.Name = to.Ptr(string(name)) + } else { + b.Name = aux.BlobName.Content + } + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/pageblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/pageblob_client.go index 8a212cc3..a7c76208 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/pageblob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/pageblob_client.go @@ -6,12 +6,25 @@ package generated -import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" +) func (client *PageBlobClient) Endpoint() string { return client.endpoint } -func (client *PageBlobClient) Pipeline() runtime.Pipeline { - return client.pl +func (client *PageBlobClient) InternalClient() *azcore.Client { + return client.internal +} + +// NewPageBlobClient creates a new instance of PageBlobClient with the specified values. +// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider. +func NewPageBlobClient(endpoint string, azClient *azcore.Client) *PageBlobClient { + client := &PageBlobClient{ + internal: azClient, + endpoint: endpoint, + } + return client } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/service_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/service_client.go index 1f449b95..32c15a2b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/service_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/service_client.go @@ -6,12 +6,25 @@ package generated -import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" +) func (client *ServiceClient) Endpoint() string { return client.endpoint } -func (client *ServiceClient) Pipeline() runtime.Pipeline { - return client.pl +func (client *ServiceClient) InternalClient() *azcore.Client { + return client.internal +} + +// NewServiceClient creates a new instance of ServiceClient with the specified values. +// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider. 
+func NewServiceClient(endpoint string, azClient *azcore.Client) *ServiceClient { + client := &ServiceClient{ + internal: azClient, + endpoint: endpoint, + } + return client } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go index 3742e972..dbfe069e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -22,21 +21,10 @@ import ( ) // AppendBlobClient contains the methods for the AppendBlob group. -// Don't use this type directly, use NewAppendBlobClient() instead. +// Don't use this type directly, use a constructor function instead. type AppendBlobClient struct { + internal *azcore.Client endpoint string - pl runtime.Pipeline -} - -// NewAppendBlobClient creates a new instance of AppendBlobClient with the specified values. -// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. -// - pl - the pipeline used for sending requests and handling responses. -func NewAppendBlobClient(endpoint string, pl runtime.Pipeline) *AppendBlobClient { - client := &AppendBlobClient{ - endpoint: endpoint, - pl: pl, - } - return client } // AppendBlock - The Append Block operation commits a new block of data to the end of an existing append blob. The Append @@ -44,7 +32,7 @@ func NewAppendBlobClient(endpoint string, pl runtime.Pipeline) *AppendBlobClient // AppendBlob. Append Block is supported only on version 2015-02-21 version or later. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - contentLength - The length of the request. // - body - Initial data // - options - AppendBlobClientAppendBlockOptions contains the optional parameters for the AppendBlobClient.AppendBlock method. @@ -55,18 +43,21 @@ func NewAppendBlobClient(endpoint string, pl runtime.Pipeline) *AppendBlobClient // - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
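Stepping back from the generated methods for a moment: across all of these clients the runtime.Pipeline field is replaced by an *azcore.Client, and the hand-written constructors above (NewContainerClient, NewPageBlobClient, NewServiceClient) just store it. A hedged sketch of the wiring the wrapping azblob packages presumably perform; the helper name, module name, and version string are placeholders, not taken from this patch:

package base

import (
    "github.com/Azure/azure-sdk-for-go/sdk/azcore"
    "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
    "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
    "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
)

// newServiceClient is a hypothetical helper showing the new shape: build one
// *azcore.Client (pipeline plus tracing provider) and hand it to the
// generated constructor instead of passing a bare runtime.Pipeline.
func newServiceClient(endpoint string, opts *policy.ClientOptions) (*generated.ServiceClient, error) {
    azClient, err := azcore.NewClient("azblob", "v1.2.0", runtime.PipelineOptions{}, opts)
    if err != nil {
        return nil, err
    }
    return generated.NewServiceClient(endpoint, azClient), nil
}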
func (client *AppendBlobClient) AppendBlock(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *AppendBlobClientAppendBlockOptions, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (AppendBlobClientAppendBlockResponse, error) { + var err error req, err := client.appendBlockCreateRequest(ctx, contentLength, body, options, leaseAccessConditions, appendPositionAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { return AppendBlobClientAppendBlockResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return AppendBlobClientAppendBlockResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return AppendBlobClientAppendBlockResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return AppendBlobClientAppendBlockResponse{}, err } - return client.appendBlockHandleResponse(resp) + resp, err := client.appendBlockHandleResponse(httpResp) + return resp, err } // appendBlockCreateRequest creates the AppendBlock request. @@ -110,10 +101,10 @@ func (client *AppendBlobClient) appendBlockCreateRequest(ctx context.Context, co req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -124,33 +115,33 @@ func (client *AppendBlobClient) appendBlockCreateRequest(ctx context.Context, co if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } req.Raw().Header["Accept"] = []string{"application/xml"} - return req, req.SetBody(body, "application/octet-stream") + if err := req.SetBody(body, "application/octet-stream"); err != nil { + return nil, err + } + return req, nil } // appendBlockHandleResponse handles the AppendBlock response. 
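Every RFC1123 header write in this regeneration, like the If-Modified-Since and If-Unmodified-Since lines just above, now pins the time to GMT before formatting. time.RFC1123 renders the zone abbreviation of the time's location, and HTTP-dates must read "GMT", so formatting a non-UTC time directly yields an invalid header. The gmt location is referenced but not declared in these hunks; assuming the usual definition time.FixedZone("GMT", 0), a standalone illustration:

package main

import (
    "fmt"
    "time"
)

var gmt = time.FixedZone("GMT", 0) // assumed package-level definition, not shown in this patch

func main() {
    t := time.Date(2024, time.January, 30, 12, 0, 0, 0, time.FixedZone("CET", 60*60))
    fmt.Println(t.Format(time.RFC1123))         // Tue, 30 Jan 2024 12:00:00 CET  (not a valid HTTP-date)
    fmt.Println(t.In(gmt).Format(time.RFC1123)) // Tue, 30 Jan 2024 11:00:00 GMT
}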
func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) (AppendBlobClientAppendBlockResponse, error) { result := AppendBlobClientAppendBlockResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) + if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" { + result.BlobAppendOffset = &val } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) if err != nil { return AppendBlobClientAppendBlockResponse{}, err } - result.LastModified = &lastModified + result.BlobCommittedBlockCount = &blobCommittedBlockCount } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return AppendBlobClientAppendBlockResponse{}, err - } - result.ContentMD5 = contentMD5 + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } if val := resp.Header.Get("x-ms-content-crc64"); val != "" { contentCRC64, err := base64.StdEncoding.DecodeString(val) @@ -159,14 +150,12 @@ func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) ( } result.ContentCRC64 = contentCRC64 } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.ContentMD5 = contentMD5 } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) @@ -175,16 +164,14 @@ func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) ( } result.Date = &date } - if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" { - result.BlobAppendOffset = &val + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) } - if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { - blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) - blobCommittedBlockCount := int32(blobCommittedBlockCount32) - if err != nil { - return AppendBlobClientAppendBlockResponse{}, err - } - result.BlobCommittedBlockCount = &blobCommittedBlockCount + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) @@ -193,11 +180,18 @@ func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) ( } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientAppendBlockResponse{}, err + } + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - 
result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } @@ -207,7 +201,7 @@ func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) ( // created with x-ms-blob-type set to AppendBlob. Append Block is supported only on version 2015-02-21 version or later. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - sourceURL - Specify a URL to the copy source. // - contentLength - The length of the request. // - options - AppendBlobClientAppendBlockFromURLOptions contains the optional parameters for the AppendBlobClient.AppendBlockFromURL @@ -221,18 +215,21 @@ func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) ( // - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL // method. func (client *AppendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL string, contentLength int64, options *AppendBlobClientAppendBlockFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (AppendBlobClientAppendBlockFromURLResponse, error) { + var err error req, err := client.appendBlockFromURLCreateRequest(ctx, sourceURL, contentLength, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) if err != nil { return AppendBlobClientAppendBlockFromURLResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return AppendBlobClientAppendBlockFromURLResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return AppendBlobClientAppendBlockFromURLResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return AppendBlobClientAppendBlockFromURLResponse{}, err } - return client.appendBlockFromURLHandleResponse(resp) + resp, err := client.appendBlockFromURLHandleResponse(httpResp) + return resp, err } // appendBlockFromURLCreateRequest creates the AppendBlockFromURL request. 
@@ -283,10 +280,10 @@ func (client *AppendBlobClient) appendBlockFromURLCreateRequest(ctx context.Cont req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -298,10 +295,10 @@ func (client *AppendBlobClient) appendBlockFromURLCreateRequest(ctx context.Cont req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} @@ -309,7 +306,7 @@ func (client *AppendBlobClient) appendBlockFromURLCreateRequest(ctx context.Cont if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -323,22 +320,16 @@ func (client *AppendBlobClient) appendBlockFromURLCreateRequest(ctx context.Cont // appendBlockFromURLHandleResponse handles the AppendBlockFromURL response. 
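The hard-coded "2020-10-02" x-ms-version strings are also gone: every request builder now stamps a shared ServiceVersion constant, matching the "Generated from API version 2023-08-03" doc comments, so the next service-version bump touches one declaration. The patch shows only the call sites; presumably the declaration is simply:

// assumed to live in this package's generated constants file
const ServiceVersion = "2023-08-03"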
func (client *AppendBlobClient) appendBlockFromURLHandleResponse(resp *http.Response) (AppendBlobClientAppendBlockFromURLResponse, error) { result := AppendBlobClientAppendBlockFromURLResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return AppendBlobClientAppendBlockFromURLResponse{}, err - } - result.LastModified = &lastModified + if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" { + result.BlobAppendOffset = &val } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) if err != nil { return AppendBlobClientAppendBlockFromURLResponse{}, err } - result.ContentMD5 = contentMD5 + result.BlobCommittedBlockCount = &blobCommittedBlockCount } if val := resp.Header.Get("x-ms-content-crc64"); val != "" { contentCRC64, err := base64.StdEncoding.DecodeString(val) @@ -347,11 +338,12 @@ func (client *AppendBlobClient) appendBlockFromURLHandleResponse(resp *http.Resp } result.ContentCRC64 = contentCRC64 } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) @@ -360,16 +352,8 @@ func (client *AppendBlobClient) appendBlockFromURLHandleResponse(resp *http.Resp } result.Date = &date } - if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" { - result.BlobAppendOffset = &val - } - if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { - blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) - blobCommittedBlockCount := int32(blobCommittedBlockCount32) - if err != nil { - return AppendBlobClientAppendBlockFromURLResponse{}, err - } - result.BlobCommittedBlockCount = &blobCommittedBlockCount + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { result.EncryptionKeySHA256 = &val @@ -384,13 +368,26 @@ func (client *AppendBlobClient) appendBlockFromURLHandleResponse(resp *http.Resp } result.IsServerEncrypted = &isServerEncrypted } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientAppendBlockFromURLResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } // Create - The Create Append Blob operation creates a new append blob. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - contentLength - The length of the request. 
// - options - AppendBlobClientCreateOptions contains the optional parameters for the AppendBlobClient.Create method. // - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. @@ -399,18 +396,21 @@ func (client *AppendBlobClient) appendBlockFromURLHandleResponse(resp *http.Resp // - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *AppendBlobClient) Create(ctx context.Context, contentLength int64, options *AppendBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (AppendBlobClientCreateResponse, error) { + var err error req, err := client.createCreateRequest(ctx, contentLength, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { return AppendBlobClientCreateResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return AppendBlobClientCreateResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return AppendBlobClientCreateResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return AppendBlobClientCreateResponse{}, err } - return client.createHandleResponse(resp) + resp, err := client.createHandleResponse(httpResp) + return resp, err } // createCreateRequest creates the Create request. @@ -467,10 +467,10 @@ func (client *AppendBlobClient) createCreateRequest(ctx context.Context, content req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -481,7 +481,7 @@ func (client *AppendBlobClient) createCreateRequest(ctx context.Context, content if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -489,7 +489,7 @@ func (client *AppendBlobClient) createCreateRequest(ctx context.Context, content req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} } if options != nil && options.ImmutabilityPolicyExpiry != nil { - 
req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)} + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} } if options != nil && options.ImmutabilityPolicyMode != nil { req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} @@ -504,15 +504,8 @@ func (client *AppendBlobClient) createCreateRequest(ctx context.Context, content // createHandleResponse handles the Create response. func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (AppendBlobClientCreateResponse, error) { result := AppendBlobClientCreateResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return AppendBlobClientCreateResponse{}, err - } - result.LastModified = &lastModified + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } if val := resp.Header.Get("Content-MD5"); val != "" { contentMD5, err := base64.StdEncoding.DecodeString(val) @@ -521,18 +514,6 @@ func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (Appen } result.ContentMD5 = contentMD5 } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -540,6 +521,15 @@ func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (Appen } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { @@ -547,11 +537,21 @@ func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (Appen } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientCreateResponse{}, err + } + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val } return result, nil } @@ -560,25 +560,28 @@ func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (Appen // or later. // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - AppendBlobClientSealOptions contains the optional parameters for the AppendBlobClient.Seal method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. // - AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock // method. func (client *AppendBlobClient) Seal(ctx context.Context, options *AppendBlobClientSealOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions) (AppendBlobClientSealResponse, error) { + var err error req, err := client.sealCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions, appendPositionAccessConditions) if err != nil { return AppendBlobClientSealResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return AppendBlobClientSealResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return AppendBlobClientSealResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return AppendBlobClientSealResponse{}, err } - return client.sealHandleResponse(resp) + resp, err := client.sealHandleResponse(httpResp) + return resp, err } // sealCreateRequest creates the Seal request. @@ -593,7 +596,7 @@ func (client *AppendBlobClient) sealCreateRequest(ctx context.Context, options * reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -601,10 +604,10 @@ func (client *AppendBlobClient) sealCreateRequest(ctx context.Context, options * req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -622,25 +625,9 @@ func (client *AppendBlobClient) sealCreateRequest(ctx context.Context, options * // sealHandleResponse handles the Seal response. 
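The "var err error" at the top of every regenerated method looks redundant, but it gives each method exactly one err variable that every return path assigns, which (an assumption, not stated in this patch) is what lets a deferred tracing or logging hook observe the method's final error. A hypothetical illustration of the shape it enables:

package main

import (
    "context"
    "errors"
    "log"
)

// withErrHook mirrors the generated shape: one err variable assigned on
// every path, so the deferred observer sees the same error that is returned.
func withErrHook(ctx context.Context, step func(context.Context) (string, error)) (string, error) {
    var err error
    defer func() {
        if err != nil {
            log.Printf("operation failed: %v", err)
        }
    }()
    var out string
    out, err = step(ctx)
    if err != nil {
        return "", err
    }
    return out, nil
}

func main() {
    _, _ = withErrHook(context.Background(), func(context.Context) (string, error) {
        return "", errors.New("boom")
    })
}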
func (client *AppendBlobClient) sealHandleResponse(resp *http.Response) (AppendBlobClientSealResponse, error) { result := AppendBlobClientSealResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return AppendBlobClientSealResponse{}, err - } - result.LastModified = &lastModified - } if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -648,6 +635,9 @@ func (client *AppendBlobClient) sealHandleResponse(resp *http.Response) (AppendB } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { isSealed, err := strconv.ParseBool(val) if err != nil { @@ -655,5 +645,18 @@ func (client *AppendBlobClient) sealHandleResponse(resp *http.Response) (AppendB } result.IsSealed = &isSealed } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return AppendBlobClientSealResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go index 6a4b7ed6..caaa3dfe 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -23,44 +22,36 @@ import ( ) // BlobClient contains the methods for the Blob group. -// Don't use this type directly, use NewBlobClient() instead. +// Don't use this type directly, use a constructor function instead. type BlobClient struct { + internal *azcore.Client endpoint string - pl runtime.Pipeline -} - -// NewBlobClient creates a new instance of BlobClient with the specified values. -// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. -// - pl - the pipeline used for sending requests and handling responses. -func NewBlobClient(endpoint string, pl runtime.Pipeline) *BlobClient { - client := &BlobClient{ - endpoint: endpoint, - pl: pl, - } - return client } // AbortCopyFromURL - The Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a destination // blob with zero length and full metadata. 
// If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - copyID - The copy identifier provided in the x-ms-copy-id header of the original Copy Blob operation. // - options - BlobClientAbortCopyFromURLOptions contains the optional parameters for the BlobClient.AbortCopyFromURL method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. func (client *BlobClient) AbortCopyFromURL(ctx context.Context, copyID string, options *BlobClientAbortCopyFromURLOptions, leaseAccessConditions *LeaseAccessConditions) (BlobClientAbortCopyFromURLResponse, error) { + var err error req, err := client.abortCopyFromURLCreateRequest(ctx, copyID, options, leaseAccessConditions) if err != nil { return BlobClientAbortCopyFromURLResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientAbortCopyFromURLResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusNoContent) { - return BlobClientAbortCopyFromURLResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusNoContent) { + err = runtime.NewResponseError(httpResp) + return BlobClientAbortCopyFromURLResponse{}, err } - return client.abortCopyFromURLHandleResponse(resp) + resp, err := client.abortCopyFromURLHandleResponse(httpResp) + return resp, err } // abortCopyFromURLCreateRequest creates the AbortCopyFromURL request. @@ -80,7 +71,7 @@ func (client *BlobClient) abortCopyFromURLCreateRequest(ctx context.Context, cop if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -94,12 +85,6 @@ func (client *BlobClient) abortCopyFromURLHandleResponse(resp *http.Response) (B if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -107,31 +92,40 @@ func (client *BlobClient) abortCopyFromURLHandleResponse(resp *http.Response) (B } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } // AcquireLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - duration - Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite // lease can be between 15 and 60 seconds. A lease duration cannot be changed using // renew or change. // - options - BlobClientAcquireLeaseOptions contains the optional parameters for the BlobClient.AcquireLease method. 
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) AcquireLease(ctx context.Context, duration int32, options *BlobClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientAcquireLeaseResponse, error) { + var err error req, err := client.acquireLeaseCreateRequest(ctx, duration, options, modifiedAccessConditions) if err != nil { return BlobClientAcquireLeaseResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientAcquireLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return BlobClientAcquireLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlobClientAcquireLeaseResponse{}, err } - return client.acquireLeaseHandleResponse(resp) + resp, err := client.acquireLeaseHandleResponse(httpResp) + return resp, err } // acquireLeaseCreateRequest creates the AcquireLease request. @@ -152,10 +146,10 @@ func (client *BlobClient) acquireLeaseCreateRequest(ctx context.Context, duratio req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -166,7 +160,7 @@ func (client *BlobClient) acquireLeaseCreateRequest(ctx context.Context, duratio if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -177,6 +171,16 @@ func (client *BlobClient) acquireLeaseCreateRequest(ctx context.Context, duratio // acquireLeaseHandleResponse handles the AcquireLease response. 
func (client *BlobClient) acquireLeaseHandleResponse(resp *http.Response) (BlobClientAcquireLeaseResponse, error) { result := BlobClientAcquireLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientAcquireLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -190,44 +194,37 @@ func (client *BlobClient) acquireLeaseHandleResponse(resp *http.Response) (BlobC if val := resp.Header.Get("x-ms-lease-id"); val != "" { result.LeaseID = &val } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientAcquireLeaseResponse{}, err - } - result.Date = &date - } return result, nil } // BreakLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - BlobClientBreakLeaseOptions contains the optional parameters for the BlobClient.BreakLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) BreakLease(ctx context.Context, options *BlobClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientBreakLeaseResponse, error) { + var err error req, err := client.breakLeaseCreateRequest(ctx, options, modifiedAccessConditions) if err != nil { return BlobClientBreakLeaseResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientBreakLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return BlobClientBreakLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return BlobClientBreakLeaseResponse{}, err } - return client.breakLeaseHandleResponse(resp) + resp, err := client.breakLeaseHandleResponse(httpResp) + return resp, err } // breakLeaseCreateRequest creates the BreakLease request. 
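The three lease verbs here share one shape, and the duration rules are in the AcquireLease doc comment: -1 means an infinite lease, otherwise 15 to 60 seconds. A usage sketch against these generated signatures; the wrapper function and the proposed lease ID are placeholders:

func leaseCycle(ctx context.Context, client *BlobClient) error {
    // acquire a 60-second lease...
    acq, err := client.AcquireLease(ctx, 60, nil, nil)
    if err != nil {
        return err
    }
    // ...swap in a caller-chosen lease ID...
    if _, err = client.ChangeLease(ctx, *acq.LeaseID, "11111111-1111-1111-1111-111111111111", nil, nil); err != nil {
        return err
    }
    // ...and break it, releasing the lock after the service's break period.
    _, err = client.BreakLease(ctx, nil, nil)
    return err
}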
@@ -247,10 +244,10 @@ func (client *BlobClient) breakLeaseCreateRequest(ctx context.Context, options * req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.BreakPeriod), 10)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -261,7 +258,7 @@ func (client *BlobClient) breakLeaseCreateRequest(ctx context.Context, options * if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -272,6 +269,16 @@ func (client *BlobClient) breakLeaseCreateRequest(ctx context.Context, options * // breakLeaseHandleResponse handles the BreakLease response. func (client *BlobClient) breakLeaseHandleResponse(resp *http.Response) (BlobClientBreakLeaseResponse, error) { result := BlobClientBreakLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientBreakLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -290,29 +297,19 @@ func (client *BlobClient) breakLeaseHandleResponse(resp *http.Response) (BlobCli } result.LeaseTime = &leaseTime } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientBreakLeaseResponse{}, err - } - result.Date = &date - } return result, nil } // ChangeLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - leaseID - Specifies the current lease ID on the resource. // - proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed // lease ID is not in the correct format. 
See Guid Constructor (String) for a list of valid GUID @@ -320,18 +317,21 @@ func (client *BlobClient) breakLeaseHandleResponse(resp *http.Response) (BlobCli // - options - BlobClientChangeLeaseOptions contains the optional parameters for the BlobClient.ChangeLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, options *BlobClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientChangeLeaseResponse, error) { + var err error req, err := client.changeLeaseCreateRequest(ctx, leaseID, proposedLeaseID, options, modifiedAccessConditions) if err != nil { return BlobClientChangeLeaseResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientChangeLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientChangeLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientChangeLeaseResponse{}, err } - return client.changeLeaseHandleResponse(resp) + resp, err := client.changeLeaseHandleResponse(httpResp) + return resp, err } // changeLeaseCreateRequest creates the ChangeLease request. @@ -350,10 +350,10 @@ func (client *BlobClient) changeLeaseCreateRequest(ctx context.Context, leaseID req.Raw().Header["x-ms-lease-id"] = []string{leaseID} req.Raw().Header["x-ms-proposed-lease-id"] = []string{proposedLeaseID} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -364,7 +364,7 @@ func (client *BlobClient) changeLeaseCreateRequest(ctx context.Context, leaseID if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -375,6 +375,16 @@ func (client *BlobClient) changeLeaseCreateRequest(ctx context.Context, leaseID // changeLeaseHandleResponse handles the ChangeLease response. 
func (client *BlobClient) changeLeaseHandleResponse(resp *http.Response) (BlobClientChangeLeaseResponse, error) { result := BlobClientChangeLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientChangeLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -385,25 +395,15 @@ func (client *BlobClient) changeLeaseHandleResponse(resp *http.Response) (BlobCl } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } - if val := resp.Header.Get("x-ms-lease-id"); val != "" { - result.LeaseID = &val - } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientChangeLeaseResponse{}, err - } - result.Date = &date - } return result, nil } @@ -411,7 +411,7 @@ func (client *BlobClient) changeLeaseHandleResponse(resp *http.Response) (BlobCl // until the copy is complete. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies // a page blob snapshot. The value should be URL-encoded as it would appear in a request // URI. The source blob must either be public or must be authenticated via a shared access signature. @@ -420,23 +420,27 @@ func (client *BlobClient) changeLeaseHandleResponse(resp *http.Response) (BlobCl // method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -func (client *BlobClient) CopyFromURL(ctx context.Context, copySource string, options *BlobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientCopyFromURLResponse, error) { - req, err := client.copyFromURLCreateRequest(ctx, copySource, options, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions) +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. 
+func (client *BlobClient) CopyFromURL(ctx context.Context, copySource string, options *BlobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions, cpkScopeInfo *CPKScopeInfo) (BlobClientCopyFromURLResponse, error) { + var err error + req, err := client.copyFromURLCreateRequest(ctx, copySource, options, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions, cpkScopeInfo) if err != nil { return BlobClientCopyFromURLResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientCopyFromURLResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return BlobClientCopyFromURLResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return BlobClientCopyFromURLResponse{}, err } - return client.copyFromURLHandleResponse(resp) + resp, err := client.copyFromURLHandleResponse(httpResp) + return resp, err } // copyFromURLCreateRequest creates the CopyFromURL request. -func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySource string, options *BlobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { +func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySource string, options *BlobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions, cpkScopeInfo *CPKScopeInfo) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -458,10 +462,10 @@ func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySour req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} @@ -470,10 +474,10 @@ func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySour req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = 
[]string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -488,7 +492,7 @@ func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySour if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -499,7 +503,7 @@ func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySour req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} } if options != nil && options.ImmutabilityPolicyExpiry != nil { - req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)} + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} } if options != nil && options.ImmutabilityPolicyMode != nil { req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} @@ -510,6 +514,12 @@ func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySour if options != nil && options.CopySourceAuthorization != nil { req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if options != nil && options.CopySourceTags != nil { + req.Raw().Header["x-ms-copy-source-tag-option"] = []string{string(*options.CopySourceTags)} + } req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } @@ -517,34 +527,22 @@ func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySour // copyFromURLHandleResponse handles the CopyFromURL response. 
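CopyFromURL gains two capabilities in this version: an optional CPKScopeInfo parameter, written to x-ms-encryption-scope, and a CopySourceTags option, written to x-ms-copy-source-tag-option. A hedged call-site sketch; the wrapper function and scope name are illustrative:

func copyWithEncryptionScope(ctx context.Context, client *BlobClient, srcURL string) error {
    scope := "my-encryption-scope" // hypothetical encryption scope name
    _, err := client.CopyFromURL(ctx, srcURL, nil, nil, nil, nil, &CPKScopeInfo{
        EncryptionScope: &scope, // sent as x-ms-encryption-scope
    })
    return err
}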
func (client *BlobClient) copyFromURLHandleResponse(resp *http.Response) (BlobClientCopyFromURLResponse, error) { result := BlobClientCopyFromURLResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) if err != nil { return BlobClientCopyFromURLResponse{}, err } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val + result.ContentCRC64 = contentCRC64 } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) if err != nil { return BlobClientCopyFromURLResponse{}, err } - result.Date = &date + result.ContentMD5 = contentMD5 } if val := resp.Header.Get("x-ms-copy-id"); val != "" { result.CopyID = &val @@ -552,19 +550,34 @@ func (client *BlobClient) copyFromURLHandleResponse(resp *http.Response) (BlobCl if val := resp.Header.Get("x-ms-copy-status"); val != "" { result.CopyStatus = &val } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientCopyFromURLResponse{}, err } - result.ContentMD5 = contentMD5 + result.Date = &date } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - contentCRC64, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientCopyFromURLResponse{}, err } - result.ContentCRC64 = contentCRC64 + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val } return result, nil } @@ -572,25 +585,28 @@ func (client *BlobClient) copyFromURLHandleResponse(resp *http.Response) (BlobCl // CreateSnapshot - The Create Snapshot operation creates a read-only snapshot of a blob // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - BlobClientCreateSnapshotOptions contains the optional parameters for the BlobClient.CreateSnapshot method. // - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. // - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. 
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. func (client *BlobClient) CreateSnapshot(ctx context.Context, options *BlobClientCreateSnapshotOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientCreateSnapshotResponse, error) { + var err error req, err := client.createSnapshotCreateRequest(ctx, options, cpkInfo, cpkScopeInfo, modifiedAccessConditions, leaseAccessConditions) if err != nil { return BlobClientCreateSnapshotResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientCreateSnapshotResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return BlobClientCreateSnapshotResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlobClientCreateSnapshotResponse{}, err } - return client.createSnapshotHandleResponse(resp) + resp, err := client.createSnapshotHandleResponse(httpResp) + return resp, err } // createSnapshotCreateRequest creates the CreateSnapshot request. @@ -625,10 +641,10 @@ func (client *BlobClient) createSnapshotCreateRequest(ctx context.Context, optio req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -642,7 +658,7 @@ func (client *BlobClient) createSnapshotCreateRequest(ctx context.Context, optio if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -653,12 +669,26 @@ func (client *BlobClient) createSnapshotCreateRequest(ctx context.Context, optio // createSnapshotHandleResponse handles the CreateSnapshot response. 
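// Editor's note (hedged sketch): pairing with the CreateSnapshot method above,
// a caller that only needs the new snapshot identifier might, assuming client
// and ctx as before, write:
//
//	resp, err := client.CreateSnapshot(ctx, nil, nil, nil, nil, nil)
//	if err != nil {
//		return err
//	}
//	if resp.Snapshot != nil {
//		log.Printf("snapshot created: %s", *resp.Snapshot)
//	}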
func (client *BlobClient) createSnapshotHandleResponse(resp *http.Response) (BlobClientCreateSnapshotResponse, error) { result := BlobClientCreateSnapshotResponse{} - if val := resp.Header.Get("x-ms-snapshot"); val != "" { - result.Snapshot = &val + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientCreateSnapshotResponse{}, err + } + result.Date = &date } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlobClientCreateSnapshotResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } if val := resp.Header.Get("Last-Modified"); val != "" { lastModified, err := time.Parse(time.RFC1123, val) if err != nil { @@ -666,32 +696,18 @@ func (client *BlobClient) createSnapshotHandleResponse(resp *http.Response) (Blo } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } + if val := resp.Header.Get("x-ms-snapshot"); val != "" { + result.Snapshot = &val + } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } if val := resp.Header.Get("x-ms-version-id"); val != "" { result.VersionID = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientCreateSnapshotResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return BlobClientCreateSnapshotResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted - } return result, nil } @@ -708,23 +724,26 @@ func (client *BlobClient) createSnapshotHandleResponse(resp *http.Response) (Blo // return an HTTP status code of 404 (ResourceNotFound). // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - BlobClientDeleteOptions contains the optional parameters for the BlobClient.Delete method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
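// Editor's note (hedged sketch): assuming client and ctx, and assuming the
// generated DeleteSnapshotsOptionTypeInclude constant backing the
// x-ms-delete-snapshots header set below, deleting a blob together with its
// snapshots might look like:
//
//	opts := &BlobClientDeleteOptions{
//		DeleteSnapshots: to.Ptr(DeleteSnapshotsOptionTypeInclude),
//	}
//	if _, err := client.Delete(ctx, opts, nil, nil); err != nil {
//		return err
//	}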
func (client *BlobClient) Delete(ctx context.Context, options *BlobClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientDeleteResponse, error) { + var err error req, err := client.deleteCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { return BlobClientDeleteResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientDeleteResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return BlobClientDeleteResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return BlobClientDeleteResponse{}, err } - return client.deleteHandleResponse(resp) + resp, err := client.deleteHandleResponse(httpResp) + return resp, err } // deleteCreateRequest creates the Delete request. @@ -754,10 +773,10 @@ func (client *BlobClient) deleteCreateRequest(ctx context.Context, options *Blob req.Raw().Header["x-ms-delete-snapshots"] = []string{string(*options.DeleteSnapshots)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -768,7 +787,7 @@ func (client *BlobClient) deleteCreateRequest(ctx context.Context, options *Blob if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -782,12 +801,6 @@ func (client *BlobClient) deleteHandleResponse(resp *http.Response) (BlobClientD if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -795,28 +808,37 @@ func (client *BlobClient) deleteHandleResponse(resp *http.Response) (BlobClientD } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } // DeleteImmutabilityPolicy - The Delete Immutability Policy operation deletes the immutability policy on the blob // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - BlobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the BlobClient.DeleteImmutabilityPolicy // method. func (client *BlobClient) DeleteImmutabilityPolicy(ctx context.Context, options *BlobClientDeleteImmutabilityPolicyOptions) (BlobClientDeleteImmutabilityPolicyResponse, error) { + var err error req, err := client.deleteImmutabilityPolicyCreateRequest(ctx, options) if err != nil { return BlobClientDeleteImmutabilityPolicyResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientDeleteImmutabilityPolicyResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientDeleteImmutabilityPolicyResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientDeleteImmutabilityPolicyResponse{}, err } - return client.deleteImmutabilityPolicyHandleResponse(resp) + resp, err := client.deleteImmutabilityPolicyHandleResponse(httpResp) + return resp, err } // deleteImmutabilityPolicyCreateRequest creates the DeleteImmutabilityPolicy request. @@ -831,7 +853,7 @@ func (client *BlobClient) deleteImmutabilityPolicyCreateRequest(ctx context.Cont reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -845,12 +867,6 @@ func (client *BlobClient) deleteImmutabilityPolicyHandleResponse(resp *http.Resp if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -858,6 +874,12 @@ func (client *BlobClient) deleteImmutabilityPolicyHandleResponse(resp *http.Resp } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } @@ -865,24 +887,27 @@ func (client *BlobClient) deleteImmutabilityPolicyHandleResponse(resp *http.Resp // can also call Download to read a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - BlobClientDownloadOptions contains the optional parameters for the BlobClient.Download method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
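// Editor's note (hedged sketch): Download hands back the payload as the Body
// field (an io.ReadCloser) on BlobClientDownloadResponse, so the caller owns
// closing it. Assuming client and ctx:
//
//	resp, err := client.Download(ctx, nil, nil, nil, nil)
//	if err != nil {
//		return err
//	}
//	defer resp.Body.Close()
//	data, err := io.ReadAll(resp.Body)
//	if err != nil {
//		return err
//	}
//	_ = data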
func (client *BlobClient) Download(ctx context.Context, options *BlobClientDownloadOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientDownloadResponse, error) { + var err error req, err := client.downloadCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, modifiedAccessConditions) if err != nil { return BlobClientDownloadResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientDownloadResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusPartialContent, http.StatusNotModified) { - return BlobClientDownloadResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusPartialContent, http.StatusNotModified) { + err = runtime.NewResponseError(httpResp) + return BlobClientDownloadResponse{}, err } - return client.downloadHandleResponse(resp) + resp, err := client.downloadHandleResponse(httpResp) + return resp, err } // downloadCreateRequest creates the Download request. @@ -925,10 +950,10 @@ func (client *BlobClient) downloadCreateRequest(ctx context.Context, options *Bl req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -939,7 +964,7 @@ func (client *BlobClient) downloadCreateRequest(ctx context.Context, options *Bl if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -950,76 +975,75 @@ func (client *BlobClient) downloadCreateRequest(ctx context.Context, options *Bl // downloadHandleResponse handles the Download response. 
func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClientDownloadResponse, error) { result := BlobClientDownloadResponse{Body: resp.Body} - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("Accept-Ranges"); val != "" { + result.AcceptRanges = &val + } + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) if err != nil { return BlobClientDownloadResponse{}, err } - result.LastModified = &lastModified - } - for hh := range resp.Header { - if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { - if result.Metadata == nil { - result.Metadata = map[string]*string{} - } - result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) - } - } - if val := resp.Header.Get("x-ms-or-policy-id"); val != "" { - result.ObjectReplicationPolicyID = &val + result.BlobCommittedBlockCount = &blobCommittedBlockCount } - for hh := range resp.Header { - if len(hh) > len("x-ms-or-") && strings.EqualFold(hh[:len("x-ms-or-")], "x-ms-or-") { - if result.Metadata == nil { - result.Metadata = map[string]*string{} - } - result.Metadata[hh[len("x-ms-or-"):]] = to.Ptr(resp.Header.Get(hh)) + if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" { + blobContentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientDownloadResponse{}, err } + result.BlobContentMD5 = blobContentMD5 } - if val := resp.Header.Get("Content-Length"); val != "" { - contentLength, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { return BlobClientDownloadResponse{}, err } - result.ContentLength = &contentLength + result.BlobSequenceNumber = &blobSequenceNumber } - if val := resp.Header.Get("Content-Type"); val != "" { - result.ContentType = &val + if val := resp.Header.Get("x-ms-blob-type"); val != "" { + result.BlobType = (*BlobType)(&val) } - if val := resp.Header.Get("Content-Range"); val != "" { - result.ContentRange = &val + if val := resp.Header.Get("Cache-Control"); val != "" { + result.CacheControl = &val } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) if err != nil { return BlobClientDownloadResponse{}, err } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("Content-Encoding"); val != "" { - result.ContentEncoding = &val - } - if val := resp.Header.Get("Cache-Control"); val != "" { - result.CacheControl = &val + result.ContentCRC64 = contentCRC64 } if val := resp.Header.Get("Content-Disposition"); val != "" { result.ContentDisposition = &val } + if val := resp.Header.Get("Content-Encoding"); val != "" { + result.ContentEncoding = &val + } if val := resp.Header.Get("Content-Language"); val != "" { result.ContentLanguage = &val } - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if val := 
resp.Header.Get("Content-Length"); val != "" { + contentLength, err := strconv.ParseInt(val, 10, 64) if err != nil { return BlobClientDownloadResponse{}, err } - result.BlobSequenceNumber = &blobSequenceNumber + result.ContentLength = &contentLength } - if val := resp.Header.Get("x-ms-blob-type"); val != "" { - result.BlobType = (*BlobType)(&val) + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Content-Range"); val != "" { + result.ContentRange = &val + } + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val } if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" { copyCompletionTime, err := time.Parse(time.RFC1123, val) @@ -1028,9 +1052,6 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien } result.CopyCompletionTime = ©CompletionTime } - if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { - result.CopyStatusDescription = &val - } if val := resp.Header.Get("x-ms-copy-id"); val != "" { result.CopyID = &val } @@ -1043,36 +1064,15 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien if val := resp.Header.Get("x-ms-copy-status"); val != "" { result.CopyStatus = (*CopyStatusType)(&val) } - if val := resp.Header.Get("x-ms-lease-duration"); val != "" { - result.LeaseDuration = (*LeaseDurationType)(&val) - } - if val := resp.Header.Get("x-ms-lease-state"); val != "" { - result.LeaseState = (*LeaseStateType)(&val) - } - if val := resp.Header.Get("x-ms-lease-status"); val != "" { - result.LeaseStatus = (*LeaseStatusType)(&val) - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val + if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { + result.CopyStatusDescription = &val } - if val := resp.Header.Get("x-ms-is-current-version"); val != "" { - isCurrentVersion, err := strconv.ParseBool(val) + if val := resp.Header.Get("x-ms-creation-time"); val != "" { + creationTime, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientDownloadResponse{}, err } - result.IsCurrentVersion = &isCurrentVersion - } - if val := resp.Header.Get("Accept-Ranges"); val != "" { - result.AcceptRanges = &val + result.CreationTime = &creationTime } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) @@ -1081,20 +1081,8 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien } result.Date = &date } - if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { - blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) - blobCommittedBlockCount := int32(blobCommittedBlockCount32) - if err != nil { - return BlobClientDownloadResponse{}, err - } - result.BlobCommittedBlockCount = &blobCommittedBlockCount - } - if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) - if err != nil { - return BlobClientDownloadResponse{}, err - } - result.IsServerEncrypted = &isServerEncrypted + if val := resp.Header.Get("ETag"); val != "" { + 
result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { result.EncryptionKeySHA256 = &val @@ -1102,19 +1090,25 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { result.EncryptionScope = &val } - if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" { - blobContentMD5, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("x-ms-error-code"); val != "" { + result.ErrorCode = &val + } + if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { + immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientDownloadResponse{}, err } - result.BlobContentMD5 = blobContentMD5 + result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn } - if val := resp.Header.Get("x-ms-tag-count"); val != "" { - tagCount, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { + result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) + } + if val := resp.Header.Get("x-ms-is-current-version"); val != "" { + isCurrentVersion, err := strconv.ParseBool(val) if err != nil { return BlobClientDownloadResponse{}, err } - result.TagCount = &tagCount + result.IsCurrentVersion = &isCurrentVersion } if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { isSealed, err := strconv.ParseBool(val) @@ -1123,6 +1117,13 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien } result.IsSealed = &isSealed } + if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } if val := resp.Header.Get("x-ms-last-access-time"); val != "" { lastAccessed, err := time.Parse(time.RFC1123, val) if err != nil { @@ -1130,15 +1131,21 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien } result.LastAccessed = &lastAccessed } - if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { - immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientDownloadResponse{}, err } - result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { - result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) } if val := resp.Header.Get("x-ms-legal-hold"); val != "" { legalHold, err := strconv.ParseBool(val) @@ -1147,15 +1154,40 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien } result.LegalHold = &legalHold } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - contentCRC64, err := base64.StdEncoding.DecodeString(val) + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { 
+ result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("x-ms-or-policy-id"); val != "" { + result.ObjectReplicationPolicyID = &val + } + for hh := range resp.Header { + if len(hh) > len("x-ms-or-") && strings.EqualFold(hh[:len("x-ms-or-")], "x-ms-or-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-or-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-tag-count"); val != "" { + tagCount, err := strconv.ParseInt(val, 10, 64) if err != nil { return BlobClientDownloadResponse{}, err } - result.ContentCRC64 = contentCRC64 + result.TagCount = &tagCount } - if val := resp.Header.Get("x-ms-error-code"); val != "" { - result.ErrorCode = &val + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val } return result, nil } @@ -1163,21 +1195,24 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien // GetAccountInfo - Returns the sku name and account kind // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - BlobClientGetAccountInfoOptions contains the optional parameters for the BlobClient.GetAccountInfo method. func (client *BlobClient) GetAccountInfo(ctx context.Context, options *BlobClientGetAccountInfoOptions) (BlobClientGetAccountInfoResponse, error) { + var err error req, err := client.getAccountInfoCreateRequest(ctx, options) if err != nil { return BlobClientGetAccountInfoResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientGetAccountInfoResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientGetAccountInfoResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientGetAccountInfoResponse{}, err } - return client.getAccountInfoHandleResponse(resp) + resp, err := client.getAccountInfoHandleResponse(httpResp) + return resp, err } // getAccountInfoCreateRequest creates the GetAccountInfo request. @@ -1190,7 +1225,7 @@ func (client *BlobClient) getAccountInfoCreateRequest(ctx context.Context, optio reqQP.Set("restype", "account") reqQP.Set("comp", "properties") req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } @@ -1198,15 +1233,12 @@ func (client *BlobClient) getAccountInfoCreateRequest(ctx context.Context, optio // getAccountInfoHandleResponse handles the GetAccountInfo response. 
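// Editor's note (hedged sketch): GetAccountInfo above takes only a context and
// an options struct. Assuming client and ctx, reading the SKU and account kind
// parsed by the handler below:
//
//	resp, err := client.GetAccountInfo(ctx, nil)
//	if err != nil {
//		return err
//	}
//	if resp.SKUName != nil && resp.AccountKind != nil {
//		log.Printf("sku=%s kind=%s", *resp.SKUName, *resp.AccountKind)
//	}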
func (client *BlobClient) getAccountInfoHandleResponse(resp *http.Response) (BlobClientGetAccountInfoResponse, error) { result := BlobClientGetAccountInfoResponse{} + if val := resp.Header.Get("x-ms-account-kind"); val != "" { + result.AccountKind = (*AccountKind)(&val) + } if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -1214,11 +1246,14 @@ func (client *BlobClient) getAccountInfoHandleResponse(resp *http.Response) (Blo } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } if val := resp.Header.Get("x-ms-sku-name"); val != "" { result.SKUName = (*SKUName)(&val) } - if val := resp.Header.Get("x-ms-account-kind"); val != "" { - result.AccountKind = (*AccountKind)(&val) + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } @@ -1227,24 +1262,27 @@ func (client *BlobClient) getAccountInfoHandleResponse(resp *http.Response) (Blo // for the blob. It does not return the content of the blob. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - BlobClientGetPropertiesOptions contains the optional parameters for the BlobClient.GetProperties method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) GetProperties(ctx context.Context, options *BlobClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientGetPropertiesResponse, error) { + var err error req, err := client.getPropertiesCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, modifiedAccessConditions) if err != nil { return BlobClientGetPropertiesResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientGetPropertiesResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientGetPropertiesResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientGetPropertiesResponse{}, err } - return client.getPropertiesHandleResponse(resp) + resp, err := client.getPropertiesHandleResponse(httpResp) + return resp, err } // getPropertiesCreateRequest creates the GetProperties request. 
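// Editor's note (hedged sketch): GetProperties is a metadata-only call whose
// conditional headers are built below; assuming client and ctx, a read gated
// on recent modification might look like:
//
//	since := time.Now().Add(-24 * time.Hour).UTC()
//	mac := &ModifiedAccessConditions{IfModifiedSince: &since}
//	resp, err := client.GetProperties(ctx, nil, nil, nil, mac)
//	if err != nil {
//		return err // an unmet condition also surfaces as *azcore.ResponseError
//	}
//	_ = resp.ContentLength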
@@ -1277,10 +1315,10 @@ func (client *BlobClient) getPropertiesCreateRequest(ctx context.Context, option req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -1291,7 +1329,7 @@ func (client *BlobClient) getPropertiesCreateRequest(ctx context.Context, option if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1302,82 +1340,61 @@ func (client *BlobClient) getPropertiesCreateRequest(ctx context.Context, option // getPropertiesHandleResponse handles the GetProperties response. func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (BlobClientGetPropertiesResponse, error) { result := BlobClientGetPropertiesResponse{} - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("Accept-Ranges"); val != "" { + result.AcceptRanges = &val + } + if val := resp.Header.Get("x-ms-access-tier"); val != "" { + result.AccessTier = &val + } + if val := resp.Header.Get("x-ms-access-tier-change-time"); val != "" { + accessTierChangeTime, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.LastModified = &lastModified + result.AccessTierChangeTime = &accessTierChangeTime } - if val := resp.Header.Get("x-ms-creation-time"); val != "" { - creationTime, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-access-tier-inferred"); val != "" { + accessTierInferred, err := strconv.ParseBool(val) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.CreationTime = &creationTime - } - for hh := range resp.Header { - if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { - if result.Metadata == nil { - result.Metadata = map[string]*string{} - } - result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) - } - } - if val := resp.Header.Get("x-ms-or-policy-id"); val != "" { - result.ObjectReplicationPolicyID = &val - } - for hh := range resp.Header { - if len(hh) > len("x-ms-or-") && strings.EqualFold(hh[:len("x-ms-or-")], "x-ms-or-") { - if result.Metadata == nil { - result.Metadata = map[string]*string{} - } - result.Metadata[hh[len("x-ms-or-"):]] = to.Ptr(resp.Header.Get(hh)) - } + result.AccessTierInferred = &accessTierInferred } - if val := 
resp.Header.Get("x-ms-blob-type"); val != "" { - result.BlobType = (*BlobType)(&val) + if val := resp.Header.Get("x-ms-archive-status"); val != "" { + result.ArchiveStatus = &val } - if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" { - copyCompletionTime, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.CopyCompletionTime = ©CompletionTime - } - if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { - result.CopyStatusDescription = &val - } - if val := resp.Header.Get("x-ms-copy-id"); val != "" { - result.CopyID = &val - } - if val := resp.Header.Get("x-ms-copy-progress"); val != "" { - result.CopyProgress = &val - } - if val := resp.Header.Get("x-ms-copy-source"); val != "" { - result.CopySource = &val - } - if val := resp.Header.Get("x-ms-copy-status"); val != "" { - result.CopyStatus = (*CopyStatusType)(&val) + result.BlobCommittedBlockCount = &blobCommittedBlockCount } - if val := resp.Header.Get("x-ms-incremental-copy"); val != "" { - isIncrementalCopy, err := strconv.ParseBool(val) + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.IsIncrementalCopy = &isIncrementalCopy + result.BlobSequenceNumber = &blobSequenceNumber } - if val := resp.Header.Get("x-ms-copy-destination-snapshot"); val != "" { - result.DestinationSnapshot = &val + if val := resp.Header.Get("x-ms-blob-type"); val != "" { + result.BlobType = (*BlobType)(&val) } - if val := resp.Header.Get("x-ms-lease-duration"); val != "" { - result.LeaseDuration = (*LeaseDurationType)(&val) + if val := resp.Header.Get("Cache-Control"); val != "" { + result.CacheControl = &val } - if val := resp.Header.Get("x-ms-lease-state"); val != "" { - result.LeaseState = (*LeaseStateType)(&val) + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-lease-status"); val != "" { - result.LeaseStatus = (*LeaseStatusType)(&val) + if val := resp.Header.Get("Content-Disposition"); val != "" { + result.ContentDisposition = &val + } + if val := resp.Header.Get("Content-Encoding"); val != "" { + result.ContentEncoding = &val + } + if val := resp.Header.Get("Content-Language"); val != "" { + result.ContentLanguage = &val } if val := resp.Header.Get("Content-Length"); val != "" { contentLength, err := strconv.ParseInt(val, 10, 64) @@ -1386,12 +1403,6 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob } result.ContentLength = &contentLength } - if val := resp.Header.Get("Content-Type"); val != "" { - result.ContentType = &val - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } if val := resp.Header.Get("Content-MD5"); val != "" { contentMD5, err := base64.StdEncoding.DecodeString(val) if err != nil { @@ -1399,58 +1410,50 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob } result.ContentMD5 = contentMD5 } - if val := resp.Header.Get("Content-Encoding"); val != "" { - result.ContentEncoding = &val - } - if val := resp.Header.Get("Content-Disposition"); val != "" { - result.ContentDisposition = &val - } - if val := 
resp.Header.Get("Content-Language"); val != "" { - result.ContentLanguage = &val - } - if val := resp.Header.Get("Cache-Control"); val != "" { - result.CacheControl = &val + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val } - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" { + copyCompletionTime, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.BlobSequenceNumber = &blobSequenceNumber + result.CopyCompletionTime = ©CompletionTime } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val + if val := resp.Header.Get("x-ms-copy-progress"); val != "" { + result.CopyProgress = &val } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + if val := resp.Header.Get("x-ms-copy-source"); val != "" { + result.CopySource = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientGetPropertiesResponse{}, err - } - result.Date = &date + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) } - if val := resp.Header.Get("Accept-Ranges"); val != "" { - result.AcceptRanges = &val + if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { + result.CopyStatusDescription = &val } - if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { - blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) - blobCommittedBlockCount := int32(blobCommittedBlockCount32) + if val := resp.Header.Get("x-ms-creation-time"); val != "" { + creationTime, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.BlobCommittedBlockCount = &blobCommittedBlockCount + result.CreationTime = &creationTime } - if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.IsServerEncrypted = &isServerEncrypted + result.Date = &date + } + if val := resp.Header.Get("x-ms-copy-destination-snapshot"); val != "" { + result.DestinationSnapshot = &val + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { result.EncryptionKeySHA256 = &val @@ -1458,28 +1461,22 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { result.EncryptionScope = &val } - if val := resp.Header.Get("x-ms-access-tier"); val != "" { - result.AccessTier = &val - } - if val := resp.Header.Get("x-ms-access-tier-inferred"); val != "" { - accessTierInferred, err := strconv.ParseBool(val) + if val := resp.Header.Get("x-ms-expiry-time"); val != "" { + expiresOn, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.AccessTierInferred = &accessTierInferred - } - if val := resp.Header.Get("x-ms-archive-status"); val != "" { - 
result.ArchiveStatus = &val + result.ExpiresOn = &expiresOn } - if val := resp.Header.Get("x-ms-access-tier-change-time"); val != "" { - accessTierChangeTime, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { + immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.AccessTierChangeTime = &accessTierChangeTime + result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val + if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { + result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) } if val := resp.Header.Get("x-ms-is-current-version"); val != "" { isCurrentVersion, err := strconv.ParseBool(val) @@ -1488,19 +1485,12 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob } result.IsCurrentVersion = &isCurrentVersion } - if val := resp.Header.Get("x-ms-tag-count"); val != "" { - tagCount, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return BlobClientGetPropertiesResponse{}, err - } - result.TagCount = &tagCount - } - if val := resp.Header.Get("x-ms-expiry-time"); val != "" { - expiresOn, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-incremental-copy"); val != "" { + isIncrementalCopy, err := strconv.ParseBool(val) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.ExpiresOn = &expiresOn + result.IsIncrementalCopy = &isIncrementalCopy } if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { isSealed, err := strconv.ParseBool(val) @@ -1509,8 +1499,12 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob } result.IsSealed = &isSealed } - if val := resp.Header.Get("x-ms-rehydrate-priority"); val != "" { - result.RehydratePriority = &val + if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted } if val := resp.Header.Get("x-ms-last-access-time"); val != "" { lastAccessed, err := time.Parse(time.RFC1123, val) @@ -1519,15 +1513,21 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob } result.LastAccessed = &lastAccessed } - if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { - immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) if err != nil { return BlobClientGetPropertiesResponse{}, err } - result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { - result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) } if val := resp.Header.Get("x-ms-legal-hold"); val != "" { legalHold, err := strconv.ParseBool(val) @@ -1536,29 +1536,70 @@ func (client *BlobClient) getPropertiesHandleResponse(resp 
*http.Response) (Blob } result.LegalHold = &legalHold } + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("x-ms-or-policy-id"); val != "" { + result.ObjectReplicationPolicyID = &val + } + for hh := range resp.Header { + if len(hh) > len("x-ms-or-") && strings.EqualFold(hh[:len("x-ms-or-")], "x-ms-or-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-or-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("x-ms-rehydrate-priority"); val != "" { + result.RehydratePriority = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-tag-count"); val != "" { + tagCount, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return BlobClientGetPropertiesResponse{}, err + } + result.TagCount = &tagCount + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } return result, nil } // GetTags - The Get Tags operation enables users to get the tags associated with a blob. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - BlobClientGetTagsOptions contains the optional parameters for the BlobClient.GetTags method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. func (client *BlobClient) GetTags(ctx context.Context, options *BlobClientGetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientGetTagsResponse, error) { + var err error req, err := client.getTagsCreateRequest(ctx, options, modifiedAccessConditions, leaseAccessConditions) if err != nil { return BlobClientGetTagsResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientGetTagsResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientGetTagsResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientGetTagsResponse{}, err } - return client.getTagsHandleResponse(resp) + resp, err := client.getTagsHandleResponse(httpResp) + return resp, err } // getTagsCreateRequest creates the GetTags request. 
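// Editor's note (hedged sketch): assuming client and ctx, and assuming the
// generated BlobTags/BlobTagSet model that UnmarshalAsXML fills below,
// enumerating a blob's tags might look like:
//
//	resp, err := client.GetTags(ctx, nil, nil, nil)
//	if err != nil {
//		return err
//	}
//	for _, tag := range resp.BlobTags.BlobTagSet {
//		log.Printf("%s=%s", *tag.Key, *tag.Value)
//	}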
@@ -1579,7 +1620,7 @@ func (client *BlobClient) getTagsCreateRequest(ctx context.Context, options *Blo reqQP.Set("versionid", *options.VersionID) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1599,12 +1640,6 @@ func (client *BlobClient) getTagsHandleResponse(resp *http.Response) (BlobClient if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -1612,6 +1647,12 @@ func (client *BlobClient) getTagsHandleResponse(resp *http.Response) (BlobClient } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } if err := runtime.UnmarshalAsXML(resp, &result.BlobTags); err != nil { return BlobClientGetTagsResponse{}, err } @@ -1621,24 +1662,27 @@ func (client *BlobClient) getTagsHandleResponse(resp *http.Response) (BlobClient // Query - The Query operation enables users to select/project on blob data by providing simple query expressions. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - BlobClientQueryOptions contains the optional parameters for the BlobClient.Query method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) Query(ctx context.Context, options *BlobClientQueryOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientQueryResponse, error) { + var err error req, err := client.queryCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, modifiedAccessConditions) if err != nil { return BlobClientQueryResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientQueryResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusPartialContent) { - return BlobClientQueryResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusPartialContent) { + err = runtime.NewResponseError(httpResp) + return BlobClientQueryResponse{}, err } - return client.queryHandleResponse(resp) + resp, err := client.queryHandleResponse(httpResp) + return resp, err } // queryCreateRequest creates the Query request. 
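// Editor's note (hedged sketch): Query serializes options.QueryRequest as the
// XML request body (see the MarshalAsXML call below). Assuming client and ctx,
// and assuming the generated QueryRequest model with QueryType and Expression
// fields, a simple SQL projection might be set up as:
//
//	expr := "SELECT * FROM BlobStorage WHERE _1 > 100"
//	opts := &BlobClientQueryOptions{QueryRequest: &QueryRequest{
//		QueryType:  to.Ptr("SQL"),
//		Expression: &expr,
//	}}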
@@ -1670,10 +1714,10 @@ func (client *BlobClient) queryCreateRequest(ctx context.Context, options *BlobC req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -1684,13 +1728,16 @@ func (client *BlobClient) queryCreateRequest(ctx context.Context, options *BlobC if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.QueryRequest != nil { - return req, runtime.MarshalAsXML(req, *options.QueryRequest) + if err := runtime.MarshalAsXML(req, *options.QueryRequest); err != nil { + return nil, err + } + return req, nil } return req, nil } @@ -1698,65 +1745,75 @@ func (client *BlobClient) queryCreateRequest(ctx context.Context, options *BlobC // queryHandleResponse handles the Query response. 
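// Editor's note (hedged sketch): as with Download, the query result stream is
// exposed as resp.Body and must be drained and closed by the caller. Reusing
// opts from the sketch above:
//
//	resp, err := client.Query(ctx, opts, nil, nil, nil)
//	if err != nil {
//		return err
//	}
//	defer resp.Body.Close()
//	filtered, err := io.ReadAll(resp.Body)
//	if err != nil {
//		return err
//	}
//	_ = filtered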
func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQueryResponse, error) { result := BlobClientQueryResponse{Body: resp.Body} - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("Accept-Ranges"); val != "" { + result.AcceptRanges = &val + } + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) if err != nil { return BlobClientQueryResponse{}, err } - result.LastModified = &lastModified + result.BlobCommittedBlockCount = &blobCommittedBlockCount } - for hh := range resp.Header { - if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { - if result.Metadata == nil { - result.Metadata = map[string]*string{} - } - result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) + if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" { + blobContentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlobClientQueryResponse{}, err } + result.BlobContentMD5 = blobContentMD5 } - if val := resp.Header.Get("Content-Length"); val != "" { - contentLength, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { return BlobClientQueryResponse{}, err } - result.ContentLength = &contentLength + result.BlobSequenceNumber = &blobSequenceNumber } - if val := resp.Header.Get("Content-Type"); val != "" { - result.ContentType = &val + if val := resp.Header.Get("x-ms-blob-type"); val != "" { + result.BlobType = (*BlobType)(&val) } - if val := resp.Header.Get("Content-Range"); val != "" { - result.ContentRange = &val + if val := resp.Header.Get("Cache-Control"); val != "" { + result.CacheControl = &val } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) if err != nil { return BlobClientQueryResponse{}, err } - result.ContentMD5 = contentMD5 - } - if val := resp.Header.Get("Content-Encoding"); val != "" { - result.ContentEncoding = &val - } - if val := resp.Header.Get("Cache-Control"); val != "" { - result.CacheControl = &val + result.ContentCRC64 = contentCRC64 } if val := resp.Header.Get("Content-Disposition"); val != "" { result.ContentDisposition = &val } + if val := resp.Header.Get("Content-Encoding"); val != "" { + result.ContentEncoding = &val + } if val := resp.Header.Get("Content-Language"); val != "" { result.ContentLanguage = &val } - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("Content-Length"); val != "" { + contentLength, err := strconv.ParseInt(val, 10, 64) if err != nil { return BlobClientQueryResponse{}, err } - result.BlobSequenceNumber = &blobSequenceNumber + result.ContentLength = &contentLength } - if val := resp.Header.Get("x-ms-blob-type"); val != "" { - result.BlobType = (*BlobType)(&val) + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err 
:= base64.StdEncoding.DecodeString(val)
+		if err != nil {
+			return BlobClientQueryResponse{}, err
+		}
+		result.ContentMD5 = contentMD5
+	}
+	if val := resp.Header.Get("Content-Range"); val != "" {
+		result.ContentRange = &val
+	}
+	if val := resp.Header.Get("Content-Type"); val != "" {
+		result.ContentType = &val
 	}
 	if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" {
 		copyCompletionTime, err := time.Parse(time.RFC1123, val)
@@ -1765,9 +1822,6 @@ func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQu
 		}
 		result.CopyCompletionTime = &copyCompletionTime
 	}
-	if val := resp.Header.Get("x-ms-copy-status-description"); val != "" {
-		result.CopyStatusDescription = &val
-	}
 	if val := resp.Header.Get("x-ms-copy-id"); val != "" {
 		result.CopyID = &val
 	}
@@ -1780,26 +1834,8 @@ func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQu
 	if val := resp.Header.Get("x-ms-copy-status"); val != "" {
 		result.CopyStatus = (*CopyStatusType)(&val)
 	}
-	if val := resp.Header.Get("x-ms-lease-duration"); val != "" {
-		result.LeaseDuration = (*LeaseDurationType)(&val)
-	}
-	if val := resp.Header.Get("x-ms-lease-state"); val != "" {
-		result.LeaseState = (*LeaseStateType)(&val)
-	}
-	if val := resp.Header.Get("x-ms-lease-status"); val != "" {
-		result.LeaseStatus = (*LeaseStatusType)(&val)
-	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
-	if val := resp.Header.Get("Accept-Ranges"); val != "" {
-		result.AcceptRanges = &val
+	if val := resp.Header.Get("x-ms-copy-status-description"); val != "" {
+		result.CopyStatusDescription = &val
 	}
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
@@ -1808,13 +1844,14 @@ func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQu
 		}
 		result.Date = &date
 	}
-	if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" {
-		blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32)
-		blobCommittedBlockCount := int32(blobCommittedBlockCount32)
-		if err != nil {
-			return BlobClientQueryResponse{}, err
-		}
-		result.BlobCommittedBlockCount = &blobCommittedBlockCount
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = (*azcore.ETag)(&val)
+	}
+	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
+		result.EncryptionKeySHA256 = &val
+	}
+	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
+		result.EncryptionScope = &val
 	}
 	if val := resp.Header.Get("x-ms-server-encrypted"); val != "" {
 		isServerEncrypted, err := strconv.ParseBool(val)
@@ -1823,25 +1860,35 @@ func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQu
 		}
 		result.IsServerEncrypted = &isServerEncrypted
 	}
-	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
-		result.EncryptionKeySHA256 = &val
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		lastModified, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return BlobClientQueryResponse{}, err
+		}
+		result.LastModified = &lastModified
 	}
-	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
-		result.EncryptionScope = &val
+	if val := resp.Header.Get("x-ms-lease-duration"); val != "" {
+		result.LeaseDuration = (*LeaseDurationType)(&val)
+	}
+	if val := resp.Header.Get("x-ms-lease-state"); val != "" {
+		result.LeaseState = 
(*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) } - if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" { - blobContentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return BlobClientQueryResponse{}, err + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) } - result.BlobContentMD5 = blobContentMD5 } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - contentCRC64, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return BlobClientQueryResponse{}, err - } - result.ContentCRC64 = contentCRC64 + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } @@ -1849,23 +1896,26 @@ func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQu // ReleaseLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - leaseID - Specifies the current lease ID on the resource. // - options - BlobClientReleaseLeaseOptions contains the optional parameters for the BlobClient.ReleaseLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) ReleaseLease(ctx context.Context, leaseID string, options *BlobClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientReleaseLeaseResponse, error) { + var err error req, err := client.releaseLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) if err != nil { return BlobClientReleaseLeaseResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientReleaseLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientReleaseLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientReleaseLeaseResponse{}, err } - return client.releaseLeaseHandleResponse(resp) + resp, err := client.releaseLeaseHandleResponse(httpResp) + return resp, err } // releaseLeaseCreateRequest creates the ReleaseLease request. 
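
The Query response handler above is a pure reorder: headers are now parsed alphabetically, with each decoding idiom unchanged. One pre-existing quirk survives the shuffle: the x-ms-blob-committed-block-count branch narrows to int32 before checking the ParseInt error, which is harmless only because the narrowed value is discarded on the error path. The three idioms, sketched outside the SDK with illustrative names:

package sketch

import (
	"encoding/base64"
	"net/http"
	"strconv"
	"time"
)

// decodeCommonHeaders shows the decoding idioms the generated handlers
// rely on: RFC 1123 dates, bounded integers, and base64 digests.
func decodeCommonHeaders(resp *http.Response) (date time.Time, blockCount int32, md5 []byte, err error) {
	if val := resp.Header.Get("Date"); val != "" {
		if date, err = time.Parse(time.RFC1123, val); err != nil {
			return
		}
	}
	if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" {
		n, perr := strconv.ParseInt(val, 10, 32) // 32-bit bound enforced by ParseInt
		if perr != nil {
			err = perr
			return
		}
		blockCount = int32(n) // narrow only after the error check
	}
	if val := resp.Header.Get("Content-MD5"); val != "" {
		if md5, err = base64.StdEncoding.DecodeString(val); err != nil {
			return
		}
	}
	return
}
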
@@ -1883,10 +1933,10 @@ func (client *BlobClient) releaseLeaseCreateRequest(ctx context.Context, leaseID req.Raw().Header["x-ms-lease-action"] = []string{"release"} req.Raw().Header["x-ms-lease-id"] = []string{leaseID} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -1897,7 +1947,7 @@ func (client *BlobClient) releaseLeaseCreateRequest(ctx context.Context, leaseID if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1908,6 +1958,16 @@ func (client *BlobClient) releaseLeaseCreateRequest(ctx context.Context, leaseID // releaseLeaseHandleResponse handles the ReleaseLease response. func (client *BlobClient) releaseLeaseHandleResponse(resp *http.Response) (BlobClientReleaseLeaseResponse, error) { result := BlobClientReleaseLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientReleaseLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -1918,45 +1978,38 @@ func (client *BlobClient) releaseLeaseHandleResponse(resp *http.Response) (BlobC } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientReleaseLeaseResponse{}, err - } - result.Date = &date - } return result, nil } // RenewLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - leaseID - Specifies the current lease ID on the resource. // - options - BlobClientRenewLeaseOptions contains the optional parameters for the BlobClient.RenewLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *BlobClient) RenewLease(ctx context.Context, leaseID string, options *BlobClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientRenewLeaseResponse, error) { + var err error req, err := client.renewLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) if err != nil { return BlobClientRenewLeaseResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientRenewLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientRenewLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientRenewLeaseResponse{}, err } - return client.renewLeaseHandleResponse(resp) + resp, err := client.renewLeaseHandleResponse(httpResp) + return resp, err } // renewLeaseCreateRequest creates the RenewLease request. @@ -1974,10 +2027,10 @@ func (client *BlobClient) renewLeaseCreateRequest(ctx context.Context, leaseID s req.Raw().Header["x-ms-lease-action"] = []string{"renew"} req.Raw().Header["x-ms-lease-id"] = []string{leaseID} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -1988,7 +2041,7 @@ func (client *BlobClient) renewLeaseCreateRequest(ctx context.Context, leaseID s if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1999,6 +2052,16 @@ func (client *BlobClient) renewLeaseCreateRequest(ctx context.Context, leaseID s // renewLeaseHandleResponse handles the RenewLease response. 
func (client *BlobClient) renewLeaseHandleResponse(resp *http.Response) (BlobClientRenewLeaseResponse, error) { result := BlobClientRenewLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientRenewLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -2012,44 +2075,37 @@ func (client *BlobClient) renewLeaseHandleResponse(resp *http.Response) (BlobCli if val := resp.Header.Get("x-ms-lease-id"); val != "" { result.LeaseID = &val } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientRenewLeaseResponse{}, err - } - result.Date = &date - } return result, nil } // SetExpiry - Sets the time a blob will expire and be deleted. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - expiryOptions - Required. Indicates mode of the expiry time // - options - BlobClientSetExpiryOptions contains the optional parameters for the BlobClient.SetExpiry method. func (client *BlobClient) SetExpiry(ctx context.Context, expiryOptions ExpiryOptions, options *BlobClientSetExpiryOptions) (BlobClientSetExpiryResponse, error) { + var err error req, err := client.setExpiryCreateRequest(ctx, expiryOptions, options) if err != nil { return BlobClientSetExpiryResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientSetExpiryResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientSetExpiryResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetExpiryResponse{}, err } - return client.setExpiryHandleResponse(resp) + resp, err := client.setExpiryHandleResponse(httpResp) + return resp, err } // setExpiryCreateRequest creates the SetExpiry request. @@ -2064,7 +2120,7 @@ func (client *BlobClient) setExpiryCreateRequest(ctx context.Context, expiryOpti reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -2079,6 +2135,16 @@ func (client *BlobClient) setExpiryCreateRequest(ctx context.Context, expiryOpti // setExpiryHandleResponse handles the SetExpiry response. 
func (client *BlobClient) setExpiryHandleResponse(resp *http.Response) (BlobClientSetExpiryResponse, error) { result := BlobClientSetExpiryResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetExpiryResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -2089,46 +2155,39 @@ func (client *BlobClient) setExpiryHandleResponse(resp *http.Response) (BlobClie } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientSetExpiryResponse{}, err - } - result.Date = &date - } return result, nil } // SetHTTPHeaders - The Set HTTP Headers operation sets system properties on the blob // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - BlobClientSetHTTPHeadersOptions contains the optional parameters for the BlobClient.SetHTTPHeaders method. // - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) SetHTTPHeaders(ctx context.Context, options *BlobClientSetHTTPHeadersOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetHTTPHeadersResponse, error) { + var err error req, err := client.setHTTPHeadersCreateRequest(ctx, options, blobHTTPHeaders, leaseAccessConditions, modifiedAccessConditions) if err != nil { return BlobClientSetHTTPHeadersResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientSetHTTPHeadersResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientSetHTTPHeadersResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetHTTPHeadersResponse{}, err } - return client.setHTTPHeadersHandleResponse(resp) + resp, err := client.setHTTPHeadersHandleResponse(httpResp) + return resp, err } // setHTTPHeadersCreateRequest creates the SetHTTPHeaders request. 
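
A pattern worth calling out in the request builders above and below: every date header now goes through (*t).In(gmt).Format(time.RFC1123) instead of t.Format(time.RFC1123). Go's RFC 1123 layout prints the zone abbreviation of the time's location, so a UTC value renders as "UTC" and a local value as the local zone name, while a valid HTTP-date must end in "GMT". A runnable sketch, assuming gmt is a package-level time.FixedZone("GMT", 0) as the call sites suggest (its definition is outside these hunks):

package main

import (
	"fmt"
	"time"
)

// Assumed definition; the diff only shows the call sites using `gmt`.
var gmt = time.FixedZone("GMT", 0)

func main() {
	t := time.Date(2024, 1, 30, 12, 39, 10, 0, time.UTC)
	fmt.Println(t.Format(time.RFC1123))         // Tue, 30 Jan 2024 12:39:10 UTC — not a valid HTTP-date
	fmt.Println(t.In(gmt).Format(time.RFC1123)) // Tue, 30 Jan 2024 12:39:10 GMT — what the service expects
}
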
@@ -2162,10 +2221,10 @@ func (client *BlobClient) setHTTPHeadersCreateRequest(ctx context.Context, optio req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -2179,7 +2238,7 @@ func (client *BlobClient) setHTTPHeadersCreateRequest(ctx context.Context, optio if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -2190,16 +2249,6 @@ func (client *BlobClient) setHTTPHeadersCreateRequest(ctx context.Context, optio // setHTTPHeadersHandleResponse handles the SetHTTPHeaders response. func (client *BlobClient) setHTTPHeadersHandleResponse(resp *http.Response) (BlobClientSetHTTPHeadersResponse, error) { result := BlobClientSetHTTPHeadersResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientSetHTTPHeadersResponse{}, err - } - result.LastModified = &lastModified - } if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { @@ -2210,12 +2259,6 @@ func (client *BlobClient) setHTTPHeadersHandleResponse(resp *http.Response) (Blo if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -2223,29 +2266,48 @@ func (client *BlobClient) setHTTPHeadersHandleResponse(resp *http.Response) (Blo } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetHTTPHeadersResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } // SetImmutabilityPolicy - The Set Immutability Policy operation sets the immutability policy 
on the blob // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - BlobClientSetImmutabilityPolicyOptions contains the optional parameters for the BlobClient.SetImmutabilityPolicy // method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) SetImmutabilityPolicy(ctx context.Context, options *BlobClientSetImmutabilityPolicyOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetImmutabilityPolicyResponse, error) { + var err error req, err := client.setImmutabilityPolicyCreateRequest(ctx, options, modifiedAccessConditions) if err != nil { return BlobClientSetImmutabilityPolicyResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientSetImmutabilityPolicyResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientSetImmutabilityPolicyResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetImmutabilityPolicyResponse{}, err } - return client.setImmutabilityPolicyHandleResponse(resp) + resp, err := client.setImmutabilityPolicyHandleResponse(httpResp) + return resp, err } // setImmutabilityPolicyCreateRequest creates the SetImmutabilityPolicy request. @@ -2260,15 +2322,15 @@ func (client *BlobClient) setImmutabilityPolicyCreateRequest(ctx context.Context reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if options != nil && options.ImmutabilityPolicyExpiry != nil { - req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)} + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} } if options != nil && options.ImmutabilityPolicyMode != nil { req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} @@ -2283,12 +2345,6 @@ func (client *BlobClient) setImmutabilityPolicyHandleResponse(resp *http.Respons if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -2306,28 +2362,37 @@ func (client *BlobClient) setImmutabilityPolicyHandleResponse(resp *http.Respons if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val) } + if val := 
resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } // SetLegalHold - The Set Legal Hold operation sets a legal hold on the blob. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - legalHold - Specified if a legal hold should be set on the blob. // - options - BlobClientSetLegalHoldOptions contains the optional parameters for the BlobClient.SetLegalHold method. func (client *BlobClient) SetLegalHold(ctx context.Context, legalHold bool, options *BlobClientSetLegalHoldOptions) (BlobClientSetLegalHoldResponse, error) { + var err error req, err := client.setLegalHoldCreateRequest(ctx, legalHold, options) if err != nil { return BlobClientSetLegalHoldResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientSetLegalHoldResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientSetLegalHoldResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetLegalHoldResponse{}, err } - return client.setLegalHoldHandleResponse(resp) + resp, err := client.setLegalHoldHandleResponse(httpResp) + return resp, err } // setLegalHoldCreateRequest creates the SetLegalHold request. @@ -2342,7 +2407,7 @@ func (client *BlobClient) setLegalHoldCreateRequest(ctx context.Context, legalHo reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -2357,12 +2422,6 @@ func (client *BlobClient) setLegalHoldHandleResponse(resp *http.Response) (BlobC if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -2377,6 +2436,12 @@ func (client *BlobClient) setLegalHoldHandleResponse(resp *http.Response) (BlobC } result.LegalHold = &legalHold } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } @@ -2384,25 +2449,28 @@ func (client *BlobClient) setLegalHoldHandleResponse(resp *http.Response) (BlobC // pairs // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - BlobClientSetMetadataOptions contains the optional parameters for the BlobClient.SetMetadata method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. // - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. 
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) SetMetadata(ctx context.Context, options *BlobClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetMetadataResponse, error) { + var err error req, err := client.setMetadataCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { return BlobClientSetMetadataResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientSetMetadataResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientSetMetadataResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetMetadataResponse{}, err } - return client.setMetadataHandleResponse(resp) + resp, err := client.setMetadataHandleResponse(httpResp) + return resp, err } // setMetadataCreateRequest creates the SetMetadata request. @@ -2440,10 +2508,10 @@ func (client *BlobClient) setMetadataCreateRequest(ctx context.Context, options req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -2454,7 +2522,7 @@ func (client *BlobClient) setMetadataCreateRequest(ctx context.Context, options if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -2465,28 +2533,9 @@ func (client *BlobClient) setMetadataCreateRequest(ctx context.Context, options // setMetadataHandleResponse handles the SetMetadata response. 
func (client *BlobClient) setMetadataHandleResponse(resp *http.Response) (BlobClientSetMetadataResponse, error) { result := BlobClientSetMetadataResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientSetMetadataResponse{}, err - } - result.LastModified = &lastModified - } if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -2494,6 +2543,15 @@ func (client *BlobClient) setMetadataHandleResponse(resp *http.Response) (BlobCl } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { @@ -2501,11 +2559,21 @@ func (client *BlobClient) setMetadataHandleResponse(resp *http.Response) (BlobCl } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientSetMetadataResponse{}, err + } + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val } return result, nil } @@ -2513,24 +2581,27 @@ func (client *BlobClient) setMetadataHandleResponse(resp *http.Response) (BlobCl // SetTags - The Set Tags operation enables users to set tags on a blob. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - tags - Blob tags // - options - BlobClientSetTagsOptions contains the optional parameters for the BlobClient.SetTags method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
func (client *BlobClient) SetTags(ctx context.Context, tags BlobTags, options *BlobClientSetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientSetTagsResponse, error) { + var err error req, err := client.setTagsCreateRequest(ctx, tags, options, modifiedAccessConditions, leaseAccessConditions) if err != nil { return BlobClientSetTagsResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientSetTagsResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusNoContent) { - return BlobClientSetTagsResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusNoContent) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetTagsResponse{}, err } - return client.setTagsHandleResponse(resp) + resp, err := client.setTagsHandleResponse(httpResp) + return resp, err } // setTagsCreateRequest creates the SetTags request. @@ -2548,7 +2619,7 @@ func (client *BlobClient) setTagsCreateRequest(ctx context.Context, tags BlobTag reqQP.Set("versionid", *options.VersionID) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.TransactionalContentMD5 != nil { req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} } @@ -2565,7 +2636,10 @@ func (client *BlobClient) setTagsCreateRequest(ctx context.Context, tags BlobTag req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } req.Raw().Header["Accept"] = []string{"application/xml"} - return req, runtime.MarshalAsXML(req, tags) + if err := runtime.MarshalAsXML(req, tags); err != nil { + return nil, err + } + return req, nil } // setTagsHandleResponse handles the SetTags response. @@ -2574,12 +2648,6 @@ func (client *BlobClient) setTagsHandleResponse(resp *http.Response) (BlobClient if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -2587,6 +2655,12 @@ func (client *BlobClient) setTagsHandleResponse(resp *http.Response) (BlobClient } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } @@ -2596,24 +2670,27 @@ func (client *BlobClient) setTagsHandleResponse(resp *http.Response) (BlobClient // storage type. This operation does not update the blob's ETag. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - tier - Indicates the tier to be set on the blob. // - options - BlobClientSetTierOptions contains the optional parameters for the BlobClient.SetTier method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *BlobClient) SetTier(ctx context.Context, tier AccessTier, options *BlobClientSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetTierResponse, error) { + var err error req, err := client.setTierCreateRequest(ctx, tier, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { return BlobClientSetTierResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientSetTierResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { - return BlobClientSetTierResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return BlobClientSetTierResponse{}, err } - return client.setTierHandleResponse(resp) + resp, err := client.setTierHandleResponse(httpResp) + return resp, err } // setTierCreateRequest creates the SetTier request. @@ -2638,7 +2715,7 @@ func (client *BlobClient) setTierCreateRequest(ctx context.Context, tier AccessT if options != nil && options.RehydratePriority != nil { req.Raw().Header["x-ms-rehydrate-priority"] = []string{string(*options.RehydratePriority)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -2670,7 +2747,7 @@ func (client *BlobClient) setTierHandleResponse(resp *http.Response) (BlobClient // StartCopyFromURL - The Start Copy From URL operation copies a blob or an internet resource to a new blob. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies // a page blob snapshot. The value should be URL-encoded as it would appear in a request // URI. The source blob must either be public or must be authenticated via a shared access signature. @@ -2680,18 +2757,21 @@ func (client *BlobClient) setTierHandleResponse(resp *http.Response) (BlobClient // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
func (client *BlobClient) StartCopyFromURL(ctx context.Context, copySource string, options *BlobClientStartCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientStartCopyFromURLResponse, error) { + var err error req, err := client.startCopyFromURLCreateRequest(ctx, copySource, options, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions) if err != nil { return BlobClientStartCopyFromURLResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientStartCopyFromURLResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return BlobClientStartCopyFromURLResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return BlobClientStartCopyFromURLResponse{}, err } - return client.startCopyFromURLHandleResponse(resp) + resp, err := client.startCopyFromURLHandleResponse(httpResp) + return resp, err } // startCopyFromURLCreateRequest creates the StartCopyFromURL request. @@ -2719,10 +2799,10 @@ func (client *BlobClient) startCopyFromURLCreateRequest(ctx context.Context, cop req.Raw().Header["x-ms-rehydrate-priority"] = []string{string(*options.RehydratePriority)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} @@ -2734,10 +2814,10 @@ func (client *BlobClient) startCopyFromURLCreateRequest(ctx context.Context, cop req.Raw().Header["x-ms-source-if-tags"] = []string{*sourceModifiedAccessConditions.SourceIfTags} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -2752,7 +2832,7 @@ func (client *BlobClient) startCopyFromURLCreateRequest(ctx 
context.Context, cop if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -2763,7 +2843,7 @@ func (client *BlobClient) startCopyFromURLCreateRequest(ctx context.Context, cop req.Raw().Header["x-ms-seal-blob"] = []string{strconv.FormatBool(*options.SealBlob)} } if options != nil && options.ImmutabilityPolicyExpiry != nil { - req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)} + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} } if options != nil && options.ImmutabilityPolicyMode != nil { req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} @@ -2778,6 +2858,22 @@ func (client *BlobClient) startCopyFromURLCreateRequest(ctx context.Context, cop // startCopyFromURLHandleResponse handles the StartCopyFromURL response. func (client *BlobClient) startCopyFromURLHandleResponse(resp *http.Response) (BlobClientStartCopyFromURLResponse, error) { result := BlobClientStartCopyFromURLResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientStartCopyFromURLResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -2788,9 +2884,6 @@ func (client *BlobClient) startCopyFromURLHandleResponse(resp *http.Response) (B } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } @@ -2800,40 +2893,30 @@ func (client *BlobClient) startCopyFromURLHandleResponse(resp *http.Response) (B if val := resp.Header.Get("x-ms-version-id"); val != "" { result.VersionID = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlobClientStartCopyFromURLResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-copy-id"); val != "" { - result.CopyID = &val - } - if val := resp.Header.Get("x-ms-copy-status"); val != "" { - result.CopyStatus = (*CopyStatusType)(&val) - } return result, nil } // Undelete - Undelete a blob that was previously soft deleted // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - BlobClientUndeleteOptions contains the optional parameters for the BlobClient.Undelete method. 
func (client *BlobClient) Undelete(ctx context.Context, options *BlobClientUndeleteOptions) (BlobClientUndeleteResponse, error) { + var err error req, err := client.undeleteCreateRequest(ctx, options) if err != nil { return BlobClientUndeleteResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientUndeleteResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlobClientUndeleteResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlobClientUndeleteResponse{}, err } - return client.undeleteHandleResponse(resp) + resp, err := client.undeleteHandleResponse(httpResp) + return resp, err } // undeleteCreateRequest creates the Undelete request. @@ -2848,7 +2931,7 @@ func (client *BlobClient) undeleteCreateRequest(ctx context.Context, options *Bl reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -2862,12 +2945,6 @@ func (client *BlobClient) undeleteHandleResponse(resp *http.Response) (BlobClien if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -2875,5 +2952,11 @@ func (client *BlobClient) undeleteHandleResponse(resp *http.Response) (BlobClien } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go index 631f69aa..bfd7f5ea 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -22,21 +21,10 @@ import ( ) // BlockBlobClient contains the methods for the BlockBlob group. -// Don't use this type directly, use NewBlockBlobClient() instead. +// Don't use this type directly, use a constructor function instead. type BlockBlobClient struct { + internal *azcore.Client endpoint string - pl runtime.Pipeline -} - -// NewBlockBlobClient creates a new instance of BlockBlobClient with the specified values. -// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. 
-// - pl - the pipeline used for sending requests and handling responses. -func NewBlockBlobClient(endpoint string, pl runtime.Pipeline) *BlockBlobClient { - client := &BlockBlobClient{ - endpoint: endpoint, - pl: pl, - } - return client } // CommitBlockList - The Commit Block List operation writes a blob by specifying the list of block IDs that make up the blob. @@ -48,7 +36,7 @@ func NewBlockBlobClient(endpoint string, pl runtime.Pipeline) *BlockBlobClient { // belong to. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - blocks - Blob Blocks. // - options - BlockBlobClientCommitBlockListOptions contains the optional parameters for the BlockBlobClient.CommitBlockList // method. @@ -58,18 +46,21 @@ func NewBlockBlobClient(endpoint string, pl runtime.Pipeline) *BlockBlobClient { // - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, options *BlockBlobClientCommitBlockListOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientCommitBlockListResponse, error) { + var err error req, err := client.commitBlockListCreateRequest(ctx, blocks, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { return BlockBlobClientCommitBlockListResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlockBlobClientCommitBlockListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return BlockBlobClientCommitBlockListResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlockBlobClientCommitBlockListResponse{}, err } - return client.commitBlockListHandleResponse(resp) + resp, err := client.commitBlockListHandleResponse(httpResp) + return resp, err } // commitBlockListCreateRequest creates the CommitBlockList request. 
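
With the hunk above, the generated NewBlockBlobClient constructor disappears: the client now embeds an *azcore.Client, which bundles the pipeline with per-module metadata, and construction moves behind azblob's internal/base package (also updated in this patch). A hedged sketch of the azcore side, assuming azcore.NewClient's four-argument form; the module name and version strings are placeholders:

package sketch

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

// newCoreClient sketches the construction the removed NewBlockBlobClient
// used to leave to the caller; the real wiring lives in internal/base.
func newCoreClient(options *policy.ClientOptions) (*azcore.Client, error) {
	return azcore.NewClient("azblob", "v1.2.0", runtime.PipelineOptions{}, options)
}
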
@@ -134,10 +125,10 @@ func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context, req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -148,7 +139,7 @@ func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context, if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -156,7 +147,7 @@ func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context, req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} } if options != nil && options.ImmutabilityPolicyExpiry != nil { - req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)} + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} } if options != nil && options.ImmutabilityPolicyMode != nil { req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} @@ -165,21 +156,24 @@ func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context, req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} } req.Raw().Header["Accept"] = []string{"application/xml"} - return req, runtime.MarshalAsXML(req, blocks) + if err := runtime.MarshalAsXML(req, blocks); err != nil { + return nil, err + } + return req, nil } // commitBlockListHandleResponse handles the CommitBlockList response. 
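The `.In(gmt)` conversions above are not cosmetic: time.RFC1123 renders the zone's abbreviation, so formatting a timestamp that is not already in GMT emits a zone name (say CET) that HTTP date headers do not accept. A self-contained illustration; gmt is assumed to be the time.FixedZone("GMT", 0) the generated package defines elsewhere in this patch, and the timestamp is made up:

package main

import (
	"fmt"
	"time"
)

// gmt mirrors the fixed zone the request builders convert into before
// calling Format(time.RFC1123).
var gmt = time.FixedZone("GMT", 0)

func main() {
	t := time.Date(2024, time.January, 30, 12, 39, 10, 0, time.FixedZone("CET", 3600))
	fmt.Println(t.Format(time.RFC1123))         // Tue, 30 Jan 2024 12:39:10 CET  <- not a valid HTTP date
	fmt.Println(t.In(gmt).Format(time.RFC1123)) // Tue, 30 Jan 2024 11:39:10 GMT  <- what the service expects
}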
func (client *BlockBlobClient) commitBlockListHandleResponse(resp *http.Response) (BlockBlobClientCommitBlockListResponse, error) { result := BlockBlobClientCommitBlockListResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) if err != nil { return BlockBlobClientCommitBlockListResponse{}, err } - result.LastModified = &lastModified + result.ContentCRC64 = contentCRC64 } if val := resp.Header.Get("Content-MD5"); val != "" { contentMD5, err := base64.StdEncoding.DecodeString(val) @@ -188,44 +182,44 @@ func (client *BlockBlobClient) commitBlockListHandleResponse(resp *http.Response } result.ContentMD5 = contentMD5 } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - contentCRC64, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) if err != nil { return BlockBlobClientCommitBlockListResponse{}, err } - result.ContentCRC64 = contentCRC64 - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val + result.Date = &date } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) if err != nil { return BlockBlobClientCommitBlockListResponse{}, err } - result.Date = &date + result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { - isServerEncrypted, err := strconv.ParseBool(val) + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) if err != nil { return BlockBlobClientCommitBlockListResponse{}, err } - result.IsServerEncrypted = &isServerEncrypted + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val } return result, nil } @@ -233,24 +227,27 @@ func (client *BlockBlobClient) commitBlockListHandleResponse(resp *http.Response // GetBlockList - The Get Block List operation retrieves the list of blocks that have been uploaded as part of a block blob // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - listType - Specifies whether to return the list of committed blocks, the list of uncommitted blocks, or both lists together. // - options - BlockBlobClientGetBlockListOptions contains the optional parameters for the BlockBlobClient.GetBlockList method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, options *BlockBlobClientGetBlockListOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientGetBlockListResponse, error) { + var err error req, err := client.getBlockListCreateRequest(ctx, listType, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { return BlockBlobClientGetBlockListResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlockBlobClientGetBlockListResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return BlockBlobClientGetBlockListResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return BlockBlobClientGetBlockListResponse{}, err } - return client.getBlockListHandleResponse(resp) + resp, err := client.getBlockListHandleResponse(httpResp) + return resp, err } // getBlockListCreateRequest creates the GetBlockList request. @@ -275,7 +272,7 @@ func (client *BlockBlobClient) getBlockListCreateRequest(ctx context.Context, li if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -286,28 +283,35 @@ func (client *BlockBlobClient) getBlockListCreateRequest(ctx context.Context, li // getBlockListHandleResponse handles the GetBlockList response. 
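The hardcoded "2020-10-02" literals give way to a shared ServiceVersion constant, so future service-version bumps touch one line instead of every request builder. Going by the "Generated from API version" comments in this diff, the constant presumably reads:

package generated

// ServiceVersion is the x-ms-version value every request builder now sends.
const ServiceVersion = "2023-08-03"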
func (client *BlockBlobClient) getBlockListHandleResponse(resp *http.Response) (BlockBlobClientGetBlockListResponse, error) { result := BlockBlobClientGetBlockListResponse{} - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { + blobContentLength, err := strconv.ParseInt(val, 10, 64) if err != nil { return BlockBlobClientGetBlockListResponse{}, err } - result.LastModified = &lastModified + result.BlobContentLength = &blobContentLength } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } if val := resp.Header.Get("Content-Type"); val != "" { result.ContentType = &val } - if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { - blobContentLength, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) if err != nil { return BlockBlobClientGetBlockListResponse{}, err } - result.BlobContentLength = &blobContentLength + result.Date = &date } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientGetBlockListResponse{}, err + } + result.LastModified = &lastModified } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val @@ -315,13 +319,6 @@ func (client *BlockBlobClient) getBlockListHandleResponse(resp *http.Response) ( if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlockBlobClientGetBlockListResponse{}, err - } - result.Date = &date - } if err := runtime.UnmarshalAsXML(resp, &result.BlockList); err != nil { return BlockBlobClientGetBlockListResponse{}, err } @@ -335,7 +332,7 @@ func (client *BlockBlobClient) getBlockListHandleResponse(resp *http.Response) ( // Block from URL API in conjunction with Put Block List. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - contentLength - The length of the request. // - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies // a page blob snapshot. The value should be URL-encoded as it would appear in a request @@ -350,18 +347,21 @@ func (client *BlockBlobClient) getBlockListHandleResponse(resp *http.Response) ( // - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL // method. 
func (client *BlockBlobClient) PutBlobFromURL(ctx context.Context, contentLength int64, copySource string, options *BlockBlobClientPutBlobFromURLOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (BlockBlobClientPutBlobFromURLResponse, error) { + var err error req, err := client.putBlobFromURLCreateRequest(ctx, contentLength, copySource, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions, sourceModifiedAccessConditions) if err != nil { return BlockBlobClientPutBlobFromURLResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlockBlobClientPutBlobFromURLResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return BlockBlobClientPutBlobFromURLResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlockBlobClientPutBlobFromURLResponse{}, err } - return client.putBlobFromURLHandleResponse(resp) + resp, err := client.putBlobFromURLHandleResponse(httpResp) + return resp, err } // putBlobFromURLCreateRequest creates the PutBlobFromURL request. @@ -424,10 +424,10 @@ func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -439,10 +439,10 @@ func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { req.Raw().Header["x-ms-source-if-match"] = 
[]string{string(*sourceModifiedAccessConditions.SourceIfMatch)} @@ -453,7 +453,7 @@ func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfTags != nil { req.Raw().Header["x-ms-source-if-tags"] = []string{*sourceModifiedAccessConditions.SourceIfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -470,6 +470,9 @@ func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, if options != nil && options.CopySourceAuthorization != nil { req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} } + if options != nil && options.CopySourceTags != nil { + req.Raw().Header["x-ms-copy-source-tag-option"] = []string{string(*options.CopySourceTags)} + } req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } @@ -477,15 +480,8 @@ func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, // putBlobFromURLHandleResponse handles the PutBlobFromURL response. func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response) (BlockBlobClientPutBlobFromURLResponse, error) { result := BlockBlobClientPutBlobFromURLResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlockBlobClientPutBlobFromURLResponse{}, err - } - result.LastModified = &lastModified + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } if val := resp.Header.Get("Content-MD5"); val != "" { contentMD5, err := base64.StdEncoding.DecodeString(val) @@ -494,18 +490,6 @@ func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response) } result.ContentMD5 = contentMD5 } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -513,6 +497,15 @@ func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response) } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { @@ -520,11 +513,21 @@ func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response) } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return 
BlockBlobClientPutBlobFromURLResponse{}, err + } + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val } return result, nil } @@ -532,7 +535,7 @@ func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response) // StageBlock - The Stage Block operation creates a new block to be committed as part of a blob // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal // to 64 bytes in size. For a given blob, the length of the value specified for the blockid // parameter must be the same size for each block. @@ -543,18 +546,21 @@ func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response) // - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. // - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. func (client *BlockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientStageBlockOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo) (BlockBlobClientStageBlockResponse, error) { + var err error req, err := client.stageBlockCreateRequest(ctx, blockID, contentLength, body, options, leaseAccessConditions, cpkInfo, cpkScopeInfo) if err != nil { return BlockBlobClientStageBlockResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlockBlobClientStageBlockResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return BlockBlobClientStageBlockResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlockBlobClientStageBlockResponse{}, err } - return client.stageBlockHandleResponse(resp) + resp, err := client.stageBlockHandleResponse(httpResp) + return resp, err } // stageBlockCreateRequest creates the StageBlock request. @@ -592,17 +598,30 @@ func (client *BlockBlobClient) stageBlockCreateRequest(ctx context.Context, bloc if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } req.Raw().Header["Accept"] = []string{"application/xml"} - return req, req.SetBody(body, "application/octet-stream") + if err := req.SetBody(body, "application/octet-stream"); err != nil { + return nil, err + } + return req, nil } // stageBlockHandleResponse handles the StageBlock response. 
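A related cleanup sits at the tail of the *CreateRequest helpers: `return req, runtime.MarshalAsXML(req, blocks)` and `return req, req.SetBody(body, "application/octet-stream")` become explicit checks that return a nil request on failure, so a caller can never receive a half-built request beside a non-nil error. Condensed (finishXMLRequest is an illustrative helper, not generated code):

package example

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

// finishXMLRequest condenses the regenerated tail of a request builder:
// encoding failures short-circuit instead of riding along with the request.
func finishXMLRequest(req *policy.Request, body any) (*policy.Request, error) {
	req.Raw().Header["Accept"] = []string{"application/xml"}
	if err := runtime.MarshalAsXML(req, body); err != nil {
		return nil, err
	}
	return req, nil
}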
func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (BlockBlobClientStageBlockResponse, error) { result := BlockBlobClientStageBlockResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientStageBlockResponse{}, err + } + result.ContentCRC64 = contentCRC64 + } if val := resp.Header.Get("Content-MD5"); val != "" { contentMD5, err := base64.StdEncoding.DecodeString(val) if err != nil { @@ -610,15 +629,6 @@ func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (Bl } result.ContentMD5 = contentMD5 } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -626,12 +636,11 @@ func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (Bl } result.Date = &date } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - contentCRC64, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return BlockBlobClientStageBlockResponse{}, err - } - result.ContentCRC64 = contentCRC64 + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) @@ -640,11 +649,11 @@ func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (Bl } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } @@ -653,7 +662,7 @@ func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (Bl // are read from a URL. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal // to 64 bytes in size. For a given blob, the length of the value specified for the blockid // parameter must be the same size for each block. @@ -667,18 +676,21 @@ func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (Bl // - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL // method. 
func (client *BlockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL string, options *BlockBlobClientStageBlockFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (BlockBlobClientStageBlockFromURLResponse, error) { + var err error req, err := client.stageBlockFromURLCreateRequest(ctx, blockID, contentLength, sourceURL, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions) if err != nil { return BlockBlobClientStageBlockFromURLResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlockBlobClientStageBlockFromURLResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return BlockBlobClientStageBlockFromURLResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlockBlobClientStageBlockFromURLResponse{}, err } - return client.stageBlockFromURLHandleResponse(resp) + resp, err := client.stageBlockFromURLHandleResponse(httpResp) + return resp, err } // stageBlockFromURLCreateRequest creates the StageBlockFromURL request. @@ -721,10 +733,10 @@ func (client *BlockBlobClient) stageBlockFromURLCreateRequest(ctx context.Contex req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} @@ -732,7 +744,7 @@ func (client *BlockBlobClient) stageBlockFromURLCreateRequest(ctx context.Contex if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -746,12 +758,8 @@ func (client *BlockBlobClient) stageBlockFromURLCreateRequest(ctx context.Contex // stageBlockFromURLHandleResponse handles the StageBlockFromURL response. 
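For readers skimming the reordered handlers: Content-MD5 and x-ms-content-crc64 arrive base64-encoded, and each handler decodes them to raw bytes, failing the whole response on a malformed value. The recurring pattern, extracted (decodeChecksumHeader is an illustrative helper, not part of the generated code):

package example

import "encoding/base64"

// decodeChecksumHeader: an absent header leaves the result field nil;
// otherwise the base64 payload is decoded, and a bad value is an error.
func decodeChecksumHeader(val string) ([]byte, error) {
	if val == "" {
		return nil, nil
	}
	return base64.StdEncoding.DecodeString(val)
}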
func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Response) (BlockBlobClientStageBlockFromURLResponse, error) { result := BlockBlobClientStageBlockFromURLResponse{} - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return BlockBlobClientStageBlockFromURLResponse{}, err - } - result.ContentMD5 = contentMD5 + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } if val := resp.Header.Get("x-ms-content-crc64"); val != "" { contentCRC64, err := base64.StdEncoding.DecodeString(val) @@ -760,14 +768,12 @@ func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Respon } result.ContentCRC64 = contentCRC64 } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return BlockBlobClientStageBlockFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) @@ -776,6 +782,12 @@ func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Respon } result.Date = &date } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { @@ -783,11 +795,11 @@ func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Respon } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } @@ -798,7 +810,7 @@ func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Respon // the content of a block blob, use the Put Block List operation. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - contentLength - The length of the request. // - body - Initial data // - options - BlockBlobClientUploadOptions contains the optional parameters for the BlockBlobClient.Upload method. @@ -808,18 +820,21 @@ func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Respon // - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *BlockBlobClient) Upload(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientUploadOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientUploadResponse, error) { + var err error req, err := client.uploadCreateRequest(ctx, contentLength, body, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { return BlockBlobClientUploadResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return BlockBlobClientUploadResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return BlockBlobClientUploadResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return BlockBlobClientUploadResponse{}, err } - return client.uploadHandleResponse(resp) + resp, err := client.uploadHandleResponse(httpResp) + return resp, err } // uploadCreateRequest creates the Upload request. @@ -882,10 +897,10 @@ func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentL req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -896,7 +911,7 @@ func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentL if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -904,7 +919,7 @@ func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentL req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} } if options != nil && options.ImmutabilityPolicyExpiry != nil { - req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)} + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} } if options != nil && options.ImmutabilityPolicyMode != nil { req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} @@ -912,22 +927,21 @@ func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentL if options != nil && options.LegalHold != nil { 
req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} } + if options != nil && options.TransactionalContentCRC64 != nil { + req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + } req.Raw().Header["Accept"] = []string{"application/xml"} - return req, req.SetBody(body, "application/octet-stream") + if err := req.SetBody(body, "application/octet-stream"); err != nil { + return nil, err + } + return req, nil } // uploadHandleResponse handles the Upload response. func (client *BlockBlobClient) uploadHandleResponse(resp *http.Response) (BlockBlobClientUploadResponse, error) { result := BlockBlobClientUploadResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return BlockBlobClientUploadResponse{}, err - } - result.LastModified = &lastModified + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } if val := resp.Header.Get("Content-MD5"); val != "" { contentMD5, err := base64.StdEncoding.DecodeString(val) @@ -936,18 +950,6 @@ func (client *BlockBlobClient) uploadHandleResponse(resp *http.Response) (BlockB } result.ContentMD5 = contentMD5 } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -955,6 +957,15 @@ func (client *BlockBlobClient) uploadHandleResponse(resp *http.Response) (BlockB } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { @@ -962,11 +973,21 @@ func (client *BlockBlobClient) uploadHandleResponse(resp *http.Response) (BlockB } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlockBlobClientUploadResponse{}, err + } + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val } return result, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go index 74e6cf1e..95af9e15 100644 --- 
a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -13,6 +12,7 @@ type AccessTier string const ( AccessTierArchive AccessTier = "Archive" + AccessTierCold AccessTier = "Cold" AccessTierCool AccessTier = "Cool" AccessTierHot AccessTier = "Hot" AccessTierP10 AccessTier = "P10" @@ -33,6 +33,7 @@ const ( func PossibleAccessTierValues() []AccessTier { return []AccessTier{ AccessTierArchive, + AccessTierCold, AccessTierCool, AccessTierHot, AccessTierP10, @@ -53,27 +54,28 @@ func PossibleAccessTierValues() []AccessTier { type AccountKind string const ( - AccountKindStorage AccountKind = "Storage" AccountKindBlobStorage AccountKind = "BlobStorage" - AccountKindStorageV2 AccountKind = "StorageV2" - AccountKindFileStorage AccountKind = "FileStorage" AccountKindBlockBlobStorage AccountKind = "BlockBlobStorage" + AccountKindFileStorage AccountKind = "FileStorage" + AccountKindStorage AccountKind = "Storage" + AccountKindStorageV2 AccountKind = "StorageV2" ) // PossibleAccountKindValues returns the possible values for the AccountKind const type. func PossibleAccountKindValues() []AccountKind { return []AccountKind{ - AccountKindStorage, AccountKindBlobStorage, - AccountKindStorageV2, - AccountKindFileStorage, AccountKindBlockBlobStorage, + AccountKindFileStorage, + AccountKindStorage, + AccountKindStorageV2, } } type ArchiveStatus string const ( + ArchiveStatusRehydratePendingToCold ArchiveStatus = "rehydrate-pending-to-cold" ArchiveStatusRehydratePendingToCool ArchiveStatus = "rehydrate-pending-to-cool" ArchiveStatusRehydratePendingToHot ArchiveStatus = "rehydrate-pending-to-hot" ) @@ -81,25 +83,41 @@ const ( // PossibleArchiveStatusValues returns the possible values for the ArchiveStatus const type. func PossibleArchiveStatusValues() []ArchiveStatus { return []ArchiveStatus{ + ArchiveStatusRehydratePendingToCold, ArchiveStatusRehydratePendingToCool, ArchiveStatusRehydratePendingToHot, } } +type BlobCopySourceTags string + +const ( + BlobCopySourceTagsCOPY BlobCopySourceTags = "COPY" + BlobCopySourceTagsREPLACE BlobCopySourceTags = "REPLACE" +) + +// PossibleBlobCopySourceTagsValues returns the possible values for the BlobCopySourceTags const type. +func PossibleBlobCopySourceTagsValues() []BlobCopySourceTags { + return []BlobCopySourceTags{ + BlobCopySourceTagsCOPY, + BlobCopySourceTagsREPLACE, + } +} + // BlobGeoReplicationStatus - The status of the secondary location type BlobGeoReplicationStatus string const ( - BlobGeoReplicationStatusLive BlobGeoReplicationStatus = "live" BlobGeoReplicationStatusBootstrap BlobGeoReplicationStatus = "bootstrap" + BlobGeoReplicationStatusLive BlobGeoReplicationStatus = "live" BlobGeoReplicationStatusUnavailable BlobGeoReplicationStatus = "unavailable" ) // PossibleBlobGeoReplicationStatusValues returns the possible values for the BlobGeoReplicationStatus const type. 
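Beyond the alphabetical reshuffle, this constants hunk adds genuinely new surface: the Cold access tier, its rehydrate-pending archive status, and the BlobCopySourceTags type behind the x-ms-copy-source-tag-option header added earlier. A sketch of how the new Cold values pair up (tierForRehydrateStatus is illustrative, not generated code):

package generated

// tierForRehydrateStatus pairs each rehydrate-pending status with its
// destination tier, including the newly added Cold pair.
func tierForRehydrateStatus(status ArchiveStatus) (AccessTier, bool) {
	switch status {
	case ArchiveStatusRehydratePendingToCold:
		return AccessTierCold, true
	case ArchiveStatusRehydratePendingToCool:
		return AccessTierCool, true
	case ArchiveStatusRehydratePendingToHot:
		return AccessTierHot, true
	}
	return "", false
}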
func PossibleBlobGeoReplicationStatusValues() []BlobGeoReplicationStatus { return []BlobGeoReplicationStatus{ - BlobGeoReplicationStatusLive, BlobGeoReplicationStatusBootstrap, + BlobGeoReplicationStatusLive, BlobGeoReplicationStatusUnavailable, } } @@ -107,53 +125,53 @@ func PossibleBlobGeoReplicationStatusValues() []BlobGeoReplicationStatus { type BlobType string const ( + BlobTypeAppendBlob BlobType = "AppendBlob" BlobTypeBlockBlob BlobType = "BlockBlob" BlobTypePageBlob BlobType = "PageBlob" - BlobTypeAppendBlob BlobType = "AppendBlob" ) // PossibleBlobTypeValues returns the possible values for the BlobType const type. func PossibleBlobTypeValues() []BlobType { return []BlobType{ + BlobTypeAppendBlob, BlobTypeBlockBlob, BlobTypePageBlob, - BlobTypeAppendBlob, } } type BlockListType string const ( + BlockListTypeAll BlockListType = "all" BlockListTypeCommitted BlockListType = "committed" BlockListTypeUncommitted BlockListType = "uncommitted" - BlockListTypeAll BlockListType = "all" ) // PossibleBlockListTypeValues returns the possible values for the BlockListType const type. func PossibleBlockListTypeValues() []BlockListType { return []BlockListType{ + BlockListTypeAll, BlockListTypeCommitted, BlockListTypeUncommitted, - BlockListTypeAll, } } type CopyStatusType string const ( - CopyStatusTypePending CopyStatusType = "pending" - CopyStatusTypeSuccess CopyStatusType = "success" CopyStatusTypeAborted CopyStatusType = "aborted" CopyStatusTypeFailed CopyStatusType = "failed" + CopyStatusTypePending CopyStatusType = "pending" + CopyStatusTypeSuccess CopyStatusType = "success" ) // PossibleCopyStatusTypeValues returns the possible values for the CopyStatusType const type. func PossibleCopyStatusTypeValues() []CopyStatusType { return []CopyStatusType{ - CopyStatusTypePending, - CopyStatusTypeSuccess, CopyStatusTypeAborted, CopyStatusTypeFailed, + CopyStatusTypePending, + CopyStatusTypeSuccess, } } @@ -190,15 +208,15 @@ func PossibleDeleteTypeValues() []DeleteType { type EncryptionAlgorithmType string const ( - EncryptionAlgorithmTypeNone EncryptionAlgorithmType = "None" EncryptionAlgorithmTypeAES256 EncryptionAlgorithmType = "AES256" + EncryptionAlgorithmTypeNone EncryptionAlgorithmType = "None" ) // PossibleEncryptionAlgorithmTypeValues returns the possible values for the EncryptionAlgorithmType const type. func PossibleEncryptionAlgorithmTypeValues() []EncryptionAlgorithmType { return []EncryptionAlgorithmType{ - EncryptionAlgorithmTypeNone, EncryptionAlgorithmTypeAES256, + EncryptionAlgorithmTypeNone, } } @@ -221,50 +239,65 @@ func PossibleExpiryOptionsValues() []ExpiryOptions { } } +type FilterBlobsIncludeItem string + +const ( + FilterBlobsIncludeItemNone FilterBlobsIncludeItem = "none" + FilterBlobsIncludeItemVersions FilterBlobsIncludeItem = "versions" +) + +// PossibleFilterBlobsIncludeItemValues returns the possible values for the FilterBlobsIncludeItem const type. +func PossibleFilterBlobsIncludeItemValues() []FilterBlobsIncludeItem { + return []FilterBlobsIncludeItem{ + FilterBlobsIncludeItemNone, + FilterBlobsIncludeItemVersions, + } +} + type ImmutabilityPolicyMode string const ( + ImmutabilityPolicyModeLocked ImmutabilityPolicyMode = "Locked" ImmutabilityPolicyModeMutable ImmutabilityPolicyMode = "Mutable" ImmutabilityPolicyModeUnlocked ImmutabilityPolicyMode = "Unlocked" - ImmutabilityPolicyModeLocked ImmutabilityPolicyMode = "Locked" ) // PossibleImmutabilityPolicyModeValues returns the possible values for the ImmutabilityPolicyMode const type. 
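The BlobCopySourceTags values defined above feed PutBlobFromURL: a nil option omits the x-ms-copy-source-tag-option header entirely, mirroring the guard added to putBlobFromURLCreateRequest earlier in this patch. A sketch, assuming the option field is a *BlobCopySourceTags as the string conversion in that hunk suggests:

package generated

// copySourceTagsHeader shows how the new option reaches the wire.
func copySourceTagsHeader(opts *BlockBlobClientPutBlobFromURLOptions) []string {
	if opts == nil || opts.CopySourceTags == nil {
		return nil // header omitted
	}
	return []string{string(*opts.CopySourceTags)}
}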
func PossibleImmutabilityPolicyModeValues() []ImmutabilityPolicyMode { return []ImmutabilityPolicyMode{ + ImmutabilityPolicyModeLocked, ImmutabilityPolicyModeMutable, ImmutabilityPolicyModeUnlocked, - ImmutabilityPolicyModeLocked, } } type ImmutabilityPolicySetting string const ( - ImmutabilityPolicySettingUnlocked ImmutabilityPolicySetting = "Unlocked" ImmutabilityPolicySettingLocked ImmutabilityPolicySetting = "Locked" + ImmutabilityPolicySettingUnlocked ImmutabilityPolicySetting = "Unlocked" ) // PossibleImmutabilityPolicySettingValues returns the possible values for the ImmutabilityPolicySetting const type. func PossibleImmutabilityPolicySettingValues() []ImmutabilityPolicySetting { return []ImmutabilityPolicySetting{ - ImmutabilityPolicySettingUnlocked, ImmutabilityPolicySettingLocked, + ImmutabilityPolicySettingUnlocked, } } type LeaseDurationType string const ( - LeaseDurationTypeInfinite LeaseDurationType = "infinite" LeaseDurationTypeFixed LeaseDurationType = "fixed" + LeaseDurationTypeInfinite LeaseDurationType = "infinite" ) // PossibleLeaseDurationTypeValues returns the possible values for the LeaseDurationType const type. func PossibleLeaseDurationTypeValues() []LeaseDurationType { return []LeaseDurationType{ - LeaseDurationTypeInfinite, LeaseDurationTypeFixed, + LeaseDurationTypeInfinite, } } @@ -272,20 +305,20 @@ type LeaseStateType string const ( LeaseStateTypeAvailable LeaseStateType = "available" - LeaseStateTypeLeased LeaseStateType = "leased" - LeaseStateTypeExpired LeaseStateType = "expired" LeaseStateTypeBreaking LeaseStateType = "breaking" LeaseStateTypeBroken LeaseStateType = "broken" + LeaseStateTypeExpired LeaseStateType = "expired" + LeaseStateTypeLeased LeaseStateType = "leased" ) // PossibleLeaseStateTypeValues returns the possible values for the LeaseStateType const type. func PossibleLeaseStateTypeValues() []LeaseStateType { return []LeaseStateType{ LeaseStateTypeAvailable, - LeaseStateTypeLeased, - LeaseStateTypeExpired, LeaseStateTypeBreaking, LeaseStateTypeBroken, + LeaseStateTypeExpired, + LeaseStateTypeLeased, } } @@ -309,14 +342,14 @@ type ListBlobsIncludeItem string const ( ListBlobsIncludeItemCopy ListBlobsIncludeItem = "copy" ListBlobsIncludeItemDeleted ListBlobsIncludeItem = "deleted" + ListBlobsIncludeItemDeletedwithversions ListBlobsIncludeItem = "deletedwithversions" + ListBlobsIncludeItemImmutabilitypolicy ListBlobsIncludeItem = "immutabilitypolicy" + ListBlobsIncludeItemLegalhold ListBlobsIncludeItem = "legalhold" ListBlobsIncludeItemMetadata ListBlobsIncludeItem = "metadata" ListBlobsIncludeItemSnapshots ListBlobsIncludeItem = "snapshots" + ListBlobsIncludeItemTags ListBlobsIncludeItem = "tags" ListBlobsIncludeItemUncommittedblobs ListBlobsIncludeItem = "uncommittedblobs" ListBlobsIncludeItemVersions ListBlobsIncludeItem = "versions" - ListBlobsIncludeItemTags ListBlobsIncludeItem = "tags" - ListBlobsIncludeItemImmutabilitypolicy ListBlobsIncludeItem = "immutabilitypolicy" - ListBlobsIncludeItemLegalhold ListBlobsIncludeItem = "legalhold" - ListBlobsIncludeItemDeletedwithversions ListBlobsIncludeItem = "deletedwithversions" ) // PossibleListBlobsIncludeItemValues returns the possible values for the ListBlobsIncludeItem const type. 
@@ -324,30 +357,30 @@ func PossibleListBlobsIncludeItemValues() []ListBlobsIncludeItem { return []ListBlobsIncludeItem{ ListBlobsIncludeItemCopy, ListBlobsIncludeItemDeleted, + ListBlobsIncludeItemDeletedwithversions, + ListBlobsIncludeItemImmutabilitypolicy, + ListBlobsIncludeItemLegalhold, ListBlobsIncludeItemMetadata, ListBlobsIncludeItemSnapshots, + ListBlobsIncludeItemTags, ListBlobsIncludeItemUncommittedblobs, ListBlobsIncludeItemVersions, - ListBlobsIncludeItemTags, - ListBlobsIncludeItemImmutabilitypolicy, - ListBlobsIncludeItemLegalhold, - ListBlobsIncludeItemDeletedwithversions, } } type ListContainersIncludeType string const ( - ListContainersIncludeTypeMetadata ListContainersIncludeType = "metadata" ListContainersIncludeTypeDeleted ListContainersIncludeType = "deleted" + ListContainersIncludeTypeMetadata ListContainersIncludeType = "metadata" ListContainersIncludeTypeSystem ListContainersIncludeType = "system" ) // PossibleListContainersIncludeTypeValues returns the possible values for the ListContainersIncludeType const type. func PossibleListContainersIncludeTypeValues() []ListContainersIncludeType { return []ListContainersIncludeType{ - ListContainersIncludeTypeMetadata, ListContainersIncludeTypeDeleted, + ListContainersIncludeTypeMetadata, ListContainersIncludeTypeSystem, } } @@ -404,18 +437,18 @@ func PossiblePublicAccessTypeValues() []PublicAccessType { type QueryFormatType string const ( + QueryFormatTypeArrow QueryFormatType = "arrow" QueryFormatTypeDelimited QueryFormatType = "delimited" QueryFormatTypeJSON QueryFormatType = "json" - QueryFormatTypeArrow QueryFormatType = "arrow" QueryFormatTypeParquet QueryFormatType = "parquet" ) // PossibleQueryFormatTypeValues returns the possible values for the QueryFormatType const type. func PossibleQueryFormatTypeValues() []QueryFormatType { return []QueryFormatType{ + QueryFormatTypeArrow, QueryFormatTypeDelimited, QueryFormatTypeJSON, - QueryFormatTypeArrow, QueryFormatTypeParquet, } } @@ -440,38 +473,38 @@ func PossibleRehydratePriorityValues() []RehydratePriority { type SKUName string const ( - SKUNameStandardLRS SKUName = "Standard_LRS" + SKUNamePremiumLRS SKUName = "Premium_LRS" SKUNameStandardGRS SKUName = "Standard_GRS" + SKUNameStandardLRS SKUName = "Standard_LRS" SKUNameStandardRAGRS SKUName = "Standard_RAGRS" SKUNameStandardZRS SKUName = "Standard_ZRS" - SKUNamePremiumLRS SKUName = "Premium_LRS" ) // PossibleSKUNameValues returns the possible values for the SKUName const type. func PossibleSKUNameValues() []SKUName { return []SKUName{ - SKUNameStandardLRS, + SKUNamePremiumLRS, SKUNameStandardGRS, + SKUNameStandardLRS, SKUNameStandardRAGRS, SKUNameStandardZRS, - SKUNamePremiumLRS, } } type SequenceNumberActionType string const ( + SequenceNumberActionTypeIncrement SequenceNumberActionType = "increment" SequenceNumberActionTypeMax SequenceNumberActionType = "max" SequenceNumberActionTypeUpdate SequenceNumberActionType = "update" - SequenceNumberActionTypeIncrement SequenceNumberActionType = "increment" ) // PossibleSequenceNumberActionTypeValues returns the possible values for the SequenceNumberActionType const type. 
func PossibleSequenceNumberActionTypeValues() []SequenceNumberActionType { return []SequenceNumberActionType{ + SequenceNumberActionTypeIncrement, SequenceNumberActionTypeMax, SequenceNumberActionTypeUpdate, - SequenceNumberActionTypeIncrement, } } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go index 4658ccc4..ce1ff6fd 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -25,46 +24,38 @@ import ( ) // ContainerClient contains the methods for the Container group. -// Don't use this type directly, use NewContainerClient() instead. +// Don't use this type directly, use a constructor function instead. type ContainerClient struct { + internal *azcore.Client endpoint string - pl runtime.Pipeline -} - -// NewContainerClient creates a new instance of ContainerClient with the specified values. -// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. -// - pl - the pipeline used for sending requests and handling responses. -func NewContainerClient(endpoint string, pl runtime.Pipeline) *ContainerClient { - client := &ContainerClient{ - endpoint: endpoint, - pl: pl, - } - return client } // AcquireLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - duration - Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite // lease can be between 15 and 60 seconds. A lease duration cannot be changed using // renew or change. // - options - ContainerClientAcquireLeaseOptions contains the optional parameters for the ContainerClient.AcquireLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *ContainerClient) AcquireLease(ctx context.Context, duration int32, options *ContainerClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientAcquireLeaseResponse, error) { + var err error req, err := client.acquireLeaseCreateRequest(ctx, duration, options, modifiedAccessConditions) if err != nil { return ContainerClientAcquireLeaseResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientAcquireLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return ContainerClientAcquireLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return ContainerClientAcquireLeaseResponse{}, err } - return client.acquireLeaseHandleResponse(resp) + resp, err := client.acquireLeaseHandleResponse(httpResp) + return resp, err } // acquireLeaseCreateRequest creates the AcquireLease request. @@ -86,12 +77,12 @@ func (client *ContainerClient) acquireLeaseCreateRequest(ctx context.Context, du req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -102,6 +93,16 @@ func (client *ContainerClient) acquireLeaseCreateRequest(ctx context.Context, du // acquireLeaseHandleResponse handles the AcquireLease response. 
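The AcquireLease doc comment above encodes a rule worth restating: a container lease is either infinite (-1) or lasts 15 to 60 seconds, and the duration cannot be changed by renew or change. As a predicate (validLeaseDuration is illustrative, not generated code):

package example

// validLeaseDuration captures the duration rule from the AcquireLease docs:
// -1 for a lease that never expires, otherwise 15 to 60 seconds inclusive.
func validLeaseDuration(d int32) bool {
	return d == -1 || (d >= 15 && d <= 60)
}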
func (client *ContainerClient) acquireLeaseHandleResponse(resp *http.Response) (ContainerClientAcquireLeaseResponse, error) { result := ContainerClientAcquireLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientAcquireLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -115,22 +116,12 @@ func (client *ContainerClient) acquireLeaseHandleResponse(resp *http.Response) ( if val := resp.Header.Get("x-ms-lease-id"); val != "" { result.LeaseID = &val } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientAcquireLeaseResponse{}, err - } - result.Date = &date - } return result, nil } @@ -138,22 +129,25 @@ func (client *ContainerClient) acquireLeaseHandleResponse(resp *http.Response) ( // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - ContainerClientBreakLeaseOptions contains the optional parameters for the ContainerClient.BreakLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *ContainerClient) BreakLease(ctx context.Context, options *ContainerClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientBreakLeaseResponse, error) { + var err error req, err := client.breakLeaseCreateRequest(ctx, options, modifiedAccessConditions) if err != nil { return ContainerClientBreakLeaseResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientBreakLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return ContainerClientBreakLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return ContainerClientBreakLeaseResponse{}, err } - return client.breakLeaseHandleResponse(resp) + resp, err := client.breakLeaseHandleResponse(httpResp) + return resp, err } // breakLeaseCreateRequest creates the BreakLease request. 
@@ -174,12 +168,12 @@ func (client *ContainerClient) breakLeaseCreateRequest(ctx context.Context, opti req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.BreakPeriod), 10)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -190,6 +184,16 @@ func (client *ContainerClient) breakLeaseCreateRequest(ctx context.Context, opti // breakLeaseHandleResponse handles the BreakLease response. func (client *ContainerClient) breakLeaseHandleResponse(resp *http.Response) (ContainerClientBreakLeaseResponse, error) { result := ContainerClientBreakLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientBreakLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -208,22 +212,12 @@ func (client *ContainerClient) breakLeaseHandleResponse(resp *http.Response) (Co } result.LeaseTime = &leaseTime } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientBreakLeaseResponse{}, err - } - result.Date = &date - } return result, nil } @@ -231,7 +225,7 @@ func (client *ContainerClient) breakLeaseHandleResponse(resp *http.Response) (Co // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - leaseID - Specifies the current lease ID on the resource. // - proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed // lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID @@ -239,18 +233,21 @@ func (client *ContainerClient) breakLeaseHandleResponse(resp *http.Response) (Co // - options - ContainerClientChangeLeaseOptions contains the optional parameters for the ContainerClient.ChangeLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *ContainerClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, options *ContainerClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientChangeLeaseResponse, error) { + var err error req, err := client.changeLeaseCreateRequest(ctx, leaseID, proposedLeaseID, options, modifiedAccessConditions) if err != nil { return ContainerClientChangeLeaseResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientChangeLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientChangeLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientChangeLeaseResponse{}, err } - return client.changeLeaseHandleResponse(resp) + resp, err := client.changeLeaseHandleResponse(httpResp) + return resp, err } // changeLeaseCreateRequest creates the ChangeLease request. @@ -270,12 +267,12 @@ func (client *ContainerClient) changeLeaseCreateRequest(ctx context.Context, lea req.Raw().Header["x-ms-lease-id"] = []string{leaseID} req.Raw().Header["x-ms-proposed-lease-id"] = []string{proposedLeaseID} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -286,6 +283,16 @@ func (client *ContainerClient) changeLeaseCreateRequest(ctx context.Context, lea // changeLeaseHandleResponse handles the ChangeLease response. 
func (client *ContainerClient) changeLeaseHandleResponse(resp *http.Response) (ContainerClientChangeLeaseResponse, error) { result := ContainerClientChangeLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientChangeLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -299,22 +306,12 @@ func (client *ContainerClient) changeLeaseHandleResponse(resp *http.Response) (C if val := resp.Header.Get("x-ms-lease-id"); val != "" { result.LeaseID = &val } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientChangeLeaseResponse{}, err - } - result.Date = &date - } return result, nil } @@ -322,22 +319,25 @@ func (client *ContainerClient) changeLeaseHandleResponse(resp *http.Response) (C // fails // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - ContainerClientCreateOptions contains the optional parameters for the ContainerClient.Create method. // - ContainerCPKScopeInfo - ContainerCPKScopeInfo contains a group of parameters for the ContainerClient.Create method. func (client *ContainerClient) Create(ctx context.Context, options *ContainerClientCreateOptions, containerCPKScopeInfo *ContainerCPKScopeInfo) (ContainerClientCreateResponse, error) { + var err error req, err := client.createCreateRequest(ctx, options, containerCPKScopeInfo) if err != nil { return ContainerClientCreateResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientCreateResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return ContainerClientCreateResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return ContainerClientCreateResponse{}, err } - return client.createHandleResponse(resp) + resp, err := client.createHandleResponse(httpResp) + return resp, err } // createCreateRequest creates the Create request. @@ -362,7 +362,7 @@ func (client *ContainerClient) createCreateRequest(ctx context.Context, options if options != nil && options.Access != nil { req.Raw().Header["x-ms-blob-public-access"] = []string{string(*options.Access)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -379,6 +379,16 @@ func (client *ContainerClient) createCreateRequest(ctx context.Context, options // createHandleResponse handles the Create response. 
func (client *ContainerClient) createHandleResponse(resp *http.Response) (ContainerClientCreateResponse, error) { result := ContainerClientCreateResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientCreateResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -389,22 +399,12 @@ func (client *ContainerClient) createHandleResponse(resp *http.Response) (Contai } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientCreateResponse{}, err - } - result.Date = &date - } return result, nil } @@ -412,23 +412,26 @@ func (client *ContainerClient) createHandleResponse(resp *http.Response) (Contai // deleted during garbage collection // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - ContainerClientDeleteOptions contains the optional parameters for the ContainerClient.Delete method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *ContainerClient) Delete(ctx context.Context, options *ContainerClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientDeleteResponse, error) { + var err error req, err := client.deleteCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { return ContainerClientDeleteResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientDeleteResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return ContainerClientDeleteResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return ContainerClientDeleteResponse{}, err } - return client.deleteHandleResponse(resp) + resp, err := client.deleteHandleResponse(httpResp) + return resp, err } // deleteCreateRequest creates the Delete request. 
@@ -447,12 +450,12 @@ func (client *ContainerClient) deleteCreateRequest(ctx context.Context, options req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -466,19 +469,100 @@ func (client *ContainerClient) deleteHandleResponse(resp *http.Response) (Contai if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientDeleteResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } + return result, nil +} + +// FilterBlobs - The Filter Blobs operation enables callers to list blobs in a container whose tags match a given search expression. +// Filter blobs searches within the given container. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-08-03 +// - where - Filters the results to return only blobs whose tags match the specified expression. +// - options - ContainerClientFilterBlobsOptions contains the optional parameters for the ContainerClient.FilterBlobs method. +func (client *ContainerClient) FilterBlobs(ctx context.Context, where string, options *ContainerClientFilterBlobsOptions) (ContainerClientFilterBlobsResponse, error) { + var err error + req, err := client.filterBlobsCreateRequest(ctx, where, options) + if err != nil { + return ContainerClientFilterBlobsResponse{}, err + } + httpResp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ContainerClientFilterBlobsResponse{}, err + } + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientFilterBlobsResponse{}, err + } + resp, err := client.filterBlobsHandleResponse(httpResp) + return resp, err +} + +// filterBlobsCreateRequest creates the FilterBlobs request.
+func (client *ContainerClient) filterBlobsCreateRequest(ctx context.Context, where string, options *ContainerClientFilterBlobsOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + reqQP.Set("comp", "blobs") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + reqQP.Set("where", where) + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) + } + if options != nil && options.Include != nil { + reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// filterBlobsHandleResponse handles the FilterBlobs response. +func (client *ContainerClient) filterBlobsHandleResponse(resp *http.Response) (ContainerClientFilterBlobsResponse, error) { + result := ContainerClientFilterBlobsResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { - return ContainerClientDeleteResponse{}, err + return ContainerClientFilterBlobsResponse{}, err } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if err := runtime.UnmarshalAsXML(resp, &result.FilterBlobSegment); err != nil { + return ContainerClientFilterBlobsResponse{}, err + } return result, nil } @@ -486,23 +570,26 @@ func (client *ContainerClient) deleteHandleResponse(resp *http.Response) (Contai // be accessed publicly. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - ContainerClientGetAccessPolicyOptions contains the optional parameters for the ContainerClient.GetAccessPolicy // method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
func (client *ContainerClient) GetAccessPolicy(ctx context.Context, options *ContainerClientGetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions) (ContainerClientGetAccessPolicyResponse, error) { + var err error req, err := client.getAccessPolicyCreateRequest(ctx, options, leaseAccessConditions) if err != nil { return ContainerClientGetAccessPolicyResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientGetAccessPolicyResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientGetAccessPolicyResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientGetAccessPolicyResponse{}, err } - return client.getAccessPolicyHandleResponse(resp) + resp, err := client.getAccessPolicyHandleResponse(httpResp) + return resp, err } // getAccessPolicyCreateRequest creates the GetAccessPolicy request. @@ -521,7 +608,7 @@ func (client *ContainerClient) getAccessPolicyCreateRequest(ctx context.Context, if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -535,6 +622,16 @@ func (client *ContainerClient) getAccessPolicyHandleResponse(resp *http.Response if val := resp.Header.Get("x-ms-blob-public-access"); val != "" { result.BlobPublicAccess = (*PublicAccessType)(&val) } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientGetAccessPolicyResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -545,22 +642,12 @@ func (client *ContainerClient) getAccessPolicyHandleResponse(resp *http.Response } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientGetAccessPolicyResponse{}, err - } - result.Date = &date - } if err := runtime.UnmarshalAsXML(resp, &result); err != nil { return ContainerClientGetAccessPolicyResponse{}, err } @@ -570,22 +657,25 @@ func (client *ContainerClient) getAccessPolicyHandleResponse(resp *http.Response // GetAccountInfo - Returns the sku name and account kind // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - ContainerClientGetAccountInfoOptions contains the optional parameters for the ContainerClient.GetAccountInfo // method. 
func (client *ContainerClient) GetAccountInfo(ctx context.Context, options *ContainerClientGetAccountInfoOptions) (ContainerClientGetAccountInfoResponse, error) { + var err error req, err := client.getAccountInfoCreateRequest(ctx, options) if err != nil { return ContainerClientGetAccountInfoResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientGetAccountInfoResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientGetAccountInfoResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientGetAccountInfoResponse{}, err } - return client.getAccountInfoHandleResponse(resp) + resp, err := client.getAccountInfoHandleResponse(httpResp) + return resp, err } // getAccountInfoCreateRequest creates the GetAccountInfo request. @@ -598,7 +688,7 @@ func (client *ContainerClient) getAccountInfoCreateRequest(ctx context.Context, reqQP.Set("restype", "account") reqQP.Set("comp", "properties") req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } @@ -606,15 +696,12 @@ func (client *ContainerClient) getAccountInfoCreateRequest(ctx context.Context, // getAccountInfoHandleResponse handles the GetAccountInfo response. func (client *ContainerClient) getAccountInfoHandleResponse(resp *http.Response) (ContainerClientGetAccountInfoResponse, error) { result := ContainerClientGetAccountInfoResponse{} + if val := resp.Header.Get("x-ms-account-kind"); val != "" { + result.AccountKind = (*AccountKind)(&val) + } if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -622,11 +709,14 @@ func (client *ContainerClient) getAccountInfoHandleResponse(resp *http.Response) } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } if val := resp.Header.Get("x-ms-sku-name"); val != "" { result.SKUName = (*SKUName)(&val) } - if val := resp.Header.Get("x-ms-account-kind"); val != "" { - result.AccountKind = (*AccountKind)(&val) + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } @@ -635,22 +725,25 @@ func (client *ContainerClient) getAccountInfoHandleResponse(resp *http.Response) // does not include the container's list of blobs // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
func (client *ContainerClient) GetProperties(ctx context.Context, options *ContainerClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (ContainerClientGetPropertiesResponse, error) { + var err error req, err := client.getPropertiesCreateRequest(ctx, options, leaseAccessConditions) if err != nil { return ContainerClientGetPropertiesResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientGetPropertiesResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientGetPropertiesResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientGetPropertiesResponse{}, err } - return client.getPropertiesHandleResponse(resp) + resp, err := client.getPropertiesHandleResponse(httpResp) + return resp, err } // getPropertiesCreateRequest creates the GetProperties request. @@ -668,7 +761,7 @@ func (client *ContainerClient) getPropertiesCreateRequest(ctx context.Context, o if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -679,42 +772,12 @@ func (client *ContainerClient) getPropertiesCreateRequest(ctx context.Context, o // getPropertiesHandleResponse handles the GetProperties response. func (client *ContainerClient) getPropertiesHandleResponse(resp *http.Response) (ContainerClientGetPropertiesResponse, error) { result := ContainerClientGetPropertiesResponse{} - for hh := range resp.Header { - if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { - if result.Metadata == nil { - result.Metadata = map[string]*string{} - } - result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) - } - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientGetPropertiesResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("x-ms-lease-duration"); val != "" { - result.LeaseDuration = (*LeaseDurationType)(&val) - } - if val := resp.Header.Get("x-ms-lease-state"); val != "" { - result.LeaseState = (*LeaseStateType)(&val) - } - if val := resp.Header.Get("x-ms-lease-status"); val != "" { - result.LeaseStatus = (*LeaseStatusType)(&val) + if val := resp.Header.Get("x-ms-blob-public-access"); val != "" { + result.BlobPublicAccess = (*PublicAccessType)(&val) } if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -722,8 +785,18 @@ func (client *ContainerClient) getPropertiesHandleResponse(resp *http.Response) } result.Date = &date } - if val := resp.Header.Get("x-ms-blob-public-access"); val != "" { - result.BlobPublicAccess = (*PublicAccessType)(&val) + if val 
:= resp.Header.Get("x-ms-default-encryption-scope"); val != "" { + result.DefaultEncryptionScope = &val + } + if val := resp.Header.Get("x-ms-deny-encryption-scope-override"); val != "" { + denyEncryptionScopeOverride, err := strconv.ParseBool(val) + if err != nil { + return ContainerClientGetPropertiesResponse{}, err + } + result.DenyEncryptionScopeOverride = &denyEncryptionScopeOverride + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) } if val := resp.Header.Get("x-ms-has-immutability-policy"); val != "" { hasImmutabilityPolicy, err := strconv.ParseBool(val) @@ -739,29 +812,49 @@ func (client *ContainerClient) getPropertiesHandleResponse(resp *http.Response) } result.HasLegalHold = &hasLegalHold } - if val := resp.Header.Get("x-ms-default-encryption-scope"); val != "" { - result.DefaultEncryptionScope = &val - } - if val := resp.Header.Get("x-ms-deny-encryption-scope-override"); val != "" { - denyEncryptionScopeOverride, err := strconv.ParseBool(val) + if val := resp.Header.Get("x-ms-immutable-storage-with-versioning-enabled"); val != "" { + isImmutableStorageWithVersioningEnabled, err := strconv.ParseBool(val) if err != nil { return ContainerClientGetPropertiesResponse{}, err } - result.DenyEncryptionScopeOverride = &denyEncryptionScopeOverride + result.IsImmutableStorageWithVersioningEnabled = &isImmutableStorageWithVersioningEnabled } - if val := resp.Header.Get("x-ms-immutable-storage-with-versioning-enabled"); val != "" { - isImmutableStorageWithVersioningEnabled, err := strconv.ParseBool(val) + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) if err != nil { return ContainerClientGetPropertiesResponse{}, err } - result.IsImmutableStorageWithVersioningEnabled = &isImmutableStorageWithVersioningEnabled + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) + } + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]*string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh)) + } + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } // NewListBlobFlatSegmentPager - [Update] The List Blobs operation returns a list of the blobs under the specified container // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - ContainerClientListBlobFlatSegmentOptions contains the optional parameters for the ContainerClient.NewListBlobFlatSegmentPager // method. 
// @@ -790,7 +883,7 @@ func (client *ContainerClient) ListBlobFlatSegmentCreateRequest(ctx context.Cont reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -801,17 +894,11 @@ func (client *ContainerClient) ListBlobFlatSegmentCreateRequest(ctx context.Cont // listBlobFlatSegmentHandleResponse handles the ListBlobFlatSegment response. func (client *ContainerClient) ListBlobFlatSegmentHandleResponse(resp *http.Response) (ContainerClientListBlobFlatSegmentResponse, error) { result := ContainerClientListBlobFlatSegmentResponse{} - if val := resp.Header.Get("Content-Type"); val != "" { - result.ContentType = &val - } if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) @@ -820,6 +907,12 @@ func (client *ContainerClient) ListBlobFlatSegmentHandleResponse(resp *http.Resp } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } if err := runtime.UnmarshalAsXML(resp, &result.ListBlobsFlatSegmentResponse); err != nil { return ContainerClientListBlobFlatSegmentResponse{}, err } @@ -828,7 +921,7 @@ func (client *ContainerClient) ListBlobFlatSegmentHandleResponse(resp *http.Resp // NewListBlobHierarchySegmentPager - [Update] The List Blobs operation returns a list of the blobs under the specified container // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - delimiter - When the request includes this parameter, the operation returns a BlobPrefix element in the response body that // acts as a placeholder for all blobs whose names begin with the same substring up to the // appearance of the delimiter character. The delimiter may be a single character or a string. 
@@ -840,23 +933,16 @@ func (client *ContainerClient) NewListBlobHierarchySegmentPager(delimiter string return page.NextMarker != nil && len(*page.NextMarker) > 0 }, Fetcher: func(ctx context.Context, page *ContainerClientListBlobHierarchySegmentResponse) (ContainerClientListBlobHierarchySegmentResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.ListBlobHierarchySegmentCreateRequest(ctx, delimiter, options) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextMarker) + nextLink := "" + if page != nil { + nextLink = *page.NextMarker } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.ListBlobHierarchySegmentCreateRequest(ctx, delimiter, options) + }, nil) if err != nil { return ContainerClientListBlobHierarchySegmentResponse{}, err } - resp, err := client.pl.Do(req) - if err != nil { - return ContainerClientListBlobHierarchySegmentResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientListBlobHierarchySegmentResponse{}, runtime.NewResponseError(resp) - } return client.ListBlobHierarchySegmentHandleResponse(resp) }, }) @@ -888,7 +974,7 @@ func (client *ContainerClient) ListBlobHierarchySegmentCreateRequest(ctx context reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -899,17 +985,11 @@ func (client *ContainerClient) ListBlobHierarchySegmentCreateRequest(ctx context // ListBlobHierarchySegmentHandleResponse handles the ListBlobHierarchySegment response. func (client *ContainerClient) ListBlobHierarchySegmentHandleResponse(resp *http.Response) (ContainerClientListBlobHierarchySegmentResponse, error) { result := ContainerClientListBlobHierarchySegmentResponse{} - if val := resp.Header.Get("Content-Type"); val != "" { - result.ContentType = &val - } if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) @@ -918,6 +998,12 @@ func (client *ContainerClient) ListBlobHierarchySegmentHandleResponse(resp *http } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } if err := runtime.UnmarshalAsXML(resp, &result.ListBlobsHierarchySegmentResponse); err != nil { return ContainerClientListBlobHierarchySegmentResponse{}, err } @@ -928,23 +1014,26 @@ func (client *ContainerClient) ListBlobHierarchySegmentHandleResponse(resp *http // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - leaseID - Specifies the current lease ID on the resource. 
// - options - ContainerClientReleaseLeaseOptions contains the optional parameters for the ContainerClient.ReleaseLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *ContainerClient) ReleaseLease(ctx context.Context, leaseID string, options *ContainerClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientReleaseLeaseResponse, error) { + var err error req, err := client.releaseLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) if err != nil { return ContainerClientReleaseLeaseResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientReleaseLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientReleaseLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientReleaseLeaseResponse{}, err } - return client.releaseLeaseHandleResponse(resp) + resp, err := client.releaseLeaseHandleResponse(httpResp) + return resp, err } // releaseLeaseCreateRequest creates the ReleaseLease request. @@ -963,12 +1052,12 @@ func (client *ContainerClient) releaseLeaseCreateRequest(ctx context.Context, le req.Raw().Header["x-ms-lease-action"] = []string{"release"} req.Raw().Header["x-ms-lease-id"] = []string{leaseID} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -979,6 +1068,16 @@ func (client *ContainerClient) releaseLeaseCreateRequest(ctx context.Context, le // releaseLeaseHandleResponse handles the ReleaseLease response. 
func (client *ContainerClient) releaseLeaseHandleResponse(resp *http.Response) (ContainerClientReleaseLeaseResponse, error) { result := ContainerClientReleaseLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientReleaseLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -989,44 +1088,37 @@ func (client *ContainerClient) releaseLeaseHandleResponse(resp *http.Response) ( } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientReleaseLeaseResponse{}, err - } - result.Date = &date - } return result, nil } // Rename - Renames an existing container. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - sourceContainerName - Required. Specifies the name of the container to rename. // - options - ContainerClientRenameOptions contains the optional parameters for the ContainerClient.Rename method. func (client *ContainerClient) Rename(ctx context.Context, sourceContainerName string, options *ContainerClientRenameOptions) (ContainerClientRenameResponse, error) { + var err error req, err := client.renameCreateRequest(ctx, sourceContainerName, options) if err != nil { return ContainerClientRenameResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientRenameResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientRenameResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientRenameResponse{}, err } - return client.renameHandleResponse(resp) + resp, err := client.renameHandleResponse(httpResp) + return resp, err } // renameCreateRequest creates the Rename request. 
@@ -1042,7 +1134,7 @@ func (client *ContainerClient) renameCreateRequest(ctx context.Context, sourceCo reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1060,12 +1152,6 @@ func (client *ContainerClient) renameHandleResponse(resp *http.Response) (Contai if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -1073,6 +1159,12 @@ func (client *ContainerClient) renameHandleResponse(resp *http.Response) (Contai } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } @@ -1080,23 +1172,26 @@ func (client *ContainerClient) renameHandleResponse(resp *http.Response) (Contai // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - leaseID - Specifies the current lease ID on the resource. // - options - ContainerClientRenewLeaseOptions contains the optional parameters for the ContainerClient.RenewLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *ContainerClient) RenewLease(ctx context.Context, leaseID string, options *ContainerClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientRenewLeaseResponse, error) { + var err error req, err := client.renewLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions) if err != nil { return ContainerClientRenewLeaseResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientRenewLeaseResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientRenewLeaseResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientRenewLeaseResponse{}, err } - return client.renewLeaseHandleResponse(resp) + resp, err := client.renewLeaseHandleResponse(httpResp) + return resp, err } // renewLeaseCreateRequest creates the RenewLease request. 
@@ -1115,12 +1210,12 @@ func (client *ContainerClient) renewLeaseCreateRequest(ctx context.Context, leas req.Raw().Header["x-ms-lease-action"] = []string{"renew"} req.Raw().Header["x-ms-lease-id"] = []string{leaseID} if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1131,6 +1226,16 @@ func (client *ContainerClient) renewLeaseCreateRequest(ctx context.Context, leas // renewLeaseHandleResponse handles the RenewLease response. func (client *ContainerClient) renewLeaseHandleResponse(resp *http.Response) (ContainerClientRenewLeaseResponse, error) { result := ContainerClientRenewLeaseResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientRenewLeaseResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -1144,43 +1249,36 @@ func (client *ContainerClient) renewLeaseHandleResponse(resp *http.Response) (Co if val := resp.Header.Get("x-ms-lease-id"); val != "" { result.LeaseID = &val } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientRenewLeaseResponse{}, err - } - result.Date = &date - } return result, nil } // Restore - Restores a previously-deleted container. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - ContainerClientRestoreOptions contains the optional parameters for the ContainerClient.Restore method. 
func (client *ContainerClient) Restore(ctx context.Context, options *ContainerClientRestoreOptions) (ContainerClientRestoreResponse, error) { + var err error req, err := client.restoreCreateRequest(ctx, options) if err != nil { return ContainerClientRestoreResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientRestoreResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return ContainerClientRestoreResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return ContainerClientRestoreResponse{}, err } - return client.restoreHandleResponse(resp) + resp, err := client.restoreHandleResponse(httpResp) + return resp, err } // restoreCreateRequest creates the Restore request. @@ -1196,7 +1294,7 @@ func (client *ContainerClient) restoreCreateRequest(ctx context.Context, options reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1216,12 +1314,6 @@ func (client *ContainerClient) restoreHandleResponse(resp *http.Response) (Conta if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -1229,6 +1321,12 @@ func (client *ContainerClient) restoreHandleResponse(resp *http.Response) (Conta } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } @@ -1236,25 +1334,28 @@ func (client *ContainerClient) restoreHandleResponse(resp *http.Response) (Conta // may be accessed publicly. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - containerACL - the acls for the container // - options - ContainerClientSetAccessPolicyOptions contains the optional parameters for the ContainerClient.SetAccessPolicy // method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *ContainerClient) SetAccessPolicy(ctx context.Context, containerACL []*SignedIdentifier, options *ContainerClientSetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientSetAccessPolicyResponse, error) { + var err error req, err := client.setAccessPolicyCreateRequest(ctx, containerACL, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { return ContainerClientSetAccessPolicyResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientSetAccessPolicyResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientSetAccessPolicyResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientSetAccessPolicyResponse{}, err } - return client.setAccessPolicyHandleResponse(resp) + resp, err := client.setAccessPolicyHandleResponse(httpResp) + return resp, err } // setAccessPolicyCreateRequest creates the SetAccessPolicy request. @@ -1277,12 +1378,12 @@ func (client *ContainerClient) setAccessPolicyCreateRequest(ctx context.Context, req.Raw().Header["x-ms-blob-public-access"] = []string{string(*options.Access)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1291,12 +1392,25 @@ func (client *ContainerClient) setAccessPolicyCreateRequest(ctx context.Context, XMLName xml.Name `xml:"SignedIdentifiers"` ContainerACL *[]*SignedIdentifier `xml:"SignedIdentifier"` } - return req, runtime.MarshalAsXML(req, wrapper{ContainerACL: &containerACL}) + if err := runtime.MarshalAsXML(req, wrapper{ContainerACL: &containerACL}); err != nil { + return nil, err + } + return req, nil } // setAccessPolicyHandleResponse handles the SetAccessPolicy response. 
func (client *ContainerClient) setAccessPolicyHandleResponse(resp *http.Response) (ContainerClientSetAccessPolicyResponse, error) { result := ContainerClientSetAccessPolicyResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientSetAccessPolicyResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -1307,45 +1421,38 @@ func (client *ContainerClient) setAccessPolicyHandleResponse(resp *http.Response } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return ContainerClientSetAccessPolicyResponse{}, err - } - result.Date = &date - } return result, nil } // SetMetadata - operation sets one or more user-defined name-value pairs for the specified container. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - ContainerClientSetMetadataOptions contains the optional parameters for the ContainerClient.SetMetadata method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *ContainerClient) SetMetadata(ctx context.Context, options *ContainerClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientSetMetadataResponse, error) { + var err error req, err := client.setMetadataCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { return ContainerClientSetMetadataResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientSetMetadataResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ContainerClientSetMetadataResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ContainerClientSetMetadataResponse{}, err } - return client.setMetadataHandleResponse(resp) + resp, err := client.setMetadataHandleResponse(httpResp) + return resp, err } // setMetadataCreateRequest creates the SetMetadata request. 
@@ -1372,9 +1479,9 @@ func (client *ContainerClient) setMetadataCreateRequest(ctx context.Context, opt
 		}
 	}
 	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
-		req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
+		req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
 	}
-	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
+	req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
 	if options != nil && options.RequestID != nil {
 		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
 	}
@@ -1385,6 +1492,16 @@ func (client *ContainerClient) setMetadataCreateRequest(ctx context.Context, opt
 
 // setMetadataHandleResponse handles the SetMetadata response.
 func (client *ContainerClient) setMetadataHandleResponse(resp *http.Response) (ContainerClientSetMetadataResponse, error) {
 	result := ContainerClientSetMetadataResponse{}
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return ContainerClientSetMetadataResponse{}, err
+		}
+		result.Date = &date
+	}
 	if val := resp.Header.Get("ETag"); val != "" {
 		result.ETag = (*azcore.ETag)(&val)
 	}
@@ -1395,47 +1512,40 @@ func (client *ContainerClient) setMetadataHandleResponse(resp *http.Response) (C
 		}
 		result.LastModified = &lastModified
 	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
-	}
 	if val := resp.Header.Get("x-ms-request-id"); val != "" {
 		result.RequestID = &val
 	}
 	if val := resp.Header.Get("x-ms-version"); val != "" {
 		result.Version = &val
 	}
-	if val := resp.Header.Get("Date"); val != "" {
-		date, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return ContainerClientSetMetadataResponse{}, err
-		}
-		result.Date = &date
-	}
 	return result, nil
 }
 
 // SubmitBatch - The Batch operation allows multiple API calls to be embedded into a single HTTP request.
 // If the operation fails it returns an *azcore.ResponseError type.
 //
-// Generated from API version 2020-10-02
+// Generated from API version 2023-08-03
 //   - contentLength - The length of the request.
 //   - multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. Example header
 //     value: multipart/mixed; boundary=batch_
 //   - body - Initial data
 //   - options - ContainerClientSubmitBatchOptions contains the optional parameters for the ContainerClient.SubmitBatch method.
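//
// Callers typically do not assemble the multipart payload by hand; the public
// container package in this update wraps this generated method behind a batch
// builder. A usage sketch (names per the added container/batch_builder.go;
// the blob names are purely illustrative):
//
//	bb, err := containerClient.NewBatchBuilder()
//	if err != nil {
//		// handle error
//	}
//	_ = bb.Delete("logs-2023-01.txt", nil)
//	_ = bb.Delete("logs-2023-02.txt", nil)
//	resp, err := containerClient.SubmitBatch(context.TODO(), bb, nil)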
 func (client *ContainerClient) SubmitBatch(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ContainerClientSubmitBatchOptions) (ContainerClientSubmitBatchResponse, error) {
+	var err error
 	req, err := client.submitBatchCreateRequest(ctx, contentLength, multipartContentType, body, options)
 	if err != nil {
 		return ContainerClientSubmitBatchResponse{}, err
 	}
-	resp, err := client.pl.Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return ContainerClientSubmitBatchResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusAccepted) {
-		return ContainerClientSubmitBatchResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusAccepted) {
+		err = runtime.NewResponseError(httpResp)
+		return ContainerClientSubmitBatchResponse{}, err
 	}
-	return client.submitBatchHandleResponse(resp)
+	resp, err := client.submitBatchHandleResponse(httpResp)
+	return resp, err
 }
 
 // submitBatchCreateRequest creates the SubmitBatch request.
@@ -1454,12 +1564,15 @@ func (client *ContainerClient) submitBatchCreateRequest(ctx context.Context, con
 	runtime.SkipBodyDownload(req)
 	req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)}
 	req.Raw().Header["Content-Type"] = []string{multipartContentType}
-	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
+	req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
 	if options != nil && options.RequestID != nil {
 		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
 	}
 	req.Raw().Header["Accept"] = []string{"application/xml"}
-	return req, req.SetBody(body, "application/xml")
+	if err := req.SetBody(body, multipartContentType); err != nil {
+		return nil, err
+	}
+	return req, nil
 }
 
 // submitBatchHandleResponse handles the SubmitBatch response.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go
index 022807f5..7251de83 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go
@@ -3,9 +3,8 @@
 // Copyright (c) Microsoft Corporation. All rights reserved.
 // Licensed under the MIT License. See License.txt in the project root for license information.
 
-// Code generated by Microsoft (R) AutoRest Code Generator.
+// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT.
 // Changes may cause incorrect behavior and will be lost if the code is regenerated.
-// DO NOT EDIT.
 
 package generated
 
@@ -26,89 +25,6 @@ type AccessPolicy struct {
 	Start *time.Time `xml:"Start"`
 }
 
-// AppendBlobClientAppendBlockFromURLOptions contains the optional parameters for the AppendBlobClient.AppendBlockFromURL
-// method.
-type AppendBlobClientAppendBlockFromURLOptions struct {
-	// Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
-	CopySourceAuthorization *string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// Specify the md5 calculated for the range of bytes that must be read from the copy source.
-	SourceContentMD5 []byte
-	// Specify the crc64 calculated for the range of bytes that must be read from the copy source.
-	SourceContentcrc64 []byte
-	// Bytes of source data in the specified range.
-	SourceRange *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-	// Specify the transactional md5 for the body, to be validated by the service.
-	TransactionalContentMD5 []byte
-}
-
-// AppendBlobClientAppendBlockOptions contains the optional parameters for the AppendBlobClient.AppendBlock method.
-type AppendBlobClientAppendBlockOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-	// Specify the transactional crc64 for the body, to be validated by the service.
-	TransactionalContentCRC64 []byte
-	// Specify the transactional md5 for the body, to be validated by the service.
-	TransactionalContentMD5 []byte
-}
-
-// AppendBlobClientCreateOptions contains the optional parameters for the AppendBlobClient.Create method.
-type AppendBlobClientCreateOptions struct {
-	// Optional. Used to set blob tags in various blob operations.
-	BlobTagsString *string
-	// Specifies the date time when the blobs immutability policy is set to expire.
-	ImmutabilityPolicyExpiry *time.Time
-	// Specifies the immutability policy mode to set on the blob.
-	ImmutabilityPolicyMode *ImmutabilityPolicySetting
-	// Specified if a legal hold should be set on the blob.
-	LegalHold *bool
-	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
-	// operation will copy the metadata from the source blob or file to the destination
-	// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
-	// is not copied from the source blob or file. Note that beginning with
-	// version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
-	// Blobs, and Metadata for more information.
-	Metadata map[string]*string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// AppendBlobClientSealOptions contains the optional parameters for the AppendBlobClient.Seal method.
-type AppendBlobClientSealOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock method.
-type AppendPositionAccessConditions struct {
-	// Optional conditional header, used only for the Append Block operation. A number indicating the byte offset to compare.
-	// Append Block will succeed only if the append position is equal to this number. If
-	// it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed).
-	AppendPosition *int64
-	// Optional conditional header. The max length in bytes permitted for the append blob. If the Append Block operation would
-	// cause the blob to exceed that limit or if the blob size is already greater than
-	// the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code 412 -
-	// Precondition Failed).
-	MaxSize *int64
-}
-
 // ArrowConfiguration - Groups the settings used for formatting the response if the response should be Arrow formatted.
 type ArrowConfiguration struct {
 	// REQUIRED
@@ -124,403 +40,11 @@ type ArrowField struct {
 	Scale *int32 `xml:"Scale"`
 }
 
-// BlobClientAbortCopyFromURLOptions contains the optional parameters for the BlobClient.AbortCopyFromURL method.
-type BlobClientAbortCopyFromURLOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// BlobClientAcquireLeaseOptions contains the optional parameters for the BlobClient.AcquireLease method.
-type BlobClientAcquireLeaseOptions struct {
-	// Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is
-	// not in the correct format. See Guid Constructor (String) for a list of valid GUID
-	// string formats.
-	ProposedLeaseID *string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// BlobClientBreakLeaseOptions contains the optional parameters for the BlobClient.BreakLease method.
-type BlobClientBreakLeaseOptions struct {
-	// For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This
-	// break period is only used if it is shorter than the time remaining on the
-	// lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has
-	// expired, but the lease may be held for longer than the break period. If this
-	// header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses,
-	// and an infinite lease breaks immediately.
-	BreakPeriod *int32
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// BlobClientChangeLeaseOptions contains the optional parameters for the BlobClient.ChangeLease method.
-type BlobClientChangeLeaseOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// BlobClientCopyFromURLOptions contains the optional parameters for the BlobClient.CopyFromURL method.
-type BlobClientCopyFromURLOptions struct {
-	// Optional. Used to set blob tags in various blob operations.
-	BlobTagsString *string
-	// Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
-	CopySourceAuthorization *string
-	// Specifies the date time when the blobs immutability policy is set to expire.
-	ImmutabilityPolicyExpiry *time.Time
-	// Specifies the immutability policy mode to set on the blob.
-	ImmutabilityPolicyMode *ImmutabilityPolicySetting
-	// Specified if a legal hold should be set on the blob.
-	LegalHold *bool
-	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
-	// operation will copy the metadata from the source blob or file to the destination
-	// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
-	// is not copied from the source blob or file. Note that beginning with
-	// version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
-	// Blobs, and Metadata for more information.
-	Metadata map[string]*string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// Specify the md5 calculated for the range of bytes that must be read from the copy source.
-	SourceContentMD5 []byte
-	// Optional. Indicates the tier to be set on the blob.
-	Tier *AccessTier
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// BlobClientCreateSnapshotOptions contains the optional parameters for the BlobClient.CreateSnapshot method.
-type BlobClientCreateSnapshotOptions struct {
-	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
-	// operation will copy the metadata from the source blob or file to the destination
-	// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
-	// is not copied from the source blob or file. Note that beginning with
-	// version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
-	// Blobs, and Metadata for more information.
-	Metadata map[string]*string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// BlobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the BlobClient.DeleteImmutabilityPolicy
-// method.
-type BlobClientDeleteImmutabilityPolicyOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// BlobClientDeleteOptions contains the optional parameters for the BlobClient.Delete method.
-type BlobClientDeleteOptions struct {
-	// Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob
-	// and all of its snapshots. only: Delete only the blob's snapshots and not the blob
-	// itself
-	DeleteSnapshots *DeleteSnapshotsOptionType
-	// Optional. Only possible value is 'permanent', which specifies to permanently delete a blob if blob soft delete is enabled.
-	DeleteType *DeleteType
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
-	// information on working with blob snapshots, see Creating a Snapshot of a Blob.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
-	Snapshot *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-	// The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on.
-	// It's for service version 2019-10-10 and newer.
-	VersionID *string
-}
-
-// BlobClientDownloadOptions contains the optional parameters for the BlobClient.Download method.
-type BlobClientDownloadOptions struct {
-	// Return only the bytes of the blob in the specified range.
-	Range *string
-	// When set to true and specified together with the Range, the service returns the CRC64 hash for the range, as long as the
-	// range is less than or equal to 4 MB in size.
-	RangeGetContentCRC64 *bool
-	// When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the
-	// range is less than or equal to 4 MB in size.
-	RangeGetContentMD5 *bool
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
-	// information on working with blob snapshots, see Creating a Snapshot of a Blob.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
-	Snapshot *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-	// The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on.
-	// It's for service version 2019-10-10 and newer.
-	VersionID *string
-}
-
-// BlobClientGetAccountInfoOptions contains the optional parameters for the BlobClient.GetAccountInfo method.
-type BlobClientGetAccountInfoOptions struct {
-	// placeholder for future optional parameters
-}
-
-// BlobClientGetPropertiesOptions contains the optional parameters for the BlobClient.GetProperties method.
-type BlobClientGetPropertiesOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
-	// information on working with blob snapshots, see Creating a Snapshot of a Blob.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
-	Snapshot *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-	// The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on.
-	// It's for service version 2019-10-10 and newer.
-	VersionID *string
-}
-
-// BlobClientGetTagsOptions contains the optional parameters for the BlobClient.GetTags method.
-type BlobClientGetTagsOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
-	// information on working with blob snapshots, see Creating a Snapshot of a Blob.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
-	Snapshot *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-	// The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on.
-	// It's for service version 2019-10-10 and newer.
-	VersionID *string
-}
-
-// BlobClientQueryOptions contains the optional parameters for the BlobClient.Query method.
-type BlobClientQueryOptions struct {
-	// the query request
-	QueryRequest *QueryRequest
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
-	// information on working with blob snapshots, see Creating a Snapshot of a Blob.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
-	Snapshot *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// BlobClientReleaseLeaseOptions contains the optional parameters for the BlobClient.ReleaseLease method.
-type BlobClientReleaseLeaseOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// BlobClientRenewLeaseOptions contains the optional parameters for the BlobClient.RenewLease method.
-type BlobClientRenewLeaseOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// BlobClientSetExpiryOptions contains the optional parameters for the BlobClient.SetExpiry method.
-type BlobClientSetExpiryOptions struct {
-	// The time to set the blob to expiry
-	ExpiresOn *string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// BlobClientSetHTTPHeadersOptions contains the optional parameters for the BlobClient.SetHTTPHeaders method.
-type BlobClientSetHTTPHeadersOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// BlobClientSetImmutabilityPolicyOptions contains the optional parameters for the BlobClient.SetImmutabilityPolicy method.
-type BlobClientSetImmutabilityPolicyOptions struct {
-	// Specifies the date time when the blobs immutability policy is set to expire.
-	ImmutabilityPolicyExpiry *time.Time
-	// Specifies the immutability policy mode to set on the blob.
-	ImmutabilityPolicyMode *ImmutabilityPolicySetting
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// BlobClientSetLegalHoldOptions contains the optional parameters for the BlobClient.SetLegalHold method.
-type BlobClientSetLegalHoldOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// BlobClientSetMetadataOptions contains the optional parameters for the BlobClient.SetMetadata method.
-type BlobClientSetMetadataOptions struct {
-	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
-	// operation will copy the metadata from the source blob or file to the destination
-	// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
-	// is not copied from the source blob or file. Note that beginning with
-	// version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
-	// Blobs, and Metadata for more information.
-	Metadata map[string]*string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// BlobClientSetTagsOptions contains the optional parameters for the BlobClient.SetTags method.
-type BlobClientSetTagsOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-	// Specify the transactional crc64 for the body, to be validated by the service.
-	TransactionalContentCRC64 []byte
-	// Specify the transactional md5 for the body, to be validated by the service.
-	TransactionalContentMD5 []byte
-	// The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on.
-	// It's for service version 2019-10-10 and newer.
-	VersionID *string
-}
-
-// BlobClientSetTierOptions contains the optional parameters for the BlobClient.SetTier method.
-type BlobClientSetTierOptions struct {
-	// Optional: Indicates the priority with which to rehydrate an archived blob.
-	RehydratePriority *RehydratePriority
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
-	// information on working with blob snapshots, see Creating a Snapshot of a Blob.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
-	Snapshot *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-	// The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on.
-	// It's for service version 2019-10-10 and newer.
-	VersionID *string
-}
-
-// BlobClientStartCopyFromURLOptions contains the optional parameters for the BlobClient.StartCopyFromURL method.
-type BlobClientStartCopyFromURLOptions struct {
-	// Optional. Used to set blob tags in various blob operations.
-	BlobTagsString *string
-	// Specifies the date time when the blobs immutability policy is set to expire.
-	ImmutabilityPolicyExpiry *time.Time
-	// Specifies the immutability policy mode to set on the blob.
-	ImmutabilityPolicyMode *ImmutabilityPolicySetting
-	// Specified if a legal hold should be set on the blob.
-	LegalHold *bool
-	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
-	// operation will copy the metadata from the source blob or file to the destination
-	// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
-	// is not copied from the source blob or file. Note that beginning with
-	// version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
-	// Blobs, and Metadata for more information.
-	Metadata map[string]*string
-	// Optional: Indicates the priority with which to rehydrate an archived blob.
-	RehydratePriority *RehydratePriority
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// Overrides the sealed state of the destination blob. Service version 2019-12-12 and newer.
-	SealBlob *bool
-	// Optional. Indicates the tier to be set on the blob.
-	Tier *AccessTier
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// BlobClientUndeleteOptions contains the optional parameters for the BlobClient.Undelete method.
-type BlobClientUndeleteOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
 type BlobFlatListSegment struct {
 	// REQUIRED
 	BlobItems []*BlobItem `xml:"Blob"`
 }
 
-// BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method.
-type BlobHTTPHeaders struct {
-	// Optional. Sets the blob's cache control. If specified, this property is stored with the blob and returned with a read request.
-	BlobCacheControl *string
-	// Optional. Sets the blob's Content-Disposition header.
-	BlobContentDisposition *string
-	// Optional. Sets the blob's content encoding. If specified, this property is stored with the blob and returned with a read
-	// request.
-	BlobContentEncoding *string
-	// Optional. Set the blob's content language. If specified, this property is stored with the blob and returned with a read
-	// request.
-	BlobContentLanguage *string
-	// Optional. An MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual blocks
-	// were validated when each was uploaded.
-	BlobContentMD5 []byte
-	// Optional. Sets the blob's content type. If specified, this property is stored with the blob and returned with a read request.
-	BlobContentType *string
-}
-
 type BlobHierarchyListSegment struct {
 	// REQUIRED
 	BlobItems []*BlobItem `xml:"Blob"`
@@ -554,6 +78,14 @@ type BlobItem struct {
 	VersionID *string `xml:"VersionId"`
 }
 
+type BlobName struct {
+	// The name of the blob.
+	Content *string `xml:",chardata"`
+
+	// Indicates if the blob name is encoded.
+	Encoded *bool `xml:"Encoded,attr"`
+}
+
 type BlobPrefix struct {
 	// REQUIRED
 	Name *string `xml:"Name"`
@@ -636,141 +168,6 @@ type Block struct {
 	Size *int64 `xml:"Size"`
 }
 
-// BlockBlobClientCommitBlockListOptions contains the optional parameters for the BlockBlobClient.CommitBlockList method.
-type BlockBlobClientCommitBlockListOptions struct {
-	// Optional. Used to set blob tags in various blob operations.
-	BlobTagsString *string
-	// Specifies the date time when the blobs immutability policy is set to expire.
-	ImmutabilityPolicyExpiry *time.Time
-	// Specifies the immutability policy mode to set on the blob.
-	ImmutabilityPolicyMode *ImmutabilityPolicySetting
-	// Specified if a legal hold should be set on the blob.
-	LegalHold *bool
-	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
-	// operation will copy the metadata from the source blob or file to the destination
-	// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
-	// is not copied from the source blob or file. Note that beginning with
-	// version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
-	// Blobs, and Metadata for more information.
-	Metadata map[string]*string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// Optional. Indicates the tier to be set on the blob.
-	Tier *AccessTier
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-	// Specify the transactional crc64 for the body, to be validated by the service.
-	TransactionalContentCRC64 []byte
-	// Specify the transactional md5 for the body, to be validated by the service.
-	TransactionalContentMD5 []byte
-}
-
-// BlockBlobClientGetBlockListOptions contains the optional parameters for the BlockBlobClient.GetBlockList method.
-type BlockBlobClientGetBlockListOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
-	// information on working with blob snapshots, see Creating a Snapshot of a Blob.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
-	Snapshot *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// BlockBlobClientPutBlobFromURLOptions contains the optional parameters for the BlockBlobClient.PutBlobFromURL method.
-type BlockBlobClientPutBlobFromURLOptions struct {
-	// Optional. Used to set blob tags in various blob operations.
-	BlobTagsString *string
-	// Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
-	CopySourceAuthorization *string
-	// Optional, default is true. Indicates if properties from the source blob should be copied.
-	CopySourceBlobProperties *bool
-	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
-	// operation will copy the metadata from the source blob or file to the destination
-	// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
-	// is not copied from the source blob or file. Note that beginning with
-	// version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
-	// Blobs, and Metadata for more information.
-	Metadata map[string]*string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// Specify the md5 calculated for the range of bytes that must be read from the copy source.
-	SourceContentMD5 []byte
-	// Optional. Indicates the tier to be set on the blob.
-	Tier *AccessTier
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-	// Specify the transactional md5 for the body, to be validated by the service.
-	TransactionalContentMD5 []byte
-}
-
-// BlockBlobClientStageBlockFromURLOptions contains the optional parameters for the BlockBlobClient.StageBlockFromURL method.
-type BlockBlobClientStageBlockFromURLOptions struct {
-	// Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
-	CopySourceAuthorization *string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// Specify the md5 calculated for the range of bytes that must be read from the copy source.
-	SourceContentMD5 []byte
-	// Specify the crc64 calculated for the range of bytes that must be read from the copy source.
-	SourceContentcrc64 []byte
-	// Bytes of source data in the specified range.
-	SourceRange *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// BlockBlobClientStageBlockOptions contains the optional parameters for the BlockBlobClient.StageBlock method.
-type BlockBlobClientStageBlockOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-	// Specify the transactional crc64 for the body, to be validated by the service.
-	TransactionalContentCRC64 []byte
-	// Specify the transactional md5 for the body, to be validated by the service.
-	TransactionalContentMD5 []byte
-}
-
-// BlockBlobClientUploadOptions contains the optional parameters for the BlockBlobClient.Upload method.
-type BlockBlobClientUploadOptions struct {
-	// Optional. Used to set blob tags in various blob operations.
-	BlobTagsString *string
-	// Specifies the date time when the blobs immutability policy is set to expire.
-	ImmutabilityPolicyExpiry *time.Time
-	// Specifies the immutability policy mode to set on the blob.
-	ImmutabilityPolicyMode *ImmutabilityPolicySetting
-	// Specified if a legal hold should be set on the blob.
-	LegalHold *bool
-	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
-	// operation will copy the metadata from the source blob or file to the destination
-	// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
-	// is not copied from the source blob or file. Note that beginning with
-	// version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
-	// Blobs, and Metadata for more information.
-	Metadata map[string]*string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// Optional. Indicates the tier to be set on the blob.
-	Tier *AccessTier
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-	// Specify the transactional md5 for the body, to be validated by the service.
-	TransactionalContentMD5 []byte
-}
-
 type BlockList struct {
 	CommittedBlocks   []*Block `xml:"CommittedBlocks>Block"`
 	UncommittedBlocks []*Block `xml:"UncommittedBlocks>Block"`
@@ -790,250 +187,6 @@ type ClearRange struct {
 	Start *int64 `xml:"Start"`
 }
 
-// ContainerClientAcquireLeaseOptions contains the optional parameters for the ContainerClient.AcquireLease method.
-type ContainerClientAcquireLeaseOptions struct {
-	// Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is
-	// not in the correct format. See Guid Constructor (String) for a list of valid GUID
-	// string formats.
-	ProposedLeaseID *string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ContainerClientBreakLeaseOptions contains the optional parameters for the ContainerClient.BreakLease method.
-type ContainerClientBreakLeaseOptions struct {
-	// For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This
-	// break period is only used if it is shorter than the time remaining on the
-	// lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has
-	// expired, but the lease may be held for longer than the break period. If this
-	// header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses,
-	// and an infinite lease breaks immediately.
-	BreakPeriod *int32
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ContainerClientChangeLeaseOptions contains the optional parameters for the ContainerClient.ChangeLease method.
-type ContainerClientChangeLeaseOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ContainerClientCreateOptions contains the optional parameters for the ContainerClient.Create method.
-type ContainerClientCreateOptions struct {
-	// Specifies whether data in the container may be accessed publicly and the level of access
-	Access *PublicAccessType
-	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
-	// operation will copy the metadata from the source blob or file to the destination
-	// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
-	// is not copied from the source blob or file. Note that beginning with
-	// version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
-	// Blobs, and Metadata for more information.
-	Metadata map[string]*string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ContainerClientDeleteOptions contains the optional parameters for the ContainerClient.Delete method.
-type ContainerClientDeleteOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ContainerClientGetAccessPolicyOptions contains the optional parameters for the ContainerClient.GetAccessPolicy method.
-type ContainerClientGetAccessPolicyOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ContainerClientGetAccountInfoOptions contains the optional parameters for the ContainerClient.GetAccountInfo method.
-type ContainerClientGetAccountInfoOptions struct {
-	// placeholder for future optional parameters
-}
-
-// ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method.
-type ContainerClientGetPropertiesOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ContainerClientListBlobFlatSegmentOptions contains the optional parameters for the ContainerClient.NewListBlobFlatSegmentPager
-// method.
-type ContainerClientListBlobFlatSegmentOptions struct {
-	// Include this parameter to specify one or more datasets to include in the response.
-	Include []ListBlobsIncludeItem
-	// A string value that identifies the portion of the list of containers to be returned with the next listing operation. The
-	// operation returns the NextMarker value within the response body if the listing
-	// operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used
-	// as the value for the marker parameter in a subsequent call to request the next
-	// page of list items. The marker value is opaque to the client.
-	Marker *string
-	// Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value
-	// greater than 5000, the server will return up to 5000 items. Note that if the
-	// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
-	// of the results. For this reason, it is possible that the service will
-	// return fewer results than specified by maxresults, or than the default of 5000.
-	Maxresults *int32
-	// Filters the results to return only containers whose name begins with the specified prefix.
-	Prefix *string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ContainerClientListBlobHierarchySegmentOptions contains the optional parameters for the ContainerClient.NewListBlobHierarchySegmentPager
-// method.
-type ContainerClientListBlobHierarchySegmentOptions struct {
-	// Include this parameter to specify one or more datasets to include in the response.
-	Include []ListBlobsIncludeItem
-	// A string value that identifies the portion of the list of containers to be returned with the next listing operation. The
-	// operation returns the NextMarker value within the response body if the listing
-	// operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used
-	// as the value for the marker parameter in a subsequent call to request the next
-	// page of list items. The marker value is opaque to the client.
-	Marker *string
-	// Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value
-	// greater than 5000, the server will return up to 5000 items. Note that if the
-	// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
-	// of the results. For this reason, it is possible that the service will
-	// return fewer results than specified by maxresults, or than the default of 5000.
-	Maxresults *int32
-	// Filters the results to return only containers whose name begins with the specified prefix.
-	Prefix *string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ContainerClientReleaseLeaseOptions contains the optional parameters for the ContainerClient.ReleaseLease method.
-type ContainerClientReleaseLeaseOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ContainerClientRenameOptions contains the optional parameters for the ContainerClient.Rename method.
-type ContainerClientRenameOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// A lease ID for the source path. If specified, the source path must have an active lease and the lease ID must match.
-	SourceLeaseID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ContainerClientRenewLeaseOptions contains the optional parameters for the ContainerClient.RenewLease method.
-type ContainerClientRenewLeaseOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ContainerClientRestoreOptions contains the optional parameters for the ContainerClient.Restore method.
-type ContainerClientRestoreOptions struct {
-	// Optional. Version 2019-12-12 and later. Specifies the name of the deleted container to restore.
-	DeletedContainerName *string
-	// Optional. Version 2019-12-12 and later. Specifies the version of the deleted container to restore.
-	DeletedContainerVersion *string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ContainerClientSetAccessPolicyOptions contains the optional parameters for the ContainerClient.SetAccessPolicy method.
-type ContainerClientSetAccessPolicyOptions struct {
-	// Specifies whether data in the container may be accessed publicly and the level of access
-	Access *PublicAccessType
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ContainerClientSetMetadataOptions contains the optional parameters for the ContainerClient.SetMetadata method.
-type ContainerClientSetMetadataOptions struct {
-	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
-	// operation will copy the metadata from the source blob or file to the destination
-	// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
-	// is not copied from the source blob or file. Note that beginning with
-	// version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
-	// Blobs, and Metadata for more information.
-	Metadata map[string]*string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ContainerClientSubmitBatchOptions contains the optional parameters for the ContainerClient.SubmitBatch method.
-type ContainerClientSubmitBatchOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ContainerCPKScopeInfo contains a group of parameters for the ContainerClient.Create method.
-type ContainerCPKScopeInfo struct {
-	// Optional. Version 2019-07-07 and later. Specifies the default encryption scope to set on the container and use for all
-	// future writes.
-	DefaultEncryptionScope *string
-	// Optional. Version 2019-07-07 and newer. If true, prevents any request from specifying a different encryption scope than
-	// the scope set on the container.
-	PreventEncryptionScopeOverride *bool
-}
-
 // ContainerItem - An Azure Storage container
 type ContainerItem struct {
 	// REQUIRED
@@ -1095,27 +248,6 @@ type CORSRule struct {
 	MaxAgeInSeconds *int32 `xml:"MaxAgeInSeconds"`
 }
 
-// CPKInfo contains a group of parameters for the BlobClient.Download method.
-type CPKInfo struct {
-	// The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided
-	// if the x-ms-encryption-key header is provided.
-	EncryptionAlgorithm *EncryptionAlgorithmType
-	// Optional. Specifies the encryption key to use to encrypt the data provided in the request. If not specified, encryption
-	// is performed with the root account encryption key. For more information, see
-	// Encryption at Rest for Azure Storage Services.
-	EncryptionKey *string
-	// The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided.
-	EncryptionKeySHA256 *string
-}
-
-// CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
-type CPKScopeInfo struct {
-	// Optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided
-	// in the request. If not specified, encryption is performed with the default
-	// account encryption scope. For more information, see Encryption at Rest for Azure Storage Services.
-	EncryptionScope *string
-}
-
 // DelimitedTextConfiguration - Groups the settings used for interpreting the blob data if the blob is delimited text formatted.
 type DelimitedTextConfiguration struct {
 	// The string used to separate columns.
@@ -1140,10 +272,12 @@ type FilterBlobItem struct {
 	ContainerName *string `xml:"ContainerName"`
 
 	// REQUIRED
-	Name *string `xml:"Name"`
+	Name             *string `xml:"Name"`
+	IsCurrentVersion *bool   `xml:"IsCurrentVersion"`
 
 	// Blob tags
-	Tags *BlobTags `xml:"Tags"`
+	Tags      *BlobTags `xml:"Tags"`
+	VersionID *string   `xml:"VersionId"`
 }
 
 // FilterBlobSegment - The result of a Filter Blobs API call
@@ -1185,12 +319,6 @@ type KeyInfo struct {
 	Start *string `xml:"Start"`
 }
 
-// LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
-type LeaseAccessConditions struct {
-	// If specified, the operation only succeeds if the resource's lease is active and matches this ID.
-	LeaseID *string
-}
-
 // ListBlobsFlatSegmentResponse - An enumeration of blobs
 type ListBlobsFlatSegmentResponse struct {
 	// REQUIRED
@@ -1270,195 +398,6 @@ type Metrics struct {
 	Version *string `xml:"Version"`
 }
 
-// ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
-type ModifiedAccessConditions struct {
-	// Specify an ETag value to operate only on blobs with a matching value.
-	IfMatch *azcore.ETag
-	// Specify this header value to operate only on a blob if it has been modified since the specified date/time.
-	IfModifiedSince *time.Time
-	// Specify an ETag value to operate only on blobs without a matching value.
-	IfNoneMatch *azcore.ETag
-	// Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
-	IfTags *string
-	// Specify this header value to operate only on a blob if it has not been modified since the specified date/time.
-	IfUnmodifiedSince *time.Time
-}
-
-// PageBlobClientClearPagesOptions contains the optional parameters for the PageBlobClient.ClearPages method.
-type PageBlobClientClearPagesOptions struct {
-	// Return only the bytes of the blob in the specified range.
-	Range *string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// PageBlobClientCopyIncrementalOptions contains the optional parameters for the PageBlobClient.CopyIncremental method.
-type PageBlobClientCopyIncrementalOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
- // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// PageBlobClientCreateOptions contains the optional parameters for the PageBlobClient.Create method. -type PageBlobClientCreateOptions struct { - // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of - // the sequence number must be between 0 and 2^63 - 1. - BlobSequenceNumber *int64 - // Optional. Used to set blob tags in various blob operations. - BlobTagsString *string - // Specifies the date time when the blobs immutability policy is set to expire. - ImmutabilityPolicyExpiry *time.Time - // Specifies the immutability policy mode to set on the blob. - ImmutabilityPolicyMode *ImmutabilityPolicySetting - // Specified if a legal hold should be set on the blob. - LegalHold *bool - // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the - // operation will copy the metadata from the source blob or file to the destination - // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata - // is not copied from the source blob or file. Note that beginning with - // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, - // Blobs, and Metadata for more information. - Metadata map[string]*string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Optional. Indicates the tier to be set on the page blob. - Tier *PremiumPageBlobAccessTier - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// PageBlobClientGetPageRangesDiffOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesDiffPager -// method. -type PageBlobClientGetPageRangesDiffOptions struct { - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. - Maxresults *int32 - // Optional. This header is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot - // of the target blob. The response will only contain pages that were changed - // between the target blob and its previous snapshot. 
- PrevSnapshotURL *string - // Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a DateTime value that specifies that the response - // will contain only pages that were changed between target blob and previous - // snapshot. Changed pages include both updated and cleared pages. The target blob may be a snapshot, as long as the snapshot - // specified by prevsnapshot is the older of the two. Note that incremental - // snapshots are currently supported only for blobs created on or after January 1, 2016. - Prevsnapshot *string - // Return only the bytes of the blob in the specified range. - Range *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// PageBlobClientGetPageRangesOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesPager method. -type PageBlobClientGetPageRangesOptions struct { - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. - Maxresults *int32 - // Return only the bytes of the blob in the specified range. - Range *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more - // information on working with blob snapshots, see Creating a Snapshot of a Blob. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] - Snapshot *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
- // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// PageBlobClientResizeOptions contains the optional parameters for the PageBlobClient.Resize method. -type PageBlobClientResizeOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// PageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the PageBlobClient.UpdateSequenceNumber -// method. -type PageBlobClientUpdateSequenceNumberOptions struct { - // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of - // the sequence number must be between 0 and 2^63 - 1. - BlobSequenceNumber *int64 - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// PageBlobClientUploadPagesFromURLOptions contains the optional parameters for the PageBlobClient.UploadPagesFromURL method. -type PageBlobClientUploadPagesFromURLOptions struct { - // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. - CopySourceAuthorization *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // Specify the md5 calculated for the range of bytes that must be read from the copy source. - SourceContentMD5 []byte - // Specify the crc64 calculated for the range of bytes that must be read from the copy source. - SourceContentcrc64 []byte - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// PageBlobClientUploadPagesOptions contains the optional parameters for the PageBlobClient.UploadPages method. -type PageBlobClientUploadPagesOptions struct { - // Return only the bytes of the blob in the specified range. - Range *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 - // Specify the transactional crc64 for the body, to be validated by the service. - TransactionalContentCRC64 []byte - // Specify the transactional md5 for the body, to be validated by the service. 
- TransactionalContentMD5 []byte -} - // PageList - the list of pages type PageList struct { ClearRange []*ClearRange `xml:"ClearRange"` @@ -1521,120 +460,6 @@ type RetentionPolicy struct { Days *int32 `xml:"Days"` } -// SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages method. -type SequenceNumberAccessConditions struct { - // Specify this header value to operate only on a blob if it has the specified sequence number. - IfSequenceNumberEqualTo *int64 - // Specify this header value to operate only on a blob if it has a sequence number less than the specified. - IfSequenceNumberLessThan *int64 - // Specify this header value to operate only on a blob if it has a sequence number less than or equal to the specified. - IfSequenceNumberLessThanOrEqualTo *int64 -} - -// ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method. -type ServiceClientFilterBlobsOptions struct { - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. - Maxresults *int32 - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method. -type ServiceClientGetAccountInfoOptions struct { - // placeholder for future optional parameters -} - -// ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method. -type ServiceClientGetPropertiesOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method. 
-type ServiceClientGetStatisticsOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ServiceClientGetUserDelegationKeyOptions contains the optional parameters for the ServiceClient.GetUserDelegationKey method. -type ServiceClientGetUserDelegationKeyOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ServiceClientListContainersSegmentOptions contains the optional parameters for the ServiceClient.NewListContainersSegmentPager -// method. -type ServiceClientListContainersSegmentOptions struct { - // Include this parameter to specify that the container's metadata be returned as part of the response body. - Include []ListContainersIncludeType - // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The - // operation returns the NextMarker value within the response body if the listing - // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used - // as the value for the marker parameter in a subsequent call to request the next - // page of list items. The marker value is opaque to the client. - Marker *string - // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value - // greater than 5000, the server will return up to 5000 items. Note that if the - // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder - // of the results. For this reason, it is possible that the service will - // return fewer results than specified by maxresults, or than the default of 5000. - Maxresults *int32 - // Filters the results to return only containers whose name begins with the specified prefix. - Prefix *string - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method. -type ServiceClientSetPropertiesOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
- // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - -// ServiceClientSubmitBatchOptions contains the optional parameters for the ServiceClient.SubmitBatch method. -type ServiceClientSubmitBatchOptions struct { - // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage - // analytics logging is enabled. - RequestID *string - // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. - // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] - Timeout *int32 -} - // SignedIdentifier - signed identifier type SignedIdentifier struct { // REQUIRED; An Access policy @@ -1644,20 +469,6 @@ type SignedIdentifier struct { ID *string `xml:"Id"` } -// SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL method. -type SourceModifiedAccessConditions struct { - // Specify an ETag value to operate only on blobs with a matching value. - SourceIfMatch *azcore.ETag - // Specify this header value to operate only on a blob if it has been modified since the specified date/time. - SourceIfModifiedSince *time.Time - // Specify an ETag value to operate only on blobs without a matching value. - SourceIfNoneMatch *azcore.ETag - // Specify a SQL where clause on blob tags to operate only on blobs with a matching value. - SourceIfTags *string - // Specify this header value to operate only on a blob if it has not been modified since the specified date/time. - SourceIfUnmodifiedSince *time.Time -} - // StaticWebsite - The properties that enable an account to host a static website type StaticWebsite struct { // REQUIRED; Indicates whether this account is hosting a static website @@ -1674,7 +485,7 @@ type StaticWebsite struct { } type StorageError struct { - Message *string `json:"Message,omitempty"` + Message *string } // StorageServiceProperties - Storage Service Properties. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go index e5b6cda2..7e094db8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. 
package generated @@ -24,12 +23,12 @@ func (a AccessPolicy) MarshalXML(enc *xml.Encoder, start xml.StartElement) error type alias AccessPolicy aux := &struct { *alias - Expiry *timeRFC3339 `xml:"Expiry"` - Start *timeRFC3339 `xml:"Start"` + Expiry *dateTimeRFC3339 `xml:"Expiry"` + Start *dateTimeRFC3339 `xml:"Start"` }{ alias: (*alias)(&a), - Expiry: (*timeRFC3339)(a.Expiry), - Start: (*timeRFC3339)(a.Start), + Expiry: (*dateTimeRFC3339)(a.Expiry), + Start: (*dateTimeRFC3339)(a.Start), } return enc.EncodeElement(aux, start) } @@ -39,8 +38,8 @@ func (a *AccessPolicy) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) er type alias AccessPolicy aux := &struct { *alias - Expiry *timeRFC3339 `xml:"Expiry"` - Start *timeRFC3339 `xml:"Start"` + Expiry *dateTimeRFC3339 `xml:"Expiry"` + Start *dateTimeRFC3339 `xml:"Start"` }{ alias: (*alias)(a), } @@ -101,48 +100,30 @@ func (b BlobHierarchyListSegment) MarshalXML(enc *xml.Encoder, start xml.StartEl return enc.EncodeElement(aux, start) } -// UnmarshalXML implements the xml.Unmarshaller interface for type BlobItem. -func (b *BlobItem) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { - type alias BlobItem - aux := &struct { - *alias - Metadata additionalProperties `xml:"Metadata"` - OrMetadata additionalProperties `xml:"OrMetadata"` - }{ - alias: (*alias)(b), - } - if err := dec.DecodeElement(aux, &start); err != nil { - return err - } - b.Metadata = (map[string]*string)(aux.Metadata) - b.OrMetadata = (map[string]*string)(aux.OrMetadata) - return nil -} - // MarshalXML implements the xml.Marshaller interface for type BlobProperties. func (b BlobProperties) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { type alias BlobProperties aux := &struct { *alias - AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"` - ContentMD5 *string `xml:"Content-MD5"` - CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"` - CreationTime *timeRFC1123 `xml:"Creation-Time"` - DeletedTime *timeRFC1123 `xml:"DeletedTime"` - ExpiresOn *timeRFC1123 `xml:"Expiry-Time"` - ImmutabilityPolicyExpiresOn *timeRFC1123 `xml:"ImmutabilityPolicyUntilDate"` - LastAccessedOn *timeRFC1123 `xml:"LastAccessTime"` - LastModified *timeRFC1123 `xml:"Last-Modified"` + AccessTierChangeTime *dateTimeRFC1123 `xml:"AccessTierChangeTime"` + ContentMD5 *string `xml:"Content-MD5"` + CopyCompletionTime *dateTimeRFC1123 `xml:"CopyCompletionTime"` + CreationTime *dateTimeRFC1123 `xml:"Creation-Time"` + DeletedTime *dateTimeRFC1123 `xml:"DeletedTime"` + ExpiresOn *dateTimeRFC1123 `xml:"Expiry-Time"` + ImmutabilityPolicyExpiresOn *dateTimeRFC1123 `xml:"ImmutabilityPolicyUntilDate"` + LastAccessedOn *dateTimeRFC1123 `xml:"LastAccessTime"` + LastModified *dateTimeRFC1123 `xml:"Last-Modified"` }{ alias: (*alias)(&b), - AccessTierChangeTime: (*timeRFC1123)(b.AccessTierChangeTime), - CopyCompletionTime: (*timeRFC1123)(b.CopyCompletionTime), - CreationTime: (*timeRFC1123)(b.CreationTime), - DeletedTime: (*timeRFC1123)(b.DeletedTime), - ExpiresOn: (*timeRFC1123)(b.ExpiresOn), - ImmutabilityPolicyExpiresOn: (*timeRFC1123)(b.ImmutabilityPolicyExpiresOn), - LastAccessedOn: (*timeRFC1123)(b.LastAccessedOn), - LastModified: (*timeRFC1123)(b.LastModified), + AccessTierChangeTime: (*dateTimeRFC1123)(b.AccessTierChangeTime), + CopyCompletionTime: (*dateTimeRFC1123)(b.CopyCompletionTime), + CreationTime: (*dateTimeRFC1123)(b.CreationTime), + DeletedTime: (*dateTimeRFC1123)(b.DeletedTime), + ExpiresOn: (*dateTimeRFC1123)(b.ExpiresOn), + ImmutabilityPolicyExpiresOn: 
(*dateTimeRFC1123)(b.ImmutabilityPolicyExpiresOn), + LastAccessedOn: (*dateTimeRFC1123)(b.LastAccessedOn), + LastModified: (*dateTimeRFC1123)(b.LastModified), } if b.ContentMD5 != nil { encodedContentMD5 := runtime.EncodeByteArray(b.ContentMD5, runtime.Base64StdFormat) @@ -156,15 +137,15 @@ func (b *BlobProperties) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) type alias BlobProperties aux := &struct { *alias - AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"` - ContentMD5 *string `xml:"Content-MD5"` - CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"` - CreationTime *timeRFC1123 `xml:"Creation-Time"` - DeletedTime *timeRFC1123 `xml:"DeletedTime"` - ExpiresOn *timeRFC1123 `xml:"Expiry-Time"` - ImmutabilityPolicyExpiresOn *timeRFC1123 `xml:"ImmutabilityPolicyUntilDate"` - LastAccessedOn *timeRFC1123 `xml:"LastAccessTime"` - LastModified *timeRFC1123 `xml:"Last-Modified"` + AccessTierChangeTime *dateTimeRFC1123 `xml:"AccessTierChangeTime"` + ContentMD5 *string `xml:"Content-MD5"` + CopyCompletionTime *dateTimeRFC1123 `xml:"CopyCompletionTime"` + CreationTime *dateTimeRFC1123 `xml:"Creation-Time"` + DeletedTime *dateTimeRFC1123 `xml:"DeletedTime"` + ExpiresOn *dateTimeRFC1123 `xml:"Expiry-Time"` + ImmutabilityPolicyExpiresOn *dateTimeRFC1123 `xml:"ImmutabilityPolicyUntilDate"` + LastAccessedOn *dateTimeRFC1123 `xml:"LastAccessTime"` + LastModified *dateTimeRFC1123 `xml:"Last-Modified"` }{ alias: (*alias)(b), } @@ -267,12 +248,12 @@ func (c ContainerProperties) MarshalXML(enc *xml.Encoder, start xml.StartElement type alias ContainerProperties aux := &struct { *alias - DeletedTime *timeRFC1123 `xml:"DeletedTime"` - LastModified *timeRFC1123 `xml:"Last-Modified"` + DeletedTime *dateTimeRFC1123 `xml:"DeletedTime"` + LastModified *dateTimeRFC1123 `xml:"Last-Modified"` }{ alias: (*alias)(&c), - DeletedTime: (*timeRFC1123)(c.DeletedTime), - LastModified: (*timeRFC1123)(c.LastModified), + DeletedTime: (*dateTimeRFC1123)(c.DeletedTime), + LastModified: (*dateTimeRFC1123)(c.LastModified), } return enc.EncodeElement(aux, start) } @@ -282,8 +263,8 @@ func (c *ContainerProperties) UnmarshalXML(dec *xml.Decoder, start xml.StartElem type alias ContainerProperties aux := &struct { *alias - DeletedTime *timeRFC1123 `xml:"DeletedTime"` - LastModified *timeRFC1123 `xml:"Last-Modified"` + DeletedTime *dateTimeRFC1123 `xml:"DeletedTime"` + LastModified *dateTimeRFC1123 `xml:"Last-Modified"` }{ alias: (*alias)(c), } @@ -315,10 +296,10 @@ func (g GeoReplication) MarshalXML(enc *xml.Encoder, start xml.StartElement) err type alias GeoReplication aux := &struct { *alias - LastSyncTime *timeRFC1123 `xml:"LastSyncTime"` + LastSyncTime *dateTimeRFC1123 `xml:"LastSyncTime"` }{ alias: (*alias)(&g), - LastSyncTime: (*timeRFC1123)(g.LastSyncTime), + LastSyncTime: (*dateTimeRFC1123)(g.LastSyncTime), } return enc.EncodeElement(aux, start) } @@ -328,7 +309,7 @@ func (g *GeoReplication) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) type alias GeoReplication aux := &struct { *alias - LastSyncTime *timeRFC1123 `xml:"LastSyncTime"` + LastSyncTime *dateTimeRFC1123 `xml:"LastSyncTime"` }{ alias: (*alias)(g), } @@ -432,12 +413,12 @@ func (u UserDelegationKey) MarshalXML(enc *xml.Encoder, start xml.StartElement) type alias UserDelegationKey aux := &struct { *alias - SignedExpiry *timeRFC3339 `xml:"SignedExpiry"` - SignedStart *timeRFC3339 `xml:"SignedStart"` + SignedExpiry *dateTimeRFC3339 `xml:"SignedExpiry"` + SignedStart *dateTimeRFC3339 `xml:"SignedStart"` }{ alias: (*alias)(&u), - 
SignedExpiry: (*timeRFC3339)(u.SignedExpiry), - SignedStart: (*timeRFC3339)(u.SignedStart), + SignedExpiry: (*dateTimeRFC3339)(u.SignedExpiry), + SignedStart: (*dateTimeRFC3339)(u.SignedStart), } return enc.EncodeElement(aux, start) } @@ -447,8 +428,8 @@ func (u *UserDelegationKey) UnmarshalXML(dec *xml.Decoder, start xml.StartElemen type alias UserDelegationKey aux := &struct { *alias - SignedExpiry *timeRFC3339 `xml:"SignedExpiry"` - SignedStart *timeRFC3339 `xml:"SignedStart"` + SignedExpiry *dateTimeRFC3339 `xml:"SignedExpiry"` + SignedStart *dateTimeRFC3339 `xml:"SignedStart"` }{ alias: (*alias)(u), } @@ -470,6 +451,16 @@ func populate(m map[string]any, k string, v any) { } } +func populateAny(m map[string]any, k string, v any) { + if v == nil { + return + } else if azcore.IsNullValue(v) { + m[k] = nil + } else { + m[k] = v + } +} + func unpopulate(data json.RawMessage, fn string, v any) error { if data == nil { return nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go new file mode 100644 index 00000000..216f8b73 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go @@ -0,0 +1,1469 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package generated + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "time" +) + +// AppendBlobClientAppendBlockFromURLOptions contains the optional parameters for the AppendBlobClient.AppendBlockFromURL +// method. +type AppendBlobClientAppendBlockFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. + SourceContentcrc64 []byte + + // Bytes of source data in the specified range. + SourceRange *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// AppendBlobClientAppendBlockOptions contains the optional parameters for the AppendBlobClient.AppendBlock method. +type AppendBlobClientAppendBlockOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// AppendBlobClientCreateOptions contains the optional parameters for the AppendBlobClient.Create method. +type AppendBlobClientCreateOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + + // Specified if a legal hold should be set on the blob. + LegalHold *bool + + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// AppendBlobClientSealOptions contains the optional parameters for the AppendBlobClient.Seal method. +type AppendBlobClientSealOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock method. +type AppendPositionAccessConditions struct { + // Optional conditional header, used only for the Append Block operation. A number indicating the byte offset to compare. + // Append Block will succeed only if the append position is equal to this number. If + // it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed). + AppendPosition *int64 + + // Optional conditional header. The max length in bytes permitted for the append blob. If the Append Block operation would + // cause the blob to exceed that limit or if the blob size is already greater than + // the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code 412 - + // Precondition Failed). 
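
// ---------------------------------------------------------------------------
// Editor's note, not part of the vendored patch: a minimal sketch of how the
// AppendPositionAccessConditions documented above guard concurrent appends.
// The SAS blob URL is a placeholder. If another writer has already moved the
// blob past offset 0, or the blob would exceed MaxSize, the service rejects
// the request with HTTP 412 as described in the comments above.
package main

import (
	"bytes"
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob"
)

func main() {
	client, err := appendblob.NewClientWithNoCredential("https://<account>.blob.core.windows.net/c/b?<sas>", nil)
	if err != nil {
		log.Fatal(err)
	}
	body := streaming.NopCloser(bytes.NewReader([]byte("hello")))
	// Succeeds only if the current append offset is exactly 0 and the blob
	// stays under 1 MiB, per the AppendPosition/MaxSize conditions above.
	_, err = client.AppendBlock(context.TODO(), body, &appendblob.AppendBlockOptions{
		AppendPositionAccessConditions: &appendblob.AppendPositionAccessConditions{
			AppendPosition: to.Ptr(int64(0)),
			MaxSize:        to.Ptr(int64(1024 * 1024)),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
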
+ MaxSize *int64 +} + +// BlobClientAbortCopyFromURLOptions contains the optional parameters for the BlobClient.AbortCopyFromURL method. +type BlobClientAbortCopyFromURLOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientAcquireLeaseOptions contains the optional parameters for the BlobClient.AcquireLease method. +type BlobClientAcquireLeaseOptions struct { + // Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is + // not in the correct format. See Guid Constructor (String) for a list of valid GUID + // string formats. + ProposedLeaseID *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientBreakLeaseOptions contains the optional parameters for the BlobClient.BreakLease method. +type BlobClientBreakLeaseOptions struct { + // For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This + // break period is only used if it is shorter than the time remaining on the + // lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has + // expired, but the lease may be held for longer than the break period. If this + // header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, + // and an infinite lease breaks immediately. + BreakPeriod *int32 + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientChangeLeaseOptions contains the optional parameters for the BlobClient.ChangeLease method. +type BlobClientChangeLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientCopyFromURLOptions contains the optional parameters for the BlobClient.CopyFromURL method. +type BlobClientCopyFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. 
+ BlobTagsString *string + + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + + // Optional, default 'replace'. Indicates if source tags should be copied or replaced with the tags specified by x-ms-tags. + CopySourceTags *BlobCopySourceTags + + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + + // Specified if a legal hold should be set on the blob. + LegalHold *bool + + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientCreateSnapshotOptions contains the optional parameters for the BlobClient.CreateSnapshot method. +type BlobClientCreateSnapshotOptions struct { + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the BlobClient.DeleteImmutabilityPolicy +// method. +type BlobClientDeleteImmutabilityPolicyOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. 
+ RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientDeleteOptions contains the optional parameters for the BlobClient.Delete method. +type BlobClientDeleteOptions struct { + // Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob + // and all of its snapshots. only: Delete only the blob's snapshots and not the blob + // itself + DeleteSnapshots *DeleteSnapshotsOptionType + + // Optional. Only possible value is 'permanent', which specifies to permanently delete a blob if blob soft delete is enabled. + DeleteType *DeleteType + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientDownloadOptions contains the optional parameters for the BlobClient.Download method. +type BlobClientDownloadOptions struct { + // Return only the bytes of the blob in the specified range. + Range *string + + // When set to true and specified together with the Range, the service returns the CRC64 hash for the range, as long as the + // range is less than or equal to 4 MB in size. + RangeGetContentCRC64 *bool + + // When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the + // range is less than or equal to 4 MB in size. + RangeGetContentMD5 *bool + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. 
+ VersionID *string +} + +// BlobClientGetAccountInfoOptions contains the optional parameters for the BlobClient.GetAccountInfo method. +type BlobClientGetAccountInfoOptions struct { + // placeholder for future optional parameters +} + +// BlobClientGetPropertiesOptions contains the optional parameters for the BlobClient.GetProperties method. +type BlobClientGetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientGetTagsOptions contains the optional parameters for the BlobClient.GetTags method. +type BlobClientGetTagsOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientQueryOptions contains the optional parameters for the BlobClient.Query method. +type BlobClientQueryOptions struct { + // the query request + QueryRequest *QueryRequest + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
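
// ---------------------------------------------------------------------------
// Editor's note, not part of the vendored patch: a minimal sketch of a ranged
// download through the public v1.x surface, corresponding to the Range option
// documented above. Container and blob names are placeholders; the generated
// Snapshot/VersionID options surface as WithSnapshot/WithVersionID on the
// blob-level client rather than as raw fields.
package main

import (
	"context"
	"io"
	"log"
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

func main() {
	client, err := azblob.NewClientFromConnectionString("<connection string>", nil)
	if err != nil {
		log.Fatal(err)
	}
	// Fetch only the first 4 KiB of the blob, mirroring the Range option above.
	resp, err := client.DownloadStream(context.TODO(), "mycontainer", "myblob", &azblob.DownloadStreamOptions{
		Range: blob.HTTPRange{Offset: 0, Count: 4096},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	if _, err := io.Copy(os.Stdout, resp.Body); err != nil {
		log.Fatal(err)
	}
}
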
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientReleaseLeaseOptions contains the optional parameters for the BlobClient.ReleaseLease method. +type BlobClientReleaseLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientRenewLeaseOptions contains the optional parameters for the BlobClient.RenewLease method. +type BlobClientRenewLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetExpiryOptions contains the optional parameters for the BlobClient.SetExpiry method. +type BlobClientSetExpiryOptions struct { + // The time to set the blob to expiry + ExpiresOn *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetHTTPHeadersOptions contains the optional parameters for the BlobClient.SetHTTPHeaders method. +type BlobClientSetHTTPHeadersOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetImmutabilityPolicyOptions contains the optional parameters for the BlobClient.SetImmutabilityPolicy method. +type BlobClientSetImmutabilityPolicyOptions struct { + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *ImmutabilityPolicySetting + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetLegalHoldOptions contains the optional parameters for the BlobClient.SetLegalHold method. +type BlobClientSetLegalHoldOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetMetadataOptions contains the optional parameters for the BlobClient.SetMetadata method. +type BlobClientSetMetadataOptions struct { + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlobClientSetTagsOptions contains the optional parameters for the BlobClient.SetTags method. +type BlobClientSetTagsOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte + + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// BlobClientSetTierOptions contains the optional parameters for the BlobClient.SetTier method. +type BlobClientSetTierOptions struct { + // Optional: Indicates the priority with which to rehydrate an archived blob. + RehydratePriority *RehydratePriority + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. 
+ RequestID *string
+
+ // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
+ // information on working with blob snapshots, see Creating a Snapshot of a Blob.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
+ Snapshot *string
+
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+
+ // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on.
+ // It's for service version 2019-10-10 and newer.
+ VersionID *string
+}
+
+// BlobClientStartCopyFromURLOptions contains the optional parameters for the BlobClient.StartCopyFromURL method.
+type BlobClientStartCopyFromURLOptions struct {
+ // Optional. Used to set blob tags in various blob operations.
+ BlobTagsString *string
+
+ // Specifies the date and time when the blob's immutability policy is set to expire.
+ ImmutabilityPolicyExpiry *time.Time
+
+ // Specifies the immutability policy mode to set on the blob.
+ ImmutabilityPolicyMode *ImmutabilityPolicySetting
+
+ // Specifies whether a legal hold should be set on the blob.
+ LegalHold *bool
+
+ // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+ // operation will copy the metadata from the source blob or file to the destination
+ // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+ // is not copied from the source blob or file. Note that beginning with
+ // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+ // Blobs, and Metadata for more information.
+ Metadata map[string]*string
+
+ // Optional: Indicates the priority with which to rehydrate an archived blob.
+ RehydratePriority *RehydratePriority
+
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+
+ // Overrides the sealed state of the destination blob. Service version 2019-12-12 and newer.
+ SealBlob *bool
+
+ // Optional. Indicates the tier to be set on the blob.
+ Tier *AccessTier
+
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// BlobClientUndeleteOptions contains the optional parameters for the BlobClient.Undelete method.
+type BlobClientUndeleteOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method.
+type BlobHTTPHeaders struct {
+ // Optional.
Sets the blob's cache control. If specified, this property is stored with the blob and returned with a read request.
+ BlobCacheControl *string
+
+ // Optional. Sets the blob's Content-Disposition header.
+ BlobContentDisposition *string
+
+ // Optional. Sets the blob's content encoding. If specified, this property is stored with the blob and returned with a read
+ // request.
+ BlobContentEncoding *string
+
+ // Optional. Sets the blob's content language. If specified, this property is stored with the blob and returned with a read
+ // request.
+ BlobContentLanguage *string
+
+ // Optional. An MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual blocks
+ // were validated when each was uploaded.
+ BlobContentMD5 []byte
+
+ // Optional. Sets the blob's content type. If specified, this property is stored with the blob and returned with a read request.
+ BlobContentType *string
+}
+
+// BlockBlobClientCommitBlockListOptions contains the optional parameters for the BlockBlobClient.CommitBlockList method.
+type BlockBlobClientCommitBlockListOptions struct {
+ // Optional. Used to set blob tags in various blob operations.
+ BlobTagsString *string
+
+ // Specifies the date and time when the blob's immutability policy is set to expire.
+ ImmutabilityPolicyExpiry *time.Time
+
+ // Specifies the immutability policy mode to set on the blob.
+ ImmutabilityPolicyMode *ImmutabilityPolicySetting
+
+ // Specifies whether a legal hold should be set on the blob.
+ LegalHold *bool
+
+ // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+ // operation will copy the metadata from the source blob or file to the destination
+ // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+ // is not copied from the source blob or file. Note that beginning with
+ // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+ // Blobs, and Metadata for more information.
+ Metadata map[string]*string
+
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+
+ // Optional. Indicates the tier to be set on the blob.
+ Tier *AccessTier
+
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+
+ // Specify the transactional crc64 for the body, to be validated by the service.
+ TransactionalContentCRC64 []byte
+
+ // Specify the transactional md5 for the body, to be validated by the service.
+ TransactionalContentMD5 []byte
+}
+
+// BlockBlobClientGetBlockListOptions contains the optional parameters for the BlockBlobClient.GetBlockList method.
+type BlockBlobClientGetBlockListOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+
+ // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
+ // information on working with blob snapshots, see Creating a Snapshot of a Blob.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// BlockBlobClientPutBlobFromURLOptions contains the optional parameters for the BlockBlobClient.PutBlobFromURL method. +type BlockBlobClientPutBlobFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + + // Optional, default is true. Indicates if properties from the source blob should be copied. + CopySourceBlobProperties *bool + + // Optional, default 'replace'. Indicates if source tags should be copied or replaced with the tags specified by x-ms-tags. + CopySourceTags *BlobCopySourceTags + + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]*string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// BlockBlobClientStageBlockFromURLOptions contains the optional parameters for the BlockBlobClient.StageBlockFromURL method. +type BlockBlobClientStageBlockFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. + SourceContentcrc64 []byte + + // Bytes of source data in the specified range. + SourceRange *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// BlockBlobClientStageBlockOptions contains the optional parameters for the BlockBlobClient.StageBlock method.
+type BlockBlobClientStageBlockOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+
+ // Specify the transactional crc64 for the body, to be validated by the service.
+ TransactionalContentCRC64 []byte
+
+ // Specify the transactional md5 for the body, to be validated by the service.
+ TransactionalContentMD5 []byte
+}
+
+// BlockBlobClientUploadOptions contains the optional parameters for the BlockBlobClient.Upload method.
+type BlockBlobClientUploadOptions struct {
+ // Optional. Used to set blob tags in various blob operations.
+ BlobTagsString *string
+
+ // Specifies the date and time when the blob's immutability policy is set to expire.
+ ImmutabilityPolicyExpiry *time.Time
+
+ // Specifies the immutability policy mode to set on the blob.
+ ImmutabilityPolicyMode *ImmutabilityPolicySetting
+
+ // Specifies whether a legal hold should be set on the blob.
+ LegalHold *bool
+
+ // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+ // operation will copy the metadata from the source blob or file to the destination
+ // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+ // is not copied from the source blob or file. Note that beginning with
+ // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+ // Blobs, and Metadata for more information.
+ Metadata map[string]*string
+
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+
+ // Optional. Indicates the tier to be set on the blob.
+ Tier *AccessTier
+
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+
+ // Specify the transactional crc64 for the body, to be validated by the service.
+ TransactionalContentCRC64 []byte
+
+ // Specify the transactional md5 for the body, to be validated by the service.
+ TransactionalContentMD5 []byte
+}
+
+// ContainerClientAcquireLeaseOptions contains the optional parameters for the ContainerClient.AcquireLease method.
+type ContainerClientAcquireLeaseOptions struct {
+ // Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is
+ // not in the correct format. See Guid Constructor (String) for a list of valid GUID
+ // string formats.
+ ProposedLeaseID *string
+
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// ContainerClientBreakLeaseOptions contains the optional parameters for the ContainerClient.BreakLease method.
+type ContainerClientBreakLeaseOptions struct {
+ // For a break operation, the proposed duration that the lease should continue before it is broken, in seconds, between 0 and 60. This
+ // break period is only used if it is shorter than the time remaining on the
+ // lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has
+ // expired, but the lease may be held for longer than the break period. If this
+ // header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses,
+ // and an infinite lease breaks immediately.
+ BreakPeriod *int32
+
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// ContainerClientChangeLeaseOptions contains the optional parameters for the ContainerClient.ChangeLease method.
+type ContainerClientChangeLeaseOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// ContainerClientCreateOptions contains the optional parameters for the ContainerClient.Create method.
+type ContainerClientCreateOptions struct {
+ // Specifies whether data in the container may be accessed publicly, and the level of access.
+ Access *PublicAccessType
+
+ // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+ // operation will copy the metadata from the source blob or file to the destination
+ // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+ // is not copied from the source blob or file. Note that beginning with
+ // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+ // Blobs, and Metadata for more information.
+ Metadata map[string]*string
+
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientDeleteOptions contains the optional parameters for the ContainerClient.Delete method. +type ContainerClientDeleteOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientFilterBlobsOptions contains the optional parameters for the ContainerClient.FilterBlobs method. +type ContainerClientFilterBlobsOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include []FilterBlobsIncludeItem + + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientGetAccessPolicyOptions contains the optional parameters for the ContainerClient.GetAccessPolicy method. +type ContainerClientGetAccessPolicyOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientGetAccountInfoOptions contains the optional parameters for the ContainerClient.GetAccountInfo method. +type ContainerClientGetAccountInfoOptions struct { + // placeholder for future optional parameters +} + +// ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method. 
+type ContainerClientGetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientListBlobFlatSegmentOptions contains the optional parameters for the ContainerClient.NewListBlobFlatSegmentPager +// method. +type ContainerClientListBlobFlatSegmentOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include []ListBlobsIncludeItem + + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientListBlobHierarchySegmentOptions contains the optional parameters for the ContainerClient.NewListBlobHierarchySegmentPager +// method. +type ContainerClientListBlobHierarchySegmentOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include []ListBlobsIncludeItem + + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. 
Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientReleaseLeaseOptions contains the optional parameters for the ContainerClient.ReleaseLease method. +type ContainerClientReleaseLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientRenameOptions contains the optional parameters for the ContainerClient.Rename method. +type ContainerClientRenameOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // A lease ID for the source path. If specified, the source path must have an active lease and the lease ID must match. + SourceLeaseID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientRenewLeaseOptions contains the optional parameters for the ContainerClient.RenewLease method. +type ContainerClientRenewLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ContainerClientRestoreOptions contains the optional parameters for the ContainerClient.Restore method. +type ContainerClientRestoreOptions struct { + // Optional. Version 2019-12-12 and later. Specifies the name of the deleted container to restore. + DeletedContainerName *string + + // Optional. Version 2019-12-12 and later. Specifies the version of the deleted container to restore. + DeletedContainerVersion *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. 
For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// ContainerClientSetAccessPolicyOptions contains the optional parameters for the ContainerClient.SetAccessPolicy method.
+type ContainerClientSetAccessPolicyOptions struct {
+ // Specifies whether data in the container may be accessed publicly, and the level of access.
+ Access *PublicAccessType
+
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// ContainerClientSetMetadataOptions contains the optional parameters for the ContainerClient.SetMetadata method.
+type ContainerClientSetMetadataOptions struct {
+ // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+ // operation will copy the metadata from the source blob or file to the destination
+ // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+ // is not copied from the source blob or file. Note that beginning with
+ // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+ // Blobs, and Metadata for more information.
+ Metadata map[string]*string
+
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// ContainerClientSubmitBatchOptions contains the optional parameters for the ContainerClient.SubmitBatch method.
+type ContainerClientSubmitBatchOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// ContainerCPKScopeInfo contains a group of parameters for the ContainerClient.Create method.
+type ContainerCPKScopeInfo struct {
+ // Optional. Version 2019-07-07 and later. Specifies the default encryption scope to set on the container and use for all
+ // future writes.
+ DefaultEncryptionScope *string
+
+ // Optional. Version 2019-07-07 and newer. If true, prevents any request from specifying a different encryption scope than
+ // the scope set on the container.
+ PreventEncryptionScopeOverride *bool
+}
+
+// CPKInfo contains a group of parameters for the BlobClient.Download method.
+type CPKInfo struct {
+ // The algorithm used to produce the encryption key hash.
Currently, the only accepted value is "AES256". Must be provided + // if the x-ms-encryption-key header is provided. + EncryptionAlgorithm *EncryptionAlgorithmType + + // Optional. Specifies the encryption key to use to encrypt the data provided in the request. If not specified, encryption + // is performed with the root account encryption key. For more information, see + // Encryption at Rest for Azure Storage Services. + EncryptionKey *string + + // The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. + EncryptionKeySHA256 *string +} + +// CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +type CPKScopeInfo struct { + // Optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided + // in the request. If not specified, encryption is performed with the default + // account encryption scope. For more information, see Encryption at Rest for Azure Storage Services. + EncryptionScope *string +} + +// LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. +type LeaseAccessConditions struct { + // If specified, the operation only succeeds if the resource's lease is active and matches this ID. + LeaseID *string +} + +// ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. +type ModifiedAccessConditions struct { + // Specify an ETag value to operate only on blobs with a matching value. + IfMatch *azcore.ETag + + // Specify this header value to operate only on a blob if it has been modified since the specified date/time. + IfModifiedSince *time.Time + + // Specify an ETag value to operate only on blobs without a matching value. + IfNoneMatch *azcore.ETag + + // Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + IfTags *string + + // Specify this header value to operate only on a blob if it has not been modified since the specified date/time. + IfUnmodifiedSince *time.Time +} + +// PageBlobClientClearPagesOptions contains the optional parameters for the PageBlobClient.ClearPages method. +type PageBlobClientClearPagesOptions struct { + // Return only the bytes of the blob in the specified range. + Range *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientCopyIncrementalOptions contains the optional parameters for the PageBlobClient.CopyIncremental method. +type PageBlobClientCopyIncrementalOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientCreateOptions contains the optional parameters for the PageBlobClient.Create method. 
+type PageBlobClientCreateOptions struct {
+ // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of
+ // the sequence number must be between 0 and 2^63 - 1.
+ BlobSequenceNumber *int64
+
+ // Optional. Used to set blob tags in various blob operations.
+ BlobTagsString *string
+
+ // Specifies the date and time when the blob's immutability policy is set to expire.
+ ImmutabilityPolicyExpiry *time.Time
+
+ // Specifies the immutability policy mode to set on the blob.
+ ImmutabilityPolicyMode *ImmutabilityPolicySetting
+
+ // Specifies whether a legal hold should be set on the blob.
+ LegalHold *bool
+
+ // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+ // operation will copy the metadata from the source blob or file to the destination
+ // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+ // is not copied from the source blob or file. Note that beginning with
+ // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+ // Blobs, and Metadata for more information.
+ Metadata map[string]*string
+
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+
+ // Optional. Indicates the tier to be set on the page blob.
+ Tier *PremiumPageBlobAccessTier
+
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// PageBlobClientGetPageRangesDiffOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesDiffPager
+// method.
+type PageBlobClientGetPageRangesDiffOptions struct {
+ // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The
+ // operation returns the NextMarker value within the response body if the listing
+ // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used
+ // as the value for the marker parameter in a subsequent call to request the next
+ // page of list items. The marker value is opaque to the client.
+ Marker *string
+
+ // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value
+ // greater than 5000, the server will return up to 5000 items. Note that if the
+ // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
+ // of the results. For this reason, it is possible that the service will
+ // return fewer results than specified by maxresults, or than the default of 5000.
+ Maxresults *int32
+
+ // Optional. This header is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot
+ // of the target blob. The response will only contain pages that were changed
+ // between the target blob and its previous snapshot.
+ PrevSnapshotURL *string
+
+ // Optional in version 2015-07-08 and newer.
The prevsnapshot parameter is a DateTime value that specifies that the response + // will contain only pages that were changed between target blob and previous + // snapshot. Changed pages include both updated and cleared pages. The target blob may be a snapshot, as long as the snapshot + // specified by prevsnapshot is the older of the two. Note that incremental + // snapshots are currently supported only for blobs created on or after January 1, 2016. + Prevsnapshot *string + + // Return only the bytes of the blob in the specified range. + Range *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientGetPageRangesOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesPager method. +type PageBlobClientGetPageRangesOptions struct { + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Return only the bytes of the blob in the specified range. + Range *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientResizeOptions contains the optional parameters for the PageBlobClient.Resize method. 
+type PageBlobClientResizeOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the PageBlobClient.UpdateSequenceNumber +// method. +type PageBlobClientUpdateSequenceNumberOptions struct { + // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of + // the sequence number must be between 0 and 2^63 - 1. + BlobSequenceNumber *int64 + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientUploadPagesFromURLOptions contains the optional parameters for the PageBlobClient.UploadPagesFromURL method. +type PageBlobClientUploadPagesFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. + SourceContentcrc64 []byte + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// PageBlobClientUploadPagesOptions contains the optional parameters for the PageBlobClient.UploadPages method. +type PageBlobClientUploadPagesOptions struct { + // Return only the bytes of the blob in the specified range. + Range *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages method. 
+type SequenceNumberAccessConditions struct { + // Specify this header value to operate only on a blob if it has the specified sequence number. + IfSequenceNumberEqualTo *int64 + + // Specify this header value to operate only on a blob if it has a sequence number less than the specified. + IfSequenceNumberLessThan *int64 + + // Specify this header value to operate only on a blob if it has a sequence number less than or equal to the specified. + IfSequenceNumberLessThanOrEqualTo *int64 +} + +// ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method. +type ServiceClientFilterBlobsOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include []FilterBlobsIncludeItem + + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method. +type ServiceClientGetAccountInfoOptions struct { + // placeholder for future optional parameters +} + +// ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method. +type ServiceClientGetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method. +type ServiceClientGetStatisticsOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. 
For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientGetUserDelegationKeyOptions contains the optional parameters for the ServiceClient.GetUserDelegationKey method. +type ServiceClientGetUserDelegationKeyOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientListContainersSegmentOptions contains the optional parameters for the ServiceClient.NewListContainersSegmentPager +// method. +type ServiceClientListContainersSegmentOptions struct { + // Include this parameter to specify that the container's metadata be returned as part of the response body. + Include []ListContainersIncludeType + + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string + + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method. +type ServiceClientSetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// ServiceClientSubmitBatchOptions contains the optional parameters for the ServiceClient.SubmitBatch method. 
+type ServiceClientSubmitBatchOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL method. +type SourceModifiedAccessConditions struct { + // Specify an ETag value to operate only on blobs with a matching value. + SourceIfMatch *azcore.ETag + + // Specify this header value to operate only on a blob if it has been modified since the specified date/time. + SourceIfModifiedSince *time.Time + + // Specify an ETag value to operate only on blobs without a matching value. + SourceIfNoneMatch *azcore.ETag + + // Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + SourceIfTags *string + + // Specify this header value to operate only on a blob if it has not been modified since the specified date/time. + SourceIfUnmodifiedSince *time.Time +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go index 75db1c17..bfa9883f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -22,27 +21,16 @@ import ( ) // PageBlobClient contains the methods for the PageBlob group. -// Don't use this type directly, use NewPageBlobClient() instead. +// Don't use this type directly, use a constructor function instead. type PageBlobClient struct { + internal *azcore.Client endpoint string - pl runtime.Pipeline -} - -// NewPageBlobClient creates a new instance of PageBlobClient with the specified values. -// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. -// - pl - the pipeline used for sending requests and handling responses. -func NewPageBlobClient(endpoint string, pl runtime.Pipeline) *PageBlobClient { - client := &PageBlobClient{ - endpoint: endpoint, - pl: pl, - } - return client } // ClearPages - The Clear Pages operation clears a set of pages from a page blob // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - contentLength - The length of the request. // - options - PageBlobClientClearPagesOptions contains the optional parameters for the PageBlobClient.ClearPages method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
@@ -52,18 +40,21 @@ func NewPageBlobClient(endpoint string, pl runtime.Pipeline) *PageBlobClient { // method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *PageBlobClient) ClearPages(ctx context.Context, contentLength int64, options *PageBlobClientClearPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientClearPagesResponse, error) { + var err error req, err := client.clearPagesCreateRequest(ctx, contentLength, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) if err != nil { return PageBlobClientClearPagesResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientClearPagesResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return PageBlobClientClearPagesResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientClearPagesResponse{}, err } - return client.clearPagesHandleResponse(resp) + resp, err := client.clearPagesHandleResponse(httpResp) + return resp, err } // clearPagesCreateRequest creates the ClearPages request. @@ -108,10 +99,10 @@ func (client *PageBlobClient) clearPagesCreateRequest(ctx context.Context, conte req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -122,7 +113,7 @@ func (client *PageBlobClient) clearPagesCreateRequest(ctx context.Context, conte if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -133,15 +124,22 @@ func (client *PageBlobClient) clearPagesCreateRequest(ctx context.Context, conte // clearPagesHandleResponse handles the ClearPages response. 
func (client *PageBlobClient) clearPagesHandleResponse(resp *http.Response) (PageBlobClientClearPagesResponse, error) { result := PageBlobClientClearPagesResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return PageBlobClientClearPagesResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) if err != nil { return PageBlobClientClearPagesResponse{}, err } - result.LastModified = &lastModified + result.ContentCRC64 = contentCRC64 } if val := resp.Header.Get("Content-MD5"); val != "" { contentMD5, err := base64.StdEncoding.DecodeString(val) @@ -150,22 +148,22 @@ func (client *PageBlobClient) clearPagesHandleResponse(resp *http.Response) (Pag } result.ContentMD5 = contentMD5 } - if val := resp.Header.Get("x-ms-content-crc64"); val != "" { - contentCRC64, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) if err != nil { return PageBlobClientClearPagesResponse{}, err } - result.ContentCRC64 = contentCRC64 + result.Date = &date } - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) if err != nil { return PageBlobClientClearPagesResponse{}, err } - result.BlobSequenceNumber = &blobSequenceNumber - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val + result.LastModified = &lastModified } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val @@ -173,13 +171,6 @@ func (client *PageBlobClient) clearPagesHandleResponse(resp *http.Response) (Pag if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientClearPagesResponse{}, err - } - result.Date = &date - } return result, nil } @@ -190,7 +181,7 @@ func (client *PageBlobClient) clearPagesHandleResponse(resp *http.Response) (Pag // 2016-05-31. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies // a page blob snapshot. The value should be URL-encoded as it would appear in a request // URI. The source blob must either be public or must be authenticated via a shared access signature. @@ -198,18 +189,21 @@ func (client *PageBlobClient) clearPagesHandleResponse(resp *http.Response) (Pag // method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *PageBlobClient) CopyIncremental(ctx context.Context, copySource string, options *PageBlobClientCopyIncrementalOptions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientCopyIncrementalResponse, error) { + var err error req, err := client.copyIncrementalCreateRequest(ctx, copySource, options, modifiedAccessConditions) if err != nil { return PageBlobClientCopyIncrementalResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientCopyIncrementalResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return PageBlobClientCopyIncrementalResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientCopyIncrementalResponse{}, err } - return client.copyIncrementalHandleResponse(resp) + resp, err := client.copyIncrementalHandleResponse(httpResp) + return resp, err } // copyIncrementalCreateRequest creates the CopyIncremental request. @@ -225,10 +219,10 @@ func (client *PageBlobClient) copyIncrementalCreateRequest(ctx context.Context, } req.Raw().URL.RawQuery = reqQP.Encode() if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -240,7 +234,7 @@ func (client *PageBlobClient) copyIncrementalCreateRequest(ctx context.Context, req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } req.Raw().Header["x-ms-copy-source"] = []string{copySource} - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -251,6 +245,22 @@ func (client *PageBlobClient) copyIncrementalCreateRequest(ctx context.Context, // copyIncrementalHandleResponse handles the CopyIncremental response. 
func (client *PageBlobClient) copyIncrementalHandleResponse(resp *http.Response) (PageBlobClientCopyIncrementalResponse, error) { result := PageBlobClientCopyIncrementalResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientCopyIncrementalResponse{}, err + } + result.Date = &date + } if val := resp.Header.Get("ETag"); val != "" { result.ETag = (*azcore.ETag)(&val) } @@ -261,35 +271,19 @@ func (client *PageBlobClient) copyIncrementalHandleResponse(resp *http.Response) } result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } if val := resp.Header.Get("x-ms-request-id"); val != "" { result.RequestID = &val } if val := resp.Header.Get("x-ms-version"); val != "" { result.Version = &val } - if val := resp.Header.Get("Date"); val != "" { - date, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientCopyIncrementalResponse{}, err - } - result.Date = &date - } - if val := resp.Header.Get("x-ms-copy-id"); val != "" { - result.CopyID = &val - } - if val := resp.Header.Get("x-ms-copy-status"); val != "" { - result.CopyStatus = (*CopyStatusType)(&val) - } return result, nil } // Create - The Create operation creates a new page blob. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - contentLength - The length of the request. // - blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned // to a 512-byte boundary. @@ -300,18 +294,21 @@ func (client *PageBlobClient) copyIncrementalHandleResponse(resp *http.Response) // - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *PageBlobClient) Create(ctx context.Context, contentLength int64, blobContentLength int64, options *PageBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientCreateResponse, error) { + var err error req, err := client.createCreateRequest(ctx, contentLength, blobContentLength, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { return PageBlobClientCreateResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientCreateResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return PageBlobClientCreateResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientCreateResponse{}, err } - return client.createHandleResponse(resp) + resp, err := client.createHandleResponse(httpResp) + return resp, err } // createCreateRequest creates the Create request. 
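
Note: the recurring If-Modified-Since/If-Unmodified-Since change in these hunks converts the value to an explicit GMT location before formatting. time.RFC1123 renders the time's own zone abbreviation, so a UTC timestamp would serialize as "... UTC", while HTTP dates (RFC 7231) require the "GMT" form. A standalone sketch; the gmt variable mirrors the package-level one the generated code references, assumed here to be a fixed zero-offset zone:

package main

import (
	"fmt"
	"time"
)

// Assumed equivalent of the generated package's gmt location.
var gmt = time.FixedZone("GMT", 0)

func main() {
	t := time.Date(2024, time.January, 30, 12, 0, 0, 0, time.UTC)
	fmt.Println(t.Format(time.RFC1123))         // Tue, 30 Jan 2024 12:00:00 UTC (invalid HTTP date)
	fmt.Println(t.In(gmt).Format(time.RFC1123)) // Tue, 30 Jan 2024 12:00:00 GMT
}
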
@@ -371,10 +368,10 @@ func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLe req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -389,7 +386,7 @@ func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLe if options != nil && options.BlobSequenceNumber != nil { req.Raw().Header["x-ms-blob-sequence-number"] = []string{strconv.FormatInt(*options.BlobSequenceNumber, 10)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -397,7 +394,7 @@ func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLe req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString} } if options != nil && options.ImmutabilityPolicyExpiry != nil { - req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)} + req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)} } if options != nil && options.ImmutabilityPolicyMode != nil { req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)} @@ -412,15 +409,8 @@ func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLe // createHandleResponse handles the Create response. 
func (client *PageBlobClient) createHandleResponse(resp *http.Response) (PageBlobClientCreateResponse, error) { result := PageBlobClientCreateResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientCreateResponse{}, err - } - result.LastModified = &lastModified + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } if val := resp.Header.Get("Content-MD5"); val != "" { contentMD5, err := base64.StdEncoding.DecodeString(val) @@ -429,18 +419,6 @@ func (client *PageBlobClient) createHandleResponse(resp *http.Response) (PageBlo } result.ContentMD5 = contentMD5 } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } - if val := resp.Header.Get("x-ms-version-id"); val != "" { - result.VersionID = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -448,6 +426,15 @@ func (client *PageBlobClient) createHandleResponse(resp *http.Response) (PageBlo } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { @@ -455,11 +442,21 @@ func (client *PageBlobClient) createHandleResponse(resp *http.Response) (PageBlo } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientCreateResponse{}, err + } + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val } return result, nil } @@ -467,7 +464,7 @@ func (client *PageBlobClient) createHandleResponse(resp *http.Response) (PageBlo // NewGetPageRangesPager - The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot // of a page blob // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - PageBlobClientGetPageRangesOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesPager // method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
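
Note: the pager hunk below replaces the hand-rolled first-page/next-page branching with runtime.FetcherForNextLink, which issues the builder's request for the first page, GETs the nextLink for later pages, and performs the status-code check itself, which is why the inline HasStatusCode/NewResponseError block disappears. A compilable sketch of the shape, using the vendored azcore; names are illustrative:

package pagersketch

import (
	"context"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

// fetchPage shows the pattern the regenerated pagers share: pass an empty
// nextLink for the first page and the previous page's NextMarker afterwards.
// With nil options, non-200 responses come back as *azcore.ResponseError.
func fetchPage(ctx context.Context, pl runtime.Pipeline, nextLink string,
	build func(context.Context) (*policy.Request, error)) (*http.Response, error) {
	return runtime.FetcherForNextLink(ctx, pl, nextLink, build, nil)
}
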
@@ -478,23 +475,16 @@ func (client *PageBlobClient) NewGetPageRangesPager(options *PageBlobClientGetPa return page.NextMarker != nil && len(*page.NextMarker) > 0 }, Fetcher: func(ctx context.Context, page *PageBlobClientGetPageRangesResponse) (PageBlobClientGetPageRangesResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.GetPageRangesCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextMarker) - } - if err != nil { - return PageBlobClientGetPageRangesResponse{}, err + nextLink := "" + if page != nil { + nextLink = *page.NextMarker } - resp, err := client.pl.Do(req) + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.GetPageRangesCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) + }, nil) if err != nil { return PageBlobClientGetPageRangesResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return PageBlobClientGetPageRangesResponse{}, runtime.NewResponseError(resp) - } return client.GetPageRangesHandleResponse(resp) }, }) @@ -528,10 +518,10 @@ func (client *PageBlobClient) GetPageRangesCreateRequest(ctx context.Context, op req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -542,7 +532,7 @@ func (client *PageBlobClient) GetPageRangesCreateRequest(ctx context.Context, op if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -553,16 +543,6 @@ func (client *PageBlobClient) GetPageRangesCreateRequest(ctx context.Context, op // GetPageRangesHandleResponse handles the GetPageRanges response. 
func (client *PageBlobClient) GetPageRangesHandleResponse(resp *http.Response) (PageBlobClientGetPageRangesResponse, error) { result := PageBlobClientGetPageRangesResponse{} - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientGetPageRangesResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { blobContentLength, err := strconv.ParseInt(val, 10, 64) if err != nil { @@ -573,12 +553,6 @@ func (client *PageBlobClient) GetPageRangesHandleResponse(resp *http.Response) ( if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -586,6 +560,22 @@ func (client *PageBlobClient) GetPageRangesHandleResponse(resp *http.Response) ( } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientGetPageRangesResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } if err := runtime.UnmarshalAsXML(resp, &result.PageList); err != nil { return PageBlobClientGetPageRangesResponse{}, err } @@ -595,7 +585,7 @@ func (client *PageBlobClient) GetPageRangesHandleResponse(resp *http.Response) ( // NewGetPageRangesDiffPager - The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that // were changed between target blob and previous snapshot. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - PageBlobClientGetPageRangesDiffOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesDiffPager // method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
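
Note: every hard-coded "2020-10-02" x-ms-version header in this file becomes ServiceVersion, so future service-version bumps touch one constant rather than every request builder. The constant itself is defined elsewhere in the regenerated package (zz_constants.go in the upstream layout); presumably along these lines:

// Assumed shape of the generated constant this patch references; the value
// matches the "Generated from API version 2023-08-03" comments above.
const ServiceVersion = "2023-08-03"

// Usage in the request builders, as seen throughout this diff:
//   req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
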
@@ -606,23 +596,16 @@ func (client *PageBlobClient) NewGetPageRangesDiffPager(options *PageBlobClientG return page.NextMarker != nil && len(*page.NextMarker) > 0 }, Fetcher: func(ctx context.Context, page *PageBlobClientGetPageRangesDiffResponse) (PageBlobClientGetPageRangesDiffResponse, error) { - var req *policy.Request - var err error - if page == nil { - req, err = client.GetPageRangesDiffCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) - } else { - req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextMarker) + nextLink := "" + if page != nil { + nextLink = *page.NextMarker } + resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) { + return client.GetPageRangesDiffCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions) + }, nil) if err != nil { return PageBlobClientGetPageRangesDiffResponse{}, err } - resp, err := client.pl.Do(req) - if err != nil { - return PageBlobClientGetPageRangesDiffResponse{}, err - } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return PageBlobClientGetPageRangesDiffResponse{}, runtime.NewResponseError(resp) - } return client.GetPageRangesDiffHandleResponse(resp) }, }) @@ -662,10 +645,10 @@ func (client *PageBlobClient) GetPageRangesDiffCreateRequest(ctx context.Context req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -676,7 +659,7 @@ func (client *PageBlobClient) GetPageRangesDiffCreateRequest(ctx context.Context if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -687,16 +670,6 @@ func (client *PageBlobClient) GetPageRangesDiffCreateRequest(ctx context.Context // GetPageRangesDiffHandleResponse handles the GetPageRangesDiff response. 
func (client *PageBlobClient) GetPageRangesDiffHandleResponse(resp *http.Response) (PageBlobClientGetPageRangesDiffResponse, error) { result := PageBlobClientGetPageRangesDiffResponse{} - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientGetPageRangesDiffResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { blobContentLength, err := strconv.ParseInt(val, 10, 64) if err != nil { @@ -707,12 +680,6 @@ func (client *PageBlobClient) GetPageRangesDiffHandleResponse(resp *http.Respons if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -720,6 +687,22 @@ func (client *PageBlobClient) GetPageRangesDiffHandleResponse(resp *http.Respons } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientGetPageRangesDiffResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } if err := runtime.UnmarshalAsXML(resp, &result.PageList); err != nil { return PageBlobClientGetPageRangesDiffResponse{}, err } @@ -729,7 +712,7 @@ func (client *PageBlobClient) GetPageRangesDiffHandleResponse(resp *http.Respons // Resize - Resize the Blob // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned // to a 512-byte boundary. // - options - PageBlobClientResizeOptions contains the optional parameters for the PageBlobClient.Resize method. @@ -738,18 +721,21 @@ func (client *PageBlobClient) GetPageRangesDiffHandleResponse(resp *http.Respons // - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *PageBlobClient) Resize(ctx context.Context, blobContentLength int64, options *PageBlobClientResizeOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientResizeResponse, error) { + var err error req, err := client.resizeCreateRequest(ctx, blobContentLength, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) if err != nil { return PageBlobClientResizeResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientResizeResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return PageBlobClientResizeResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientResizeResponse{}, err } - return client.resizeHandleResponse(resp) + resp, err := client.resizeHandleResponse(httpResp) + return resp, err } // resizeCreateRequest creates the Resize request. @@ -780,10 +766,10 @@ func (client *PageBlobClient) resizeCreateRequest(ctx context.Context, blobConte req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -795,7 +781,7 @@ func (client *PageBlobClient) resizeCreateRequest(ctx context.Context, blobConte req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } req.Raw().Header["x-ms-blob-content-length"] = []string{strconv.FormatInt(blobContentLength, 10)} - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -806,16 +792,6 @@ func (client *PageBlobClient) resizeCreateRequest(ctx context.Context, blobConte // resizeHandleResponse handles the Resize response. 
func (client *PageBlobClient) resizeHandleResponse(resp *http.Response) (PageBlobClientResizeResponse, error) { result := PageBlobClientResizeResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientResizeResponse{}, err - } - result.LastModified = &lastModified - } if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { @@ -826,12 +802,6 @@ func (client *PageBlobClient) resizeHandleResponse(resp *http.Response) (PageBlo if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -839,13 +809,29 @@ func (client *PageBlobClient) resizeHandleResponse(resp *http.Response) (PageBlo } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientResizeResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } // UpdateSequenceNumber - Update the sequence number of the blob // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - sequenceNumberAction - Required if the x-ms-blob-sequence-number header is set for the request. This property applies to // page blobs only. This property indicates how the service should modify the blob's sequence number // - options - PageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the PageBlobClient.UpdateSequenceNumber @@ -853,18 +839,21 @@ func (client *PageBlobClient) resizeHandleResponse(resp *http.Response) (PageBlo // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *PageBlobClient) UpdateSequenceNumber(ctx context.Context, sequenceNumberAction SequenceNumberActionType, options *PageBlobClientUpdateSequenceNumberOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientUpdateSequenceNumberResponse, error) { + var err error req, err := client.updateSequenceNumberCreateRequest(ctx, sequenceNumberAction, options, leaseAccessConditions, modifiedAccessConditions) if err != nil { return PageBlobClientUpdateSequenceNumberResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientUpdateSequenceNumberResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return PageBlobClientUpdateSequenceNumberResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientUpdateSequenceNumberResponse{}, err } - return client.updateSequenceNumberHandleResponse(resp) + resp, err := client.updateSequenceNumberHandleResponse(httpResp) + return resp, err } // updateSequenceNumberCreateRequest creates the UpdateSequenceNumber request. @@ -883,10 +872,10 @@ func (client *PageBlobClient) updateSequenceNumberCreateRequest(ctx context.Cont req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -901,7 +890,7 @@ func (client *PageBlobClient) updateSequenceNumberCreateRequest(ctx context.Cont if options != nil && options.BlobSequenceNumber != nil { req.Raw().Header["x-ms-blob-sequence-number"] = []string{strconv.FormatInt(*options.BlobSequenceNumber, 10)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -912,16 +901,6 @@ func (client *PageBlobClient) updateSequenceNumberCreateRequest(ctx context.Cont // updateSequenceNumberHandleResponse handles the UpdateSequenceNumber response. 
func (client *PageBlobClient) updateSequenceNumberHandleResponse(resp *http.Response) (PageBlobClientUpdateSequenceNumberResponse, error) { result := PageBlobClientUpdateSequenceNumberResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientUpdateSequenceNumberResponse{}, err - } - result.LastModified = &lastModified - } if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { @@ -932,12 +911,6 @@ func (client *PageBlobClient) updateSequenceNumberHandleResponse(resp *http.Resp if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -945,13 +918,29 @@ func (client *PageBlobClient) updateSequenceNumberHandleResponse(resp *http.Resp } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUpdateSequenceNumberResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } // UploadPages - The Upload Pages operation writes a range of pages to a page blob // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - contentLength - The length of the request. // - body - Initial data // - options - PageBlobClientUploadPagesOptions contains the optional parameters for the PageBlobClient.UploadPages method. @@ -962,18 +951,21 @@ func (client *PageBlobClient) updateSequenceNumberHandleResponse(resp *http.Resp // method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *PageBlobClient) UploadPages(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *PageBlobClientUploadPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientUploadPagesResponse, error) { + var err error req, err := client.uploadPagesCreateRequest(ctx, contentLength, body, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) if err != nil { return PageBlobClientUploadPagesResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientUploadPagesResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return PageBlobClientUploadPagesResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientUploadPagesResponse{}, err } - return client.uploadPagesHandleResponse(resp) + resp, err := client.uploadPagesHandleResponse(httpResp) + return resp, err } // uploadPagesCreateRequest creates the UploadPages request. @@ -1024,10 +1016,10 @@ func (client *PageBlobClient) uploadPagesCreateRequest(ctx context.Context, cont req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -1038,33 +1030,29 @@ func (client *PageBlobClient) uploadPagesCreateRequest(ctx context.Context, cont if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } req.Raw().Header["Accept"] = []string{"application/xml"} - return req, req.SetBody(body, "application/octet-stream") + if err := req.SetBody(body, "application/octet-stream"); err != nil { + return nil, err + } + return req, nil } // uploadPagesHandleResponse handles the UploadPages response. 
func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (PageBlobClientUploadPagesResponse, error) { result := PageBlobClientUploadPagesResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { return PageBlobClientUploadPagesResponse{}, err } - result.LastModified = &lastModified + result.BlobSequenceNumber = &blobSequenceNumber } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) - if err != nil { - return PageBlobClientUploadPagesResponse{}, err - } - result.ContentMD5 = contentMD5 + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val } if val := resp.Header.Get("x-ms-content-crc64"); val != "" { contentCRC64, err := base64.StdEncoding.DecodeString(val) @@ -1073,21 +1061,12 @@ func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (Pa } result.ContentCRC64 = contentCRC64 } - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) if err != nil { return PageBlobClientUploadPagesResponse{}, err } - result.BlobSequenceNumber = &blobSequenceNumber - } - if val := resp.Header.Get("x-ms-client-request-id"); val != "" { - result.ClientRequestID = &val - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + result.ContentMD5 = contentMD5 } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) @@ -1096,6 +1075,15 @@ func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (Pa } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { @@ -1103,11 +1091,18 @@ func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (Pa } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUploadPagesResponse{}, err + } + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } @@ -1116,7 +1111,7 @@ func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (Pa // a URL // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - sourceURL - Specify a URL to the copy source. // - sourceRange - Bytes of source data in the specified range. The length of this range should match the ContentLength header // and x-ms-range/Range destination range header. @@ -1134,18 +1129,21 @@ func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (Pa // - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL // method. func (client *PageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParam string, options *PageBlobClientUploadPagesFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (PageBlobClientUploadPagesFromURLResponse, error) { + var err error req, err := client.uploadPagesFromURLCreateRequest(ctx, sourceURL, sourceRange, contentLength, rangeParam, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, sequenceNumberAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) if err != nil { return PageBlobClientUploadPagesFromURLResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientUploadPagesFromURLResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusCreated) { - return PageBlobClientUploadPagesFromURLResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusCreated) { + err = runtime.NewResponseError(httpResp) + return PageBlobClientUploadPagesFromURLResponse{}, err } - return client.uploadPagesFromURLHandleResponse(resp) + resp, err := client.uploadPagesFromURLHandleResponse(httpResp) + return resp, err } // uploadPagesFromURLCreateRequest creates the UploadPagesFromURL request. 
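
Note: each method body now hoists var err error, names the raw response httpResp, and assigns err before returning instead of returning expressions directly. The shape only pays off if something observes err on the way out; in the upstream generator that is a deferred tracing hook that ends the method's span with the final error. A self-contained sketch of the mechanism, with a stand-in for the tracing callback (endSpan is an illustrative name, not from this patch):

package main

import (
	"errors"
	"fmt"
)

// endSpan stands in for the deferred tracing hook; it sees the final err
// because err is a single variable assigned on every exit path.
func endSpan(err error) {
	fmt.Println("span ended, err =", err)
}

func operation(fail bool) (string, error) {
	var err error
	defer func() { endSpan(err) }()
	if fail {
		err = errors.New("response error")
		return "", err // assigned before returning, so the hook observes it
	}
	return "ok", err
}

func main() {
	operation(false) // span ended, err = <nil>
	operation(true)  // span ended, err = response error
}
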
@@ -1196,10 +1194,10 @@ func (client *PageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Contex req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { - req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { - req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)} @@ -1211,10 +1209,10 @@ func (client *PageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Contex req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { - req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)} + req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { - req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)} + req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)} @@ -1222,7 +1220,7 @@ func (client *PageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Contex if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1236,22 +1234,12 @@ func (client *PageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Contex // uploadPagesFromURLHandleResponse handles the UploadPagesFromURL response. 
func (client *PageBlobClient) uploadPagesFromURLHandleResponse(resp *http.Response) (PageBlobClientUploadPagesFromURLResponse, error) { result := PageBlobClientUploadPagesFromURLResponse{} - if val := resp.Header.Get("ETag"); val != "" { - result.ETag = (*azcore.ETag)(&val) - } - if val := resp.Header.Get("Last-Modified"); val != "" { - lastModified, err := time.Parse(time.RFC1123, val) - if err != nil { - return PageBlobClientUploadPagesFromURLResponse{}, err - } - result.LastModified = &lastModified - } - if val := resp.Header.Get("Content-MD5"); val != "" { - contentMD5, err := base64.StdEncoding.DecodeString(val) + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) if err != nil { return PageBlobClientUploadPagesFromURLResponse{}, err } - result.ContentMD5 = contentMD5 + result.BlobSequenceNumber = &blobSequenceNumber } if val := resp.Header.Get("x-ms-content-crc64"); val != "" { contentCRC64, err := base64.StdEncoding.DecodeString(val) @@ -1260,18 +1248,12 @@ func (client *PageBlobClient) uploadPagesFromURLHandleResponse(resp *http.Respon } result.ContentCRC64 = contentCRC64 } - if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { - blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) if err != nil { return PageBlobClientUploadPagesFromURLResponse{}, err } - result.BlobSequenceNumber = &blobSequenceNumber - } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val + result.ContentMD5 = contentMD5 } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) @@ -1280,6 +1262,15 @@ func (client *PageBlobClient) uploadPagesFromURLHandleResponse(resp *http.Respon } result.Date = &date } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = (*azcore.ETag)(&val) + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { isServerEncrypted, err := strconv.ParseBool(val) if err != nil { @@ -1287,11 +1278,18 @@ func (client *PageBlobClient) uploadPagesFromURLHandleResponse(resp *http.Respon } result.IsServerEncrypted = &isServerEncrypted } - if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { - result.EncryptionKeySHA256 = &val + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return PageBlobClientUploadPagesFromURLResponse{}, err + } + result.LastModified = &lastModified } - if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { - result.EncryptionScope = &val + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val } return result, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go index 386c943e..738d23c8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go +++ 
b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -266,6 +265,9 @@ type BlobClientCopyFromURLResponse struct { // ETag contains the information returned from the ETag header response. ETag *azcore.ETag + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time @@ -407,6 +409,9 @@ type BlobClientDownloadResponse struct { // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response. CopyStatusDescription *string + // CreationTime contains the information returned from the x-ms-creation-time header response. + CreationTime *time.Time + // Date contains the information returned from the Date header response. Date *time.Time @@ -650,18 +655,20 @@ type BlobClientGetPropertiesResponse struct { // BlobClientGetTagsResponse contains the response from method BlobClient.GetTags. type BlobClientGetTagsResponse struct { + // Blob tags BlobTags + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` + Date *time.Time // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } // BlobClientQueryResponse contains the response from method BlobClient.Query. @@ -1045,29 +1052,30 @@ type BlockBlobClientCommitBlockListResponse struct { // BlockBlobClientGetBlockListResponse contains the response from method BlockBlobClient.GetBlockList. type BlockBlobClientGetBlockListResponse struct { BlockList + // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. - BlobContentLength *int64 `xml:"BlobContentLength"` + BlobContentLength *int64 // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // ContentType contains the information returned from the Content-Type header response. - ContentType *string `xml:"ContentType"` + ContentType *string // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` + Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag `xml:"ETag"` + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time `xml:"LastModified"` + LastModified *time.Time // RequestID contains the information returned from the x-ms-request-id header response. 
- RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } // BlockBlobClientPutBlobFromURLResponse contains the response from method BlockBlobClient.PutBlobFromURL. @@ -1310,31 +1318,49 @@ type ContainerClientDeleteResponse struct { Version *string } +// ContainerClientFilterBlobsResponse contains the response from method ContainerClient.FilterBlobs. +type ContainerClientFilterBlobsResponse struct { + // The result of a Filter Blobs API call + FilterBlobSegment + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + // ContainerClientGetAccessPolicyResponse contains the response from method ContainerClient.GetAccessPolicy. type ContainerClientGetAccessPolicyResponse struct { // BlobPublicAccess contains the information returned from the x-ms-blob-public-access header response. - BlobPublicAccess *PublicAccessType `xml:"BlobPublicAccess"` + BlobPublicAccess *PublicAccessType // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` + Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag `xml:"ETag"` + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time `xml:"LastModified"` + LastModified *time.Time // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // a collection of signed identifiers SignedIdentifiers []*SignedIdentifier `xml:"SignedIdentifier"` // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } // ContainerClientGetAccountInfoResponse contains the response from method ContainerClient.GetAccountInfo. @@ -1412,40 +1438,44 @@ type ContainerClientGetPropertiesResponse struct { // ContainerClientListBlobFlatSegmentResponse contains the response from method ContainerClient.NewListBlobFlatSegmentPager. type ContainerClientListBlobFlatSegmentResponse struct { + // An enumeration of blobs ListBlobsFlatSegmentResponse + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // ContentType contains the information returned from the Content-Type header response. - ContentType *string `xml:"ContentType"` + ContentType *string // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` + Date *time.Time // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. 
- Version *string `xml:"Version"` + Version *string } // ContainerClientListBlobHierarchySegmentResponse contains the response from method ContainerClient.NewListBlobHierarchySegmentPager. type ContainerClientListBlobHierarchySegmentResponse struct { + // An enumeration of blobs ListBlobsHierarchySegmentResponse + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // ContentType contains the information returned from the Content-Type header response. - ContentType *string `xml:"ContentType"` + ContentType *string // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` + Date *time.Time // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } // ContainerClientReleaseLeaseResponse contains the response from method ContainerClient.ReleaseLease. @@ -1675,52 +1705,56 @@ type PageBlobClientCreateResponse struct { // PageBlobClientGetPageRangesDiffResponse contains the response from method PageBlobClient.NewGetPageRangesDiffPager. type PageBlobClientGetPageRangesDiffResponse struct { + // the list of pages PageList + // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. - BlobContentLength *int64 `xml:"BlobContentLength"` + BlobContentLength *int64 // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` + Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag `xml:"ETag"` + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time `xml:"LastModified"` + LastModified *time.Time // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } // PageBlobClientGetPageRangesResponse contains the response from method PageBlobClient.NewGetPageRangesPager. type PageBlobClientGetPageRangesResponse struct { + // the list of pages PageList + // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. - BlobContentLength *int64 `xml:"BlobContentLength"` + BlobContentLength *int64 // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` + Date *time.Time // ETag contains the information returned from the ETag header response. - ETag *azcore.ETag `xml:"ETag"` + ETag *azcore.ETag // LastModified contains the information returned from the Last-Modified header response. - LastModified *time.Time `xml:"LastModified"` + LastModified *time.Time // RequestID contains the information returned from the x-ms-request-id header response. 
- RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } // PageBlobClientResizeResponse contains the response from method PageBlobClient.Resize. @@ -1848,18 +1882,20 @@ type PageBlobClientUploadPagesResponse struct { // ServiceClientFilterBlobsResponse contains the response from method ServiceClient.FilterBlobs. type ServiceClientFilterBlobsResponse struct { + // The result of a Filter Blobs API call FilterBlobSegment + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` + Date *time.Time // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } // ServiceClientGetAccountInfoResponse contains the response from method ServiceClient.GetAccountInfo. @@ -1888,60 +1924,68 @@ type ServiceClientGetAccountInfoResponse struct { // ServiceClientGetPropertiesResponse contains the response from method ServiceClient.GetProperties. type ServiceClientGetPropertiesResponse struct { + // Storage Service Properties. StorageServiceProperties + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } // ServiceClientGetStatisticsResponse contains the response from method ServiceClient.GetStatistics. type ServiceClientGetStatisticsResponse struct { + // Stats for the storage service. StorageServiceStats + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` + Date *time.Time // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } // ServiceClientGetUserDelegationKeyResponse contains the response from method ServiceClient.GetUserDelegationKey. type ServiceClientGetUserDelegationKeyResponse struct { + // A user delegation key UserDelegationKey + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // Date contains the information returned from the Date header response. - Date *time.Time `xml:"Date"` + Date *time.Time // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. 
- Version *string `xml:"Version"` + Version *string } // ServiceClientListContainersSegmentResponse contains the response from method ServiceClient.NewListContainersSegmentPager. type ServiceClientListContainersSegmentResponse struct { + // An enumeration of containers ListContainersSegmentResponse + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. - ClientRequestID *string `xml:"ClientRequestID"` + ClientRequestID *string // RequestID contains the information returned from the x-ms-request-id header response. - RequestID *string `xml:"RequestID"` + RequestID *string // Version contains the information returned from the x-ms-version header response. - Version *string `xml:"Version"` + Version *string } // ServiceClientSetPropertiesResponse contains the response from method ServiceClient.SetProperties. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go index c54358c5..9a73b730 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go @@ -3,15 +3,15 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated import ( "context" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "io" @@ -22,21 +22,10 @@ import ( ) // ServiceClient contains the methods for the Service group. -// Don't use this type directly, use NewServiceClient() instead. +// Don't use this type directly, use a constructor function instead. type ServiceClient struct { + internal *azcore.Client endpoint string - pl runtime.Pipeline -} - -// NewServiceClient creates a new instance of ServiceClient with the specified values. -// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. -// - pl - the pipeline used for sending requests and handling responses. -func NewServiceClient(endpoint string, pl runtime.Pipeline) *ServiceClient { - client := &ServiceClient{ - endpoint: endpoint, - pl: pl, - } - return client } // FilterBlobs - The Filter Blobs operation enables callers to list blobs across all containers whose tags match a given search expression. Filter blobs searches across all containers within a storage account but can // be scoped within the expression to a single container. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - where - Filters the results to return only blobs whose tags match the specified expression. // - options - ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method.
func (client *ServiceClient) FilterBlobs(ctx context.Context, where string, options *ServiceClientFilterBlobsOptions) (ServiceClientFilterBlobsResponse, error) { + var err error req, err := client.filterBlobsCreateRequest(ctx, where, options) if err != nil { return ServiceClientFilterBlobsResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ServiceClientFilterBlobsResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ServiceClientFilterBlobsResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ServiceClientFilterBlobsResponse{}, err } - return client.filterBlobsHandleResponse(resp) + resp, err := client.filterBlobsHandleResponse(httpResp) + return resp, err } // filterBlobsCreateRequest creates the FilterBlobs request. @@ -80,8 +72,11 @@ func (client *ServiceClient) filterBlobsCreateRequest(ctx context.Context, where if options != nil && options.Maxresults != nil { reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) } + if options != nil && options.Include != nil { + reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + } req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1) - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -95,12 +90,6 @@ func (client *ServiceClient) filterBlobsHandleResponse(resp *http.Response) (Ser if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -108,6 +97,12 @@ func (client *ServiceClient) filterBlobsHandleResponse(resp *http.Response) (Ser } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } if err := runtime.UnmarshalAsXML(resp, &result.FilterBlobSegment); err != nil { return ServiceClientFilterBlobsResponse{}, err } @@ -117,21 +112,24 @@ func (client *ServiceClient) filterBlobsHandleResponse(resp *http.Response) (Ser // GetAccountInfo - Returns the sku name and account kind // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method. 
func (client *ServiceClient) GetAccountInfo(ctx context.Context, options *ServiceClientGetAccountInfoOptions) (ServiceClientGetAccountInfoResponse, error) { + var err error req, err := client.getAccountInfoCreateRequest(ctx, options) if err != nil { return ServiceClientGetAccountInfoResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ServiceClientGetAccountInfoResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ServiceClientGetAccountInfoResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ServiceClientGetAccountInfoResponse{}, err } - return client.getAccountInfoHandleResponse(resp) + resp, err := client.getAccountInfoHandleResponse(httpResp) + return resp, err } // getAccountInfoCreateRequest creates the GetAccountInfo request. @@ -144,7 +142,7 @@ func (client *ServiceClient) getAccountInfoCreateRequest(ctx context.Context, op reqQP.Set("restype", "account") reqQP.Set("comp", "properties") req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } @@ -152,15 +150,12 @@ func (client *ServiceClient) getAccountInfoCreateRequest(ctx context.Context, op // getAccountInfoHandleResponse handles the GetAccountInfo response. func (client *ServiceClient) getAccountInfoHandleResponse(resp *http.Response) (ServiceClientGetAccountInfoResponse, error) { result := ServiceClientGetAccountInfoResponse{} + if val := resp.Header.Get("x-ms-account-kind"); val != "" { + result.AccountKind = (*AccountKind)(&val) + } if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -168,12 +163,6 @@ func (client *ServiceClient) getAccountInfoHandleResponse(resp *http.Response) ( } result.Date = &date } - if val := resp.Header.Get("x-ms-sku-name"); val != "" { - result.SKUName = (*SKUName)(&val) - } - if val := resp.Header.Get("x-ms-account-kind"); val != "" { - result.AccountKind = (*AccountKind)(&val) - } if val := resp.Header.Get("x-ms-is-hns-enabled"); val != "" { isHierarchicalNamespaceEnabled, err := strconv.ParseBool(val) if err != nil { @@ -181,6 +170,15 @@ func (client *ServiceClient) getAccountInfoHandleResponse(resp *http.Response) ( } result.IsHierarchicalNamespaceEnabled = &isHierarchicalNamespaceEnabled } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-sku-name"); val != "" { + result.SKUName = (*SKUName)(&val) + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } return result, nil } @@ -188,21 +186,24 @@ func (client *ServiceClient) getAccountInfoHandleResponse(resp *http.Response) ( // CORS (Cross-Origin Resource Sharing) rules. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method. 
func (client *ServiceClient) GetProperties(ctx context.Context, options *ServiceClientGetPropertiesOptions) (ServiceClientGetPropertiesResponse, error) { + var err error req, err := client.getPropertiesCreateRequest(ctx, options) if err != nil { return ServiceClientGetPropertiesResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ServiceClientGetPropertiesResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ServiceClientGetPropertiesResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ServiceClientGetPropertiesResponse{}, err } - return client.getPropertiesHandleResponse(resp) + resp, err := client.getPropertiesHandleResponse(httpResp) + return resp, err } // getPropertiesCreateRequest creates the GetProperties request. @@ -218,7 +219,7 @@ func (client *ServiceClient) getPropertiesCreateRequest(ctx context.Context, opt reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -248,21 +249,24 @@ func (client *ServiceClient) getPropertiesHandleResponse(resp *http.Response) (S // location endpoint when read-access geo-redundant replication is enabled for the storage account. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method. func (client *ServiceClient) GetStatistics(ctx context.Context, options *ServiceClientGetStatisticsOptions) (ServiceClientGetStatisticsResponse, error) { + var err error req, err := client.getStatisticsCreateRequest(ctx, options) if err != nil { return ServiceClientGetStatisticsResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ServiceClientGetStatisticsResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ServiceClientGetStatisticsResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ServiceClientGetStatisticsResponse{}, err } - return client.getStatisticsHandleResponse(resp) + resp, err := client.getStatisticsHandleResponse(httpResp) + return resp, err } // getStatisticsCreateRequest creates the GetStatistics request. 
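Every Service operation in this file is regenerated to the same shape: the raw runtime.Pipeline field gives way to an embedded *azcore.Client, the pinned "2020-10-02" version header becomes the shared ServiceVersion constant, and failures are funneled through runtime.NewResponseError. A minimal sketch of that common shape; doOperation, createRequest, handleResponse, and OperationResponse are hypothetical stand-ins, not SDK names:

func (client *ServiceClient) doOperation(ctx context.Context) (OperationResponse, error) {
	// Build the request: URL, query parameters, and headers such as
	// req.Raw().Header["x-ms-version"] = []string{ServiceVersion}.
	req, err := client.createRequest(ctx)
	if err != nil {
		return OperationResponse{}, err
	}
	// The pipeline is now reached through the embedded *azcore.Client.
	httpResp, err := client.internal.Pipeline().Do(req)
	if err != nil {
		return OperationResponse{}, err
	}
	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
		// Non-success responses surface as *azcore.ResponseError.
		return OperationResponse{}, runtime.NewResponseError(httpResp)
	}
	// Copy the interesting response headers and body into the typed result.
	return client.handleResponse(httpResp)
}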
@@ -278,7 +282,7 @@ func (client *ServiceClient) getStatisticsCreateRequest(ctx context.Context, opt reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -292,12 +296,6 @@ func (client *ServiceClient) getStatisticsHandleResponse(resp *http.Response) (S if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -305,6 +303,12 @@ func (client *ServiceClient) getStatisticsHandleResponse(resp *http.Response) (S } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } if err := runtime.UnmarshalAsXML(resp, &result.StorageServiceStats); err != nil { return ServiceClientGetStatisticsResponse{}, err } @@ -315,23 +319,26 @@ func (client *ServiceClient) getStatisticsHandleResponse(resp *http.Response) (S // bearer token authentication. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - keyInfo - Key information // - options - ServiceClientGetUserDelegationKeyOptions contains the optional parameters for the ServiceClient.GetUserDelegationKey // method. func (client *ServiceClient) GetUserDelegationKey(ctx context.Context, keyInfo KeyInfo, options *ServiceClientGetUserDelegationKeyOptions) (ServiceClientGetUserDelegationKeyResponse, error) { + var err error req, err := client.getUserDelegationKeyCreateRequest(ctx, keyInfo, options) if err != nil { return ServiceClientGetUserDelegationKeyResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ServiceClientGetUserDelegationKeyResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ServiceClientGetUserDelegationKeyResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusOK) { + err = runtime.NewResponseError(httpResp) + return ServiceClientGetUserDelegationKeyResponse{}, err } - return client.getUserDelegationKeyHandleResponse(resp) + resp, err := client.getUserDelegationKeyHandleResponse(httpResp) + return resp, err } // getUserDelegationKeyCreateRequest creates the GetUserDelegationKey request. 
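GetUserDelegationKey is the building block for user-delegation SAS. A rough usage sketch through the generated client; the KeyInfo field names come from the generated models, and the UTC time layout is an assumption about what the service expects:

func fetchDelegationKey(ctx context.Context, client *generated.ServiceClient) (generated.ServiceClientGetUserDelegationKeyResponse, error) {
	const layout = "2006-01-02T15:04:05Z" // assumed service time format
	start := time.Now().UTC().Format(layout)
	expiry := time.Now().UTC().Add(time.Hour).Format(layout)
	// KeyInfo bounds the validity window of the returned key.
	info := generated.KeyInfo{Start: &start, Expiry: &expiry}
	return client.GetUserDelegationKey(ctx, info, nil)
}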
@@ -347,12 +354,15 @@ func (client *ServiceClient) getUserDelegationKeyCreateRequest(ctx context.Conte reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } req.Raw().Header["Accept"] = []string{"application/xml"} - return req, runtime.MarshalAsXML(req, keyInfo) + if err := runtime.MarshalAsXML(req, keyInfo); err != nil { + return nil, err + } + return req, nil } // getUserDelegationKeyHandleResponse handles the GetUserDelegationKey response. @@ -361,12 +371,6 @@ func (client *ServiceClient) getUserDelegationKeyHandleResponse(resp *http.Respo if val := resp.Header.Get("x-ms-client-request-id"); val != "" { result.ClientRequestID = &val } - if val := resp.Header.Get("x-ms-request-id"); val != "" { - result.RequestID = &val - } - if val := resp.Header.Get("x-ms-version"); val != "" { - result.Version = &val - } if val := resp.Header.Get("Date"); val != "" { date, err := time.Parse(time.RFC1123, val) if err != nil { @@ -374,6 +378,12 @@ func (client *ServiceClient) getUserDelegationKeyHandleResponse(resp *http.Respo } result.Date = &date } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } if err := runtime.UnmarshalAsXML(resp, &result.UserDelegationKey); err != nil { return ServiceClientGetUserDelegationKeyResponse{}, err } @@ -383,7 +393,7 @@ func (client *ServiceClient) getUserDelegationKeyHandleResponse(resp *http.Respo // NewListContainersSegmentPager - The List Containers Segment operation returns a list of the containers under the specified // account // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - ServiceClientListContainersSegmentOptions contains the optional parameters for the ServiceClient.NewListContainersSegmentPager // method. // @@ -411,7 +421,7 @@ func (client *ServiceClient) ListContainersSegmentCreateRequest(ctx context.Cont reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -441,22 +451,25 @@ func (client *ServiceClient) ListContainersSegmentHandleResponse(resp *http.Resp // and CORS (Cross-Origin Resource Sharing) rules // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - storageServiceProperties - The StorageService properties. // - options - ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method. 
func (client *ServiceClient) SetProperties(ctx context.Context, storageServiceProperties StorageServiceProperties, options *ServiceClientSetPropertiesOptions) (ServiceClientSetPropertiesResponse, error) { + var err error req, err := client.setPropertiesCreateRequest(ctx, storageServiceProperties, options) if err != nil { return ServiceClientSetPropertiesResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ServiceClientSetPropertiesResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusAccepted) { - return ServiceClientSetPropertiesResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return ServiceClientSetPropertiesResponse{}, err } - return client.setPropertiesHandleResponse(resp) + resp, err := client.setPropertiesHandleResponse(httpResp) + return resp, err } // setPropertiesCreateRequest creates the SetProperties request. @@ -472,12 +485,15 @@ func (client *ServiceClient) setPropertiesCreateRequest(ctx context.Context, sto reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } req.Raw().Header["Accept"] = []string{"application/xml"} - return req, runtime.MarshalAsXML(req, storageServiceProperties) + if err := runtime.MarshalAsXML(req, storageServiceProperties); err != nil { + return nil, err + } + return req, nil } // setPropertiesHandleResponse handles the SetProperties response. @@ -498,25 +514,28 @@ func (client *ServiceClient) setPropertiesHandleResponse(resp *http.Response) (S // SubmitBatch - The Batch operation allows multiple API calls to be embedded into a single HTTP request. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - contentLength - The length of the request. // - multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. Example header // value: multipart/mixed; boundary=batch_ // - body - Initial data // - options - ServiceClientSubmitBatchOptions contains the optional parameters for the ServiceClient.SubmitBatch method. func (client *ServiceClient) SubmitBatch(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ServiceClientSubmitBatchOptions) (ServiceClientSubmitBatchResponse, error) { + var err error req, err := client.submitBatchCreateRequest(ctx, contentLength, multipartContentType, body, options) if err != nil { return ServiceClientSubmitBatchResponse{}, err } - resp, err := client.pl.Do(req) + httpResp, err := client.internal.Pipeline().Do(req) if err != nil { return ServiceClientSubmitBatchResponse{}, err } - if !runtime.HasStatusCode(resp, http.StatusOK) { - return ServiceClientSubmitBatchResponse{}, runtime.NewResponseError(resp) + if !runtime.HasStatusCode(httpResp, http.StatusAccepted) { + err = runtime.NewResponseError(httpResp) + return ServiceClientSubmitBatchResponse{}, err } - return client.submitBatchHandleResponse(resp) + resp, err := client.submitBatchHandleResponse(httpResp) + return resp, err } // submitBatchCreateRequest creates the SubmitBatch request. 
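Two behavioral fixes land in the SubmitBatch hunks below: the expected status code becomes http.StatusAccepted, and req.SetBody now tags the payload with the caller's multipart content type instead of a hard-coded "application/xml". That matters because a batch body is multipart/mixed and its Content-Type must carry the boundary. A hedged sketch of the corrected body handling; setBatchBody is illustrative, not an SDK function:

func setBatchBody(req *policy.Request, body io.ReadSeekCloser, boundary string) error {
	// The Content-Type must match the multipart payload, boundary included;
	// labeling it "application/xml" mislabels the body.
	return req.SetBody(body, "multipart/mixed; boundary="+boundary)
}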
@@ -534,12 +553,15 @@ func (client *ServiceClient) submitBatchCreateRequest(ctx context.Context, conte runtime.SkipBodyDownload(req) req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} req.Raw().Header["Content-Type"] = []string{multipartContentType} - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } req.Raw().Header["Accept"] = []string{"application/xml"} - return req, req.SetBody(body, "application/xml") + if err := req.SetBody(body, multipartContentType); err != nil { + return nil, err + } + return req, nil } // submitBatchHandleResponse handles the SubmitBatch response. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go index 4b4d51aa..58665032 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -15,29 +14,29 @@ import ( ) const ( - rfc1123JSON = `"` + time.RFC1123 + `"` + dateTimeRFC1123JSON = `"` + time.RFC1123 + `"` ) -type timeRFC1123 time.Time +type dateTimeRFC1123 time.Time -func (t timeRFC1123) MarshalJSON() ([]byte, error) { - b := []byte(time.Time(t).Format(rfc1123JSON)) +func (t dateTimeRFC1123) MarshalJSON() ([]byte, error) { + b := []byte(time.Time(t).Format(dateTimeRFC1123JSON)) return b, nil } -func (t timeRFC1123) MarshalText() ([]byte, error) { +func (t dateTimeRFC1123) MarshalText() ([]byte, error) { b := []byte(time.Time(t).Format(time.RFC1123)) return b, nil } -func (t *timeRFC1123) UnmarshalJSON(data []byte) error { - p, err := time.Parse(rfc1123JSON, strings.ToUpper(string(data))) - *t = timeRFC1123(p) +func (t *dateTimeRFC1123) UnmarshalJSON(data []byte) error { + p, err := time.Parse(dateTimeRFC1123JSON, strings.ToUpper(string(data))) + *t = dateTimeRFC1123(p) return err } -func (t *timeRFC1123) UnmarshalText(data []byte) error { +func (t *dateTimeRFC1123) UnmarshalText(data []byte) error { p, err := time.Parse(time.RFC1123, string(data)) - *t = timeRFC1123(p) + *t = dateTimeRFC1123(p) return err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go index 1ce9d621..82b37013 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go @@ -3,9 +3,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. 
+// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated @@ -15,45 +14,45 @@ import ( "time" ) -const ( - utcLayoutJSON = `"2006-01-02T15:04:05.999999999"` - utcLayout = "2006-01-02T15:04:05.999999999" - rfc3339JSON = `"` + time.RFC3339Nano + `"` -) - // Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. var tzOffsetRegex = regexp.MustCompile(`(Z|z|\+|-)(\d+:\d+)*"*$`) -type timeRFC3339 time.Time +const ( + utcDateTimeJSON = `"2006-01-02T15:04:05.999999999"` + utcDateTime = "2006-01-02T15:04:05.999999999" + dateTimeJSON = `"` + time.RFC3339Nano + `"` +) + +type dateTimeRFC3339 time.Time -func (t timeRFC3339) MarshalJSON() (json []byte, err error) { +func (t dateTimeRFC3339) MarshalJSON() ([]byte, error) { tt := time.Time(t) return tt.MarshalJSON() } -func (t timeRFC3339) MarshalText() (text []byte, err error) { +func (t dateTimeRFC3339) MarshalText() ([]byte, error) { tt := time.Time(t) return tt.MarshalText() } -func (t *timeRFC3339) UnmarshalJSON(data []byte) error { - layout := utcLayoutJSON +func (t *dateTimeRFC3339) UnmarshalJSON(data []byte) error { + layout := utcDateTimeJSON if tzOffsetRegex.Match(data) { - layout = rfc3339JSON + layout = dateTimeJSON } return t.Parse(layout, string(data)) } -func (t *timeRFC3339) UnmarshalText(data []byte) (err error) { - layout := utcLayout +func (t *dateTimeRFC3339) UnmarshalText(data []byte) error { + layout := utcDateTime if tzOffsetRegex.Match(data) { layout = time.RFC3339Nano } return t.Parse(layout, string(data)) } -func (t *timeRFC3339) Parse(layout, value string) error { +func (t *dateTimeRFC3339) Parse(layout, value string) error { p, err := time.Parse(layout, strings.ToUpper(value)) - *t = timeRFC3339(p) + *t = dateTimeRFC3339(p) return err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go index 144ea18e..1bd0e4de 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go @@ -3,14 +3,16 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -// Code generated by Microsoft (R) AutoRest Code Generator. +// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT. // Changes may cause incorrect behavior and will be lost if the code is regenerated. -// DO NOT EDIT. package generated import ( "encoding/xml" + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "io" "strings" ) @@ -19,22 +21,32 @@ type additionalProperties map[string]*string // UnmarshalXML implements the xml.Unmarshaler interface for additionalProperties. 
func (ap *additionalProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { tokName := "" - for t, err := d.Token(); err == nil; t, err = d.Token() { + tokValue := "" + for { + t, err := d.Token() + if errors.Is(err, io.EOF) { + break + } else if err != nil { + return err + } switch tt := t.(type) { case xml.StartElement: tokName = strings.ToLower(tt.Name.Local) - break + tokValue = "" case xml.CharData: + if tokName == "" { + continue + } + tokValue = string(tt) + case xml.EndElement: if tokName == "" { continue } if *ap == nil { *ap = additionalProperties{} } - s := string(tt) - (*ap)[tokName] = &s + (*ap)[tokName] = to.Ptr(tokValue) tokName = "" - break } } return nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go index ec5541bf..c1b3a3d2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go @@ -11,10 +11,15 @@ import ( "errors" ) +const ( + DefaultConcurrency = 5 +) + // BatchTransferOptions identifies options used by doBatchTransfer. type BatchTransferOptions struct { TransferSize int64 ChunkSize int64 + NumChunks uint16 Concurrency uint16 Operation func(ctx context.Context, offset int64, chunkSize int64) error OperationName string @@ -28,13 +33,12 @@ func DoBatchTransfer(ctx context.Context, o *BatchTransferOptions) error { } if o.Concurrency == 0 { - o.Concurrency = 5 // default concurrency + o.Concurrency = DefaultConcurrency // default concurrency } // Prepare and do parallel operations. - numChunks := uint16(((o.TransferSize - 1) / o.ChunkSize) + 1) operationChannel := make(chan func() error, o.Concurrency) // Create the channel that release 'concurrency' goroutines concurrently - operationResponseChannel := make(chan error, numChunks) // Holds each response + operationResponseChannel := make(chan error, o.NumChunks) // Holds each response ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -50,10 +54,10 @@ func DoBatchTransfer(ctx context.Context, o *BatchTransferOptions) error { } // Add each chunk's operation to the channel. - for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ { + for chunkNum := uint16(0); chunkNum < o.NumChunks; chunkNum++ { curChunkSize := o.ChunkSize - if chunkNum == numChunks-1 { // Last chunk + if chunkNum == o.NumChunks-1 { // Last chunk curChunkSize = o.TransferSize - (int64(chunkNum) * o.ChunkSize) // Remove size of all transferred chunks from total } offset := int64(chunkNum) * o.ChunkSize @@ -65,7 +69,7 @@ func DoBatchTransfer(ctx context.Context, o *BatchTransferOptions) error { // Wait for the operations to complete. 
var firstErr error = nil - for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ { + for chunkNum := uint16(0); chunkNum < o.NumChunks; chunkNum++ { responseError := <-operationResponseChannel // record the first error (the original error which should cause the other chunks to fail with canceled context) if responseError != nil && firstErr == nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/buffer_manager.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/buffer_manager.go new file mode 100644 index 00000000..e3aa4a48 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/buffer_manager.go @@ -0,0 +1,70 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package shared + +type BufferManager[T ~[]byte] interface { + // Acquire returns the channel that contains the pool of buffers. + Acquire() <-chan T + + // Release releases the buffer back to the pool for reuse/cleanup. + Release(T) + + // Grow grows the number of buffers, up to the predefined max. + // It returns the total number of buffers or an error. + // No error is returned if the number of buffers has reached max. + // This is called only from the reading goroutine. + Grow() (int, error) + + // Free cleans up all buffers. + Free() +} + +// mmbPool implements the bufferManager interface. +// it uses anonymous memory mapped files for buffers. +// don't use this type directly, use newMMBPool() instead. +type mmbPool struct { + buffers chan Mmb + count int + max int + size int64 +} + +func NewMMBPool(maxBuffers int, bufferSize int64) BufferManager[Mmb] { + return &mmbPool{ + buffers: make(chan Mmb, maxBuffers), + max: maxBuffers, + size: bufferSize, + } +} + +func (pool *mmbPool) Acquire() <-chan Mmb { + return pool.buffers +} + +func (pool *mmbPool) Grow() (int, error) { + if pool.count < pool.max { + buffer, err := NewMMB(pool.size) + if err != nil { + return 0, err + } + pool.buffers <- buffer + pool.count++ + } + return pool.count, nil +} + +func (pool *mmbPool) Release(buffer Mmb) { + pool.buffers <- buffer +} + +func (pool *mmbPool) Free() { + for i := 0; i < pool.count; i++ { + buffer := <-pool.buffers + buffer.Delete() + } + pool.count = 0 +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/challenge_policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/challenge_policy.go new file mode 100644 index 00000000..e7c8e921 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/challenge_policy.go @@ -0,0 +1,113 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package shared + +import ( + "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "strings" +) + +type storageAuthorizer struct { + scopes []string + tenantID string +} + +func NewStorageChallengePolicy(cred azcore.TokenCredential) policy.Policy { + s := storageAuthorizer{scopes: []string{TokenScope}} + return runtime.NewBearerTokenPolicy(cred, []string{TokenScope}, &policy.BearerTokenOptions{ + AuthorizationHandler: policy.AuthorizationHandler{ + OnRequest: s.onRequest, + OnChallenge: s.onChallenge, + }, + }) +} + +func (s *storageAuthorizer) onRequest(req *policy.Request, authNZ func(policy.TokenRequestOptions) error) error { + return authNZ(policy.TokenRequestOptions{Scopes: s.scopes}) +} + +func (s *storageAuthorizer) onChallenge(req *policy.Request, resp *http.Response, authNZ func(policy.TokenRequestOptions) error) error { + // parse the challenge + err := s.parseChallenge(resp) + if err != nil { + return err + } + // TODO: Set tenantID when policy.TokenRequestOptions supports it. https://github.com/Azure/azure-sdk-for-go/issues/19841 + return authNZ(policy.TokenRequestOptions{Scopes: s.scopes}) +} + +type challengePolicyError struct { + err error +} + +func (c *challengePolicyError) Error() string { + return c.err.Error() +} + +func (*challengePolicyError) NonRetriable() { + // marker method +} + +func (c *challengePolicyError) Unwrap() error { + return c.err +} + +// parses Tenant ID from auth challenge +// https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000/oauth2/authorize +func parseTenant(url string) string { + if url == "" { + return "" + } + parts := strings.Split(url, "/") + if len(parts) >= 3 { + tenant := parts[3] + tenant = strings.ReplaceAll(tenant, ",", "") + return tenant + } else { + return "" + } +} + +func (s *storageAuthorizer) parseChallenge(resp *http.Response) error { + authHeader := resp.Header.Get("WWW-Authenticate") + if authHeader == "" { + return &challengePolicyError{err: errors.New("response has no WWW-Authenticate header for challenge authentication")} + } + + // Strip down to auth and resource + // Format is "Bearer authorization_uri=\"\" resource_id=\"\"" + authHeader = strings.ReplaceAll(authHeader, "Bearer ", "") + + parts := strings.Split(authHeader, " ") + + vals := map[string]string{} + for _, part := range parts { + subParts := strings.Split(part, "=") + if len(subParts) == 2 { + stripped := strings.ReplaceAll(subParts[1], "\"", "") + stripped = strings.TrimSuffix(stripped, ",") + vals[subParts[0]] = stripped + } + } + + s.tenantID = parseTenant(vals["authorization_uri"]) + + scope := vals["resource_id"] + if scope == "" { + return &challengePolicyError{err: errors.New("could not find a valid resource in the WWW-Authenticate header")} + } + + if !strings.HasSuffix(scope, "/.default") { + scope += "/.default" + } + s.scopes = []string{scope} + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/mmf_unix.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_unix.go similarity index 73% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/mmf_unix.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_unix.go index dcccc37c..cdcadf31 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/mmf_unix.go +++ 
b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_unix.go @@ -1,11 +1,11 @@ -//go:build go1.18 && (linux || darwin || freebsd || openbsd || netbsd || solaris) +//go:build go1.18 && (linux || darwin || dragonfly || freebsd || openbsd || netbsd || solaris || aix) // +build go1.18 -// +build linux darwin freebsd openbsd netbsd solaris +// +build linux darwin dragonfly freebsd openbsd netbsd solaris aix // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -package blockblob +package shared import ( "fmt" @@ -14,20 +14,20 @@ import ( ) // mmb is a memory mapped buffer -type mmb []byte +type Mmb []byte // newMMB creates a new memory mapped buffer with the specified size -func newMMB(size int64) (mmb, error) { +func NewMMB(size int64) (Mmb, error) { prot, flags := syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_ANON|syscall.MAP_PRIVATE addr, err := syscall.Mmap(-1, 0, int(size), prot, flags) if err != nil { return nil, os.NewSyscallError("Mmap", err) } - return mmb(addr), nil + return Mmb(addr), nil } // delete cleans up the memory mapped buffer -func (m *mmb) delete() { +func (m *Mmb) Delete() { err := syscall.Munmap(*m) *m = nil if err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/mmf_windows.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_windows.go similarity index 78% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/mmf_windows.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_windows.go index 2acef3a7..ef9fdc2a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/mmf_windows.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_windows.go @@ -4,7 +4,7 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -package blockblob +package shared import ( "fmt" @@ -14,11 +14,11 @@ import ( "unsafe" ) -// mmb is a memory mapped buffer -type mmb []byte +// Mmb is a memory mapped buffer +type Mmb []byte -// newMMB creates a new memory mapped buffer with the specified size -func newMMB(size int64) (mmb, error) { +// NewMMB creates a new memory mapped buffer with the specified size +func NewMMB(size int64) (Mmb, error) { const InvalidHandleValue = ^uintptr(0) // -1 prot, access := uint32(syscall.PAGE_READWRITE), uint32(syscall.FILE_MAP_WRITE) @@ -26,14 +26,16 @@ func newMMB(size int64) (mmb, error) { if err != nil { return nil, os.NewSyscallError("CreateFileMapping", err) } - defer syscall.CloseHandle(hMMF) + defer func() { + _ = syscall.CloseHandle(hMMF) + }() addr, err := syscall.MapViewOfFile(hMMF, access, 0, 0, uintptr(size)) if err != nil { return nil, os.NewSyscallError("MapViewOfFile", err) } - m := mmb{} + m := Mmb{} h := (*reflect.SliceHeader)(unsafe.Pointer(&m)) h.Data = addr h.Len = int(size) @@ -41,10 +43,10 @@ func newMMB(size int64) (mmb, error) { return m, nil } -// delete cleans up the memory mapped buffer -func (m *mmb) delete() { +// Delete cleans up the memory mapped buffer +func (m *Mmb) Delete() { addr := uintptr(unsafe.Pointer(&(([]byte)(*m)[0]))) - *m = mmb{} + *m = Mmb{} err := syscall.UnmapViewOfFile(addr) if err != nil { // if we get here, there is likely memory corruption. 
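The new shared.BufferManager and these relocated Mmb helpers form one mechanism: NewMMBPool hands out anonymous memory-mapped buffers over a channel so upload chunking can recycle large buffers instead of reallocating them. A rough lifecycle sketch using only the interface above; the fill callback is hypothetical, and the real caller is the block blob chunk writer:

func uploadWithPool(fill func(shared.Mmb) error) error {
	pool := shared.NewMMBPool(4, 8*1024*1024) // at most 4 buffers of 8 MiB
	defer pool.Free()                         // unmaps every buffer on exit

	if _, err := pool.Grow(); err != nil { // allocate the first buffer
		return err
	}
	b := <-pool.Acquire() // take a free buffer from the channel
	if err := fill(b); err != nil {
		return err
	}
	pool.Release(b) // return it to the pool for the next chunk
	return nil
}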
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go index 7751781d..c131facf 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go @@ -38,10 +38,21 @@ const ( HeaderIfNoneMatch = "If-None-Match" HeaderIfUnmodifiedSince = "If-Unmodified-Since" HeaderRange = "Range" + HeaderXmsVersion = "x-ms-version" + HeaderXmsRequestID = "x-ms-request-id" ) const crc64Polynomial uint64 = 0x9A6C9329AC4BC9B5 +const ( + AppendBlobClient = "azblob/appendblob.Client" + BlobClient = "azblob/blob.Client" + BlockBlobClient = "azblob/blockblob.Client" + ContainerClient = "azblob/container.Client" + PageBlobClient = "azblob/pageblob.Client" + ServiceClient = "azblob/service.Client" +) + var CRC64Table = crc64.MakeTable(crc64Polynomial) // CopyOptions returns a zero-value T if opts is nil. @@ -85,22 +96,6 @@ func ParseConnectionString(connectionString string) (ParsedConnectionString, err connStrMap[parts[0]] = parts[1] } - accountName, ok := connStrMap["AccountName"] - if !ok { - return ParsedConnectionString{}, errors.New("connection string missing AccountName") - } - - accountKey, ok := connStrMap["AccountKey"] - if !ok { - sharedAccessSignature, ok := connStrMap["SharedAccessSignature"] - if !ok { - return ParsedConnectionString{}, errors.New("connection string missing AccountKey and SharedAccessSignature") - } - return ParsedConnectionString{ - ServiceURL: fmt.Sprintf("%v://%v.blob.%v/?%v", defaultScheme, accountName, defaultSuffix, sharedAccessSignature), - }, nil - } - protocol, ok := connStrMap["DefaultEndpointsProtocol"] if !ok { protocol = defaultScheme @@ -111,26 +106,44 @@ func ParseConnectionString(connectionString string) (ParsedConnectionString, err suffix = defaultSuffix } - if blobEndpoint, ok := connStrMap["BlobEndpoint"]; ok { + blobEndpoint, has_blobEndpoint := connStrMap["BlobEndpoint"] + accountName, has_accountName := connStrMap["AccountName"] + + var serviceURL string + if has_blobEndpoint { + serviceURL = blobEndpoint + } else if has_accountName { + serviceURL = fmt.Sprintf("%v://%v.blob.%v", protocol, accountName, suffix) + } else { + return ParsedConnectionString{}, errors.New("connection string needs either AccountName or BlobEndpoint") + } + + if !strings.HasSuffix(serviceURL, "/") { + // add a trailing slash to be consistent with the portal + serviceURL += "/" + } + + accountKey, has_accountKey := connStrMap["AccountKey"] + sharedAccessSignature, has_sharedAccessSignature := connStrMap["SharedAccessSignature"] + + if has_accountName && has_accountKey { return ParsedConnectionString{ - ServiceURL: blobEndpoint, + ServiceURL: serviceURL, AccountName: accountName, AccountKey: accountKey, }, nil + } else if has_sharedAccessSignature { + return ParsedConnectionString{ + ServiceURL: fmt.Sprintf("%v?%v", serviceURL, sharedAccessSignature), + }, nil + } else { + return ParsedConnectionString{}, errors.New("connection string needs either AccountKey or SharedAccessSignature") } - return ParsedConnectionString{ - ServiceURL: fmt.Sprintf("%v://%v.blob.%v", protocol, accountName, suffix), - AccountName: accountName, - AccountKey: accountKey, - }, nil } // SerializeBlobTags converts tags to generated.BlobTags func SerializeBlobTags(tagsMap map[string]string) *generated.BlobTags { - if tagsMap == nil { - return nil - } blobTagSet := 
make([]*generated.BlobTag, 0) for key, val := range tagsMap { newKey, newVal := key, val @@ -140,7 +153,7 @@ func SerializeBlobTags(tagsMap map[string]string) *generated.BlobTags { } func SerializeBlobTagsToStrPtr(tagsMap map[string]string) *string { - if tagsMap == nil { + if len(tagsMap) == 0 { return nil } tags := make([]string, 0) @@ -241,3 +254,27 @@ func IsIPEndpointStyle(host string) bool { } return net.ParseIP(host) != nil } + +// ReadAtLeast reads from r into buf until it has read at least min bytes. +// It returns the number of bytes copied and an error. +// The EOF error is returned if no bytes were read or +// EOF happened after reading fewer than min bytes. +// If min is greater than the length of buf, ReadAtLeast returns ErrShortBuffer. +// On return, n >= min if and only if err == nil. +// If r returns an error having read at least min bytes, the error is dropped. +// This method is same as io.ReadAtLeast except that it does not +// return io.ErrUnexpectedEOF when fewer than min bytes are read. +func ReadAtLeast(r io.Reader, buf []byte, min int) (n int, err error) { + if len(buf) < min { + return 0, io.ErrShortBuffer + } + for n < min && err == nil { + var nn int + nn, err = r.Read(buf[n:]) + n += nn + } + if n >= min { + err = nil + } + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/log.go index a090394a..4d26992d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/log.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/log.go @@ -3,9 +3,14 @@ package azblob -import "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" +import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" +) const ( // EventUpload is used for logging events related to upload operation. EventUpload = exported.EventUpload + + // EventSubmitBatch is used for logging events related to submit blob batch operation. + EventSubmitBatch = exported.EventSubmitBatch ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go index 31cd951f..14e90a1f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go @@ -8,6 +8,7 @@ package pageblob import ( "context" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" "io" "net/http" "net/url" @@ -25,9 +26,7 @@ import ( ) // ClientOptions contains the optional parameters when creating a Client. 
-type ClientOptions struct { - azcore.ClientOptions -} +type ClientOptions base.ClientOptions // Client represents a client to an Azure Storage page blob; type Client base.CompositeClient[generated.BlobClient, generated.PageBlobClient] @@ -37,12 +36,15 @@ type Client base.CompositeClient[generated.BlobClient, generated.PageBlobClient] // - cred - an Azure AD credential, typically obtained via the azidentity module // - options - client options; pass nil to accept the default values func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil) + authPolicy := shared.NewStorageChallengePolicy(cred) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewPageBlobClient(blobURL, pl, nil)), nil + azClient, err := azcore.NewClient(shared.PageBlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewPageBlobClient(blobURL, azClient, nil)), nil } // NewClientWithNoCredential creates an instance of Client with the specified values. @@ -51,9 +53,12 @@ func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptio // - options - client options; pass nil to accept the default values func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) { conOptions := shared.GetClientOptions(options) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) - return (*Client)(base.NewPageBlobClient(blobURL, pl, nil)), nil + azClient, err := azcore.NewClient(shared.PageBlobClient, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewPageBlobClient(blobURL, azClient, nil)), nil } // NewClientWithSharedKeyCredential creates an instance of Client with the specified values. @@ -63,10 +68,13 @@ func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCredential, options *ClientOptions) (*Client, error) { authPolicy := exported.NewSharedKeyCredPolicy(cred) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewPageBlobClient(blobURL, pl, cred)), nil + azClient, err := azcore.NewClient(shared.PageBlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewPageBlobClient(blobURL, azClient, cred)), nil } // NewClientFromConnectionString creates an instance of Client with the specified values. 
@@ -121,7 +129,7 @@ func (pb *Client) WithSnapshot(snapshot string) (*Client, error) {
	}
	p.Snapshot = snapshot

-	return (*Client)(base.NewPageBlobClient(p.String(), pb.generated().Pipeline(), pb.sharedKey())), nil
+	return (*Client)(base.NewPageBlobClient(p.String(), pb.generated().InternalClient(), pb.sharedKey())), nil
}

// WithVersionID creates a new PageBlobURL object identical to the source but with the specified version ID.
@@ -133,7 +141,7 @@ func (pb *Client) WithVersionID(versionID string) (*Client, error) {
	}
	p.VersionID = versionID

-	return (*Client)(base.NewPageBlobClient(p.String(), pb.generated().Pipeline(), pb.sharedKey())), nil
+	return (*Client)(base.NewPageBlobClient(p.String(), pb.generated().InternalClient(), pb.sharedKey())), nil
}

// Create creates a page blob of the specified length. Call PutPage to upload data to a page blob.
@@ -230,7 +238,7 @@ func (pb *Client) NewGetPageRangesPager(o *GetPageRangesOptions) *runtime.Pager[
			if err != nil {
				return GetPageRangesResponse{}, err
			}
-			resp, err := pb.generated().Pipeline().Do(req)
+			resp, err := pb.generated().InternalClient().Pipeline().Do(req)
			if err != nil {
				return GetPageRangesResponse{}, err
			}
@@ -263,7 +271,7 @@ func (pb *Client) NewGetPageRangesDiffPager(o *GetPageRangesDiffOptions) *runtim
			if err != nil {
				return GetPageRangesDiffResponse{}, err
			}
-			resp, err := pb.generated().Pipeline().Do(req)
+			resp, err := pb.generated().InternalClient().Pipeline().Do(req)
			if err != nil {
				return GetPageRangesDiffResponse{}, err
			}
@@ -363,6 +371,12 @@ func (pb *Client) GetProperties(ctx context.Context, o *blob.GetPropertiesOption
	return pb.BlobClient().GetProperties(ctx, o)
}

+// GetAccountInfo provides account level information
+// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-account-information?tabs=shared-access-signatures.
+func (pb *Client) GetAccountInfo(ctx context.Context, o *blob.GetAccountInfoOptions) (blob.GetAccountInfoResponse, error) {
+	return pb.BlobClient().GetAccountInfo(ctx, o)
+}
+
// SetHTTPHeaders changes a blob's HTTP headers.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
func (pb *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) {
@@ -413,6 +427,12 @@ func (pb *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.Co
	return pb.BlobClient().CopyFromURL(ctx, copySource, o)
}

+// GetSASURL is a convenience method for generating a SAS token for the currently pointed at Page blob.
+// It can only be used if the credential supplied during creation was a SharedKeyCredential.
+func (pb *Client) GetSASURL(permissions sas.BlobPermissions, expiry time.Time, o *blob.GetSASURLOptions) (string, error) {
+	return pb.BlobClient().GetSASURL(permissions, expiry, o)
+}
+
// Concurrent Download Functions -----------------------------------------------------------------------------------------

// DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
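A minimal sketch of the two convenience methods the page blob client gains above; account name, key, and blob URL are placeholders, and both methods simply delegate to the embedded blob.Client. GetSASURL only works when the client was built with a shared key credential:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
)

func main() {
	// Placeholder credentials; GetSASURL requires a shared key credential.
	cred, err := blob.NewSharedKeyCredential("<account>", "<base64-key>")
	if err != nil {
		log.Fatal(err)
	}

	client, err := pageblob.NewClientWithSharedKeyCredential(
		"https://<account>.blob.core.windows.net/<container>/<blob>", cred, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Account-level metadata, forwarded to the underlying blob client.
	if _, err := client.GetAccountInfo(context.TODO(), nil); err != nil {
		log.Fatal(err)
	}

	// Read-only SAS URL valid for one hour.
	sasURL, err := client.GetSASURL(sas.BlobPermissions{Read: true}, time.Now().Add(time.Hour), nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(sasURL)
}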
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/account.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/account.go index 472df472..4069bb13 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/account.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/account.go @@ -25,13 +25,14 @@ type UserDelegationCredential = exported.UserDelegationCredential // AccountSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage account. // For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-an-account-sas type AccountSignatureValues struct { - Version string `param:"sv"` // If not specified, this format to SASVersion - Protocol Protocol `param:"spr"` // See the SASProtocol* constants - StartTime time.Time `param:"st"` // Not specified if IsZero - ExpiryTime time.Time `param:"se"` // Not specified if IsZero - Permissions string `param:"sp"` // Create by initializing a AccountSASPermissions and then call String() - IPRange IPRange `param:"sip"` - ResourceTypes string `param:"srt"` // Create by initializing AccountSASResourceTypes and then call String() + Version string `param:"sv"` // If not specified, this format to SASVersion + Protocol Protocol `param:"spr"` // See the SASProtocol* constants + StartTime time.Time `param:"st"` // Not specified if IsZero + ExpiryTime time.Time `param:"se"` // Not specified if IsZero + Permissions string `param:"sp"` // Create by initializing AccountPermissions and then call String() + IPRange IPRange `param:"sip"` + ResourceTypes string `param:"srt"` // Create by initializing AccountResourceTypes and then call String() + EncryptionScope string `param:"ses"` } // SignWithSharedKey uses an account's shared key credential to sign this signature values to produce @@ -50,6 +51,12 @@ func (v AccountSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKey } v.Permissions = perms.String() + resources, err := parseAccountResourceTypes(v.ResourceTypes) + if err != nil { + return QueryParameters{}, err + } + v.ResourceTypes = resources.String() + startTime, expiryTime, _ := formatTimesForSigning(v.StartTime, v.ExpiryTime, time.Time{}) stringToSign := strings.Join([]string{ @@ -62,6 +69,7 @@ func (v AccountSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKey v.IPRange.String(), string(v.Protocol), v.Version, + v.EncryptionScope, ""}, // That is right, the account SAS requires a terminating extra newline "\n") @@ -71,12 +79,13 @@ func (v AccountSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKey } p := QueryParameters{ // Common SAS parameters - version: v.Version, - protocol: v.Protocol, - startTime: v.StartTime, - expiryTime: v.ExpiryTime, - permissions: v.Permissions, - ipRange: v.IPRange, + version: v.Version, + protocol: v.Protocol, + startTime: v.StartTime, + expiryTime: v.ExpiryTime, + permissions: v.Permissions, + ipRange: v.IPRange, + encryptionScope: v.EncryptionScope, // Account-specific SAS parameters services: "b", // will always be "b" @@ -90,13 +99,13 @@ func (v AccountSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKey } // AccountPermissions type simplifies creating the permissions string for an Azure Storage Account SAS. -// Initialize an instance of this type and then call Client.GetSASURL with it or use the String method to set AccountSASSignatureValues Permissions field. 
+// Initialize an instance of this type and then call its String method to set AccountSignatureValues' Permissions field. type AccountPermissions struct { Read, Write, Delete, DeletePreviousVersion, PermanentDelete, List, Add, Create, Update, Process, FilterByTags, Tag, SetImmutabilityPolicy bool } // String produces the SAS permissions string for an Azure Storage account. -// Call this method to set AccountSASSignatureValues' Permissions field. +// Call this method to set AccountSignatureValues' Permissions field. func (p *AccountPermissions) String() string { var buffer bytes.Buffer if p.Read { @@ -141,7 +150,7 @@ func (p *AccountPermissions) String() string { return buffer.String() } -// Parse initializes the AccountSASPermissions' fields from a string. +// Parse initializes the AccountPermissions' fields from a string. func parseAccountPermissions(s string) (AccountPermissions, error) { p := AccountPermissions{} // Clear out the flags for _, r := range s { @@ -180,13 +189,13 @@ func parseAccountPermissions(s string) (AccountPermissions, error) { } // AccountResourceTypes type simplifies creating the resource types string for an Azure Storage Account SAS. -// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues' ResourceTypes field. +// Initialize an instance of this type and then call its String method to set AccountSignatureValues' ResourceTypes field. type AccountResourceTypes struct { Service, Container, Object bool } // String produces the SAS resource types string for an Azure Storage account. -// Call this method to set AccountSASSignatureValues' ResourceTypes field. +// Call this method to set AccountSignatureValues' ResourceTypes field. func (rt *AccountResourceTypes) String() string { var buffer bytes.Buffer if rt.Service { @@ -200,3 +209,21 @@ func (rt *AccountResourceTypes) String() string { } return buffer.String() } + +// parseAccountResourceTypes initializes the AccountResourceTypes' fields from a string. +func parseAccountResourceTypes(s string) (AccountResourceTypes, error) { + rt := AccountResourceTypes{} + for _, r := range s { + switch r { + case 's': + rt.Service = true + case 'c': + rt.Container = true + case 'o': + rt.Object = true + default: + return AccountResourceTypes{}, fmt.Errorf("invalid resource type character: '%v'", r) + } + } + return rt, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go index 4d97372d..4c23208e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go @@ -23,7 +23,7 @@ const ( var ( // Version is the default version encoded in the SAS token. - Version = "2020-02-10" + Version = "2021-12-02" ) // TimeFormats ISO 8601 format. @@ -143,6 +143,7 @@ type QueryParameters struct { authorizedObjectID string `param:"saoid"` unauthorizedObjectID string `param:"suoid"` correlationID string `param:"scid"` + encryptionScope string `param:"ses"` // private member used for startTime and expiryTime formatting. stTimeFormat string seTimeFormat string @@ -163,6 +164,11 @@ func (p *QueryParameters) SignedCorrelationID() string { return p.correlationID } +// EncryptionScope returns encryptionScope +func (p *QueryParameters) EncryptionScope() string { + return p.encryptionScope +} + // SignedOID returns signedOID. 
func (p *QueryParameters) SignedOID() string { return p.signedOID @@ -355,6 +361,9 @@ func (p *QueryParameters) Encode() string { if p.correlationID != "" { v.Add("scid", p.correlationID) } + if p.encryptionScope != "" { + v.Add("ses", p.encryptionScope) + } return v.Encode() } @@ -429,6 +438,8 @@ func NewQueryParameters(values url.Values, deleteSASParametersFromValues bool) Q p.unauthorizedObjectID = val case "scid": p.correlationID = val + case "ses": + p.encryptionScope = val default: isSASKey = false // We didn't recognize the query parameter } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go index 3ccda6aa..45f73084 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go @@ -8,6 +8,7 @@ package sas import ( "bytes" + "errors" "fmt" "strings" "time" @@ -24,7 +25,7 @@ type BlobSignatureValues struct { StartTime time.Time `param:"st"` // Not specified if IsZero ExpiryTime time.Time `param:"se"` // Not specified if IsZero SnapshotTime time.Time - Permissions string `param:"sp"` // Create by initializing a ContainerSASPermissions or BlobSASPermissions and then call String() + Permissions string `param:"sp"` // Create by initializing ContainerPermissions or BlobPermissions and then call String() IPRange IPRange `param:"sip"` Identifier string `param:"si"` ContainerName string @@ -39,6 +40,7 @@ type BlobSignatureValues struct { AuthorizedObjectID string // saoid UnauthorizedObjectID string // suoid CorrelationID string // scid + EncryptionScope string `param:"ses"` } func getDirectoryDepth(path string) string { @@ -50,17 +52,11 @@ func getDirectoryDepth(path string) string { // SignWithSharedKey uses an account's SharedKeyCredential to sign this signature values to produce the proper SAS query parameters. 
func (v BlobSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCredential) (QueryParameters, error) { - if sharedKeyCredential == nil { - return QueryParameters{}, fmt.Errorf("cannot sign SAS query without Shared Key Credential") + if v.Identifier == "" && (v.ExpiryTime.IsZero() || v.Permissions == "") { + return QueryParameters{}, errors.New("service SAS is missing at least one of these: ExpiryTime or Permissions") } - //Make sure the permission characters are in the correct order - perms, err := parseBlobPermissions(v.Permissions) - if err != nil { - return QueryParameters{}, err - } - v.Permissions = perms.String() - + // Parse the resource resource := "c" if !v.SnapshotTime.IsZero() { resource = "bs" @@ -75,6 +71,21 @@ func (v BlobSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCre resource = "b" } + // make sure the permission characters are in the correct order + if resource == "c" { + perms, err := parseContainerPermissions(v.Permissions) + if err != nil { + return QueryParameters{}, err + } + v.Permissions = perms.String() + } else { + perms, err := parseBlobPermissions(v.Permissions) + if err != nil { + return QueryParameters{}, err + } + v.Permissions = perms.String() + } + if v.Version == "" { v.Version = Version } @@ -93,7 +104,8 @@ func (v BlobSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCre string(v.Protocol), v.Version, resource, - snapshotTime, // signed timestamp + snapshotTime, // signed timestamp + v.EncryptionScope, v.CacheControl, // rscc v.ContentDisposition, // rscd v.ContentEncoding, // rsce @@ -108,12 +120,13 @@ func (v BlobSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCre p := QueryParameters{ // Common SAS parameters - version: v.Version, - protocol: v.Protocol, - startTime: v.StartTime, - expiryTime: v.ExpiryTime, - permissions: v.Permissions, - ipRange: v.IPRange, + version: v.Version, + protocol: v.Protocol, + startTime: v.StartTime, + expiryTime: v.ExpiryTime, + permissions: v.Permissions, + ipRange: v.IPRange, + encryptionScope: v.EncryptionScope, // Container/Blob-specific SAS parameters resource: resource, @@ -141,6 +154,10 @@ func (v BlobSignatureValues) SignWithUserDelegation(userDelegationCredential *Us return QueryParameters{}, fmt.Errorf("cannot sign SAS query without User Delegation Key") } + if v.ExpiryTime.IsZero() || v.Permissions == "" { + return QueryParameters{}, errors.New("user delegation SAS is missing at least one of these: ExpiryTime or Permissions") + } + // Parse the resource resource := "c" if !v.SnapshotTime.IsZero() { @@ -197,7 +214,8 @@ func (v BlobSignatureValues) SignWithUserDelegation(userDelegationCredential *Us string(v.Protocol), v.Version, resource, - snapshotTime, // signed timestamp + snapshotTime, // signed timestamp + v.EncryptionScope, v.CacheControl, // rscc v.ContentDisposition, // rscd v.ContentEncoding, // rsce @@ -212,12 +230,13 @@ func (v BlobSignatureValues) SignWithUserDelegation(userDelegationCredential *Us p := QueryParameters{ // Common SAS parameters - version: v.Version, - protocol: v.Protocol, - startTime: v.StartTime, - expiryTime: v.ExpiryTime, - permissions: v.Permissions, - ipRange: v.IPRange, + version: v.Version, + protocol: v.Protocol, + startTime: v.StartTime, + expiryTime: v.ExpiryTime, + permissions: v.Permissions, + ipRange: v.IPRange, + encryptionScope: v.EncryptionScope, // Container/Blob-specific SAS parameters resource: resource, @@ -261,15 +280,15 @@ func getCanonicalName(account string, containerName string, blobName 
string, dir } // ContainerPermissions type simplifies creating the permissions string for an Azure Storage container SAS. -// Initialize an instance of this type and then call Client.GetSASURL with it or use the String method to set BlobSASSignatureValues Permissions field. +// Initialize an instance of this type and then call its String method to set BlobSignatureValues' Permissions field. // All permissions descriptions can be found here: https://docs.microsoft.com/en-us/rest/api/storageservices/create-service-sas#permissions-for-a-directory-container-or-blob type ContainerPermissions struct { - Read, Add, Create, Write, Delete, DeletePreviousVersion, List, FilterByTags, Move, SetImmutabilityPolicy bool - Execute, ModifyOwnership, ModifyPermissions bool // Meant for hierarchical namespace accounts + Read, Add, Create, Write, Delete, DeletePreviousVersion, List, Tag, FilterByTags, Move, SetImmutabilityPolicy bool + Execute, ModifyOwnership, ModifyPermissions bool // Meant for hierarchical namespace accounts } // String produces the SAS permissions string for an Azure Storage container. -// Call this method to set BlobSASSignatureValues' Permissions field. +// Call this method to set BlobSignatureValues' Permissions field. func (p *ContainerPermissions) String() string { var b bytes.Buffer if p.Read { @@ -293,6 +312,9 @@ func (p *ContainerPermissions) String() string { if p.List { b.WriteRune('l') } + if p.Tag { + b.WriteRune('t') + } if p.FilterByTags { b.WriteRune('f') } @@ -333,6 +355,8 @@ func parseContainerPermissions(s string) (ContainerPermissions, error) { p.DeletePreviousVersion = true case 'l': p.List = true + case 't': + p.Tag = true case 'f': p.FilterByTags = true case 'm': @@ -353,13 +377,13 @@ func parseContainerPermissions(s string) (ContainerPermissions, error) { } // BlobPermissions type simplifies creating the permissions string for an Azure Storage blob SAS. -// Initialize an instance of this type and then call Client.GetSASURL with it or use the String method to set BlobSASSignatureValues Permissions field. +// Initialize an instance of this type and then call its String method to set BlobSignatureValues' Permissions field. type BlobPermissions struct { Read, Add, Create, Write, Delete, DeletePreviousVersion, PermanentDelete, List, Tag, Move, Execute, Ownership, Permissions, SetImmutabilityPolicy bool } // String produces the SAS permissions string for an Azure Storage blob. -// Call this method to set BlobSignatureValue's Permissions field. +// Call this method to set BlobSignatureValues' Permissions field. func (p *BlobPermissions) String() string { var b bytes.Buffer if p.Read { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/batch_builder.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/batch_builder.go new file mode 100644 index 00000000..924fd108 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/batch_builder.go @@ -0,0 +1,94 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. 
+ +package service + +import ( + "context" + "fmt" + "net/url" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" +) + +// BatchBuilder is used for creating the batch operations list. It contains the list of either delete or set tier sub-requests. +// NOTE: All sub-requests in the batch must be of the same type, either delete or set tier. +type BatchBuilder struct { + endpoint string + authPolicy policy.Policy + subRequests []*policy.Request + operationType *exported.BlobBatchOperationType +} + +func (bb *BatchBuilder) checkOperationType(operationType exported.BlobBatchOperationType) error { + if bb.operationType == nil { + bb.operationType = &operationType + return nil + } + if *bb.operationType != operationType { + return fmt.Errorf("BlobBatch only supports one operation type per batch and is already being used for %s operations", *bb.operationType) + } + return nil +} + +// Delete operation is used to add delete sub-request to the batch builder. +func (bb *BatchBuilder) Delete(containerName string, blobName string, options *BatchDeleteOptions) error { + err := bb.checkOperationType(exported.BatchDeleteOperationType) + if err != nil { + return err + } + + blobName = url.PathEscape(blobName) + blobURL := runtime.JoinPaths(bb.endpoint, containerName, blobName) + + blobClient, err := blob.NewClientWithNoCredential(blobURL, nil) + if err != nil { + return err + } + + deleteOptions, leaseInfo, accessConditions := options.format() + req, err := getGeneratedBlobClient(blobClient).DeleteCreateRequest(context.TODO(), deleteOptions, leaseInfo, accessConditions) + if err != nil { + return err + } + + // remove x-ms-version header + exported.UpdateSubRequestHeaders(req) + + bb.subRequests = append(bb.subRequests, req) + return nil +} + +// SetTier operation is used to add set tier sub-request to the batch builder. 
+func (bb *BatchBuilder) SetTier(containerName string, blobName string, accessTier blob.AccessTier, options *BatchSetTierOptions) error { + err := bb.checkOperationType(exported.BatchSetTierOperationType) + if err != nil { + return err + } + + blobName = url.PathEscape(blobName) + blobURL := runtime.JoinPaths(bb.endpoint, containerName, blobName) + + blobClient, err := blob.NewClientWithNoCredential(blobURL, nil) + if err != nil { + return err + } + + setTierOptions, leaseInfo, accessConditions := options.format() + req, err := getGeneratedBlobClient(blobClient).SetTierCreateRequest(context.TODO(), accessTier, setTierOptions, leaseInfo, accessConditions) + if err != nil { + return err + } + + // remove x-ms-version header + exported.UpdateSubRequestHeaders(req) + + bb.subRequests = append(bb.subRequests, req) + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go index 526f540c..46177534 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go @@ -7,8 +7,13 @@ package service import ( + "bytes" "context" + "errors" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" "net/http" "strings" "time" @@ -18,7 +23,6 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" @@ -26,9 +30,7 @@ import ( ) // ClientOptions contains the optional parameters when creating a Client. -type ClientOptions struct { - azcore.ClientOptions -} +type ClientOptions base.ClientOptions // Client represents a URL to the Azure Blob Storage service allowing you to manipulate blob containers. type Client base.Client[generated.ServiceClient] @@ -38,12 +40,15 @@ type Client base.Client[generated.ServiceClient] // - cred - an Azure AD credential, typically obtained via the azidentity module // - options - client options; pass nil to accept the default values func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { - authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil) + authPolicy := shared.NewStorageChallengePolicy(cred) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewServiceClient(serviceURL, pl, nil)), nil + azClient, err := azcore.NewClient(shared.ServiceClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewServiceClient(serviceURL, azClient, &cred)), nil } // NewClientWithNoCredential creates an instance of Client with the specified values. 
@@ -52,9 +57,12 @@ func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOp
// - options - client options; pass nil to accept the default values
func NewClientWithNoCredential(serviceURL string, options *ClientOptions) (*Client, error) {
	conOptions := shared.GetClientOptions(options)
-	pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
-	return (*Client)(base.NewServiceClient(serviceURL, pl, nil)), nil
+	azClient, err := azcore.NewClient(shared.ServiceClient, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
+	if err != nil {
+		return nil, err
+	}
+	return (*Client)(base.NewServiceClient(serviceURL, azClient, nil)), nil
}

// NewClientWithSharedKeyCredential creates an instance of Client with the specified values.
@@ -64,10 +72,14 @@ func NewClientWithNoCredential(serviceURL string, options *ClientOptions) (*Clie
func NewClientWithSharedKeyCredential(serviceURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) {
	authPolicy := exported.NewSharedKeyCredPolicy(cred)
	conOptions := shared.GetClientOptions(options)
-	conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
-	pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
+	plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}

-	return (*Client)(base.NewServiceClient(serviceURL, pl, cred)), nil
+	azClient, err := azcore.NewClient(shared.ServiceClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
+	if err != nil {
+		return nil, err
+	}
+
+	return (*Client)(base.NewServiceClient(serviceURL, azClient, cred)), nil
}

// NewClientFromConnectionString creates an instance of Client with the specified values.
@@ -115,6 +127,15 @@ func (s *Client) sharedKey() *SharedKeyCredential {
	return base.SharedKey((*base.Client[generated.ServiceClient])(s))
}

+func (s *Client) credential() any {
+	return base.Credential((*base.Client[generated.ServiceClient])(s))
+}
+
+// helper method to return the generated.BlobClient which is used for creating the sub-requests
+func getGeneratedBlobClient(b *blob.Client) *generated.BlobClient {
+	return base.InnerClient((*base.Client[generated.BlobClient])(b))
+}
+
// URL returns the URL endpoint used by the Client object.
func (s *Client) URL() string {
	return s.generated().Endpoint()
}
@@ -124,7 +145,7 @@ func (s *Client) URL() string {
// this Client's URL. The new container.Client uses the same request policy pipeline as the Client.
func (s *Client) NewContainerClient(containerName string) *container.Client {
	containerURL := runtime.JoinPaths(s.generated().Endpoint(), containerName)
-	return (*container.Client)(base.NewContainerClient(containerURL, s.generated().Pipeline(), s.sharedKey()))
+	return (*container.Client)(base.NewContainerClient(containerURL, s.generated().InternalClient().WithClientName(shared.ContainerClient), s.credential()))
}

// CreateContainer is a lifecycle method that creates a new container under the specified account.
@@ -154,6 +175,7 @@ func (s *Client) RestoreContainer(ctx context.Context, deletedContainerName stri
}

// GetAccountInfo provides account level information
+// For more information, see https://learn.microsoft.com/en-us/rest/api/storageservices/get-account-information?tabs=shared-access-signatures.
func (s *Client) GetAccountInfo(ctx context.Context, o *GetAccountInfoOptions) (GetAccountInfoResponse, error) { getAccountInfoOptions := o.format() resp, err := s.generated().GetAccountInfo(ctx, getAccountInfoOptions) @@ -172,6 +194,9 @@ func (s *Client) NewListContainersPager(o *ListContainersOptions) *runtime.Pager if o.Include.Metadata { listOptions.Include = append(listOptions.Include, generated.ListContainersIncludeTypeMetadata) } + if o.Include.System { + listOptions.Include = append(listOptions.Include, generated.ListContainersIncludeTypeSystem) + } listOptions.Marker = o.Marker listOptions.Maxresults = o.MaxResults listOptions.Prefix = o.Prefix @@ -192,7 +217,7 @@ func (s *Client) NewListContainersPager(o *ListContainersOptions) *runtime.Pager if err != nil { return ListContainersResponse{}, err } - resp, err := s.generated().Pipeline().Do(req) + resp, err := s.generated().InternalClient().Pipeline().Do(req) if err != nil { return ListContainersResponse{}, err } @@ -280,3 +305,68 @@ func (s *Client) FilterBlobs(ctx context.Context, where string, o *FilterBlobsOp resp, err := s.generated().FilterBlobs(ctx, where, serviceFilterBlobsOptions) return resp, err } + +// NewBatchBuilder creates an instance of BatchBuilder using the same auth policy as the client. +// BatchBuilder is used to build the batch consisting of either delete or set tier sub-requests. +// All sub-requests in the batch must be of the same type, either delete or set tier. +// NOTE: Service level Blob Batch operation is supported only when the Client was created using SharedKeyCredential and Account SAS. +func (s *Client) NewBatchBuilder() (*BatchBuilder, error) { + var authPolicy policy.Policy + + switch cred := s.credential().(type) { + case *azcore.TokenCredential: + authPolicy = shared.NewStorageChallengePolicy(*cred) + case *SharedKeyCredential: + authPolicy = exported.NewSharedKeyCredPolicy(cred) + case nil: + // for authentication using SAS + authPolicy = nil + default: + return nil, fmt.Errorf("unrecognised authentication type %T", cred) + } + + return &BatchBuilder{ + endpoint: s.URL(), + authPolicy: authPolicy, + }, nil +} + +// SubmitBatch operation allows multiple API calls to be embedded into a single HTTP request. +// It builds the request body using the BatchBuilder object passed. +// BatchBuilder contains the list of operations to be submitted. It supports up to 256 sub-requests in a single batch. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/blob-batch. 
+func (s *Client) SubmitBatch(ctx context.Context, bb *BatchBuilder, options *SubmitBatchOptions) (SubmitBatchResponse, error) { + if bb == nil || len(bb.subRequests) == 0 { + return SubmitBatchResponse{}, errors.New("batch builder is empty") + } + + // create the request body + batchReq, batchID, err := exported.CreateBatchRequest(&exported.BlobBatchBuilder{ + AuthPolicy: bb.authPolicy, + SubRequests: bb.subRequests, + }) + if err != nil { + return SubmitBatchResponse{}, err + } + + reader := bytes.NewReader(batchReq) + rsc := streaming.NopCloser(reader) + multipartContentType := "multipart/mixed; boundary=" + batchID + + resp, err := s.generated().SubmitBatch(ctx, int64(len(batchReq)), multipartContentType, rsc, options.format()) + if err != nil { + return SubmitBatchResponse{}, err + } + + batchResponses, err := exported.ParseBlobBatchResponse(resp.Body, resp.ContentType, bb.subRequests) + if err != nil { + return SubmitBatchResponse{}, err + } + + return SubmitBatchResponse{ + Responses: batchResponses, + ContentType: resp.ContentType, + RequestID: resp.RequestID, + Version: resp.Version, + }, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/models.go index 4e0d7740..b70724d7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/models.go @@ -8,6 +8,7 @@ package service import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" @@ -155,6 +156,9 @@ type ListContainersInclude struct { // Tells the service whether to return soft-deleted containers. Deleted bool + + // Tells the service whether to return system containers. + System bool } // --------------------------------------------------------------------------------------------------------------------- @@ -299,3 +303,59 @@ func (o *FilterBlobsOptions) format() *generated.ServiceClientFilterBlobsOptions Maxresults: o.MaxResults, } } + +// --------------------------------------------------------------------------------------------------------------------- + +// BatchDeleteOptions contains the optional parameters for the BatchBuilder.Delete method. +type BatchDeleteOptions struct { + blob.DeleteOptions + VersionID *string + Snapshot *string +} + +func (o *BatchDeleteOptions) format() (*generated.BlobClientDeleteOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + basics := generated.BlobClientDeleteOptions{ + DeleteSnapshots: o.DeleteSnapshots, + DeleteType: o.BlobDeleteType, // None by default + Snapshot: o.Snapshot, + VersionID: o.VersionID, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &basics, leaseAccessConditions, modifiedAccessConditions +} + +// BatchSetTierOptions contains the optional parameters for the BatchBuilder.SetTier method. 
+type BatchSetTierOptions struct { + blob.SetTierOptions + VersionID *string + Snapshot *string +} + +func (o *BatchSetTierOptions) format() (*generated.BlobClientSetTierOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + basics := generated.BlobClientSetTierOptions{ + RehydratePriority: o.RehydratePriority, + Snapshot: o.Snapshot, + VersionID: o.VersionID, + } + + leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions) + return &basics, leaseAccessConditions, modifiedAccessConditions +} + +// SubmitBatchOptions contains the optional parameters for the Client.SubmitBatch method. +type SubmitBatchOptions struct { + // placeholder for future options +} + +func (o *SubmitBatchOptions) format() *generated.ServiceClientSubmitBatchOptions { + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/responses.go index d8b0f4d4..2dbf9716 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/responses.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/responses.go @@ -7,6 +7,7 @@ package service import ( + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" ) @@ -42,3 +43,21 @@ type FilterBlobsResponse = generated.ServiceClientFilterBlobsResponse // GetUserDelegationKeyResponse contains the response from method ServiceClient.GetUserDelegationKey. type GetUserDelegationKeyResponse = generated.ServiceClientGetUserDelegationKeyResponse + +// SubmitBatchResponse contains the response from method Client.SubmitBatch. +type SubmitBatchResponse struct { + // Responses contains the responses of the sub-requests in the batch + Responses []*BatchResponseItem + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// BatchResponseItem contains the response for the individual sub-requests. 
+type BatchResponseItem = exported.BatchResponseItem diff --git a/vendor/github.com/ClickHouse/ch-go/proto/block.go b/vendor/github.com/ClickHouse/ch-go/proto/block.go index aaf97d89..34548067 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/block.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/block.go @@ -169,7 +169,7 @@ func (b Block) EncodeRawBlock(buf *Buffer, version int, input []InputColumn) err col.EncodeStart(buf, version) if v, ok := col.Data.(Preparable); ok { if err := v.Prepare(); err != nil { - return errors.Wrap(err, "prepare") + return errors.Wrapf(err, "prepare %q", col.Name) } } if col.Data.Rows() == 0 { @@ -279,7 +279,7 @@ func (b *Block) DecodeBlock(r *Reader, version int, target Result) error { } } if err := b.DecodeRawBlock(r, version, target); err != nil { - return err + return errors.Wrap(err, "raw block") } return nil diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_arr.go b/vendor/github.com/ClickHouse/ch-go/proto/col_arr.go index 0ac09233..ad574368 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_arr.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_arr.go @@ -132,12 +132,18 @@ func (c ColArr[T]) EncodeColumn(b *Buffer) { // Append appends new row to column. func (c *ColArr[T]) Append(v []T) { - for _, s := range v { - c.Data.Append(s) - } + c.Data.AppendArr(v) c.Offsets = append(c.Offsets, uint64(c.Data.Rows())) } +// AppendArr appends new slice of rows to column. +func (c *ColArr[T]) AppendArr(vs [][]T) { + for _, v := range vs { + c.Data.AppendArr(v) + c.Offsets = append(c.Offsets, uint64(c.Data.Rows())) + } +} + // Result for current column. func (c *ColArr[T]) Result(column string) ResultColumn { return ResultColumn{Name: column, Data: c} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_auto.go b/vendor/github.com/ClickHouse/ch-go/proto/col_auto.go index d2b0b612..2ed9b8ea 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_auto.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_auto.go @@ -58,6 +58,12 @@ func (c *ColAuto) Infer(t ColumnType) error { c.Data = new(ColDate) case "Map(String,String)": c.Data = NewMap[string, string](new(ColStr), new(ColStr)) + case ColumnTypeUUID: + c.Data = new(ColUUID) + case ColumnTypeArray.Sub(ColumnTypeUUID): + c.Data = new(ColUUID).Array() + case ColumnTypeNullable.Sub(ColumnTypeUUID): + c.Data = new(ColUUID).Nullable() default: switch t.Base() { case ColumnTypeDateTime: diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_auto_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_auto_gen.go index 0a849619..70928c65 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_auto_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_auto_gen.go @@ -112,6 +112,48 @@ func inferGenerated(t ColumnType) Column { return new(ColUInt256).Nullable() case ColumnTypeUInt256: return new(ColUInt256) + case ColumnTypeArray.Sub(ColumnTypeFixedString.With("8")): + return new(ColFixedStr8).Array() + case ColumnTypeNullable.Sub(ColumnTypeFixedString.With("8")): + return new(ColFixedStr8).Nullable() + case ColumnTypeFixedString.With("8"): + return new(ColFixedStr8) + case ColumnTypeArray.Sub(ColumnTypeFixedString.With("16")): + return new(ColFixedStr16).Array() + case ColumnTypeNullable.Sub(ColumnTypeFixedString.With("16")): + return new(ColFixedStr16).Nullable() + case ColumnTypeFixedString.With("16"): + return new(ColFixedStr16) + case ColumnTypeArray.Sub(ColumnTypeFixedString.With("32")): + return new(ColFixedStr32).Array() + case 
ColumnTypeNullable.Sub(ColumnTypeFixedString.With("32")): + return new(ColFixedStr32).Nullable() + case ColumnTypeFixedString.With("32"): + return new(ColFixedStr32) + case ColumnTypeArray.Sub(ColumnTypeFixedString.With("64")): + return new(ColFixedStr64).Array() + case ColumnTypeNullable.Sub(ColumnTypeFixedString.With("64")): + return new(ColFixedStr64).Nullable() + case ColumnTypeFixedString.With("64"): + return new(ColFixedStr64) + case ColumnTypeArray.Sub(ColumnTypeFixedString.With("128")): + return new(ColFixedStr128).Array() + case ColumnTypeNullable.Sub(ColumnTypeFixedString.With("128")): + return new(ColFixedStr128).Nullable() + case ColumnTypeFixedString.With("128"): + return new(ColFixedStr128) + case ColumnTypeArray.Sub(ColumnTypeFixedString.With("256")): + return new(ColFixedStr256).Array() + case ColumnTypeNullable.Sub(ColumnTypeFixedString.With("256")): + return new(ColFixedStr256).Nullable() + case ColumnTypeFixedString.With("256"): + return new(ColFixedStr256) + case ColumnTypeArray.Sub(ColumnTypeFixedString.With("512")): + return new(ColFixedStr512).Array() + case ColumnTypeNullable.Sub(ColumnTypeFixedString.With("512")): + return new(ColFixedStr512).Nullable() + case ColumnTypeFixedString.With("512"): + return new(ColFixedStr512) default: return nil } diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_bool.go b/vendor/github.com/ClickHouse/ch-go/proto/col_bool.go index 0f1120bc..0974e53c 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_bool.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_bool.go @@ -19,6 +19,10 @@ func (c *ColBool) Append(v bool) { *c = append(*c, v) } +func (c *ColBool) AppendArr(vs []bool) { + *c = append(*c, vs...) +} + // Type returns ColumnType of Bool. func (ColBool) Type() ColumnType { return ColumnTypeBool diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_bool_safe.go b/vendor/github.com/ClickHouse/ch-go/proto/col_bool_safe.go index 00e47235..3e998e46 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_bool_safe.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_bool_safe.go @@ -1,4 +1,4 @@ -//go:build !(amd64 || arm64) || purego +//go:build !(amd64 || arm64 || riscv64) || purego package proto diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_bool_unsafe.go b/vendor/github.com/ClickHouse/ch-go/proto/col_bool_unsafe.go index 5a33c9c4..92cac707 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_bool_unsafe.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_bool_unsafe.go @@ -1,4 +1,4 @@ -//go:build (amd64 || arm64) && !purego +//go:build (amd64 || arm64 || riscv64) && !purego package proto diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_date.go b/vendor/github.com/ClickHouse/ch-go/proto/col_date.go index 61d79e63..5bf75b3b 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_date.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_date.go @@ -6,6 +6,16 @@ func (c *ColDate) Append(v time.Time) { *c = append(*c, ToDate(v)) } +func (c *ColDate) AppendArr(vs []time.Time) { + var dates = make([]Date, len(vs)) + + for i, v := range vs { + dates[i] = ToDate(v) + } + + *c = append(*c, dates...) 
+} + func (c ColDate) Row(i int) time.Time { return c[i].Time() } diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_date32.go b/vendor/github.com/ClickHouse/ch-go/proto/col_date32.go index f47644c5..38f1a916 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_date32.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_date32.go @@ -6,6 +6,16 @@ func (c *ColDate32) Append(v time.Time) { *c = append(*c, ToDate32(v)) } +func (c *ColDate32) AppendArr(vs []time.Time) { + var dates = make([]Date32, len(vs)) + + for i, v := range vs { + dates[i] = ToDate32(v) + } + + *c = append(*c, dates...) +} + func (c ColDate32) Row(i int) time.Time { return c[i].Time() } diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_date32_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_date32_safe_gen.go index 1233a577..7e6ac3d6 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_date32_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_date32_safe_gen.go @@ -1,4 +1,4 @@ -//go:build !(amd64 || arm64) || purego +//go:build !(amd64 || arm64 || riscv64) || purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_date32_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_date32_unsafe_gen.go index 21475ab1..2690a318 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_date32_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_date32_unsafe_gen.go @@ -1,4 +1,4 @@ -//go:build (amd64 || arm64) && !purego +//go:build (amd64 || arm64 || riscv64) && !purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_date_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_date_safe_gen.go index 9dadd596..49bb89b5 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_date_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_date_safe_gen.go @@ -1,4 +1,4 @@ -//go:build !(amd64 || arm64) || purego +//go:build !(amd64 || arm64 || riscv64) || purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_date_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_date_unsafe_gen.go index 3975805c..980d8b4c 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_date_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_date_unsafe_gen.go @@ -1,4 +1,4 @@ -//go:build (amd64 || arm64) && !purego +//go:build (amd64 || arm64 || riscv64) && !purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_datetime.go b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime.go index 65d05bd2..4243f2ba 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_datetime.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime.go @@ -65,6 +65,16 @@ func (c *ColDateTime) Append(v time.Time) { c.Data = append(c.Data, ToDateTime(v)) } +func (c *ColDateTime) AppendArr(vs []time.Time) { + var dates = make([]DateTime, len(vs)) + + for i, v := range vs { + dates[i] = ToDateTime(v) + } + + c.Data = append(c.Data, dates...) +} + // LowCardinality returns LowCardinality for Enum8 . 
func (c *ColDateTime) LowCardinality() *ColLowCardinality[time.Time] { return &ColLowCardinality[time.Time]{ diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64.go b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64.go index 13471b4c..f4d96a49 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64.go @@ -111,6 +111,16 @@ func (c *ColDateTime64) Append(v time.Time) { c.AppendRaw(ToDateTime64(v, c.Precision)) } +func (c *ColDateTime64) AppendArr(v []time.Time) { + if !c.PrecisionSet { + panic("DateTime64: no precision set") + } + + for _, item := range v { + c.AppendRaw(ToDateTime64(item, c.Precision)) + } +} + // Raw version of ColDateTime64 for ColumnOf[DateTime64]. func (c ColDateTime64) Raw() *ColDateTime64Raw { return &ColDateTime64Raw{ColDateTime64: c} @@ -132,4 +142,9 @@ type ColDateTime64Raw struct { } func (c *ColDateTime64Raw) Append(v DateTime64) { c.AppendRaw(v) } +func (c *ColDateTime64Raw) AppendArr(vs []DateTime64) { + for _, v := range vs { + c.AppendRaw(v) + } +} func (c ColDateTime64Raw) Row(i int) DateTime64 { return c.Data[i] } diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64_safe_gen.go index 121d0485..ccff09d2 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64_safe_gen.go @@ -1,4 +1,4 @@ -//go:build !(amd64 || arm64) || purego +//go:build !(amd64 || arm64 || riscv64) || purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64_unsafe_gen.go index 4f197fa2..4eeeaf5a 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime64_unsafe_gen.go @@ -1,4 +1,4 @@ -//go:build (amd64 || arm64) && !purego +//go:build (amd64 || arm64 || riscv64) && !purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_datetime_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime_safe_gen.go index 8998155a..20e9aad8 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_datetime_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime_safe_gen.go @@ -1,4 +1,4 @@ -//go:build !(amd64 || arm64) || purego +//go:build !(amd64 || arm64 || riscv64) || purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_datetime_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime_unsafe_gen.go index 9e8e34c7..40a056c5 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_datetime_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_datetime_unsafe_gen.go @@ -1,4 +1,4 @@ -//go:build (amd64 || arm64) && !purego +//go:build (amd64 || arm64 || riscv64) && !purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. 
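The time-based columns convert each element on bulk append rather than appending the slice directly. A short sketch of the new AppendArr methods; note that ColDateTime64 still panics on append unless a precision was set first. WithPrecision and PrecisionNano are pre-existing ch-go APIs and an assumption here, not part of this diff:

package main

import (
	"fmt"
	"time"

	"github.com/ClickHouse/ch-go/proto"
)

func main() {
	now := time.Now()

	// Bulk append on DateTime: each element is converted via ToDateTime.
	dt := new(proto.ColDateTime)
	dt.AppendArr([]time.Time{now, now.Add(time.Second)})

	// DateTime64 requires a precision before any append; without it,
	// AppendArr panics just like Append.
	dt64 := new(proto.ColDateTime64).WithPrecision(proto.PrecisionNano)
	dt64.AppendArr([]time.Time{now})

	fmt.Println(dt.Rows(), dt64.Rows())
}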
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_gen.go index 11828fbd..18811acd 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_gen.go @@ -37,6 +37,11 @@ func (c *ColDecimal128) Append(v Decimal128) { *c = append(*c, v) } +// Append Decimal128 slice to column. +func (c *ColDecimal128) AppendArr(vs []Decimal128) { + *c = append(*c, vs...) +} + // LowCardinality returns LowCardinality for Decimal128 . func (c *ColDecimal128) LowCardinality() *ColLowCardinality[Decimal128] { return &ColLowCardinality[Decimal128]{ diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_safe_gen.go index a693514d..58c02eb2 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_safe_gen.go @@ -1,4 +1,4 @@ -//go:build !(amd64 || arm64) || purego +//go:build !(amd64 || arm64 || riscv64) || purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_unsafe_gen.go index 6f5fdbcb..1b2fe125 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal128_unsafe_gen.go @@ -1,4 +1,4 @@ -//go:build (amd64 || arm64) && !purego +//go:build (amd64 || arm64 || riscv64) && !purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_gen.go index 419512d8..ad96b27a 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_gen.go @@ -37,6 +37,11 @@ func (c *ColDecimal256) Append(v Decimal256) { *c = append(*c, v) } +// Append Decimal256 slice to column. +func (c *ColDecimal256) AppendArr(vs []Decimal256) { + *c = append(*c, vs...) +} + // LowCardinality returns LowCardinality for Decimal256 . func (c *ColDecimal256) LowCardinality() *ColLowCardinality[Decimal256] { return &ColLowCardinality[Decimal256]{ diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_safe_gen.go index bacd5f46..301b7b14 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_safe_gen.go @@ -1,4 +1,4 @@ -//go:build !(amd64 || arm64) || purego +//go:build !(amd64 || arm64 || riscv64) || purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_unsafe_gen.go index 88a8cde6..b0d694ef 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal256_unsafe_gen.go @@ -1,4 +1,4 @@ -//go:build (amd64 || arm64) && !purego +//go:build (amd64 || arm64 || riscv64) && !purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. 
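AppendArr also reaches the composite columns: ColArr[T].AppendArr (col_arr.go above) records one Offsets entry per element slice, and Append now delegates to the inner column's AppendArr. A sketch under the assumption that a ColArr can be built directly from its exported Data field, as the generated Array() helpers do:

package main

import (
	"fmt"

	"github.com/ClickHouse/ch-go/proto"
)

func main() {
	// Array(Bool) column; ColBool gained AppendArr in this update.
	arr := &proto.ColArr[bool]{Data: new(proto.ColBool)}

	arr.Append([]bool{true, false})                    // one row
	arr.AppendArr([][]bool{{true}, {}, {false, true}}) // three rows, one offset each

	fmt.Println(arr.Rows()) // 4
}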
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_gen.go index e1fe67f6..2c4f4eaf 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_gen.go @@ -37,6 +37,11 @@ func (c *ColDecimal32) Append(v Decimal32) { *c = append(*c, v) } +// Append Decimal32 slice to column. +func (c *ColDecimal32) AppendArr(vs []Decimal32) { + *c = append(*c, vs...) +} + // LowCardinality returns LowCardinality for Decimal32 . func (c *ColDecimal32) LowCardinality() *ColLowCardinality[Decimal32] { return &ColLowCardinality[Decimal32]{ diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_safe_gen.go index b26ebefa..44cb9f7a 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_safe_gen.go @@ -1,4 +1,4 @@ -//go:build !(amd64 || arm64) || purego +//go:build !(amd64 || arm64 || riscv64) || purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_unsafe_gen.go index 2f2b0095..eaed3dff 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal32_unsafe_gen.go @@ -1,4 +1,4 @@ -//go:build (amd64 || arm64) && !purego +//go:build (amd64 || arm64 || riscv64) && !purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_gen.go index fc26f848..c37ffcd3 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_gen.go @@ -37,6 +37,11 @@ func (c *ColDecimal64) Append(v Decimal64) { *c = append(*c, v) } +// Append Decimal64 slice to column. +func (c *ColDecimal64) AppendArr(vs []Decimal64) { + *c = append(*c, vs...) +} + // LowCardinality returns LowCardinality for Decimal64 . func (c *ColDecimal64) LowCardinality() *ColLowCardinality[Decimal64] { return &ColLowCardinality[Decimal64]{ diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_safe_gen.go index 550c3ed2..a0934c68 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_safe_gen.go @@ -1,4 +1,4 @@ -//go:build !(amd64 || arm64) || purego +//go:build !(amd64 || arm64 || riscv64) || purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_unsafe_gen.go index 7e5b9a4e..f5ba1b2d 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_decimal64_unsafe_gen.go @@ -1,4 +1,4 @@ -//go:build (amd64 || arm64) && !purego +//go:build (amd64 || arm64 || riscv64) && !purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. 
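For the fixed-width generated columns, the new AppendArr is a single variadic append, so bulk loading avoids a per-element call loop. A sketch with Decimal32; values are raw scaled integers, with scale handling left to the caller as elsewhere in ch-go:

package main

import (
	"fmt"

	"github.com/ClickHouse/ch-go/proto"
)

func main() {
	col := new(proto.ColDecimal32)

	// One bulk append instead of looping over Append.
	col.AppendArr([]proto.Decimal32{12345, 67890}) // raw scaled values

	fmt.Println(col.Rows()) // 2
}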
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_enum.go b/vendor/github.com/ClickHouse/ch-go/proto/col_enum.go index 0ed39ad3..f4af963b 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_enum.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_enum.go @@ -47,6 +47,10 @@ func (e *ColEnum) Append(v string) { e.Values = append(e.Values, v) } +func (e *ColEnum) AppendArr(vs []string) { + e.Values = append(e.Values, vs...) +} + func (e *ColEnum) parse(t ColumnType) error { if e.rawToStr == nil { e.rawToStr = map[int]string{} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_gen.go index 3fc2f068..3f99c642 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_gen.go @@ -37,6 +37,11 @@ func (c *ColEnum16) Append(v Enum16) { *c = append(*c, v) } +// Append Enum16 slice to column. +func (c *ColEnum16) AppendArr(vs []Enum16) { + *c = append(*c, vs...) +} + // LowCardinality returns LowCardinality for Enum16 . func (c *ColEnum16) LowCardinality() *ColLowCardinality[Enum16] { return &ColLowCardinality[Enum16]{ diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_safe_gen.go index b192951b..bf3b0122 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_safe_gen.go @@ -1,4 +1,4 @@ -//go:build !(amd64 || arm64) || purego +//go:build !(amd64 || arm64 || riscv64) || purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_unsafe_gen.go index 4868d10b..52757012 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_enum16_unsafe_gen.go @@ -1,4 +1,4 @@ -//go:build (amd64 || arm64) && !purego +//go:build (amd64 || arm64 || riscv64) && !purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_gen.go index ac190d27..a063eaf8 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_gen.go @@ -37,6 +37,11 @@ func (c *ColEnum8) Append(v Enum8) { *c = append(*c, v) } +// Append Enum8 slice to column. +func (c *ColEnum8) AppendArr(vs []Enum8) { + *c = append(*c, vs...) +} + // LowCardinality returns LowCardinality for Enum8 . func (c *ColEnum8) LowCardinality() *ColLowCardinality[Enum8] { return &ColLowCardinality[Enum8]{ diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_safe_gen.go index d6f599bb..edf5712c 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_safe_gen.go @@ -1,4 +1,4 @@ -//go:build !(amd64 || arm64) || purego +//go:build !(amd64 || arm64 || riscv64) || purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. 
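`ColEnum` gets the same treatment, but it is string-backed: values are symbolic names that are resolved against the column's Enum definition (the `parse` step visible in the hunk above) before encoding. A minimal sketch of the append side only; preparing the column against its server-side `Enum8`/`Enum16` type is out of scope here:

```go
package main

import (
	"fmt"

	"github.com/ClickHouse/ch-go/proto"
)

func main() {
	var e proto.ColEnum

	e.Append("running")
	e.AppendArr([]string{"stopped", "running", "failed"})

	// Values holds the raw symbolic names; mapping them to numeric
	// enum codes happens when the column is prepared against its type.
	fmt.Println(len(e.Values)) // 4
}
```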
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_unsafe_gen.go index c66a3759..09e6fe2b 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_enum8_unsafe_gen.go @@ -1,4 +1,4 @@ -//go:build (amd64 || arm64) && !purego +//go:build (amd64 || arm64 || riscv64) && !purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixed_str.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixed_str.go index a77f2a2a..982cfa1b 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_fixed_str.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixed_str.go @@ -66,6 +66,12 @@ func (c *ColFixedStr) Append(b []byte) { c.Buf = append(c.Buf, b...) } +func (c *ColFixedStr) AppendArr(vs [][]byte) { + for _, v := range vs { + c.Append(v) + } +} + // EncodeColumn encodes ColFixedStr rows to *Buffer. func (c ColFixedStr) EncodeColumn(b *Buffer) { b.Buf = append(b.Buf, c.Buf...) diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_gen.go new file mode 100644 index 00000000..cb769537 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_gen.go @@ -0,0 +1,71 @@ +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +// ColFixedStr128 represents FixedStr128 column. +type ColFixedStr128 [][128]byte + +// Compile-time assertions for ColFixedStr128. +var ( + _ ColInput = ColFixedStr128{} + _ ColResult = (*ColFixedStr128)(nil) + _ Column = (*ColFixedStr128)(nil) +) + +// Rows returns count of rows in column. +func (c ColFixedStr128) Rows() int { + return len(c) +} + +// Reset resets data in row, preserving capacity for efficiency. +func (c *ColFixedStr128) Reset() { + *c = (*c)[:0] +} + +// Type returns ColumnType of FixedStr128. +func (ColFixedStr128) Type() ColumnType { + return ColumnTypeFixedString.With("128") +} + +// Row returns i-th row of column. +func (c ColFixedStr128) Row(i int) [128]byte { + return c[i] +} + +// Append [128]byte to column. +func (c *ColFixedStr128) Append(v [128]byte) { + *c = append(*c, v) +} + +// Append [128]byte slice to column. +func (c *ColFixedStr128) AppendArr(vs [][128]byte) { + *c = append(*c, vs...) +} + +// LowCardinality returns LowCardinality for FixedStr128 . +func (c *ColFixedStr128) LowCardinality() *ColLowCardinality[[128]byte] { + return &ColLowCardinality[[128]byte]{ + index: c, + } +} + +// Array is helper that creates Array of [128]byte. +func (c *ColFixedStr128) Array() *ColArr[[128]byte] { + return &ColArr[[128]byte]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable([128]byte). +func (c *ColFixedStr128) Nullable() *ColNullable[[128]byte] { + return &ColNullable[[128]byte]{ + Values: c, + } +} + +// NewArrFixedStr128 returns new Array(FixedStr128). +func NewArrFixedStr128() *ColArr[[128]byte] { + return &ColArr[[128]byte]{ + Data: new(ColFixedStr128), + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_safe_gen.go new file mode 100644 index 00000000..edf7f9cd --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_safe_gen.go @@ -0,0 +1,55 @@ +//go:build !(amd64 || arm64 || riscv64) || purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. 
+ +package proto + +import ( + "encoding/binary" + + "github.com/go-faster/errors" +) + +var _ = binary.LittleEndian // clickHouse uses LittleEndian + +// DecodeColumn decodes FixedStr128 rows from *Reader. +func (c *ColFixedStr128) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + const size = 128 + data, err := r.ReadRaw(rows * size) + if err != nil { + return errors.Wrap(err, "read") + } + v := *c + // Move bound check out of loop. + // + // See https://github.com/golang/go/issues/30945. + _ = data[len(data)-size] + for i := 0; i <= len(data)-size; i += size { + v = append(v, + *(*[128]byte)(data[i : i+size]), + ) + } + *c = v + return nil +} + +// EncodeColumn encodes FixedStr128 rows to *Buffer. +func (c ColFixedStr128) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + const size = 128 + offset := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + for _, vv := range v { + copy( + b.Buf[offset:offset+size], + vv[:], + ) + offset += size + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_unsafe_gen.go new file mode 100644 index 00000000..46ee96c7 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr128_unsafe_gen.go @@ -0,0 +1,45 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "unsafe" + + "github.com/go-faster/errors" +) + +// DecodeColumn decodes FixedStr128 rows from *Reader. +func (c *ColFixedStr128) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + *c = append(*c, make([][128]byte, rows)...) + s := *(*slice)(unsafe.Pointer(c)) + const size = 128 + s.Len *= size + s.Cap *= size + dst := *(*[]byte)(unsafe.Pointer(&s)) + if err := r.ReadFull(dst); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} + +// EncodeColumn encodes FixedStr128 rows to *Buffer. +func (c ColFixedStr128) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + offset := len(b.Buf) + const size = 128 + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + src := *(*[]byte)(unsafe.Pointer(&s)) + dst := b.Buf[offset:] + copy(dst, src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_gen.go new file mode 100644 index 00000000..adfc2de1 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_gen.go @@ -0,0 +1,71 @@ +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +// ColFixedStr16 represents FixedStr16 column. +type ColFixedStr16 [][16]byte + +// Compile-time assertions for ColFixedStr16. +var ( + _ ColInput = ColFixedStr16{} + _ ColResult = (*ColFixedStr16)(nil) + _ Column = (*ColFixedStr16)(nil) +) + +// Rows returns count of rows in column. +func (c ColFixedStr16) Rows() int { + return len(c) +} + +// Reset resets data in row, preserving capacity for efficiency. +func (c *ColFixedStr16) Reset() { + *c = (*c)[:0] +} + +// Type returns ColumnType of FixedStr16. +func (ColFixedStr16) Type() ColumnType { + return ColumnTypeFixedString.With("16") +} + +// Row returns i-th row of column. +func (c ColFixedStr16) Row(i int) [16]byte { + return c[i] +} + +// Append [16]byte to column. +func (c *ColFixedStr16) Append(v [16]byte) { + *c = append(*c, v) +} + +// Append [16]byte slice to column. 
+func (c *ColFixedStr16) AppendArr(vs [][16]byte) { + *c = append(*c, vs...) +} + +// LowCardinality returns LowCardinality for FixedStr16 . +func (c *ColFixedStr16) LowCardinality() *ColLowCardinality[[16]byte] { + return &ColLowCardinality[[16]byte]{ + index: c, + } +} + +// Array is helper that creates Array of [16]byte. +func (c *ColFixedStr16) Array() *ColArr[[16]byte] { + return &ColArr[[16]byte]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable([16]byte). +func (c *ColFixedStr16) Nullable() *ColNullable[[16]byte] { + return &ColNullable[[16]byte]{ + Values: c, + } +} + +// NewArrFixedStr16 returns new Array(FixedStr16). +func NewArrFixedStr16() *ColArr[[16]byte] { + return &ColArr[[16]byte]{ + Data: new(ColFixedStr16), + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_safe_gen.go new file mode 100644 index 00000000..4a9313ab --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_safe_gen.go @@ -0,0 +1,55 @@ +//go:build !(amd64 || arm64 || riscv64) || purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "encoding/binary" + + "github.com/go-faster/errors" +) + +var _ = binary.LittleEndian // clickHouse uses LittleEndian + +// DecodeColumn decodes FixedStr16 rows from *Reader. +func (c *ColFixedStr16) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + const size = 16 + data, err := r.ReadRaw(rows * size) + if err != nil { + return errors.Wrap(err, "read") + } + v := *c + // Move bound check out of loop. + // + // See https://github.com/golang/go/issues/30945. + _ = data[len(data)-size] + for i := 0; i <= len(data)-size; i += size { + v = append(v, + *(*[16]byte)(data[i : i+size]), + ) + } + *c = v + return nil +} + +// EncodeColumn encodes FixedStr16 rows to *Buffer. +func (c ColFixedStr16) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + const size = 16 + offset := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + for _, vv := range v { + copy( + b.Buf[offset:offset+size], + vv[:], + ) + offset += size + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_unsafe_gen.go new file mode 100644 index 00000000..5d0dbeee --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr16_unsafe_gen.go @@ -0,0 +1,45 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "unsafe" + + "github.com/go-faster/errors" +) + +// DecodeColumn decodes FixedStr16 rows from *Reader. +func (c *ColFixedStr16) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + *c = append(*c, make([][16]byte, rows)...) + s := *(*slice)(unsafe.Pointer(c)) + const size = 16 + s.Len *= size + s.Cap *= size + dst := *(*[]byte)(unsafe.Pointer(&s)) + if err := r.ReadFull(dst); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} + +// EncodeColumn encodes FixedStr16 rows to *Buffer. +func (c ColFixedStr16) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + offset := len(b.Buf) + const size = 16 + b.Buf = append(b.Buf, make([]byte, size*len(v))...) 
+ s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + src := *(*[]byte)(unsafe.Pointer(&s)) + dst := b.Buf[offset:] + copy(dst, src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_gen.go new file mode 100644 index 00000000..1e2d9559 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_gen.go @@ -0,0 +1,71 @@ +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +// ColFixedStr256 represents FixedStr256 column. +type ColFixedStr256 [][256]byte + +// Compile-time assertions for ColFixedStr256. +var ( + _ ColInput = ColFixedStr256{} + _ ColResult = (*ColFixedStr256)(nil) + _ Column = (*ColFixedStr256)(nil) +) + +// Rows returns count of rows in column. +func (c ColFixedStr256) Rows() int { + return len(c) +} + +// Reset resets data in row, preserving capacity for efficiency. +func (c *ColFixedStr256) Reset() { + *c = (*c)[:0] +} + +// Type returns ColumnType of FixedStr256. +func (ColFixedStr256) Type() ColumnType { + return ColumnTypeFixedString.With("256") +} + +// Row returns i-th row of column. +func (c ColFixedStr256) Row(i int) [256]byte { + return c[i] +} + +// Append [256]byte to column. +func (c *ColFixedStr256) Append(v [256]byte) { + *c = append(*c, v) +} + +// Append [256]byte slice to column. +func (c *ColFixedStr256) AppendArr(vs [][256]byte) { + *c = append(*c, vs...) +} + +// LowCardinality returns LowCardinality for FixedStr256 . +func (c *ColFixedStr256) LowCardinality() *ColLowCardinality[[256]byte] { + return &ColLowCardinality[[256]byte]{ + index: c, + } +} + +// Array is helper that creates Array of [256]byte. +func (c *ColFixedStr256) Array() *ColArr[[256]byte] { + return &ColArr[[256]byte]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable([256]byte). +func (c *ColFixedStr256) Nullable() *ColNullable[[256]byte] { + return &ColNullable[[256]byte]{ + Values: c, + } +} + +// NewArrFixedStr256 returns new Array(FixedStr256). +func NewArrFixedStr256() *ColArr[[256]byte] { + return &ColArr[[256]byte]{ + Data: new(ColFixedStr256), + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_safe_gen.go new file mode 100644 index 00000000..bb961f8b --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_safe_gen.go @@ -0,0 +1,55 @@ +//go:build !(amd64 || arm64 || riscv64) || purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "encoding/binary" + + "github.com/go-faster/errors" +) + +var _ = binary.LittleEndian // clickHouse uses LittleEndian + +// DecodeColumn decodes FixedStr256 rows from *Reader. +func (c *ColFixedStr256) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + const size = 256 + data, err := r.ReadRaw(rows * size) + if err != nil { + return errors.Wrap(err, "read") + } + v := *c + // Move bound check out of loop. + // + // See https://github.com/golang/go/issues/30945. + _ = data[len(data)-size] + for i := 0; i <= len(data)-size; i += size { + v = append(v, + *(*[256]byte)(data[i : i+size]), + ) + } + *c = v + return nil +} + +// EncodeColumn encodes FixedStr256 rows to *Buffer. +func (c ColFixedStr256) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + const size = 256 + offset := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, size*len(v))...) 
+ for _, vv := range v { + copy( + b.Buf[offset:offset+size], + vv[:], + ) + offset += size + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_unsafe_gen.go new file mode 100644 index 00000000..277ac598 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr256_unsafe_gen.go @@ -0,0 +1,45 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "unsafe" + + "github.com/go-faster/errors" +) + +// DecodeColumn decodes FixedStr256 rows from *Reader. +func (c *ColFixedStr256) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + *c = append(*c, make([][256]byte, rows)...) + s := *(*slice)(unsafe.Pointer(c)) + const size = 256 + s.Len *= size + s.Cap *= size + dst := *(*[]byte)(unsafe.Pointer(&s)) + if err := r.ReadFull(dst); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} + +// EncodeColumn encodes FixedStr256 rows to *Buffer. +func (c ColFixedStr256) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + offset := len(b.Buf) + const size = 256 + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + src := *(*[]byte)(unsafe.Pointer(&s)) + dst := b.Buf[offset:] + copy(dst, src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_gen.go new file mode 100644 index 00000000..90adba9e --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_gen.go @@ -0,0 +1,71 @@ +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +// ColFixedStr32 represents FixedStr32 column. +type ColFixedStr32 [][32]byte + +// Compile-time assertions for ColFixedStr32. +var ( + _ ColInput = ColFixedStr32{} + _ ColResult = (*ColFixedStr32)(nil) + _ Column = (*ColFixedStr32)(nil) +) + +// Rows returns count of rows in column. +func (c ColFixedStr32) Rows() int { + return len(c) +} + +// Reset resets data in row, preserving capacity for efficiency. +func (c *ColFixedStr32) Reset() { + *c = (*c)[:0] +} + +// Type returns ColumnType of FixedStr32. +func (ColFixedStr32) Type() ColumnType { + return ColumnTypeFixedString.With("32") +} + +// Row returns i-th row of column. +func (c ColFixedStr32) Row(i int) [32]byte { + return c[i] +} + +// Append [32]byte to column. +func (c *ColFixedStr32) Append(v [32]byte) { + *c = append(*c, v) +} + +// Append [32]byte slice to column. +func (c *ColFixedStr32) AppendArr(vs [][32]byte) { + *c = append(*c, vs...) +} + +// LowCardinality returns LowCardinality for FixedStr32 . +func (c *ColFixedStr32) LowCardinality() *ColLowCardinality[[32]byte] { + return &ColLowCardinality[[32]byte]{ + index: c, + } +} + +// Array is helper that creates Array of [32]byte. +func (c *ColFixedStr32) Array() *ColArr[[32]byte] { + return &ColArr[[32]byte]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable([32]byte). +func (c *ColFixedStr32) Nullable() *ColNullable[[32]byte] { + return &ColNullable[[32]byte]{ + Values: c, + } +} + +// NewArrFixedStr32 returns new Array(FixedStr32). 
+func NewArrFixedStr32() *ColArr[[32]byte] { + return &ColArr[[32]byte]{ + Data: new(ColFixedStr32), + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_safe_gen.go new file mode 100644 index 00000000..cdaf62d0 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_safe_gen.go @@ -0,0 +1,55 @@ +//go:build !(amd64 || arm64 || riscv64) || purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "encoding/binary" + + "github.com/go-faster/errors" +) + +var _ = binary.LittleEndian // clickHouse uses LittleEndian + +// DecodeColumn decodes FixedStr32 rows from *Reader. +func (c *ColFixedStr32) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + const size = 32 + data, err := r.ReadRaw(rows * size) + if err != nil { + return errors.Wrap(err, "read") + } + v := *c + // Move bound check out of loop. + // + // See https://github.com/golang/go/issues/30945. + _ = data[len(data)-size] + for i := 0; i <= len(data)-size; i += size { + v = append(v, + *(*[32]byte)(data[i : i+size]), + ) + } + *c = v + return nil +} + +// EncodeColumn encodes FixedStr32 rows to *Buffer. +func (c ColFixedStr32) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + const size = 32 + offset := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + for _, vv := range v { + copy( + b.Buf[offset:offset+size], + vv[:], + ) + offset += size + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_unsafe_gen.go new file mode 100644 index 00000000..3777e5e8 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr32_unsafe_gen.go @@ -0,0 +1,45 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "unsafe" + + "github.com/go-faster/errors" +) + +// DecodeColumn decodes FixedStr32 rows from *Reader. +func (c *ColFixedStr32) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + *c = append(*c, make([][32]byte, rows)...) + s := *(*slice)(unsafe.Pointer(c)) + const size = 32 + s.Len *= size + s.Cap *= size + dst := *(*[]byte)(unsafe.Pointer(&s)) + if err := r.ReadFull(dst); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} + +// EncodeColumn encodes FixedStr32 rows to *Buffer. +func (c ColFixedStr32) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + offset := len(b.Buf) + const size = 32 + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + src := *(*[]byte)(unsafe.Pointer(&s)) + dst := b.Buf[offset:] + copy(dst, src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_gen.go new file mode 100644 index 00000000..09837fa8 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_gen.go @@ -0,0 +1,71 @@ +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +// ColFixedStr512 represents FixedStr512 column. +type ColFixedStr512 [][512]byte + +// Compile-time assertions for ColFixedStr512. +var ( + _ ColInput = ColFixedStr512{} + _ ColResult = (*ColFixedStr512)(nil) + _ Column = (*ColFixedStr512)(nil) +) + +// Rows returns count of rows in column. 
+func (c ColFixedStr512) Rows() int { + return len(c) +} + +// Reset resets data in row, preserving capacity for efficiency. +func (c *ColFixedStr512) Reset() { + *c = (*c)[:0] +} + +// Type returns ColumnType of FixedStr512. +func (ColFixedStr512) Type() ColumnType { + return ColumnTypeFixedString.With("512") +} + +// Row returns i-th row of column. +func (c ColFixedStr512) Row(i int) [512]byte { + return c[i] +} + +// Append [512]byte to column. +func (c *ColFixedStr512) Append(v [512]byte) { + *c = append(*c, v) +} + +// Append [512]byte slice to column. +func (c *ColFixedStr512) AppendArr(vs [][512]byte) { + *c = append(*c, vs...) +} + +// LowCardinality returns LowCardinality for FixedStr512 . +func (c *ColFixedStr512) LowCardinality() *ColLowCardinality[[512]byte] { + return &ColLowCardinality[[512]byte]{ + index: c, + } +} + +// Array is helper that creates Array of [512]byte. +func (c *ColFixedStr512) Array() *ColArr[[512]byte] { + return &ColArr[[512]byte]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable([512]byte). +func (c *ColFixedStr512) Nullable() *ColNullable[[512]byte] { + return &ColNullable[[512]byte]{ + Values: c, + } +} + +// NewArrFixedStr512 returns new Array(FixedStr512). +func NewArrFixedStr512() *ColArr[[512]byte] { + return &ColArr[[512]byte]{ + Data: new(ColFixedStr512), + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_safe_gen.go new file mode 100644 index 00000000..aa8ea319 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_safe_gen.go @@ -0,0 +1,55 @@ +//go:build !(amd64 || arm64 || riscv64) || purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "encoding/binary" + + "github.com/go-faster/errors" +) + +var _ = binary.LittleEndian // clickHouse uses LittleEndian + +// DecodeColumn decodes FixedStr512 rows from *Reader. +func (c *ColFixedStr512) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + const size = 512 + data, err := r.ReadRaw(rows * size) + if err != nil { + return errors.Wrap(err, "read") + } + v := *c + // Move bound check out of loop. + // + // See https://github.com/golang/go/issues/30945. + _ = data[len(data)-size] + for i := 0; i <= len(data)-size; i += size { + v = append(v, + *(*[512]byte)(data[i : i+size]), + ) + } + *c = v + return nil +} + +// EncodeColumn encodes FixedStr512 rows to *Buffer. +func (c ColFixedStr512) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + const size = 512 + offset := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + for _, vv := range v { + copy( + b.Buf[offset:offset+size], + vv[:], + ) + offset += size + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_unsafe_gen.go new file mode 100644 index 00000000..970ca0f0 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr512_unsafe_gen.go @@ -0,0 +1,45 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "unsafe" + + "github.com/go-faster/errors" +) + +// DecodeColumn decodes FixedStr512 rows from *Reader. +func (c *ColFixedStr512) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + *c = append(*c, make([][512]byte, rows)...) 
+ s := *(*slice)(unsafe.Pointer(c)) + const size = 512 + s.Len *= size + s.Cap *= size + dst := *(*[]byte)(unsafe.Pointer(&s)) + if err := r.ReadFull(dst); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} + +// EncodeColumn encodes FixedStr512 rows to *Buffer. +func (c ColFixedStr512) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + offset := len(b.Buf) + const size = 512 + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + src := *(*[]byte)(unsafe.Pointer(&s)) + dst := b.Buf[offset:] + copy(dst, src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_gen.go new file mode 100644 index 00000000..38849ccd --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_gen.go @@ -0,0 +1,71 @@ +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +// ColFixedStr64 represents FixedStr64 column. +type ColFixedStr64 [][64]byte + +// Compile-time assertions for ColFixedStr64. +var ( + _ ColInput = ColFixedStr64{} + _ ColResult = (*ColFixedStr64)(nil) + _ Column = (*ColFixedStr64)(nil) +) + +// Rows returns count of rows in column. +func (c ColFixedStr64) Rows() int { + return len(c) +} + +// Reset resets data in row, preserving capacity for efficiency. +func (c *ColFixedStr64) Reset() { + *c = (*c)[:0] +} + +// Type returns ColumnType of FixedStr64. +func (ColFixedStr64) Type() ColumnType { + return ColumnTypeFixedString.With("64") +} + +// Row returns i-th row of column. +func (c ColFixedStr64) Row(i int) [64]byte { + return c[i] +} + +// Append [64]byte to column. +func (c *ColFixedStr64) Append(v [64]byte) { + *c = append(*c, v) +} + +// Append [64]byte slice to column. +func (c *ColFixedStr64) AppendArr(vs [][64]byte) { + *c = append(*c, vs...) +} + +// LowCardinality returns LowCardinality for FixedStr64 . +func (c *ColFixedStr64) LowCardinality() *ColLowCardinality[[64]byte] { + return &ColLowCardinality[[64]byte]{ + index: c, + } +} + +// Array is helper that creates Array of [64]byte. +func (c *ColFixedStr64) Array() *ColArr[[64]byte] { + return &ColArr[[64]byte]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable([64]byte). +func (c *ColFixedStr64) Nullable() *ColNullable[[64]byte] { + return &ColNullable[[64]byte]{ + Values: c, + } +} + +// NewArrFixedStr64 returns new Array(FixedStr64). +func NewArrFixedStr64() *ColArr[[64]byte] { + return &ColArr[[64]byte]{ + Data: new(ColFixedStr64), + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_safe_gen.go new file mode 100644 index 00000000..89c1f24e --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_safe_gen.go @@ -0,0 +1,55 @@ +//go:build !(amd64 || arm64 || riscv64) || purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "encoding/binary" + + "github.com/go-faster/errors" +) + +var _ = binary.LittleEndian // clickHouse uses LittleEndian + +// DecodeColumn decodes FixedStr64 rows from *Reader. +func (c *ColFixedStr64) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + const size = 64 + data, err := r.ReadRaw(rows * size) + if err != nil { + return errors.Wrap(err, "read") + } + v := *c + // Move bound check out of loop. + // + // See https://github.com/golang/go/issues/30945. 
+ _ = data[len(data)-size] + for i := 0; i <= len(data)-size; i += size { + v = append(v, + *(*[64]byte)(data[i : i+size]), + ) + } + *c = v + return nil +} + +// EncodeColumn encodes FixedStr64 rows to *Buffer. +func (c ColFixedStr64) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + const size = 64 + offset := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + for _, vv := range v { + copy( + b.Buf[offset:offset+size], + vv[:], + ) + offset += size + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_unsafe_gen.go new file mode 100644 index 00000000..62ec09e5 --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr64_unsafe_gen.go @@ -0,0 +1,45 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "unsafe" + + "github.com/go-faster/errors" +) + +// DecodeColumn decodes FixedStr64 rows from *Reader. +func (c *ColFixedStr64) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + *c = append(*c, make([][64]byte, rows)...) + s := *(*slice)(unsafe.Pointer(c)) + const size = 64 + s.Len *= size + s.Cap *= size + dst := *(*[]byte)(unsafe.Pointer(&s)) + if err := r.ReadFull(dst); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} + +// EncodeColumn encodes FixedStr64 rows to *Buffer. +func (c ColFixedStr64) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + offset := len(b.Buf) + const size = 64 + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + src := *(*[]byte)(unsafe.Pointer(&s)) + dst := b.Buf[offset:] + copy(dst, src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_gen.go new file mode 100644 index 00000000..a58723ee --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_gen.go @@ -0,0 +1,71 @@ +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +// ColFixedStr8 represents FixedStr8 column. +type ColFixedStr8 [][8]byte + +// Compile-time assertions for ColFixedStr8. +var ( + _ ColInput = ColFixedStr8{} + _ ColResult = (*ColFixedStr8)(nil) + _ Column = (*ColFixedStr8)(nil) +) + +// Rows returns count of rows in column. +func (c ColFixedStr8) Rows() int { + return len(c) +} + +// Reset resets data in row, preserving capacity for efficiency. +func (c *ColFixedStr8) Reset() { + *c = (*c)[:0] +} + +// Type returns ColumnType of FixedStr8. +func (ColFixedStr8) Type() ColumnType { + return ColumnTypeFixedString.With("8") +} + +// Row returns i-th row of column. +func (c ColFixedStr8) Row(i int) [8]byte { + return c[i] +} + +// Append [8]byte to column. +func (c *ColFixedStr8) Append(v [8]byte) { + *c = append(*c, v) +} + +// Append [8]byte slice to column. +func (c *ColFixedStr8) AppendArr(vs [][8]byte) { + *c = append(*c, vs...) +} + +// LowCardinality returns LowCardinality for FixedStr8 . +func (c *ColFixedStr8) LowCardinality() *ColLowCardinality[[8]byte] { + return &ColLowCardinality[[8]byte]{ + index: c, + } +} + +// Array is helper that creates Array of [8]byte. +func (c *ColFixedStr8) Array() *ColArr[[8]byte] { + return &ColArr[[8]byte]{ + Data: c, + } +} + +// Nullable is helper that creates Nullable([8]byte). 
+func (c *ColFixedStr8) Nullable() *ColNullable[[8]byte] { + return &ColNullable[[8]byte]{ + Values: c, + } +} + +// NewArrFixedStr8 returns new Array(FixedStr8). +func NewArrFixedStr8() *ColArr[[8]byte] { + return &ColArr[[8]byte]{ + Data: new(ColFixedStr8), + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_safe_gen.go new file mode 100644 index 00000000..086ea6fc --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_safe_gen.go @@ -0,0 +1,55 @@ +//go:build !(amd64 || arm64 || riscv64) || purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "encoding/binary" + + "github.com/go-faster/errors" +) + +var _ = binary.LittleEndian // clickHouse uses LittleEndian + +// DecodeColumn decodes FixedStr8 rows from *Reader. +func (c *ColFixedStr8) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + const size = 8 + data, err := r.ReadRaw(rows * size) + if err != nil { + return errors.Wrap(err, "read") + } + v := *c + // Move bound check out of loop. + // + // See https://github.com/golang/go/issues/30945. + _ = data[len(data)-size] + for i := 0; i <= len(data)-size; i += size { + v = append(v, + *(*[8]byte)(data[i : i+size]), + ) + } + *c = v + return nil +} + +// EncodeColumn encodes FixedStr8 rows to *Buffer. +func (c ColFixedStr8) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + const size = 8 + offset := len(b.Buf) + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + for _, vv := range v { + copy( + b.Buf[offset:offset+size], + vv[:], + ) + offset += size + } +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_unsafe_gen.go new file mode 100644 index 00000000..9991c06e --- /dev/null +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_fixedstr8_unsafe_gen.go @@ -0,0 +1,45 @@ +//go:build (amd64 || arm64 || riscv64) && !purego + +// Code generated by ./cmd/ch-gen-col, DO NOT EDIT. + +package proto + +import ( + "unsafe" + + "github.com/go-faster/errors" +) + +// DecodeColumn decodes FixedStr8 rows from *Reader. +func (c *ColFixedStr8) DecodeColumn(r *Reader, rows int) error { + if rows == 0 { + return nil + } + *c = append(*c, make([][8]byte, rows)...) + s := *(*slice)(unsafe.Pointer(c)) + const size = 8 + s.Len *= size + s.Cap *= size + dst := *(*[]byte)(unsafe.Pointer(&s)) + if err := r.ReadFull(dst); err != nil { + return errors.Wrap(err, "read full") + } + return nil +} + +// EncodeColumn encodes FixedStr8 rows to *Buffer. +func (c ColFixedStr8) EncodeColumn(b *Buffer) { + v := c + if len(v) == 0 { + return + } + offset := len(b.Buf) + const size = 8 + b.Buf = append(b.Buf, make([]byte, size*len(v))...) + s := *(*slice)(unsafe.Pointer(&v)) + s.Len *= size + s.Cap *= size + src := *(*[]byte)(unsafe.Pointer(&s)) + dst := b.Buf[offset:] + copy(dst, src) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_float32_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_float32_gen.go index 377b2f3c..7031f111 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_float32_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_float32_gen.go @@ -37,6 +37,11 @@ func (c *ColFloat32) Append(v float32) { *c = append(*c, v) } +// Append float32 slice to column. +func (c *ColFloat32) AppendArr(vs []float32) { + *c = append(*c, vs...) +} + // LowCardinality returns LowCardinality for Float32 . 
func (c *ColFloat32) LowCardinality() *ColLowCardinality[float32] { return &ColLowCardinality[float32]{ diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_float32_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_float32_safe_gen.go index 32139a1f..f400aef2 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_float32_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_float32_safe_gen.go @@ -1,4 +1,4 @@ -//go:build !(amd64 || arm64) || purego +//go:build !(amd64 || arm64 || riscv64) || purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_float32_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_float32_unsafe_gen.go index 574284c1..2ded35f8 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_float32_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_float32_unsafe_gen.go @@ -1,4 +1,4 @@ -//go:build (amd64 || arm64) && !purego +//go:build (amd64 || arm64 || riscv64) && !purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_float64_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_float64_gen.go index a733a717..c210eb8d 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_float64_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_float64_gen.go @@ -37,6 +37,11 @@ func (c *ColFloat64) Append(v float64) { *c = append(*c, v) } +// Append float64 slice to column. +func (c *ColFloat64) AppendArr(vs []float64) { + *c = append(*c, vs...) +} + // LowCardinality returns LowCardinality for Float64 . func (c *ColFloat64) LowCardinality() *ColLowCardinality[float64] { return &ColLowCardinality[float64]{ diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_float64_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_float64_safe_gen.go index cdf607ee..68281aec 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_float64_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_float64_safe_gen.go @@ -1,4 +1,4 @@ -//go:build !(amd64 || arm64) || purego +//go:build !(amd64 || arm64 || riscv64) || purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_float64_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_float64_unsafe_gen.go index fae23ab8..f16fd396 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_float64_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_float64_unsafe_gen.go @@ -1,4 +1,4 @@ -//go:build (amd64 || arm64) && !purego +//go:build (amd64 || arm64 || riscv64) && !purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int128_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int128_gen.go index 34dade40..5e982c4f 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_int128_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int128_gen.go @@ -37,6 +37,11 @@ func (c *ColInt128) Append(v Int128) { *c = append(*c, v) } +// Append Int128 slice to column. +func (c *ColInt128) AppendArr(vs []Int128) { + *c = append(*c, vs...) +} + // LowCardinality returns LowCardinality for Int128 . 
func (c *ColInt128) LowCardinality() *ColLowCardinality[Int128] { return &ColLowCardinality[Int128]{ diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int128_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int128_safe_gen.go index 2d786c33..5902d3f9 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_int128_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int128_safe_gen.go @@ -1,4 +1,4 @@ -//go:build !(amd64 || arm64) || purego +//go:build !(amd64 || arm64 || riscv64) || purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int128_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int128_unsafe_gen.go index 28c2d8dc..c5862fff 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_int128_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int128_unsafe_gen.go @@ -1,4 +1,4 @@ -//go:build (amd64 || arm64) && !purego +//go:build (amd64 || arm64 || riscv64) && !purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int16_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int16_gen.go index ec255588..212801d9 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_int16_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int16_gen.go @@ -37,6 +37,11 @@ func (c *ColInt16) Append(v int16) { *c = append(*c, v) } +// Append int16 slice to column. +func (c *ColInt16) AppendArr(vs []int16) { + *c = append(*c, vs...) +} + // LowCardinality returns LowCardinality for Int16 . func (c *ColInt16) LowCardinality() *ColLowCardinality[int16] { return &ColLowCardinality[int16]{ diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int16_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int16_safe_gen.go index 1af147ca..75523a44 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_int16_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int16_safe_gen.go @@ -1,4 +1,4 @@ -//go:build !(amd64 || arm64) || purego +//go:build !(amd64 || arm64 || riscv64) || purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int16_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int16_unsafe_gen.go index 60411262..6ba5e50c 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_int16_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int16_unsafe_gen.go @@ -1,4 +1,4 @@ -//go:build (amd64 || arm64) && !purego +//go:build (amd64 || arm64 || riscv64) && !purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int256_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int256_gen.go index 2952b31b..5d7454b5 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_int256_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int256_gen.go @@ -37,6 +37,11 @@ func (c *ColInt256) Append(v Int256) { *c = append(*c, v) } +// Append Int256 slice to column. +func (c *ColInt256) AppendArr(vs []Int256) { + *c = append(*c, vs...) +} + // LowCardinality returns LowCardinality for Int256 . 
func (c *ColInt256) LowCardinality() *ColLowCardinality[Int256] { return &ColLowCardinality[Int256]{ diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int256_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int256_safe_gen.go index c353d88c..0b9f8f1c 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_int256_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int256_safe_gen.go @@ -1,4 +1,4 @@ -//go:build !(amd64 || arm64) || purego +//go:build !(amd64 || arm64 || riscv64) || purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int256_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int256_unsafe_gen.go index fb3bb0f5..2433bc92 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_int256_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int256_unsafe_gen.go @@ -1,4 +1,4 @@ -//go:build (amd64 || arm64) && !purego +//go:build (amd64 || arm64 || riscv64) && !purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int32_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int32_gen.go index bb08c54a..46b09585 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_int32_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int32_gen.go @@ -37,6 +37,11 @@ func (c *ColInt32) Append(v int32) { *c = append(*c, v) } +// Append int32 slice to column. +func (c *ColInt32) AppendArr(vs []int32) { + *c = append(*c, vs...) +} + // LowCardinality returns LowCardinality for Int32 . func (c *ColInt32) LowCardinality() *ColLowCardinality[int32] { return &ColLowCardinality[int32]{ diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int32_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int32_safe_gen.go index 800d91fc..52f78c14 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_int32_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int32_safe_gen.go @@ -1,4 +1,4 @@ -//go:build !(amd64 || arm64) || purego +//go:build !(amd64 || arm64 || riscv64) || purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int32_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int32_unsafe_gen.go index e6c5a333..b2e10256 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_int32_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int32_unsafe_gen.go @@ -1,4 +1,4 @@ -//go:build (amd64 || arm64) && !purego +//go:build (amd64 || arm64 || riscv64) && !purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int64_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int64_gen.go index 5cb27af2..4c8875c1 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_int64_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int64_gen.go @@ -37,6 +37,11 @@ func (c *ColInt64) Append(v int64) { *c = append(*c, v) } +// Append int64 slice to column. +func (c *ColInt64) AppendArr(vs []int64) { + *c = append(*c, vs...) +} + // LowCardinality returns LowCardinality for Int64 . 
func (c *ColInt64) LowCardinality() *ColLowCardinality[int64] { return &ColLowCardinality[int64]{ diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int64_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int64_safe_gen.go index 6f3cd810..400367d3 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_int64_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int64_safe_gen.go @@ -1,4 +1,4 @@ -//go:build !(amd64 || arm64) || purego +//go:build !(amd64 || arm64 || riscv64) || purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int64_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int64_unsafe_gen.go index 2e85b515..5c6f2658 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_int64_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int64_unsafe_gen.go @@ -1,4 +1,4 @@ -//go:build (amd64 || arm64) && !purego +//go:build (amd64 || arm64 || riscv64) && !purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int8_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int8_gen.go index 9883fc6d..98a71a28 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_int8_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int8_gen.go @@ -37,6 +37,11 @@ func (c *ColInt8) Append(v int8) { *c = append(*c, v) } +// Append int8 slice to column. +func (c *ColInt8) AppendArr(vs []int8) { + *c = append(*c, vs...) +} + // LowCardinality returns LowCardinality for Int8 . func (c *ColInt8) LowCardinality() *ColLowCardinality[int8] { return &ColLowCardinality[int8]{ diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int8_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int8_safe_gen.go index 6e8209a4..a79459d2 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_int8_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int8_safe_gen.go @@ -1,4 +1,4 @@ -//go:build !(amd64 || arm64) || purego +//go:build !(amd64 || arm64 || riscv64) || purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_int8_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_int8_unsafe_gen.go index 748de299..1c62c7db 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_int8_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_int8_unsafe_gen.go @@ -1,4 +1,4 @@ -//go:build (amd64 || arm64) && !purego +//go:build (amd64 || arm64 || riscv64) && !purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_gen.go index 3c3aef83..4c7a0bc9 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_gen.go @@ -37,6 +37,11 @@ func (c *ColIPv4) Append(v IPv4) { *c = append(*c, v) } +// Append IPv4 slice to column. +func (c *ColIPv4) AppendArr(vs []IPv4) { + *c = append(*c, vs...) +} + // LowCardinality returns LowCardinality for IPv4 . 
func (c *ColIPv4) LowCardinality() *ColLowCardinality[IPv4] { return &ColLowCardinality[IPv4]{ diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_safe_gen.go index b0fbcdc8..8b0b7902 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_safe_gen.go @@ -1,4 +1,4 @@ -//go:build !(amd64 || arm64) || purego +//go:build !(amd64 || arm64 || riscv64) || purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_unsafe_gen.go index bc505976..5fc0b7c7 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv4_unsafe_gen.go @@ -1,4 +1,4 @@ -//go:build (amd64 || arm64) && !purego +//go:build (amd64 || arm64 || riscv64) && !purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_gen.go index 80213e04..5907bd71 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_gen.go @@ -37,6 +37,11 @@ func (c *ColIPv6) Append(v IPv6) { *c = append(*c, v) } +// Append IPv6 slice to column. +func (c *ColIPv6) AppendArr(vs []IPv6) { + *c = append(*c, vs...) +} + // LowCardinality returns LowCardinality for IPv6 . func (c *ColIPv6) LowCardinality() *ColLowCardinality[IPv6] { return &ColLowCardinality[IPv6]{ diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_safe_gen.go index 2c011f38..9a5870d2 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_safe_gen.go @@ -1,4 +1,4 @@ -//go:build !(amd64 || arm64) || purego +//go:build !(amd64 || arm64 || riscv64) || purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_unsafe_gen.go index 55f50f0e..5650b498 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_ipv6_unsafe_gen.go @@ -1,4 +1,4 @@ -//go:build (amd64 || arm64) && !purego +//go:build (amd64 || arm64 || riscv64) && !purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_low_cardinality.go b/vendor/github.com/ClickHouse/ch-go/proto/col_low_cardinality.go index 27d1c41c..ffed5809 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_low_cardinality.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_low_cardinality.go @@ -13,34 +13,6 @@ var ( _ Column = (*ColLowCardinality[string])(nil) ) -// DecodeState implements StateDecoder, ensuring state for index column. 
-func (c *ColLowCardinality[T]) DecodeState(r *Reader) error { - keySerialization, err := r.Int64() - if err != nil { - return errors.Wrap(err, "version") - } - if keySerialization != int64(sharedDictionariesWithAdditionalKeys) { - return errors.Errorf("got version %d, expected %d", - keySerialization, sharedDictionariesWithAdditionalKeys, - ) - } - if s, ok := c.index.(StateDecoder); ok { - if err := s.DecodeState(r); err != nil { - return errors.Wrap(err, "index state") - } - } - return nil -} - -// EncodeState implements StateEncoder, ensuring state for index column. -func (c ColLowCardinality[T]) EncodeState(b *Buffer) { - // Writing key serialization version. - b.PutInt64(int64(sharedDictionariesWithAdditionalKeys)) - if s, ok := c.index.(StateEncoder); ok { - s.EncodeState(b) - } -} - //go:generate go run github.com/dmarkham/enumer -type CardinalityKey -trimprefix Key -output col_low_cardinality_enum.go // CardinalityKey is integer type of ColLowCardinality.Keys column. @@ -119,6 +91,34 @@ type ColLowCardinality[T comparable] struct { keys []int } +// DecodeState implements StateDecoder, ensuring state for index column. +func (c *ColLowCardinality[T]) DecodeState(r *Reader) error { + keySerialization, err := r.Int64() + if err != nil { + return errors.Wrap(err, "version") + } + if keySerialization != int64(sharedDictionariesWithAdditionalKeys) { + return errors.Errorf("got version %d, expected %d", + keySerialization, sharedDictionariesWithAdditionalKeys, + ) + } + if s, ok := c.index.(StateDecoder); ok { + if err := s.DecodeState(r); err != nil { + return errors.Wrap(err, "index state") + } + } + return nil +} + +// EncodeState implements StateEncoder, ensuring state for index column. +func (c ColLowCardinality[T]) EncodeState(b *Buffer) { + // Writing key serialization version. + b.PutInt64(int64(sharedDictionariesWithAdditionalKeys)) + if s, ok := c.index.(StateEncoder); ok { + s.EncodeState(b) + } +} + func (c *ColLowCardinality[T]) DecodeColumn(r *Reader, rows int) error { if rows == 0 { // Skipping entirely of no rows. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_map.go b/vendor/github.com/ClickHouse/ch-go/proto/col_map.go index 19fecc2e..90925fb2 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_map.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_map.go @@ -1,6 +1,8 @@ package proto import ( + "strings" + "github.com/go-faster/errors" ) @@ -78,6 +80,38 @@ func (c ColMap[K, V]) Row(i int) map[K]V { return m } +// RowKV returns a slice of KV[K, V] for a given row. +func (c ColMap[K, V]) RowKV(i int) []KV[K, V] { + var start int + end := int(c.Offsets[i]) + if i > 0 { + start = int(c.Offsets[i-1]) + } + v := make([]KV[K, V], 0, end-start) + for idx := start; idx < end; idx++ { + v = append(v, KV[K, V]{ + Key: c.Keys.Row(idx), + Value: c.Values.Row(idx), + }) + } + return v +} + +// KV is a key-value pair. +type KV[K comparable, V any] struct { + Key K + Value V +} + +// AppendKV is a convenience method for appending a slice of KV[K, V]. +func (c *ColMap[K, V]) AppendKV(kv []KV[K, V]) { + for _, v := range kv { + c.Keys.Append(v.Key) + c.Values.Append(v.Value) + } + c.Offsets.Append(uint64(c.Keys.Rows())) +} + func (c *ColMap[K, V]) Append(m map[K]V) { for k, v := range m { c.Keys.Append(k) @@ -129,3 +163,39 @@ func (c ColMap[K, V]) EncodeColumn(b *Buffer) { c.Keys.EncodeColumn(b) c.Values.EncodeColumn(b) } + +// Prepare ensures Preparable column propagation. 
+func (c ColMap[K, V]) Prepare() error { + if v, ok := c.Keys.(Preparable); ok { + if err := v.Prepare(); err != nil { + return errors.Wrap(err, "prepare data") + } + } + if v, ok := c.Values.(Preparable); ok { + if err := v.Prepare(); err != nil { + return errors.Wrap(err, "prepare data") + } + } + return nil +} + +// Infer ensures Inferable column propagation. +func (c *ColMap[K, V]) Infer(t ColumnType) error { + elems := strings.Split(string(t.Elem()), ",") + if len(elems) != 2 { + return errors.New("invalid map type") + } + if v, ok := c.Keys.(Inferable); ok { + ct := ColumnType(strings.TrimSpace(elems[0])) + if err := v.Infer(ct); err != nil { + return errors.Wrap(err, "infer data") + } + } + if v, ok := c.Values.(Inferable); ok { + ct := ColumnType(strings.TrimSpace(elems[1])) + if err := v.Infer(ct); err != nil { + return errors.Wrap(err, "infer data") + } + } + return nil +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_nothing.go b/vendor/github.com/ClickHouse/ch-go/proto/col_nothing.go index 1b86ee08..1a825091 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_nothing.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_nothing.go @@ -15,10 +15,14 @@ type Nothing struct{} // https://clickhouse.com/docs/ru/sql-reference/data-types/special-data-types/nothing type ColNothing int -func (c *ColNothing) Append(v Nothing) { +func (c *ColNothing) Append(_ Nothing) { *c++ } +func (c *ColNothing) AppendArr(vs []Nothing) { + *c = ColNothing(int(*c) + len(vs)) +} + func (c ColNothing) Row(i int) Nothing { if i >= int(c) { panic(fmt.Sprintf("[%d] of [%d]Nothing", i, c)) diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_nullable.go b/vendor/github.com/ClickHouse/ch-go/proto/col_nullable.go index e8e291e3..516245f0 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_nullable.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_nullable.go @@ -35,7 +35,7 @@ func Null[T any]() Nullable[T] { func (n Nullable[T]) IsSet() bool { return n.Set } func (n Nullable[T]) Or(v T) T { - if n.Set { + if !n.Set { return v } return n.Value diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_raw_of.go b/vendor/github.com/ClickHouse/ch-go/proto/col_raw_of.go index 0172ca38..325a17b3 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_raw_of.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_raw_of.go @@ -1,4 +1,4 @@ -//go:build (amd64 || arm64) && !purego +//go:build (amd64 || arm64 || riscv64) && !purego package proto diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_str.go b/vendor/github.com/ClickHouse/ch-go/proto/col_str.go index dfb67f63..8f48ad7f 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_str.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_str.go @@ -113,6 +113,12 @@ func (c ColStr) ForEachBytes(f func(i int, b []byte) error) error { // DecodeColumn decodes String rows from *Reader. func (c *ColStr) DecodeColumn(r *Reader, rows int) error { var p Position + size := len(c.Pos) + if cap(c.Pos) < size+rows { + c.Pos = append(c.Pos, make([]Position, size+rows-cap(c.Pos))...) + } + c.Pos = c.Pos[:0] + c.Buf = c.Buf[:cap(c.Buf)] for i := 0; i < rows; i++ { n, err := r.StrLen() if err != nil { @@ -122,12 +128,22 @@ func (c *ColStr) DecodeColumn(r *Reader, rows int) error { p.Start = p.End p.End += n - c.Buf = append(c.Buf, make([]byte, n)...) + if len(c.Buf) < p.End { + var an int + if n < 128 { + // small size, do batch buffer alloc + an = n * (rows - i) + } else { + an = n + } + c.Buf = append(c.Buf, make([]byte, an)...) 
+ } if err := r.ReadFull(c.Buf[p.Start:p.End]); err != nil { return errors.Wrapf(err, "row %d: read full", i) } c.Pos = append(c.Pos, p) } + c.Buf = c.Buf[:p.End] return nil } diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_gen.go index f399319f..e34f07e9 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_gen.go @@ -37,6 +37,11 @@ func (c *ColUInt128) Append(v UInt128) { *c = append(*c, v) } +// Append UInt128 slice to column. +func (c *ColUInt128) AppendArr(vs []UInt128) { + *c = append(*c, vs...) +} + // LowCardinality returns LowCardinality for UInt128 . func (c *ColUInt128) LowCardinality() *ColLowCardinality[UInt128] { return &ColLowCardinality[UInt128]{ diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_safe_gen.go index bc427777..bbe55dce 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_safe_gen.go @@ -1,4 +1,4 @@ -//go:build !(amd64 || arm64) || purego +//go:build !(amd64 || arm64 || riscv64) || purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_unsafe_gen.go index 4ef24b2a..5989b5f3 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint128_unsafe_gen.go @@ -1,4 +1,4 @@ -//go:build (amd64 || arm64) && !purego +//go:build (amd64 || arm64 || riscv64) && !purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_gen.go index f9d7d4c7..7bc8ba63 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_gen.go @@ -37,6 +37,11 @@ func (c *ColUInt16) Append(v uint16) { *c = append(*c, v) } +// Append uint16 slice to column. +func (c *ColUInt16) AppendArr(vs []uint16) { + *c = append(*c, vs...) +} + // LowCardinality returns LowCardinality for UInt16 . func (c *ColUInt16) LowCardinality() *ColLowCardinality[uint16] { return &ColLowCardinality[uint16]{ diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_safe_gen.go index 54f8ccd6..219f3a6c 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_safe_gen.go @@ -1,4 +1,4 @@ -//go:build !(amd64 || arm64) || purego +//go:build !(amd64 || arm64 || riscv64) || purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_unsafe_gen.go index 6d24308a..d98d9534 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint16_unsafe_gen.go @@ -1,4 +1,4 @@ -//go:build (amd64 || arm64) && !purego +//go:build (amd64 || arm64 || riscv64) && !purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. 
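One pattern worth calling out in the repeated `//go:build` edits above: each generated column ships as a matched pair of files, a portable "safe" implementation and an "unsafe" one that reinterprets slice memory directly, guarded by complementary constraints so exactly one file compiles for any target. This update admits `riscv64` (little-endian, like amd64 and arm64) to the fast path, while `-tags purego` still forces the portable files. A minimal sketch of the pairing, with illustrative file and package names rather than the actual generated code:

```go
// file: fast_other.go (illustrative name)
// Compiled where no fast path exists, or when `go build -tags purego`
// forces the portable implementation.
//go:build !(amd64 || arm64 || riscv64) || purego

package colspeed

const fastPath = false
```

```go
// file: fast_unsafe.go (illustrative name)
// Compiled on the little-endian targets the generator trusts;
// riscv64 joins amd64 and arm64 in this update.
//go:build (amd64 || arm64 || riscv64) && !purego

package colspeed

const fastPath = true
```

Since each constraint is the exact negation of the other, every build configuration selects exactly one of the two files.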
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_gen.go index d3ac257b..b68a119e 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_gen.go @@ -37,6 +37,11 @@ func (c *ColUInt256) Append(v UInt256) { *c = append(*c, v) } +// Append UInt256 slice to column. +func (c *ColUInt256) AppendArr(vs []UInt256) { + *c = append(*c, vs...) +} + // LowCardinality returns LowCardinality for UInt256 . func (c *ColUInt256) LowCardinality() *ColLowCardinality[UInt256] { return &ColLowCardinality[UInt256]{ diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_safe_gen.go index 16491bd9..68633e19 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_safe_gen.go @@ -1,4 +1,4 @@ -//go:build !(amd64 || arm64) || purego +//go:build !(amd64 || arm64 || riscv64) || purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_unsafe_gen.go index e78156bb..02488d3f 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint256_unsafe_gen.go @@ -1,4 +1,4 @@ -//go:build (amd64 || arm64) && !purego +//go:build (amd64 || arm64 || riscv64) && !purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_gen.go index 384215ef..41abca5b 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_gen.go @@ -37,6 +37,11 @@ func (c *ColUInt32) Append(v uint32) { *c = append(*c, v) } +// Append uint32 slice to column. +func (c *ColUInt32) AppendArr(vs []uint32) { + *c = append(*c, vs...) +} + // LowCardinality returns LowCardinality for UInt32 . func (c *ColUInt32) LowCardinality() *ColLowCardinality[uint32] { return &ColLowCardinality[uint32]{ diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_safe_gen.go index 49b4222c..0bc7de95 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_safe_gen.go @@ -1,4 +1,4 @@ -//go:build !(amd64 || arm64) || purego +//go:build !(amd64 || arm64 || riscv64) || purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_unsafe_gen.go index be4ab80e..3ddfa760 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint32_unsafe_gen.go @@ -1,4 +1,4 @@ -//go:build (amd64 || arm64) && !purego +//go:build (amd64 || arm64 || riscv64) && !purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. 
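These generated hunks give each fixed-width column an `AppendArr` bulk append next to the existing `Append`, and the `column.go` hunk further below adds `AppendArr` to the `ColumnOf[T]` interface itself. A small usage sketch against the vendored `proto` package:

```go
package main

import (
	"fmt"

	"github.com/ClickHouse/ch-go/proto"
)

func main() {
	var col proto.ColUInt32

	// Bulk path added in this update: one call instead of a per-value loop.
	col.AppendArr([]uint32{1, 2, 3})
	col.Append(4)

	fmt.Println(col.Rows()) // 4
	fmt.Println(col.Row(0)) // 1
}
```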
diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_gen.go index 764ea00c..4521cd45 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_gen.go @@ -37,6 +37,11 @@ func (c *ColUInt64) Append(v uint64) { *c = append(*c, v) } +// Append uint64 slice to column. +func (c *ColUInt64) AppendArr(vs []uint64) { + *c = append(*c, vs...) +} + // LowCardinality returns LowCardinality for UInt64 . func (c *ColUInt64) LowCardinality() *ColLowCardinality[uint64] { return &ColLowCardinality[uint64]{ diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_safe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_safe_gen.go index e5ed04b1..deea8a48 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_safe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_safe_gen.go @@ -1,4 +1,4 @@ -//go:build !(amd64 || arm64) || purego +//go:build !(amd64 || arm64 || riscv64) || purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_unsafe_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_unsafe_gen.go index 0a803dec..664f80f0 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_unsafe_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint64_unsafe_gen.go @@ -1,4 +1,4 @@ -//go:build (amd64 || arm64) && !purego +//go:build (amd64 || arm64 || riscv64) && !purego // Code generated by ./cmd/ch-gen-col, DO NOT EDIT. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uint8_gen.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uint8_gen.go index 97b06e33..02c05161 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_uint8_gen.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uint8_gen.go @@ -37,6 +37,11 @@ func (c *ColUInt8) Append(v uint8) { *c = append(*c, v) } +// Append uint8 slice to column. +func (c *ColUInt8) AppendArr(vs []uint8) { + *c = append(*c, vs...) +} + // LowCardinality returns LowCardinality for UInt8 . func (c *ColUInt8) LowCardinality() *ColLowCardinality[uint8] { return &ColLowCardinality[uint8]{ diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uuid.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uuid.go index a27d3fb8..1b9685af 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_uuid.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uuid.go @@ -21,3 +21,13 @@ func (c ColUUID) Row(i int) uuid.UUID { return c[i] } func (c *ColUUID) Reset() { *c = (*c)[:0] } func (c *ColUUID) Append(v uuid.UUID) { *c = append(*c, v) } func (c *ColUUID) AppendArr(v []uuid.UUID) { *c = append(*c, v...) } + +// Nullable is helper that creates Nullable(uuid.UUID). +func (c *ColUUID) Nullable() *ColNullable[uuid.UUID] { + return NewColNullable[uuid.UUID](c) +} + +// Array is helper that creates Array of uuid.UUID. 
+func (c *ColUUID) Array() *ColArr[uuid.UUID] { + return NewArray[uuid.UUID](c) +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uuid_safe.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uuid_safe.go index e93cdcd1..8de94088 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_uuid_safe.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uuid_safe.go @@ -1,4 +1,4 @@ -//go:build !(amd64 || arm64) || purego +//go:build !(amd64 || arm64 || riscv64) || purego package proto diff --git a/vendor/github.com/ClickHouse/ch-go/proto/col_uuid_unsafe.go b/vendor/github.com/ClickHouse/ch-go/proto/col_uuid_unsafe.go index ccf4e4cb..18fa73fc 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/col_uuid_unsafe.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/col_uuid_unsafe.go @@ -1,4 +1,4 @@ -//go:build (amd64 || arm64) && !purego +//go:build (amd64 || arm64 || riscv64) && !purego package proto diff --git a/vendor/github.com/ClickHouse/ch-go/proto/column.go b/vendor/github.com/ClickHouse/ch-go/proto/column.go index 172026e4..1cdeb31b 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/column.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/column.go @@ -31,6 +31,7 @@ type Column interface { type ColumnOf[T any] interface { Column Append(v T) + AppendArr(v []T) Row(i int) T } diff --git a/vendor/github.com/ClickHouse/ch-go/proto/date32.go b/vendor/github.com/ClickHouse/ch-go/proto/date32.go index 00ec39ce..6c1330f7 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/date32.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/date32.go @@ -5,16 +5,13 @@ import "time" // Date32 represents Date32 value. // // https://clickhouse.com/docs/en/sql-reference/data-types/date32/ -type Date32 uint32 - -// date32Epoch is unix time of 1925-01-01. -const date32Epoch = -1420070400 +type Date32 int32 // Unix returns unix timestamp of Date32. // // You can use time.Unix(d.Unix(), 0) to get Time in time.Local location. func (d Date32) Unix() int64 { - return secInDay*int64(d) + date32Epoch + return secInDay * int64(d) } // Time returns UTC starting time.Time of Date32. @@ -29,7 +26,7 @@ func (d Date32) String() string { // ToDate32 returns Date32 of time.Time. func ToDate32(t time.Time) Date32 { _, offset := t.Zone() - return Date32((t.Unix() + int64(offset) - date32Epoch) / secInDay) + return Date32((t.Unix() + int64(offset)) / secInDay) } // NewDate32 returns the Date32 corresponding to year, month and day in UTC. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/profile_enum.go b/vendor/github.com/ClickHouse/ch-go/proto/profile_enum.go index 514ee14d..47b26fd9 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/profile_enum.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/profile_enum.go @@ -1,8 +1,9 @@ -// Code generated by "enumer -type ProfileEventType -trimprefix Profile -output profile_enum.go"; DO NOT EDIT. +// Code generated by "enumer -type ProfileEventType -trimprefix Profile -text -json -output profile_enum.go"; DO NOT EDIT. 
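The `date32.go` hunk above changes semantics, not just style: `Date32` is now a signed day count relative to the Unix epoch, matching the Int32 days-since-1970 encoding ClickHouse uses in the native format, instead of an unsigned count measured from the old 1925-01-01 `date32Epoch`. Dates before 1970 are simply negative. A quick check of the new arithmetic, as a minimal sketch against the vendored package:

```go
package main

import (
	"fmt"
	"time"

	"github.com/ClickHouse/ch-go/proto"
)

func main() {
	// One day before the Unix epoch encodes as -1 under the signed scheme.
	d := proto.ToDate32(time.Date(1969, 12, 31, 0, 0, 0, 0, time.UTC))
	fmt.Println(int32(d)) // -1
	fmt.Println(d.Time()) // 1969-12-31 00:00:00 +0000 UTC
}
```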
package proto import ( + "encoding/json" "fmt" "strings" ) @@ -77,3 +78,32 @@ func (i ProfileEventType) IsAProfileEventType() bool { } return false } + +// MarshalJSON implements the json.Marshaler interface for ProfileEventType +func (i ProfileEventType) MarshalJSON() ([]byte, error) { + return json.Marshal(i.String()) +} + +// UnmarshalJSON implements the json.Unmarshaler interface for ProfileEventType +func (i *ProfileEventType) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return fmt.Errorf("ProfileEventType should be a string, got %s", data) + } + + var err error + *i, err = ProfileEventTypeString(s) + return err +} + +// MarshalText implements the encoding.TextMarshaler interface for ProfileEventType +func (i ProfileEventType) MarshalText() ([]byte, error) { + return []byte(i.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface for ProfileEventType +func (i *ProfileEventType) UnmarshalText(text []byte) error { + var err error + *i, err = ProfileEventTypeString(string(text)) + return err +} diff --git a/vendor/github.com/ClickHouse/ch-go/proto/profile_events.go b/vendor/github.com/ClickHouse/ch-go/proto/profile_events.go index 734b7760..32ccb467 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/profile_events.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/profile_events.go @@ -50,7 +50,7 @@ func (d *ProfileEvents) Result() Results { } } -//go:generate go run github.com/dmarkham/enumer -type ProfileEventType -trimprefix Profile -output profile_enum.go +//go:generate go run github.com/dmarkham/enumer -type ProfileEventType -trimprefix Profile -text -json -output profile_enum.go type ProfileEventType byte @@ -61,10 +61,10 @@ const ( // ProfileEvent is detailed profiling event from Server. type ProfileEvent struct { - ThreadID uint64 - Host string - Time time.Time - Type ProfileEventType - Name string - Value int64 + Type ProfileEventType `json:"type"` + Name string `json:"name"` + Value int64 `json:"value"` + Host string `json:"host_name"` + Time time.Time `json:"current_time"` + ThreadID uint64 `json:"thread_id"` } diff --git a/vendor/github.com/ClickHouse/ch-go/proto/proto.go b/vendor/github.com/ClickHouse/ch-go/proto/proto.go index 23258026..a68ce670 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/proto.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/proto.go @@ -4,5 +4,5 @@ package proto // Defaults for ClientHello. const ( Version = 54460 - Name = "go-faster/ch" + Name = "clickhouse/ch-go" ) diff --git a/vendor/github.com/ClickHouse/ch-go/proto/reader.go b/vendor/github.com/ClickHouse/ch-go/proto/reader.go index 4aa8f48d..fd521262 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/reader.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/reader.go @@ -87,8 +87,6 @@ func (r *Reader) UVarInt() (uint64, error) { return n, nil } -const maxStrSize = 10 * 1024 * 1024 // 10 MB - func (r *Reader) StrLen() (int, error) { n, err := r.Int() if err != nil { @@ -98,10 +96,6 @@ func (r *Reader) StrLen() (int, error) { if n < 0 { return 0, errors.Errorf("size %d is invalid", n) } - if n > maxStrSize { - // Protecting from possible OOM. 
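The regenerated `profile_enum.go` and the new struct tags in `profile_events.go` above make server profiling events JSON-friendly: the enum round-trips as a string and the struct fields serialize under ClickHouse-style names. A minimal sketch using only fields visible in the hunk; the values are made up, and the unset `Type` falls back to enumer's numeric `String()` form:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/ClickHouse/ch-go/proto"
)

func main() {
	ev := proto.ProfileEvent{
		Name:     "SelectedRows",
		Value:    42,
		Host:     "ch-1",
		Time:     time.Unix(1700000000, 0).UTC(),
		ThreadID: 7,
	}
	out, err := json.Marshal(ev)
	if err != nil {
		panic(err)
	}
	// Field names follow the new tags: type, name, value, host_name,
	// current_time, thread_id.
	fmt.Println(string(out))
}
```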
- return 0, errors.Errorf("size %d too big (%d is maximum)", n, maxStrSize) - } return n, nil } diff --git a/vendor/github.com/ClickHouse/ch-go/proto/server_log.go b/vendor/github.com/ClickHouse/ch-go/proto/server_log.go index 917c8a8f..f6ba4128 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/server_log.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/server_log.go @@ -4,13 +4,13 @@ import "time" // Log from server. type Log struct { - Time time.Time - Host string - QueryID string - ThreadID uint64 - Priority int8 - Source string - Text string + QueryID string `json:"query_id"` + Source string `json:"source"` + Text string `json:"text"` + Time time.Time `json:"event_time"` + Host string `json:"host_name"` + ThreadID uint64 `json:"thread_id"` + Priority int8 `json:"priority"` } // Logs from ServerCodeLog packet. diff --git a/vendor/github.com/ClickHouse/ch-go/proto/slice_unsafe.go b/vendor/github.com/ClickHouse/ch-go/proto/slice_unsafe.go index 38a99368..7d9e90ee 100644 --- a/vendor/github.com/ClickHouse/ch-go/proto/slice_unsafe.go +++ b/vendor/github.com/ClickHouse/ch-go/proto/slice_unsafe.go @@ -1,4 +1,4 @@ -//go:build (amd64 || arm64) && !purego +//go:build (amd64 || arm64 || riscv64) && !purego package proto diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/.gitignore b/vendor/github.com/ClickHouse/clickhouse-go/v2/.gitignore index 2e33aef2..108aa399 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/.gitignore +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/.gitignore @@ -30,4 +30,9 @@ coverage.txt dev/* .run/** -vendor \ No newline at end of file +vendor + +**.tfstate* +.terraform.lock.hcl +**/.terraform* +pipeline.auto.tfvars diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/CHANGELOG.md b/vendor/github.com/ClickHouse/clickhouse-go/v2/CHANGELOG.md index 877b5d20..bcfe72fd 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/CHANGELOG.md +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/CHANGELOG.md @@ -1,3 +1,216 @@ +# v2.17.1, 2023-12-27 + +## What's Changed +### Fixes 🛠 +* fix panic in contextWatchDog nil pointer check by @nityanandagohain in https://github.com/ClickHouse/clickhouse-go/pull/1168 + +## New Contributors +* @nityanandagohain made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1168 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.17.0...v2.17.1 + +# v2.17.0, 2023-12-21 + +## What's Changed +### Enhancements 🎉 +* Iterable ordered map alternative with improved performance by @hanjm in https://github.com/ClickHouse/clickhouse-go/pull/1152 +* Support bool alias type by @yogasw in https://github.com/ClickHouse/clickhouse-go/pull/1156 +### Fixes 🛠 +* Update README - mention HTTP protocol usable only with `database/sql` interface by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1160 +* Fix README example for Debugf by @aramperes in https://github.com/ClickHouse/clickhouse-go/pull/1153 + +## New Contributors +* @yogasw made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1156 +* @aramperes made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1153 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.16.0...v2.17.0 + +# v2.16.0, 2023-12-01 + +## What's Changed +### Enhancements 🎉 +* Add sql.Valuer support for all types by @deankarn in https://github.com/ClickHouse/clickhouse-go/pull/1144 +### Fixes 🛠 +* Fix DateTime64 range to actual supported range per ClickHouse documentation by
@phil-schreiber in https://github.com/ClickHouse/clickhouse-go/pull/1148 + +## New Contributors +* @phil-schreiber made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1148 +* @deankarn made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1144 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.15.0...v2.16.0 + +# v2.14.3, 2023-10-12 + +## What's Changed +### Fixes 🛠 +* Fix insertion of empty map into JSON column by using _dummy subcolumn by @leodido in https://github.com/ClickHouse/clickhouse-go/pull/1116 +### Other Changes 🛠 +* chore: specify method field on compression in example by @rdaniels6813 in https://github.com/ClickHouse/clickhouse-go/pull/1111 +* chore: remove extra error checks by @rutaka-n in https://github.com/ClickHouse/clickhouse-go/pull/1095 + +## New Contributors +* @leodido made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1116 +* @rdaniels6813 made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1111 +* @rutaka-n made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1095 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.14.2...v2.14.3 + +# v2.14.2, 2023-10-04 + +## What's Changed +### Fixes 🛠 +* Fix: Block stream read process would be terminated by empty block with zero rows by @crisismaple in https://github.com/ClickHouse/clickhouse-go/pull/1104 +* Free compressor's buffer when FreeBufOnConnRelease enabled by @cergxx in https://github.com/ClickHouse/clickhouse-go/pull/1100 +* Fix truncate ` for HTTP adapter by @beck917 in https://github.com/ClickHouse/clickhouse-go/pull/1103 +### Other Changes 🛠 +* docs: update readme.md by @rfyiamcool in https://github.com/ClickHouse/clickhouse-go/pull/1068 +* Remove dependency on github.com/satori/go.uuid by @srikanthccv in https://github.com/ClickHouse/clickhouse-go/pull/1085 + +## New Contributors +* @rfyiamcool made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1068 +* @beck917 made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1103 +* @srikanthccv made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1085 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.14.1...v2.14.2 + +# v2.14.1, 2023-09-14 + +## What's Changed +### Enhancements 🎉 +* parseDSN: support connection pool settings (#1082) by @hanjm in https://github.com/ClickHouse/clickhouse-go/pull/1084 + +## New Contributors +* @hanjm made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1084 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.14.0...v2.14.1 + +# v2.14.0, 2023-09-12 + +## What's Changed +### Enhancements 🎉 +* Add FreeBufOnConnRelease to clickhouse.Options by @cergxx in https://github.com/ClickHouse/clickhouse-go/pull/1091 +* Improving object allocation for (positional) parameter binding by @mdonkers in https://github.com/ClickHouse/clickhouse-go/pull/1092 +### Fixes 🛠 +* Fix escaping double quote in SQL statement in prepare batch by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1083 +### Other Changes 🛠 +* Update Go & ClickHouse versions by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1079 +* Return status code from any http error by @RoryCrispin in https://github.com/ClickHouse/clickhouse-go/pull/1090 +* tests: fix dropped error by @alrs in
https://github.com/ClickHouse/clickhouse-go/pull/1081 +* chore: unnecessary use of fmt.Sprintf by @testwill in https://github.com/ClickHouse/clickhouse-go/pull/1080 +* Run CI on self hosted runner by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1094 + +## New Contributors +* @cergxx made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1091 +* @alrs made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1081 +* @testwill made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1080 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.13.4...v2.14 + +# v2.13.4, 2023-08-30 + +## What's Changed +### Fixes 🛠 +* fix(proto): add TCP protocol version in query packet by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1077 + + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.13.3...v2.13.4 + +# v2.13.3, 2023-08-23 + +## What's Changed +### Fixes 🛠 +* fix(column.json): fix bool type handling by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1073 + + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.13.2...v2.13.3 + +# v2.13.2, 2023-08-18 + +## What's Changed +### Fixes 🛠 +* fix: update ch-go to remove string length limit by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1071 +### Other Changes 🛠 +* Test against latest and head CH by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1060 + + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.13.1...v2.13.2 + +# v2.13.1, 2023-08-17 + +## What's Changed +### Fixes 🛠 +* fix: native format Date32 representation by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1069 + + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.13.0...v2.13.1 + +# v2.13.0, 2023-08-10 + +## What's Changed +### Enhancements 🎉 +* Support scan from uint8 to bool by @ValManP in https://github.com/ClickHouse/clickhouse-go/pull/1051 +* Binding arguments for AsyncInsert interface by @mdonkers in https://github.com/ClickHouse/clickhouse-go/pull/1052 +* Batch rows count API by @EpicStep in https://github.com/ClickHouse/clickhouse-go/pull/1063 +* Implement release connection in batch by @EpicStep in https://github.com/ClickHouse/clickhouse-go/pull/1062 +### Other Changes 🛠 +* Restore test against CH 23.7 by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1059 + +## New Contributors +* @ValManP made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1051 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.12.1...v2.13.0 + +# v2.12.1, 2023-08-02 + +## What's Changed +### Fixes 🛠 +* Fix InsertAsync typo in docs by @et in https://github.com/ClickHouse/clickhouse-go/pull/1044 +* Fix panic and releasing in batch column by @EpicStep in https://github.com/ClickHouse/clickhouse-go/pull/1055 +* Docs/changelog fixes by @jmaicher in https://github.com/ClickHouse/clickhouse-go/pull/1046 +* Clarify error message re custom serializaion support by @RoryCrispin in https://github.com/ClickHouse/clickhouse-go/pull/1056 +* Fix send query on batch retry by @EpicStep in https://github.com/ClickHouse/clickhouse-go/pull/1045 +### Other Changes 🛠 +* Update ClickHouse versions by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1054 + +## New Contributors +* @et made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1044 +* @EpicStep
made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1055 +* @jmaicher made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1046 +* @RoryCrispin made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1056 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.12.0...v2.12.1 + +# v2.12.0, 2023-07-27 + +## What's Changed +### Enhancements 🎉 +* Implement elapsed time in query progress by @jkaflik in https://github.com/ClickHouse/clickhouse-go/pull/1039 +### Fixes 🛠 +* Release connection slot on connection acquire timeout by @sentanos in https://github.com/ClickHouse/clickhouse-go/pull/1042 + +## New Contributors +* @sentanos made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1042 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.11.0...v2.12.0 + +# v2.11.0, 2023-07-20 + +## What's Changed +### Enhancements 🎉 +* Retry for batch API by @djosephsen in https://github.com/ClickHouse/clickhouse-go/pull/941 +### Fixes 🛠 +* Fix startAutoCloseIdleConnections cause goroutine leak by @YenchangChan in https://github.com/ClickHouse/clickhouse-go/pull/1011 +* Fix netip.Addr pointer panic by @anjmao in https://github.com/ClickHouse/clickhouse-go/pull/1029 +### Other Changes 🛠 +* Git actions terraform by @gingerwizard in https://github.com/ClickHouse/clickhouse-go/pull/1023 + +## New Contributors +* @YenchangChan made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1011 +* @djosephsen made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/941 +* @anjmao made their first contribution in https://github.com/ClickHouse/clickhouse-go/pull/1029 + +**Full Changelog**: https://github.com/ClickHouse/clickhouse-go/compare/v2.10.1...v2.11.0 + # v2.10.1, 2023-06-06 ## What's Changed diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/README.md b/vendor/github.com/ClickHouse/clickhouse-go/v2/README.md index b6265069..ef08eb0a 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/README.md +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/README.md @@ -22,6 +22,7 @@ The client is tested against the currently [supported versions](https://github.c |----------------|-----------------| | => 2.0 <= 2.2 | 1.17, 1.18 | | >= 2.3 | 1.18.4+, 1.19 | +| >= 2.14 | 1.20, 1.21 | ## Key features @@ -34,6 +35,7 @@ The client is tested against the currently [supported versions](https://github.c * Connection pool * Failover and load balancing * [Bulk write support](examples/clickhouse_api/batch.go) (for `database/sql` [use](examples/std/batch.go) `begin->prepare->(in loop exec)->commit`) +* [PrepareBatch options](#preparebatch-options) * [AsyncInsert](benchmark/v2/write-async/main.go) (more details in [Async insert](#async-insert) section) * Named and numeric placeholders support * LZ4/ZSTD compression support @@ -74,7 +76,7 @@ Support for the ClickHouse protocol advanced features using `Context`: }, Debug: true, Debugf: func(format string, v ...any) { - fmt.Printf(format, v) + fmt.Printf(format+"\n", v...)
}, Settings: clickhouse.Settings{ "max_execution_time": 60, @@ -124,7 +126,7 @@ conn := clickhouse.OpenDB(&clickhouse.Options{ }, DialTimeout: time.Second * 30, Compression: &clickhouse.Compression{ - clickhouse.CompressionLZ4, + Method: clickhouse.CompressionLZ4, }, Debug: true, BlockBufferSize: 10, @@ -179,7 +181,7 @@ clickhouse://username:password@host1:9000,host2:9000/database?dial_timeout=200ms The native format can be used over the HTTP protocol. This is useful in scenarios where users need to proxy traffic e.g. using [ChProxy](https://www.chproxy.org/) or via load balancers. -This can be achieved by modifying the DSN to specify the http protocol. +This can be achieved by modifying the DSN to specify the HTTP protocol. ```sh http://host1:8123,host2:8123/database?dial_timeout=200ms&max_execution_time=60 @@ -206,6 +208,8 @@ conn := clickhouse.OpenDB(&clickhouse.Options{ }) ``` +**Note**: using HTTP protocol is possible only with `database/sql` interface. + ## Compression ZSTD/LZ4 compression is supported over native and http protocols. This is performed column by column at a block level and is only used for inserts. Compression buffer size is set as `MaxCompressionBuffer` option. @@ -270,7 +274,7 @@ Usage examples for [native API](examples/clickhouse_api/client_info.go) and [dat ## Async insert -[Asynchronous insert](https://clickhouse.com/docs/en/optimize/asynchronous-inserts#enabling-asynchronous-inserts) is supported via dedicated `InsertAsync` method. This allows to insert data with a non-blocking call. +[Asynchronous insert](https://clickhouse.com/docs/en/optimize/asynchronous-inserts#enabling-asynchronous-inserts) is supported via dedicated `AsyncInsert` method. This allows to insert data with a non-blocking call. Effectively, it controls a `async_insert` setting for the query. ### Using with batch API @@ -281,6 +285,11 @@ HTTP protocol supports batching. It can be enabled by setting `async_insert` whe For more details please see [asynchronous inserts](https://clickhouse.com/docs/en/optimize/asynchronous-inserts#enabling-asynchronous-inserts) documentation. +## PrepareBatch options + +Available options: +- [WithReleaseConnection](examples/clickhouse_api/batch_release_connection.go) - after PrepareBatch connection will be returned to the pool. It can help you make a long-lived batch. 
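The option named above maps to the new `opts ...driver.PrepareBatchOption` variadic that this patch adds to `PrepareBatch` (see the `clickhouse.go` hunk later in the patch). A hedged sketch of the intended call shape, assuming the `WithReleaseConnection` constructor exported from `lib/driver` that the linked example refers to; the address and table are illustrative:

```go
package main

import (
	"context"
	"log"

	"github.com/ClickHouse/clickhouse-go/v2"
	"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
)

func main() {
	conn, err := clickhouse.Open(&clickhouse.Options{
		Addr: []string{"127.0.0.1:9000"}, // illustrative address
	})
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ctx := context.Background()
	// WithReleaseConnection (assumed name, per the linked example) hands the
	// acquired connection back to the pool once the batch is prepared, so a
	// long-lived batch does not pin a pool slot.
	batch, err := conn.PrepareBatch(ctx, "INSERT INTO example (id)", driver.WithReleaseConnection())
	if err != nil {
		log.Fatal(err)
	}
	if err := batch.Append(uint64(1)); err != nil {
		log.Fatal(err)
	}
	if err := batch.Send(); err != nil {
		log.Fatal(err)
	}
}
```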
+ ## Benchmark | [V1 (READ)](benchmark/v1/read/main.go) | [V2 (READ) std](benchmark/v2/read/main.go) | [V2 (READ) clickhouse API](benchmark/v2/read-native/main.go) | @@ -305,6 +314,7 @@ go get -u github.com/ClickHouse/clickhouse-go/v2 ### native interface * [batch](examples/clickhouse_api/batch.go) +* [batch with release connection](examples/clickhouse_api/batch_release_connection.go) * [async insert](examples/clickhouse_api/async.go) * [batch struct](examples/clickhouse_api/append_struct.go) * [columnar](examples/clickhouse_api/columnar_insert.go) @@ -312,7 +322,7 @@ go get -u github.com/ClickHouse/clickhouse-go/v2 * [query parameters](examples/clickhouse_api/query_parameters.go) (deprecated in favour of native query parameters) * [bind params](examples/clickhouse_api/bind.go) (deprecated in favour of native query parameters) * [client info](examples/clickhouse_api/client_info.go) -* + ### std `database/sql` interface * [batch](examples/std/batch.go) diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/TYPES.md b/vendor/github.com/ClickHouse/clickhouse-go/v2/TYPES.md index 7d01a920..6712539e 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/TYPES.md +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/TYPES.md @@ -8,75 +8,75 @@ This effort is ongoing and can be seperated in to insertion (`Append`/`AppendRow All types can be inserted as a value or pointer. -| | **ClickHouse Type** | String | Decimal | Boolean | FixedString | UInt8 | UInt16 | UInt32 | UInt64 | UInt128 | UInt256 | Int8 | Int16 | Int32 | Int64 | Int128 | Int256 | Float32 | Float64 | UUID | Date | Date32 | DateTime | DateTime64 | Enum8 | Enum16 | Point | Ring | Polygon | MultiPolygon | -|------------------|---------------------|--------|---------|---------|-------------|-------|--------|--------|--------|---------|---------|------|-------|-------|-------|--------|--------|---------|---------|------|------|--------|----------|------------|-------|--------|-------|------|---------|--------------| -| **Golang Type** | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | -| uint | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | -| unit64 | | | | | | | | | X | | | | | | | | | | | | | | | | | | | | | | -| uint32 | | | | | | | | X | | | | | | | | | | | | | | | | | | | | | | | -| uint16 | | | | | | | X | | | | | | | | | | | | | | | | | | | | | | | | -| uint8 | | | | | | X | | | | | | | | | | | | | | | | | | | | | | | | | -| int | | | | | | | | | | | | | | | | | | | | | | | | | X | X | | | | | -| int64 | | | | | | | | | | | | | | | X | | | | | | | | X | X | | | | | | | -| int32 | | | | | | | | | | | | | | X | | | | | | | | | | | | | | | | | -| int16 | | | | | | | | | | | | | X | | | | | | | | | | | | | X | | | | | -| int8 | | | | | | | | | | | | X | | | | | | | | | | | | | X | | | | | | -| float32 | | | | | | | | | | | | | | | | | | X | | | | | | | | | | | | | -| float64 | | | | | | | | | | | | | | | | | | | X | | | | | | | | | | | | -| string | | X | | | X | | | | | | | | | | | | | | | X | X | X | X | X | X | X | | | | | -| boolean | | | | X | | | | | | | | | | | | | | | | | | | | | | | | | | | -| time.Time | | | | | | | | | | | | | | | | | | | | | X | X | X | X | | | | | | | -| big.Int | | | | | | | | | | X | X | | | | | X | X | | | | | | | | | | | | | | -| decimal.Decimal | | | X | | | | | | | | | | | | | | | | | | | | | | | | | | | | -| uuid.UUID | | | | | | | | | | | | | | | | | | | | X | | | | | | | | | | | -| orb.Point | | | | | | | | | | | | | | | | | | | | | | | | | | | X | | | | -| orb.Polygon 
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | X | | -| orb.Ring | | | | | | | | | | | | | | | | | | | | | | | | | | | | X | | | -| orb.MultiPolygon | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | X | -| []byte | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | X | - | fmt.Stringer | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | -| sql.NullString | | X | | | | | | | | | | | | | | | | | | | | | | | | | | | | | -| sql.NullTime | | | | | | | | | | | | | | | | | | | | | X | X | X | X | | | | | | | -| sql.NullFloat64 | | | | | | | | | | | | | | | | | | | X | | | | | | | | | | | | -| sql.NullInt64 | | | | | | | | | | | | | | | X | | | | | | | | | | | | | | | | -| sql.NullInt32 | | | | | | | | | | | | | | X | | | | | | | | | | | | | | | | | -| sql.NullInt16 | | | | | | | | | | | | | X | | | | | | | | | | | | | | | | | | -| sql.NullBool | | | | X | | | | | | | | | | | | | | | | | | | | | | | | | | | +| | **ClickHouse Type** | String | Decimal | Bool | FixedString | UInt8 | UInt16 | UInt32 | UInt64 | UInt128 | UInt256 | Int8 | Int16 | Int32 | Int64 | Int128 | Int256 | Float32 | Float64 | UUID | Date | Date32 | DateTime | DateTime64 | Enum8 | Enum16 | Point | Ring | Polygon | MultiPolygon | +|---------------|---------------------|--------|---------|------|-------------|-------|--------|--------|--------|---------|---------|------|-------|-------|-------|--------|--------|---------|---------|------|------|--------|----------|------------|-------|--------|-------|------|---------|--------------| +| **Golang Type** | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| uint | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| unit64 | | | | | | | | | X | | | | | | | | | | | | | | | | | | | | | | +| uint32 | | | | | | | | X | | | | | | | | | | | | | | | | | | | | | | | +| uint16 | | | | | | | X | | | | | | | | | | | | | | | | | | | | | | | | +| uint8 | | | | | | X | | | | | | | | | | | | | | | | | | | | | | | | | +| int | | | | | | | | | | | | | | | | | | | | | | | | | X | X | | | | | +| int64 | | | | | | | | | | | | | | | X | | | | | | | | X | X | | | | | | | +| int32 | | | | | | | | | | | | | | X | | | | | | | | | | | | | | | | | +| int16 | | | | | | | | | | | | | X | | | | | | | | | | | | | X | | | | | +| int8 | | | | | | | | | | | | X | | | | | | | | | | | | | X | | | | | | +| float32 | | | | | | | | | | | | | | | | | | X | | | | | | | | | | | | | +| float64 | | | | | | | | | | | | | | | | | | | X | | | | | | | | | | | | +| string | | X | | | X | | | | | | | | | | | | | | | X | X | X | X | X | X | X | | | | | +| bool | | | | X | | | | | | | | | | | | | | | | | | | | | | | | | | | +| time.Time | | | | | | | | | | | | | | | | | | | | | X | X | X | X | | | | | | | +| big.Int | | | | | | | | | | X | X | | | | | X | X | | | | | | | | | | | | | | +| decimal.Decimal | | | X | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| uuid.UUID | | | | | | | | | | | | | | | | | | | | X | | | | | | | | | | | +| orb.Point | | | | | | | | | | | | | | | | | | | | | | | | | | | X | | | | +| orb.Polygon | | | | | | | | | | | | | | | | | | | | | | | | | | | | | X | | +| orb.Ring | | | | | | | | | | | | | | | | | | | | | | | | | | | | X | | | +| orb.MultiPolygon | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | X | +| []byte | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | X | + | fmt.Stringer | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| sql.NullString | | X | | | | | | | | | | | | | | | | | | | | | | | | | | 
| | | +| sql.NullTime | | | | | | | | | | | | | | | | | | | | | X | X | X | X | | | | | | | +| sql.NullFloat64 | | | | | | | | | | | | | | | | | | | X | | | | | | | | | | | | +| sql.NullInt64 | | | | | | | | | | | | | | | X | | | | | | | | | | | | | | | | +| sql.NullInt32 | | | | | | | | | | | | | | X | | | | | | | | | | | | | | | | | +| sql.NullInt16 | | | | | | | | | | | | | X | | | | | | | | | | | | | | | | | | +| sql.NullBool | | | | X | | | | | | | | | | | | | | | | | | | | | | | | | | | ## Scan Support All types can be read into a pointer or pointer to a pointer. -| | **ClickHouse Type** | String | Decimal | Boolean | FixedString | UInt8 | UInt16 | UInt32 | UInt64 | UInt128 | UInt256 | Int8 | Int16 | Int32 | Int64 | Int128 | Int256 | Float32 | Float64 | UUID | Date | Date32 | DateTime | DateTime64 | Enum8 | Enum16 | Point | Ring | Polygon | MultiPolygon | -|------------------|---------------------|--------|---------|---------|-------------|-------|--------|--------|--------|---------|---------|------|-------|-------|-------|--------|--------|---------|---------|------|------|--------|----------|------------|-------|--------|-------|------|---------|--------------| -| **Golang Type** | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | -| uint | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | -| unit64 | | | | | | | | | X | | | | | | | | | | | | | | | | | | | | | | -| uint32 | | | | | | | | X | | | | | | | | | | | | | | | | | | | | | | | -| uint16 | | | | | | | X | | | | | | | | | | | | | | | | | | | | | | | | -| uint8 | | | | | | X | | | | | | | | | | | | | | | | | | | | | | | | | -| int | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | -| int64 | | | | | | | | | | | | | | | X | | | | | | | | | | | | | | | | -| int32 | | | | | | | | | | | | | | X | | | | | | | | | | | | | | | | | -| int16 | | | | | | | | | | | | | X | | | | | | | | | | | | | | | | | | -| int8 | | | | | | | | | | | | X | | | | | | | | | | | | | | | | | | | -| float32 | | | | | | | | | | | | | | | | | | X | | | | | | | | | | | | | -| float64 | | | | | | | | | | | | | | | | | | | X | | | | | | | | | | | | -| string | | X | | | X | | | | | | | | | | | | | | | X | | | | | X | X | | | | | -| boolean | | | | X | | | | | | | | | | | | | | | | | | | | | | | | | | | -| time.Time | | | | | | | | | | | | | | | | | | | | | X | X | X | X | | | | | | | -| big.Int | | | | | | | | | | X | X | | | | | X | X | | | | | | | | | | | | | | -| decimal.Decimal | | | X | | | | | | | | | | | | | | | | | | | | | | | | | | | | -| uuid.UUID | | | | | | | | | | | | | | | | | | | | X | | | | | | | | | | | -| orb.Point | | | | | | | | | | | | | | | | | | | | | | | | | | | X | | | | -| orb.Polygon | | | | | | | | | | | | | | | | | | | | | | | | | | | | | X | | -| orb.Ring | | | | | | | | | | | | | | | | | | | | | | | | | | | | X | | | -| orb.MultiPolygon | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | X | -| sql.Scan | | | | | | | | | | | | | | | | | | | | X | X | X | X | X | | | | | | | -| sql.NullString | | X | | | | | | | | | | | | | | | | | | | | | | | | | | | | | -| sql.NullTime | | | | | | | | | | | | | | | | | | | | | X | X | X | X | | | | | | | -| sql.NullFloat64 | | | | | | | | | | | | | | | | | | | X | | | | | | | | | | | | -| sql.NullInt64 | | | | | | | | | | | | | | | X | | | | | | | | | | | | | | | | -| sql.NullInt32 | | | | | | | | | | | | | | X | | | | | | | | | | | | | | | | | -| sql.NullInt16 | | | | | | | | | | | | | X | | | | | | | | | | | | | | | | | | -| sql.NullBool | | | | X | | | | 
| | | | | | | | | | | | | | | | | | | | | | | +| | **ClickHouse Type** | String | Decimal | Bool | FixedString | UInt8 | UInt16 | UInt32 | UInt64 | UInt128 | UInt256 | Int8 | Int16 | Int32 | Int64 | Int128 | Int256 | Float32 | Float64 | UUID | Date | Date32 | DateTime | DateTime64 | Enum8 | Enum16 | Point | Ring | Polygon | MultiPolygon | +|---------------|---------------------|--------|---------|------|-------------|-------|--------|--------|--------|---------|---------|------|-------|-------|-------|--------|--------|---------|---------|------|------|--------|----------|------------|-------|--------|-------|------|---------|--------------| +| **Golang Type** | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| uint | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| unit64 | | | | | | | | | X | | | | | | | | | | | | | | | | | | | | | | +| uint32 | | | | | | | | X | | | | | | | | | | | | | | | | | | | | | | | +| uint16 | | | | | | | X | | | | | | | | | | | | | | | | | | | | | | | | +| uint8 | | | | | | X | | | | | | | | | | | | | | | | | | | | | | | | | +| int | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| int64 | | | | | | | | | | | | | | | X | | | | | | | | | | | | | | | | +| int32 | | | | | | | | | | | | | | X | | | | | | | | | | | | | | | | | +| int16 | | | | | | | | | | | | | X | | | | | | | | | | | | | | | | | | +| int8 | | | | | | | | | | | | X | | | | | | | | | | | | | | | | | | | +| float32 | | | | | | | | | | | | | | | | | | X | | | | | | | | | | | | | +| float64 | | | | | | | | | | | | | | | | | | | X | | | | | | | | | | | | +| string | | X | | | X | | | | | | | | | | | | | | | X | | | | | X | X | | | | | +| bool | | | | X | | | | | | | | | | | | | | | | | | | | | | | | | | | +| time.Time | | | | | | | | | | | | | | | | | | | | | X | X | X | X | | | | | | | +| big.Int | | | | | | | | | | X | X | | | | | X | X | | | | | | | | | | | | | | +| decimal.Decimal | | | X | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| uuid.UUID | | | | | | | | | | | | | | | | | | | | X | | | | | | | | | | | +| orb.Point | | | | | | | | | | | | | | | | | | | | | | | | | | | X | | | | +| orb.Polygon | | | | | | | | | | | | | | | | | | | | | | | | | | | | | X | | +| orb.Ring | | | | | | | | | | | | | | | | | | | | | | | | | | | | X | | | +| orb.MultiPolygon | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | X | +| sql.Scan | | | | | | | | | | | | | | | | | | | | X | X | X | X | X | | | | | | | +| sql.NullString | | X | | | | | | | | | | | | | | | | | | | | | | | | | | | | | +| sql.NullTime | | | | | | | | | | | | | | | | | | | | | X | X | X | X | | | | | | | +| sql.NullFloat64 | | | | | | | | | | | | | | | | | | | X | | | | | | | | | | | | +| sql.NullInt64 | | | | | | | | | | | | | | | X | | | | | | | | | | | | | | | | +| sql.NullInt32 | | | | | | | | | | | | | | X | | | | | | | | | | | | | | | | | +| sql.NullInt16 | | | | | | | | | | | | | X | | | | | | | | | | | | | | | | | | +| sql.NullBool | | | | X | | | | | | | | | | | | | | | | | | | | | | | | | | | diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/bind.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/bind.go index 32ffcc9b..7f5ac393 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/bind.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/bind.go @@ -25,6 +25,7 @@ import ( "strings" "time" + "github.com/ClickHouse/clickhouse-go/v2/lib/column" "github.com/ClickHouse/clickhouse-go/v2/lib/driver" ) @@ -109,41 +110,64 @@ func checkAllNamedArguments(args ...any) (bool, 
error) { return haveNamed, nil } -var bindPositionCharRe = regexp.MustCompile(`[?]`) - func bindPositional(tz *time.Location, query string, args ...any) (_ string, err error) { var ( - unbind = make(map[int]struct{}) - params = make([]string, len(args)) + lastMatchIndex = -1 // Position of previous match for copying + argIndex = 0 // Index for the argument at current position + buf = make([]byte, 0, len(query)) + unbindCount = 0 // Number of positional arguments that couldn't be matched ) - for i, v := range args { - if fn, ok := v.(std_driver.Valuer); ok { - if v, err = fn.Value(); err != nil { - return "", nil + + for i := 0; i < len(query); i++ { + // It's fine looping through the query string as bytes, because the (fixed) characters we're looking for + // are in the ASCII range to won't take up more than one byte. + if query[i] == '?' { + if i > 0 && query[i-1] == '\\' { + // Copy all previous index to here characters + buf = append(buf, query[lastMatchIndex+1:i-1]...) + buf = append(buf, '?') + } else { + // Copy all previous index to here characters + buf = append(buf, query[lastMatchIndex+1:i]...) + + // Append the argument value + if argIndex < len(args) { + v := args[argIndex] + if fn, ok := v.(std_driver.Valuer); ok { + if v, err = fn.Value(); err != nil { + return "", nil + } + } + + value, err := format(tz, Seconds, v) + if err != nil { + return "", err + } + + buf = append(buf, value...) + argIndex++ + } else { + unbindCount++ + } } - } - params[i], err = format(tz, Seconds, v) - if err != nil { - return "", err + + lastMatchIndex = i } } - i := 0 - query = bindPositionalRe.ReplaceAllStringFunc(query, func(n string) string { - if i >= len(params) { - unbind[i] = struct{}{} - return "" - } - val := params[i] - i++ - return bindPositionCharRe.ReplaceAllStringFunc(n, func(m string) string { - return val - }) - }) - for param := range unbind { - return "", fmt.Errorf("have no arg for param ? at position %d", param) + + // If there were no replacements, quick return without copying the string + if lastMatchIndex < 0 { + return query, nil + } + + // Append the remainder + buf = append(buf, query[lastMatchIndex+1:]...) + + if unbindCount > 0 { + return "", fmt.Errorf("have no arg for param ? at last %d positions", unbindCount) } - // replace \? 
escape sequence - return strings.ReplaceAll(query, "\\?", "?"), nil + + return string(buf), nil } func bindNumeric(tz *time.Location, query string, args ...any) (_ string, err error) { @@ -243,9 +267,11 @@ func formatTime(tz *time.Location, scale TimeUnit, value time.Time) (string, err return fmt.Sprintf("toDateTime64('%s', %d, '%s')", value.Format(fmt.Sprintf("2006-01-02 15:04:05.%0*d", int(scale*3), 0)), int(scale*3), value.Location().String()), nil } +var stringQuoteReplacer = strings.NewReplacer(`\`, `\\`, `'`, `\'`) + func format(tz *time.Location, scale TimeUnit, v any) (string, error) { quote := func(v string) string { - return "'" + strings.NewReplacer(`\`, `\\`, `'`, `\'`).Replace(v) + "'" + return "'" + stringQuoteReplacer.Replace(v) + "'" } switch v := v.(type) { case nil: @@ -279,6 +305,39 @@ func format(tz *time.Location, scale TimeUnit, v any) (string, error) { return fmt.Sprintf("[%s]", val), nil case fmt.Stringer: return quote(v.String()), nil + case column.OrderedMap: + values := make([]string, 0) + for key := range v.Keys() { + name, err := format(tz, scale, key) + if err != nil { + return "", err + } + value, _ := v.Get(key) + val, err := format(tz, scale, value) + if err != nil { + return "", err + } + values = append(values, fmt.Sprintf("%s, %s", name, val)) + } + + return "map(" + strings.Join(values, ", ") + ")", nil + case column.IterableOrderedMap: + values := make([]string, 0) + iter := v.Iterator() + for iter.Next() { + key, value := iter.Key(), iter.Value() + name, err := format(tz, scale, key) + if err != nil { + return "", err + } + val, err := format(tz, scale, value) + if err != nil { + return "", err + } + values = append(values, fmt.Sprintf("%s, %s", name, val)) + } + + return "map(" + strings.Join(values, ", ") + ")", nil } switch v := reflect.ValueOf(v); v.Kind() { case reflect.String: diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse.go index ddb542cc..a565247a 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse.go @@ -24,11 +24,13 @@ import ( "sync/atomic" "time" + _ "time/tzdata" + + chproto "github.com/ClickHouse/ch-go/proto" "github.com/ClickHouse/clickhouse-go/v2/contributors" "github.com/ClickHouse/clickhouse-go/v2/lib/column" "github.com/ClickHouse/clickhouse-go/v2/lib/driver" "github.com/ClickHouse/clickhouse-go/v2/lib/proto" - _ "time/tzdata" ) type Conn = driver.Conn @@ -43,10 +45,12 @@ type ( var ( ErrBatchInvalid = errors.New("clickhouse: batch is invalid. check appended data is correct") ErrBatchAlreadySent = errors.New("clickhouse: batch has already been sent") + ErrBatchNotSent = errors.New("clickhouse: invalid retry, batch not sent yet") ErrAcquireConnTimeout = errors.New("clickhouse: acquire conn timeout. 
you can increase the number of max open conn or the dial timeout") ErrUnsupportedServerRevision = errors.New("clickhouse: unsupported server revision") ErrBindMixedParamsFormats = errors.New("clickhouse [bind]: mixed named, numeric or positional parameters") ErrAcquireConnNoAddress = errors.New("clickhouse: no valid address supplied") + ErrServerUnexpectedData = errors.New("code: 101, message: Unexpected packet Data received from client") ) type OpError struct { @@ -82,6 +86,7 @@ func Open(opt *Options) (driver.Conn, error) { opt: o, idle: make(chan *connect, o.MaxIdleConns), open: make(chan struct{}, o.MaxOpenConns), + exit: make(chan struct{}), } go conn.startAutoCloseIdleConnections() return conn, nil @@ -91,6 +96,7 @@ type clickhouse struct { opt *Options idle chan *connect open chan struct{} + exit chan struct{} connID int64 } @@ -148,24 +154,34 @@ func (ch *clickhouse) Exec(ctx context.Context, query string, args ...any) error return nil } -func (ch *clickhouse) PrepareBatch(ctx context.Context, query string) (driver.Batch, error) { +func (ch *clickhouse) PrepareBatch(ctx context.Context, query string, opts ...driver.PrepareBatchOption) (driver.Batch, error) { conn, err := ch.acquire(ctx) if err != nil { return nil, err } - batch, err := conn.prepareBatch(ctx, query, ch.release) + batch, err := conn.prepareBatch(ctx, query, getPrepareBatchOptions(opts...), ch.release, ch.acquire) if err != nil { return nil, err } return batch, nil } -func (ch *clickhouse) AsyncInsert(ctx context.Context, query string, wait bool) error { +func getPrepareBatchOptions(opts ...driver.PrepareBatchOption) driver.PrepareBatchOptions { + var options driver.PrepareBatchOptions + + for _, opt := range opts { + opt(&options) + } + + return options +} + +func (ch *clickhouse) AsyncInsert(ctx context.Context, query string, wait bool, args ...any) error { conn, err := ch.acquire(ctx) if err != nil { return err } - if err := conn.asyncInsert(ctx, query, wait); err != nil { + if err := conn.asyncInsert(ctx, query, wait, args...); err != nil { ch.release(conn, err) return err } @@ -253,6 +269,10 @@ func (ch *clickhouse) acquire(ctx context.Context) (conn *connect, err error) { } select { case <-timer.C: + select { + case <-ch.open: + default: + } return nil, ErrAcquireConnTimeout case conn := <-ch.idle: if conn.isBad() { @@ -287,6 +307,8 @@ func (ch *clickhouse) startAutoCloseIdleConnections() { select { case <-ticker.C: ch.closeIdleExpired() + case <-ch.exit: + return } } } @@ -325,6 +347,10 @@ func (ch *clickhouse) release(conn *connect, err error) { conn.close() return } + if ch.opt.FreeBufOnConnRelease { + conn.buffer = new(chproto.Buffer) + conn.compressor.Data = nil + } select { case ch.idle <- conn: default: @@ -338,6 +364,7 @@ func (ch *clickhouse) Close() error { case c := <-ch.idle: c.close() default: + ch.exit <- struct{}{} return nil } } diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse_options.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse_options.go index f9ddcb34..d64b04eb 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse_options.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse_options.go @@ -138,6 +138,7 @@ type Options struct { MaxIdleConns int // default 5 ConnMaxLifetime time.Duration // default 1 hour ConnOpenStrategy ConnOpenStrategy + FreeBufOnConnRelease bool // drop preserved memory buffer after each query HttpHeaders map[string]string // set additional headers on HTTP requests HttpUrlPath string // set additional URL path 
for HTTP requests BlockBufferSize uint8 // default 2 - can be overwritten on query @@ -265,6 +266,24 @@ func (o *Options) fromDSN(in string) error { case "round_robin": o.ConnOpenStrategy = ConnOpenRoundRobin } + case "max_open_conns": + maxOpenConns, err := strconv.Atoi(params.Get(v)) + if err != nil { + return errors.Wrap(err, "max_open_conns invalid value") + } + o.MaxOpenConns = maxOpenConns + case "max_idle_conns": + maxIdleConns, err := strconv.Atoi(params.Get(v)) + if err != nil { + return errors.Wrap(err, "max_idle_conns invalid value") + } + o.MaxIdleConns = maxIdleConns + case "conn_max_lifetime": + connMaxLifetime, err := time.ParseDuration(params.Get(v)) + if err != nil { + return errors.Wrap(err, "conn_max_lifetime invalid value") + } + o.ConnMaxLifetime = connMaxLifetime case "username": o.Auth.Username = params.Get(v) case "password": diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse_rows.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse_rows.go index 150e82bf..698905e7 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse_rows.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse_rows.go @@ -52,7 +52,6 @@ next: r.err = err return false } - goto next case block := <-r.stream: if block == nil { return false @@ -63,6 +62,7 @@ next: } r.row, r.block = 0, block } + goto next } r.row++ return r.row <= r.block.Rows() diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse_std.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse_std.go index a603b4c9..7ab67066 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse_std.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/clickhouse_std.go @@ -46,7 +46,7 @@ type stdConnOpener struct { func (o *stdConnOpener) Driver() driver.Driver { var debugf = func(format string, v ...any) {} if o.opt.Debug { - debugf = log.New(os.Stdout, fmt.Sprintf("[clickhouse-std] "), 0).Printf + debugf = log.New(os.Stdout, "[clickhouse-std] ", 0).Printf } return &stdDriver{debugf: debugf} } @@ -125,7 +125,7 @@ func Connector(opt *Options) driver.Connector { var debugf = func(format string, v ...any) {} if o.Debug { - debugf = log.New(os.Stdout, fmt.Sprintf("[clickhouse-std][opener] "), 0).Printf + debugf = log.New(os.Stdout, "[clickhouse-std][opener] ", 0).Printf } return &stdConnOpener{ opt: o, @@ -149,7 +149,7 @@ func OpenDB(opt *Options) *sql.DB { settings = append(settings, "SetConnMaxLifetime") } if opt.Debug { - debugf = log.New(os.Stdout, fmt.Sprintf("[clickhouse-std][opener] "), 0).Printf + debugf = log.New(os.Stdout, "[clickhouse-std][opener] ", 0).Printf } if len(settings) != 0 { return sql.OpenDB(&stdConnOpener{ @@ -170,8 +170,8 @@ type stdConnect interface { query(ctx context.Context, release func(*connect, error), query string, args ...any) (*rows, error) exec(ctx context.Context, query string, args ...any) error ping(ctx context.Context) (err error) - prepareBatch(ctx context.Context, query string, release func(*connect, error)) (ldriver.Batch, error) - asyncInsert(ctx context.Context, query string, wait bool) error + prepareBatch(ctx context.Context, query string, options ldriver.PrepareBatchOptions, release func(*connect, error), acquire func(context.Context) (*connect, error)) (ldriver.Batch, error) + asyncInsert(ctx context.Context, query string, wait bool, args ...any) error } type stdDriver struct { @@ -189,7 +189,7 @@ func (std *stdDriver) Open(dsn string) (_ driver.Conn, err error) { o := opt.setDefaults() var debugf = func(format string, v 
...any) {} if o.Debug { - debugf = log.New(os.Stdout, fmt.Sprintf("[clickhouse-std][opener] "), 0).Printf + debugf = log.New(os.Stdout, "[clickhouse-std][opener] ", 0).Printf } o.ClientInfo.comment = []string{"database/sql"} return (&stdConnOpener{opt: o, debugf: debugf}).Connect(context.Background()) @@ -236,10 +236,7 @@ func (std *stdDriver) CheckNamedValue(nv *driver.NamedValue) error { return nil func (std *stdDriver) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { if options := queryOptions(ctx); options.async.ok { - if len(args) != 0 { - return nil, errors.New("clickhouse: you can't use parameters in an asynchronous insert") - } - return driver.RowsAffected(0), std.conn.asyncInsert(ctx, query, options.async.wait) + return driver.RowsAffected(0), std.conn.asyncInsert(ctx, query, options.async.wait, rebind(args)...) } if err := std.conn.exec(ctx, query, rebind(args)...); err != nil { if isConnBrokenError(err) { @@ -273,7 +270,7 @@ func (std *stdDriver) Prepare(query string) (driver.Stmt, error) { } func (std *stdDriver) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) { - batch, err := std.conn.prepareBatch(ctx, query, func(*connect, error) {}) + batch, err := std.conn.prepareBatch(ctx, query, ldriver.PrepareBatchOptions{}, func(*connect, error) {}, func(context.Context) (*connect, error) { return nil, nil }) if err != nil { if isConnBrokenError(err) { std.debugf("PrepareContext got a fatal error, resetting connection: %v\n", err) diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/client_info.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/client_info.go index a4dff4b4..d7b7c242 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/client_info.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/client_info.go @@ -29,7 +29,7 @@ const ClientName = "clickhouse-go" const ( ClientVersionMajor = 2 - ClientVersionMinor = 10 + ClientVersionMinor = 17 ClientVersionPatch = 1 ClientTCPProtocolVersion = proto.DBMS_TCP_PROTOCOL_VERSION ) diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn.go index 3bbccab1..5c2c34b6 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn.go @@ -102,8 +102,6 @@ func dial(ctx context.Context, addr string, num int, opt *Options) (*connect, er // warn only on the first connection in the pool if num == 1 && !resources.ClientMeta.IsSupportedClickHouseVersion(connect.server.Version) { - // send to debugger and console - fmt.Printf("WARNING: version %v of ClickHouse is not supported by this client\n", connect.server.Version) debugf("[handshake] WARNING: version %v of ClickHouse is not supported by this client - client supports %v", connect.server.Version, resources.ClientMeta.SupportedVersions()) } return connect, nil diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_async_insert.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_async_insert.go index a324bd1d..f37e6e60 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_async_insert.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_async_insert.go @@ -19,9 +19,10 @@ package clickhouse import ( "context" + "github.com/ClickHouse/clickhouse-go/v2/lib/proto" ) -func (c *connect) asyncInsert(ctx context.Context, query string, wait bool) error { +func (c *connect) asyncInsert(ctx context.Context, query string, wait bool, args ...any) error { options := 
queryOptions(ctx) { options.settings["async_insert"] = 1 @@ -30,6 +31,16 @@ func (c *connect) asyncInsert(ctx context.Context, query string, wait bool) erro options.settings["wait_for_async_insert"] = 1 } } + + if len(args) > 0 { + queryParamsProtocolSupport := c.revision >= proto.DBMS_MIN_PROTOCOL_VERSION_WITH_PARAMETERS + var err error + query, err = bindQueryOrAppendParameters(queryParamsProtocolSupport, &options, query, c.server.Timezone, args...) + if err != nil { + return err + } + } + if err := c.sendQuery(query, &options); err != nil { return err } diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_batch.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_batch.go index 193df4a4..38329900 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_batch.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_batch.go @@ -20,12 +20,13 @@ package clickhouse import ( "context" "fmt" - "github.com/pkg/errors" "os" "regexp" "strings" "time" + "github.com/pkg/errors" + "github.com/ClickHouse/clickhouse-go/v2/lib/column" "github.com/ClickHouse/clickhouse-go/v2/lib/driver" "github.com/ClickHouse/clickhouse-go/v2/lib/proto" @@ -34,7 +35,7 @@ import ( var splitInsertRe = regexp.MustCompile(`(?i)\sVALUES\s*\(`) var columnMatch = regexp.MustCompile(`.*\((?P.+)\)$`) -func (c *connect) prepareBatch(ctx context.Context, query string, release func(*connect, error)) (driver.Batch, error) { +func (c *connect) prepareBatch(ctx context.Context, query string, opts driver.PrepareBatchOptions, release func(*connect, error), acquire func(context.Context) (*connect, error)) (driver.Batch, error) { //defer func() { // if err := recover(); err != nil { // fmt.Printf("panic occurred on %d:\n", c.num) @@ -46,7 +47,7 @@ func (c *connect) prepareBatch(ctx context.Context, query string, release func(* if len(colMatch) == 2 { columns = strings.Split(colMatch[1], ",") for i := range columns { - columns[i] = strings.Trim(strings.TrimSpace(columns[i]), "`") + columns[i] = strings.Trim(strings.TrimSpace(columns[i]), "`\"") } } if !strings.HasSuffix(strings.TrimSpace(strings.ToUpper(query)), "VALUES") { @@ -73,24 +74,35 @@ func (c *connect) prepareBatch(ctx context.Context, query string, release func(* if err = block.SortColumns(columns); err != nil { return nil, err } - return &batch{ + + b := &batch{ ctx: ctx, + query: query, conn: c, block: block, released: false, connRelease: release, + connAcquire: acquire, onProcess: onProcess, - }, nil + } + + if opts.ReleaseConnection { + b.release(b.closeQuery()) + } + + return b, nil } type batch struct { err error ctx context.Context + query string conn *connect - sent bool - released bool + sent bool // sent signalize that batch is send to ClickHouse. + released bool // released signalize that conn was returned to pool and can't be used. 
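
The PrepareBatch plumbing above is a functional-options pattern: each driver.PrepareBatchOption is applied to a driver.PrepareBatchOptions value by getPrepareBatchOptions, and a set ReleaseConnection flag makes prepareBatch hand the connection back to the pool immediately, with Send/Flush re-acquiring one through the new connAcquire callback. A minimal caller-side sketch, assuming only what the hunks above show (the option is written as a hand-rolled literal; any named constructor upstream may ship is not part of this patch, and the example table is hypothetical):

package example

import (
	"context"

	"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
)

// insertWithRelease opts into the ReleaseConnection behaviour introduced above.
func insertWithRelease(ctx context.Context, conn driver.Conn) error {
	// Hand-rolled driver.PrepareBatchOption (a func(*driver.PrepareBatchOptions));
	// it sets the flag that prepareBatch checks via opts.ReleaseConnection.
	releaseConn := func(o *driver.PrepareBatchOptions) { o.ReleaseConnection = true }

	// Assumed table: CREATE TABLE example (id UInt64). The conn returns to the
	// pool right away; Send re-acquires one via the connAcquire callback.
	batch, err := conn.PrepareBatch(ctx, "INSERT INTO example (id) VALUES", releaseConn)
	if err != nil {
		return err
	}
	if err := batch.Append(uint64(1)); err != nil {
		return err
	}
	return batch.Send()
}

Relatedly, the clickhouse_options.go hunk above makes the pool tunable from the DSN, e.g. clickhouse://localhost:9000?max_open_conns=10&max_idle_conns=5&conn_max_lifetime=1h (conn_max_lifetime goes through time.ParseDuration).
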
block *proto.Block connRelease func(*connect, error) + connAcquire func(context.Context) (*connect, error) onProcess *onProcess } @@ -144,12 +156,15 @@ func (b *batch) IsSent() bool { func (b *batch) Column(idx int) driver.BatchColumn { if len(b.block.Columns) <= idx { - b.release(nil) + err := &OpError{ + Op: "batch.Column", + Err: fmt.Errorf("invalid column index %d", idx), + } + + b.release(err) + return &batchColumn{ - err: &OpError{ - Op: "batch.Column", - Err: fmt.Errorf("invalid column index %d", idx), - }, + err: err, } } return &batchColumn{ @@ -163,27 +178,70 @@ func (b *batch) Column(idx int) driver.BatchColumn { } func (b *batch) Send() (err error) { + stopCW := contextWatchdog(b.ctx, func() { + // close TCP connection on context cancel. There is no other way simple way to interrupt underlying operations. + // as verified in the test, this is safe to do and cleanups resources later on + if b.conn != nil { + _ = b.conn.conn.Close() + } + }) + defer func() { + stopCW() b.sent = true b.release(err) }() - if b.sent { - return ErrBatchAlreadySent - } if b.err != nil { return b.err } + if b.sent || b.released { + if err = b.resetConnection(); err != nil { + return err + } + } if b.block.Rows() != 0 { if err = b.conn.sendData(b.block, ""); err != nil { + // there might be an error caused by context cancellation + // in this case we should return context error instead of net.OpError + if ctxErr := b.ctx.Err(); ctxErr != nil { + return ctxErr + } + return err } } - if err = b.conn.sendData(&proto.Block{}, ""); err != nil { + if err = b.closeQuery(); err != nil { + return err + } + return nil +} + +func (b *batch) resetConnection() (err error) { + // acquire a new conn + if b.conn, err = b.connAcquire(b.ctx); err != nil { + return err + } + + defer func() { + b.released = false + }() + + options := queryOptions(b.ctx) + if deadline, ok := b.ctx.Deadline(); ok { + b.conn.conn.SetDeadline(deadline) + defer b.conn.conn.SetDeadline(time.Time{}) + } + + if err = b.conn.sendQuery(b.query, &options); err != nil { + b.release(err) return err } - if err = b.conn.process(b.ctx, b.onProcess); err != nil { + + if _, err = b.conn.firstBlock(b.ctx, b.onProcess); err != nil { + b.release(err) return err } + return nil } @@ -194,6 +252,11 @@ func (b *batch) Flush() error { if b.err != nil { return b.err } + if b.released { + if err := b.resetConnection(); err != nil { + return err + } + } if b.block.Rows() != 0 { if err := b.conn.sendData(b.block, ""); err != nil { return err @@ -203,6 +266,22 @@ func (b *batch) Flush() error { return nil } +func (b *batch) Rows() int { + return b.block.Rows() +} + +func (b *batch) closeQuery() error { + if err := b.conn.sendData(&proto.Block{}, ""); err != nil { + return err + } + + if err := b.conn.process(b.ctx, b.onProcess); err != nil { + return err + } + + return nil +} + type batchColumn struct { err error batch driver.Batch @@ -211,13 +290,12 @@ type batchColumn struct { } func (b *batchColumn) Append(v any) (err error) { - if b.batch.IsSent() { - return ErrBatchAlreadySent - } if b.err != nil { - b.release(b.err) return b.err } + if b.batch.IsSent() { + return ErrBatchAlreadySent + } if _, err = b.column.Append(v); err != nil { b.release(err) return err @@ -226,13 +304,12 @@ func (b *batchColumn) Append(v any) (err error) { } func (b *batchColumn) AppendRow(v any) (err error) { - if b.batch.IsSent() { - return ErrBatchAlreadySent - } if b.err != nil { - b.release(b.err) return b.err } + if b.batch.IsSent() { + return ErrBatchAlreadySent + } if err = 
b.column.AppendRow(v); err != nil { b.release(err) return err diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http.go index 9c95249a..a5d9e9c2 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http.go @@ -27,10 +27,12 @@ import ( "fmt" "io" "io/ioutil" + "log" "mime/multipart" "net" "net/http" "net/url" + "os" "strings" "sync" "time" @@ -136,6 +138,15 @@ func (rw *HTTPReaderWriter) reset(pw *io.PipeWriter) io.WriteCloser { } func dialHttp(ctx context.Context, addr string, num int, opt *Options) (*httpConnect, error) { + var debugf = func(format string, v ...any) {} + if opt.Debug { + if opt.Debugf != nil { + debugf = opt.Debugf + } else { + debugf = log.New(os.Stdout, fmt.Sprintf("[clickhouse][conn=%d][%s]", num, addr), 0).Printf + } + } + if opt.scheme == "" { switch opt.Protocol { case HTTP: @@ -239,7 +250,7 @@ func dialHttp(ctx context.Context, addr string, num int, opt *Options) (*httpCon return nil, err } if !resources.ClientMeta.IsSupportedClickHouseVersion(version) { - fmt.Printf("WARNING: version %v of ClickHouse is not supported by this client\n", version) + debugf("WARNING: version %v of ClickHouse is not supported by this client\n", version) } } @@ -544,7 +555,7 @@ func (h *httpConnect) executeRequest(req *http.Request) (*http.Response, error) msg, err := h.readRawResponse(resp) if err != nil { - return nil, errors.Wrap(err, "clickhouse [execute]:: failed to read the response") + return nil, fmt.Errorf("clickhouse [execute]:: %d code: failed to read the response: %w", resp.StatusCode, err) } return nil, fmt.Errorf("clickhouse [execute]:: %d code: %s", resp.StatusCode, string(msg)) diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http_async_insert.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http_async_insert.go index 07dd3755..4a748de6 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http_async_insert.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http_async_insert.go @@ -23,7 +23,7 @@ import ( "io/ioutil" ) -func (h *httpConnect) asyncInsert(ctx context.Context, query string, wait bool) error { +func (h *httpConnect) asyncInsert(ctx context.Context, query string, wait bool, args ...any) error { options := queryOptions(ctx) options.settings["async_insert"] = 1 @@ -31,6 +31,14 @@ func (h *httpConnect) asyncInsert(ctx context.Context, query string, wait bool) if wait { options.settings["wait_for_async_insert"] = 1 } + if len(args) > 0 { + var err error + query, err = bindQueryOrAppendParameters(true, &options, query, h.location, args...) 
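
Both transports now accept query arguments for async inserts: the native path forwards them through bindQueryOrAppendParameters, switching on whether the server revision supports protocol-level parameters (DBMS_MIN_PROTOCOL_VERSION_WITH_PARAMETERS), while the HTTP path above passes true unconditionally. A caller-side sketch, assuming a hypothetical logs table and the driver's usual positional ? binding:

package example

import (
	"context"
	"time"

	"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
)

// asyncInsertRow uses the widened AsyncInsert signature
// (ctx, query, wait, args...) introduced by this patch.
func asyncInsertRow(ctx context.Context, conn driver.Conn) error {
	// Assumed table: CREATE TABLE logs (ts DateTime, msg String).
	// wait=false sets async_insert=1 only; wait=true additionally sets
	// wait_for_async_insert=1, blocking until the buffered insert is flushed.
	return conn.AsyncInsert(ctx,
		"INSERT INTO logs (ts, msg) VALUES (?, ?)",
		false,
		time.Now(), "hello",
	)
}

This is also why the ExecContext hunk earlier drops the "you can't use parameters in an asynchronous insert" error: the std driver now simply rebinds args into the asyncInsert call.
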
+ if err != nil { + return err + } + } + res, err := h.sendQuery(ctx, query, &options, h.headers) if res != nil { defer res.Body.Close() diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http_batch.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http_batch.go index 5912bd37..d64faeb3 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http_batch.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_http_batch.go @@ -33,8 +33,9 @@ import ( // \x60 represents a backtick var httpInsertRe = regexp.MustCompile(`(?i)^INSERT INTO\s+\x60?([\w.^\(]+)\x60?\s*(\([^\)]*\))?`) -// release is ignored, because http used by std with empty release function -func (h *httpConnect) prepareBatch(ctx context.Context, query string, release func(*connect, error)) (driver.Batch, error) { +// release is ignored, because http used by std with empty release function. +// Also opts ignored because all options unused in http batch. +func (h *httpConnect) prepareBatch(ctx context.Context, query string, opts driver.PrepareBatchOptions, release func(*connect, error), acquire func(context.Context) (*connect, error)) (driver.Batch, error) { matches := httpInsertRe.FindStringSubmatch(query) if len(matches) < 3 { return nil, errors.New("cannot get table name from query") @@ -45,7 +46,7 @@ func (h *httpConnect) prepareBatch(ctx context.Context, query string, release fu colMatch := strings.TrimSuffix(strings.TrimPrefix(matches[2], "("), ")") rColumns = strings.Split(colMatch, ",") for i := range rColumns { - rColumns[i] = strings.TrimSpace(rColumns[i]) + rColumns[i] = strings.Trim(strings.TrimSpace(rColumns[i]), "`") } } query = "INSERT INTO " + tableName + " FORMAT Native" @@ -229,4 +230,8 @@ func (b *httpBatch) Send() (err error) { return err } +func (b *httpBatch) Rows() int { + return b.block.Rows() +} + var _ driver.Batch = (*httpBatch)(nil) diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_send_query.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_send_query.go index a1323397..8897a8c7 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_send_query.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/conn_send_query.go @@ -27,16 +27,17 @@ func (c *connect) sendQuery(body string, o *QueryOptions) error { c.debugf("[send query] compression=%q %s", c.compression, body) c.buffer.PutByte(proto.ClientQuery) q := proto.Query{ - ClientName: c.opt.ClientInfo.String(), - ClientVersion: proto.Version{ClientVersionMajor, ClientVersionMinor, ClientVersionPatch}, //nolint:govet - ID: o.queryID, - Body: body, - Span: o.span, - QuotaKey: o.quotaKey, - Compression: c.compression != CompressionNone, - InitialAddress: c.conn.LocalAddr().String(), - Settings: c.settings(o.settings), - Parameters: parametersToProtoParameters(o.parameters), + ClientTCPProtocolVersion: ClientTCPProtocolVersion, + ClientName: c.opt.ClientInfo.String(), + ClientVersion: proto.Version{ClientVersionMajor, ClientVersionMinor, ClientVersionPatch}, //nolint:govet + ID: o.queryID, + Body: body, + Span: o.span, + QuotaKey: o.quotaKey, + Compression: c.compression != CompressionNone, + InitialAddress: c.conn.LocalAddr().String(), + Settings: c.settings(o.settings), + Parameters: parametersToProtoParameters(o.parameters), } if err := q.Encode(c.buffer, c.revision); err != nil { return err diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/context_watchdog.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/context_watchdog.go new file mode 100644 index 00000000..8c2257e4 --- /dev/null +++ 
b/vendor/github.com/ClickHouse/clickhouse-go/v2/context_watchdog.go @@ -0,0 +1,47 @@ +// Licensed to ClickHouse, Inc. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. ClickHouse, Inc. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package clickhouse + +import "context" + +// contextWatchdog is a helper function to run a callback when the context is done. +// it has a cancellation function to prevent the callback from running. +// Useful for interrupting some logic when the context is done, +// but you want to not bother about context cancellation if your logic is already done. +// Example: +// stopCW := contextWatchdog(ctx, func() { /* do something */ }) +// // do something else +// defer stopCW() +func contextWatchdog(ctx context.Context, callback func()) (cancel func()) { + exit := make(chan struct{}) + + go func() { + for { + select { + case <-exit: + return + case <-ctx.Done(): + callback() + } + } + }() + + return func() { + exit <- struct{}{} + } +} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/contributors/list b/vendor/github.com/ClickHouse/clickhouse-go/v2/contributors/list index fc200be6..01d27626 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/contributors/list +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/contributors/list @@ -1,145 +1 @@ -Aaron Harlap -Abraham Adberstein -Aleksandr Petrukhin -Aleksandr Razumov -Alex Bocharov -Alex Yang -Alexander Chumakov -Alexander Obukhov -Alexey Milovidov -Alexey Palazhchenko -Alvaro Tuso -Andrey Ustinov -Antoine Toulme -Anton Kozlov -Ashish Gaurav -Ather Shu -Benjamin Rupp -Cem Sancak -Chao Wang -Chris Duncan -Daguang <28806852+DGuang21@users.noreply.github.com> -Dale McDiarmid -Dale Mcdiarmid -Damir Sayfutdinov -Dan Walters -Daniel Bershatsky -Danila Migalin -Danny.Dunn -Darío -Denis Gukov -Denis Krivak -Denys -Derek Perkins -Dmitry Markov -Dmitry Ponomarev -Dmitry Ponomarev -Egor.Gorlin -Eugene Formanenko -Evan Au -Ewan -Florian Lehner -Fredz <513317651@qq.com> -Félix Mattrat -Geoff Genz -Gregory Petrosyan -Ian McGraw -Ivan -Ivan Blinkov -Ivan Blinkov -Ivan Ivanov -Jake Sylvestre -Jakub Chábek -James Hartig -Jan Was -Jeehoon Kim -John Troy -Jon Aquino -Kirill Shvakov -Kuba Kaflik -LI Tao -LIU Chao <42240939+xiaochaoren1@users.noreply.github.com> -Louis -Luc Vieillescazes -LuoJi Zhu -Maksim Sokolnikov -Marek Vavrusa -Marek VavrusÌŒa -Marek VavruÅ¡a -Marek VavruÅ¡a -Mark Andrus Roberts -Max Justus Spransy -Michael Vigovsky -Michail Safronov -Miel Donkers -Mikhail Shustov -Nathan J Mehl <70606471+n-oden@users.noreply.github.com> -Nay Linn -Nikita Mikhaylov -Oleg Strokachuk -Oleksandr Nikitin -Richard Artoul -Robert Sköld -Robin Hahling -Ross Rothenstine -Ross Rothenstine -Selboo -Serge Klochkov <3175289+slvrtrn@users.noreply.github.com> -Sergei Sobolev -Sergey Melekhin -Stephane Moreau -Stephanie Hingtgen -Taras Matsyk -Thibault Deutsch -Tomasz Czubocha -Tsimafei 
Bredau -Varun Vasan V -Vespertinus -Vincent Bernat -Vitaly Orlov -Yury Korolev -Yury Yurochko -Zhehao Wu -a-dot -albertlockett -alex -anton troyanov -astduman <41344369+Astemirdum@users.noreply.github.com> -candiduslynx -chengzhi -chenlujjj <953546398@qq.com> -coldWater -count -daguang -daguang -dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> -derN3rd -dmitry kuzmin -fengberlin -gaetan.rizio -hexchain -hulb -ianmcgraw -ilker moral -jiyongwang -kshvakov -neverlee -nevseliev -ortyomka -pavel raskin -restrry -rtkaratekid <42547811+rtkaratekid@users.noreply.github.com> -sundy-li <543950155@qq.com> -vahid sohrabloo -vasily.popov -viktorzaharov -vl4deee11 <44677024+vl4deee11@users.noreply.github.com> -vl4deee11 -vl4deee11 -vladislav doster -vogrelord -vvoronin -yuankun -zxc111 -zxc9007 -æŽç›¼ +Nityananda Gohain diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/array.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/array.go index ac907dd8..e2db1825 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/array.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/array.go @@ -133,6 +133,14 @@ func (col *Array) Append(v any) (nulls []uint8, err error) { } func (col *Array) AppendRow(v any) error { + if col.depth == 1 { + // try to use reflection-free method. + return col.appendRowPlain(v) + } + return col.appendRowDefault(v) +} + +func (col *Array) appendRowDefault(v any) error { var elem reflect.Value switch v := v.(type) { case reflect.Value: @@ -155,19 +163,51 @@ func (col *Array) AppendRow(v any) error { return col.append(elem, 0) } +func appendRowPlain[T any](col *Array, arr []T) error { + col.appendOffset(0, uint64(len(arr))) + for _, item := range arr { + if err := col.values.AppendRow(item); err != nil { + return err + } + } + return nil +} + +func appendNullableRowPlain[T any](col *Array, arr []*T) error { + col.appendOffset(0, uint64(len(arr))) + for _, item := range arr { + var err error + if item == nil { + err = col.values.AppendRow(nil) + } else { + err = col.values.AppendRow(item) + } + if err != nil { + return err + } + } + return nil +} + func (col *Array) append(elem reflect.Value, level int) error { if level < col.depth { - offset := uint64(elem.Len()) - if ln := col.offsets[level].values.Rows(); ln != 0 { - offset += col.offsets[level].values.col.Row(ln - 1) - } - col.offsets[level].values.col.Append(offset) - for i := 0; i < elem.Len(); i++ { - if err := col.append(elem.Index(i), level+1); err != nil { - return err + switch elem.Kind() { + // reflect.Value.Len() & reflect.Value.Index() is called in `append` method which is only valid for + // Slice, Array and String that make sense here. 
+ case reflect.Slice, reflect.Array, reflect.String: + col.appendOffset(level, uint64(elem.Len())) + for i := 0; i < elem.Len(); i++ { + if err := col.append(elem.Index(i), level+1); err != nil { + return err + } } + return nil + } + return &ColumnConverterError{ + Op: "AppendRow", + To: "Array", + From: fmt.Sprintf("%T", elem), } - return nil } if elem.Kind() == reflect.Ptr && elem.IsNil() { return col.values.AppendRow(nil) @@ -175,6 +215,13 @@ func (col *Array) append(elem reflect.Value, level int) error { return col.values.AppendRow(elem.Interface()) } +func (col *Array) appendOffset(level int, offset uint64) { + if ln := col.offsets[level].values.Rows(); ln != 0 { + offset += col.offsets[level].values.col.Row(ln - 1) + } + col.offsets[level].values.col.Append(offset) +} + func (col *Array) Decode(reader *proto.Reader, rows int) error { for _, offset := range col.offsets { if err := offset.values.col.DecodeColumn(reader, rows); err != nil { diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/array_gen.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/array_gen.go new file mode 100644 index 00000000..eeba8e0f --- /dev/null +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/array_gen.go @@ -0,0 +1,175 @@ +// Licensed to ClickHouse, Inc. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. ClickHouse, Inc. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated by make codegen DO NOT EDIT. +// source: lib/column/codegen/array.tpl + +package column + +import ( + "database/sql" + "database/sql/driver" + "fmt" + "github.com/ClickHouse/ch-go/proto" + "github.com/google/uuid" + "github.com/paulmach/orb" + "github.com/shopspring/decimal" + "math/big" + "net" + "net/netip" + "time" +) + +// appendRowPlain is a reflection-free realisation of append for plain arrays. 
+func (col *Array) appendRowPlain(v any) error { + switch tv := v.(type) { + case []float32: + return appendRowPlain(col, tv) + case []*float32: + return appendNullableRowPlain(col, tv) + case []float64: + return appendRowPlain(col, tv) + case []*float64: + return appendNullableRowPlain(col, tv) + case []int8: + return appendRowPlain(col, tv) + case []*int8: + return appendNullableRowPlain(col, tv) + case []int16: + return appendRowPlain(col, tv) + case []*int16: + return appendNullableRowPlain(col, tv) + case []int32: + return appendRowPlain(col, tv) + case []*int32: + return appendNullableRowPlain(col, tv) + case []int64: + return appendRowPlain(col, tv) + case []*int64: + return appendNullableRowPlain(col, tv) + case []uint8: + return appendRowPlain(col, tv) + case []*uint8: + return appendNullableRowPlain(col, tv) + case []uint16: + return appendRowPlain(col, tv) + case []*uint16: + return appendNullableRowPlain(col, tv) + case []uint32: + return appendRowPlain(col, tv) + case []*uint32: + return appendNullableRowPlain(col, tv) + case []uint64: + return appendRowPlain(col, tv) + case []*uint64: + return appendNullableRowPlain(col, tv) + case []string: + return appendRowPlain(col, tv) + case []*string: + return appendNullableRowPlain(col, tv) + case [][]byte: + return appendRowPlain(col, tv) + case []*[]byte: + return appendNullableRowPlain(col, tv) + case []sql.NullString: + return appendRowPlain(col, tv) + case []*sql.NullString: + return appendNullableRowPlain(col, tv) + case []int: + return appendRowPlain(col, tv) + case []*int: + return appendNullableRowPlain(col, tv) + case []uint: + return appendRowPlain(col, tv) + case []*uint: + return appendNullableRowPlain(col, tv) + case []big.Int: + return appendRowPlain(col, tv) + case []*big.Int: + return appendNullableRowPlain(col, tv) + case []decimal.Decimal: + return appendRowPlain(col, tv) + case []*decimal.Decimal: + return appendNullableRowPlain(col, tv) + case []bool: + return appendRowPlain(col, tv) + case []*bool: + return appendNullableRowPlain(col, tv) + case []sql.NullBool: + return appendRowPlain(col, tv) + case []*sql.NullBool: + return appendNullableRowPlain(col, tv) + case []time.Time: + return appendRowPlain(col, tv) + case []*time.Time: + return appendNullableRowPlain(col, tv) + case []sql.NullTime: + return appendRowPlain(col, tv) + case []*sql.NullTime: + return appendNullableRowPlain(col, tv) + case []uuid.UUID: + return appendRowPlain(col, tv) + case []*uuid.UUID: + return appendNullableRowPlain(col, tv) + case []netip.Addr: + return appendRowPlain(col, tv) + case []*netip.Addr: + return appendNullableRowPlain(col, tv) + case []net.IP: + return appendRowPlain(col, tv) + case []*net.IP: + return appendNullableRowPlain(col, tv) + case []proto.IPv6: + return appendRowPlain(col, tv) + case []*proto.IPv6: + return appendNullableRowPlain(col, tv) + case [][16]byte: + return appendRowPlain(col, tv) + case []*[16]byte: + return appendNullableRowPlain(col, tv) + case []orb.MultiPolygon: + return appendRowPlain(col, tv) + case []*orb.MultiPolygon: + return appendNullableRowPlain(col, tv) + case []orb.Point: + return appendRowPlain(col, tv) + case []*orb.Point: + return appendNullableRowPlain(col, tv) + case []orb.Polygon: + return appendRowPlain(col, tv) + case []*orb.Polygon: + return appendNullableRowPlain(col, tv) + case []orb.Ring: + return appendRowPlain(col, tv) + case []*orb.Ring: + return appendNullableRowPlain(col, tv) + default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil 
{ + return &ColumnConverterError{ + Op: "AppendRow", + To: "Array", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.appendRowPlain(val) + } + return col.appendRowDefault(v) + } +} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/bigint.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/bigint.go index 08c3db63..e09c96ae 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/bigint.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/bigint.go @@ -18,6 +18,7 @@ package column import ( + "database/sql/driver" "encoding/binary" "fmt" "github.com/ClickHouse/ch-go/proto" @@ -97,6 +98,18 @@ func (col *BigInt) Append(v any) (nulls []uint8, err error) { } } default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: string(col.chType), + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } return nil, &ColumnConverterError{ Op: "Append", To: string(col.chType), @@ -120,6 +133,18 @@ func (col *BigInt) AppendRow(v any) error { case nil: col.append(big.NewInt(0)) default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: string(col.chType), + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } return &ColumnConverterError{ Op: "AppendRow", To: string(col.chType), diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/bool.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/bool.go index f99da947..3699a3cf 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/bool.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/bool.go @@ -19,6 +19,7 @@ package column import ( "database/sql" + "database/sql/driver" "fmt" "github.com/ClickHouse/ch-go/proto" "reflect" @@ -64,7 +65,7 @@ func (col *Bool) ScanRow(dest any, row int) error { case **bool: *d = new(bool) **d = col.row(row) - case *sql.NullBool: + case sql.Scanner: return d.Scan(col.row(row)) default: return &ColumnConverterError{ @@ -110,6 +111,18 @@ func (col *Bool) Append(v any) (nulls []uint8, err error) { col.Append(v[i]) } default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "Bool", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } return nil, &ColumnConverterError{ Op: "Append", To: "Bool", @@ -140,6 +153,18 @@ func (col *Bool) AppendRow(v any) error { } case nil: default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "Bool", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } return &ColumnConverterError{ Op: "AppendRow", To: "Bool", diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/column_gen.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/column_gen.go index 79d15c40..d13781af 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/column_gen.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/column_gen.go @@ -22,6 +22,7 @@ package column import ( "database/sql" + "database/sql/driver" "fmt" 
"github.com/ClickHouse/ch-go/proto" "github.com/google/uuid" @@ -325,6 +326,20 @@ func (col *Float32) Append(v any) (nulls []uint8, err error) { } } default: + + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "Float32", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } + return nil, &ColumnConverterError{ Op: "Append", To: "Float32", @@ -348,7 +363,21 @@ func (col *Float32) AppendRow(v any) error { case nil: col.col.Append(0) default: - if rv := reflect.ValueOf(v); rv.Kind() == col.ScanType().Kind() && rv.CanConvert(col.ScanType()) { + + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "Float32", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } + + if rv := reflect.ValueOf(v); rv.Kind() == col.ScanType().Kind() || rv.CanConvert(col.ScanType()) { col.col.Append(rv.Convert(col.ScanType()).Interface().(float32)) } else { return &ColumnConverterError{ @@ -453,6 +482,20 @@ func (col *Float64) Append(v any) (nulls []uint8, err error) { col.AppendRow(v[i]) } default: + + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "Float64", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } + return nil, &ColumnConverterError{ Op: "Append", To: "Float64", @@ -490,7 +533,21 @@ func (col *Float64) AppendRow(v any) error { col.col.Append(0) } default: - if rv := reflect.ValueOf(v); rv.Kind() == col.ScanType().Kind() && rv.CanConvert(col.ScanType()) { + + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "Float64", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } + + if rv := reflect.ValueOf(v); rv.Kind() == col.ScanType().Kind() || rv.CanConvert(col.ScanType()) { col.col.Append(rv.Convert(col.ScanType()).Interface().(float64)) } else { return &ColumnConverterError{ @@ -605,6 +662,20 @@ func (col *Int8) Append(v any) (nulls []uint8, err error) { col.col.Append(val) } default: + + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "Int8", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } + return nil, &ColumnConverterError{ Op: "Append", To: "Int8", @@ -640,7 +711,21 @@ func (col *Int8) AppendRow(v any) error { } col.col.Append(val) default: - if rv := reflect.ValueOf(v); rv.Kind() == col.ScanType().Kind() && rv.CanConvert(col.ScanType()) { + + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "Int8", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } + + if rv := reflect.ValueOf(v); rv.Kind() == col.ScanType().Kind() || rv.CanConvert(col.ScanType()) { col.col.Append(rv.Convert(col.ScanType()).Interface().(int8)) } else { return &ColumnConverterError{ @@ -745,6 +830,20 @@ func (col *Int16) Append(v any) (nulls []uint8, err error) { col.AppendRow(v[i]) } default: + + if 
valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "Int16", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } + return nil, &ColumnConverterError{ Op: "Append", To: "Int16", @@ -782,7 +881,21 @@ func (col *Int16) AppendRow(v any) error { col.col.Append(0) } default: - if rv := reflect.ValueOf(v); rv.Kind() == col.ScanType().Kind() && rv.CanConvert(col.ScanType()) { + + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "Int16", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } + + if rv := reflect.ValueOf(v); rv.Kind() == col.ScanType().Kind() || rv.CanConvert(col.ScanType()) { col.col.Append(rv.Convert(col.ScanType()).Interface().(int16)) } else { return &ColumnConverterError{ @@ -887,6 +1000,20 @@ func (col *Int32) Append(v any) (nulls []uint8, err error) { col.AppendRow(v[i]) } default: + + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "Int32", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } + return nil, &ColumnConverterError{ Op: "Append", To: "Int32", @@ -924,7 +1051,21 @@ func (col *Int32) AppendRow(v any) error { col.col.Append(0) } default: - if rv := reflect.ValueOf(v); rv.Kind() == col.ScanType().Kind() && rv.CanConvert(col.ScanType()) { + + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "Int32", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } + + if rv := reflect.ValueOf(v); rv.Kind() == col.ScanType().Kind() || rv.CanConvert(col.ScanType()) { col.col.Append(rv.Convert(col.ScanType()).Interface().(int32)) } else { return &ColumnConverterError{ @@ -1031,6 +1172,20 @@ func (col *Int64) Append(v any) (nulls []uint8, err error) { col.AppendRow(v[i]) } default: + + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "Int64", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } + return nil, &ColumnConverterError{ Op: "Append", To: "Int64", @@ -1072,7 +1227,21 @@ func (col *Int64) AppendRow(v any) error { case *time.Duration: col.col.Append(int64(*v)) default: - if rv := reflect.ValueOf(v); rv.Kind() == col.ScanType().Kind() && rv.CanConvert(col.ScanType()) { + + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "Int64", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } + + if rv := reflect.ValueOf(v); rv.Kind() == col.ScanType().Kind() || rv.CanConvert(col.ScanType()) { col.col.Append(rv.Convert(col.ScanType()).Interface().(int64)) } else { return &ColumnConverterError{ @@ -1121,6 +1290,13 @@ func (col *UInt8) ScanRow(dest any, row int) error { case **uint8: *d = new(uint8) **d = value + case *bool: + switch value { + case 0: + *d = false + default: + *d = true + } default: if scan, ok := dest.(sql.Scanner); ok { return scan.Scan(value) @@ 
-1162,6 +1338,20 @@ func (col *UInt8) Append(v any) (nulls []uint8, err error) { } } default: + + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "UInt8", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } + return nil, &ColumnConverterError{ Op: "Append", To: "UInt8", @@ -1191,7 +1381,21 @@ func (col *UInt8) AppendRow(v any) error { } col.col.Append(t) default: - if rv := reflect.ValueOf(v); rv.Kind() == col.ScanType().Kind() && rv.CanConvert(col.ScanType()) { + + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "UInt8", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } + + if rv := reflect.ValueOf(v); rv.Kind() == col.ScanType().Kind() || rv.CanConvert(col.ScanType()) { col.col.Append(rv.Convert(col.ScanType()).Interface().(uint8)) } else { return &ColumnConverterError{ @@ -1281,6 +1485,20 @@ func (col *UInt16) Append(v any) (nulls []uint8, err error) { } } default: + + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "UInt16", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } + return nil, &ColumnConverterError{ Op: "Append", To: "UInt16", @@ -1304,7 +1522,21 @@ func (col *UInt16) AppendRow(v any) error { case nil: col.col.Append(0) default: - if rv := reflect.ValueOf(v); rv.Kind() == col.ScanType().Kind() && rv.CanConvert(col.ScanType()) { + + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "UInt16", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } + + if rv := reflect.ValueOf(v); rv.Kind() == col.ScanType().Kind() || rv.CanConvert(col.ScanType()) { col.col.Append(rv.Convert(col.ScanType()).Interface().(uint16)) } else { return &ColumnConverterError{ @@ -1394,6 +1626,20 @@ func (col *UInt32) Append(v any) (nulls []uint8, err error) { } } default: + + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "UInt32", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } + return nil, &ColumnConverterError{ Op: "Append", To: "UInt32", @@ -1417,7 +1663,21 @@ func (col *UInt32) AppendRow(v any) error { case nil: col.col.Append(0) default: - if rv := reflect.ValueOf(v); rv.Kind() == col.ScanType().Kind() && rv.CanConvert(col.ScanType()) { + + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "UInt32", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } + + if rv := reflect.ValueOf(v); rv.Kind() == col.ScanType().Kind() || rv.CanConvert(col.ScanType()) { col.col.Append(rv.Convert(col.ScanType()).Interface().(uint32)) } else { return &ColumnConverterError{ @@ -1507,6 +1767,20 @@ func (col *UInt64) Append(v any) (nulls []uint8, err error) { } } default: + + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, 
&ColumnConverterError{ + Op: "Append", + To: "UInt64", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } + return nil, &ColumnConverterError{ Op: "Append", To: "UInt64", @@ -1530,7 +1804,21 @@ func (col *UInt64) AppendRow(v any) error { case nil: col.col.Append(0) default: - if rv := reflect.ValueOf(v); rv.Kind() == col.ScanType().Kind() && rv.CanConvert(col.ScanType()) { + + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "UInt64", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } + + if rv := reflect.ValueOf(v); rv.Kind() == col.ScanType().Kind() || rv.CanConvert(col.ScanType()) { col.col.Append(rv.Convert(col.ScanType()).Interface().(uint64)) } else { return &ColumnConverterError{ diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/date.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/date.go index 52ff708a..bc4f77db 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/date.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/date.go @@ -19,6 +19,7 @@ package column import ( "database/sql" + "database/sql/driver" "fmt" "github.com/ClickHouse/ch-go/proto" "reflect" @@ -156,6 +157,18 @@ func (col *Date) Append(v any) (nulls []uint8, err error) { } } default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "Date", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } return nil, &ColumnConverterError{ Op: "Append", To: "Date", @@ -215,6 +228,18 @@ func (col *Date) AppendRow(v any) error { col.col.Append(datetime) } default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "Date", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } s, ok := v.(fmt.Stringer) if ok { return col.AppendRow(s.String()) diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/date32.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/date32.go index 4e3fa1c2..e23429de 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/date32.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/date32.go @@ -19,6 +19,7 @@ package column import ( "database/sql" + "database/sql/driver" "fmt" "github.com/ClickHouse/ch-go/proto" "reflect" @@ -146,6 +147,18 @@ func (col *Date32) Append(v any) (nulls []uint8, err error) { } } default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "Date32", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } return nil, &ColumnConverterError{ Op: "Append", To: "Date32", @@ -205,6 +218,18 @@ func (col *Date32) AppendRow(v any) error { col.col.Append(value) } default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "Date32", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } s, ok := v.(fmt.Stringer) if ok { return 
col.AppendRow(s.String()) diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/datetime.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/datetime.go index 48b4b17b..d5dfffad 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/datetime.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/datetime.go @@ -19,6 +19,7 @@ package column import ( "database/sql" + "database/sql/driver" "fmt" "github.com/ClickHouse/ch-go/proto" "reflect" @@ -188,6 +189,18 @@ func (col *DateTime) Append(v any) (nulls []uint8, err error) { } } default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "DateTime", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } return nil, &ColumnConverterError{ Op: "Append", To: "DateTime", @@ -257,6 +270,18 @@ func (col *DateTime) AppendRow(v any) error { col.col.Append(dateTime) } default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "DateTime", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } s, ok := v.(fmt.Stringer) if ok { return col.AppendRow(s.String()) diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/datetime64.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/datetime64.go index 882a86d2..f5a5a948 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/datetime64.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/datetime64.go @@ -19,6 +19,7 @@ package column import ( "database/sql" + "database/sql/driver" "fmt" "github.com/ClickHouse/ch-go/proto" "math" @@ -31,8 +32,8 @@ import ( ) var ( - minDateTime64, _ = time.Parse("2006-01-02 15:04:05", "1925-01-01 00:00:00") - maxDateTime64, _ = time.Parse("2006-01-02 15:04:05", "2283-11-11 00:00:00") + minDateTime64, _ = time.Parse("2006-01-02 15:04:05", "1900-01-01 00:00:00") + maxDateTime64, _ = time.Parse("2006-01-02 15:04:05", "2262-04-11 23:47:16") ) const ( @@ -193,6 +194,18 @@ func (col *DateTime64) Append(v any) (nulls []uint8, err error) { col.AppendRow(v[i]) } default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "Datetime64", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } return nil, &ColumnConverterError{ Op: "Append", To: "Datetime64", @@ -251,6 +264,18 @@ func (col *DateTime64) AppendRow(v any) error { case nil: col.col.Append(time.Time{}) default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "Datetime64", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } s, ok := v.(fmt.Stringer) if ok { return col.AppendRow(s.String()) diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/decimal.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/decimal.go index a64f5826..74b7d75f 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/decimal.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/decimal.go @@ -19,6 +19,7 @@ package column import ( "database/sql" + "database/sql/driver" "encoding/binary" 
"errors" "fmt" @@ -170,6 +171,18 @@ func (col *Decimal) Append(v any) (nulls []uint8, err error) { } } default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: string(col.chType), + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } return nil, &ColumnConverterError{ Op: "Append", To: string(col.chType), @@ -190,6 +203,18 @@ func (col *Decimal) AppendRow(v any) error { } case nil: default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: string(col.chType), + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } return &ColumnConverterError{ Op: "AppendRow", To: string(col.chType), diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/enum16.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/enum16.go index 5bbfe0fa..c394e7ff 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/enum16.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/enum16.go @@ -19,6 +19,7 @@ package column import ( "database/sql" + "database/sql/driver" "fmt" "github.com/ClickHouse/ch-go/proto" "reflect" @@ -153,6 +154,24 @@ func (col *Enum16) Append(v any) (nulls []uint8, err error) { nulls[i] = 1 } } + default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "Enum16", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } + return nil, &ColumnConverterError{ + Op: "Append", + To: "Enum16", + From: fmt.Sprintf("%T", v), + } } return } @@ -214,6 +233,18 @@ func (col *Enum16) AppendRow(elem any) error { case nil: col.col.Append(0) default: + if valuer, ok := elem.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "Enum16", + From: fmt.Sprintf("%T", elem), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } if s, ok := elem.(fmt.Stringer); ok { return col.AppendRow(s.String()) } else { diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/enum8.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/enum8.go index 9880c6fe..4aee561a 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/enum8.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/enum8.go @@ -19,6 +19,7 @@ package column import ( "database/sql" + "database/sql/driver" "fmt" "github.com/ClickHouse/ch-go/proto" "reflect" @@ -154,6 +155,18 @@ func (col *Enum8) Append(v any) (nulls []uint8, err error) { } } default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "Enum8", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } return nil, &ColumnConverterError{ Op: "Append", To: "Enum8", @@ -220,6 +233,19 @@ func (col *Enum8) AppendRow(elem any) error { case nil: col.col.Append(0) default: + if valuer, ok := elem.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "Enum8", + From: fmt.Sprintf("%T", elem), + Hint: "could not get driver.Valuer value", + } + 
} + return col.AppendRow(val) + } + if s, ok := elem.(fmt.Stringer); ok { return col.AppendRow(s.String()) } else { diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/fixed_string.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/fixed_string.go index a836f748..8ddb0d1c 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/fixed_string.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/fixed_string.go @@ -126,6 +126,18 @@ func (col *FixedString) Append(v any) (nulls []uint8, err error) { col.col.Append(data) nulls = make([]uint8, len(data)/col.col.Size) default: + if s, ok := v.(driver.Valuer); ok { + val, err := s.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "FixedString", + From: fmt.Sprintf("%T", s), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } return nil, &ColumnConverterError{ Op: "Append", To: "FixedString", @@ -159,22 +171,12 @@ func (col *FixedString) AppendRow(v any) (err error) { if err != nil { return &ColumnConverterError{ Op: "AppendRow", - To: "String", + To: "FixedString", From: fmt.Sprintf("%T", s), Hint: "could not get driver.Valuer value", } } - - if s, ok := val.(string); ok { - return col.AppendRow(s) - } - - return &ColumnConverterError{ - Op: "AppendRow", - To: "String", - From: fmt.Sprintf("%T", v), - Hint: "driver.Valuer value is not a string", - } + return col.AppendRow(val) } if s, ok := v.(fmt.Stringer); ok { @@ -183,7 +185,7 @@ func (col *FixedString) AppendRow(v any) (err error) { return &ColumnConverterError{ Op: "AppendRow", - To: "String", + To: "FixedString", From: fmt.Sprintf("%T", v), } } diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_multi_polygon.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_multi_polygon.go index 9b5ebe7b..2839a41c 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_multi_polygon.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_multi_polygon.go @@ -18,6 +18,7 @@ package column import ( + "database/sql/driver" "fmt" "github.com/ClickHouse/ch-go/proto" "reflect" @@ -91,6 +92,18 @@ func (col *MultiPolygon) Append(v any) (nulls []uint8, err error) { } return col.set.Append(values) default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "MultiPolygon", + From: fmt.Sprintf("%T", v), + Hint: fmt.Sprintf("could not get driver.Valuer value, try using %s", col.Type()), + } + } + return col.Append(val) + } return nil, &ColumnConverterError{ Op: "Append", To: "MultiPolygon", @@ -106,6 +119,18 @@ func (col *MultiPolygon) AppendRow(v any) error { case *orb.MultiPolygon: return col.set.AppendRow([]orb.Polygon(*v)) default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "MultiPolygon", + From: fmt.Sprintf("%T", v), + Hint: fmt.Sprintf("could not get driver.Valuer value, try using %s", col.Type()), + } + } + return col.AppendRow(val) + } return &ColumnConverterError{ Op: "AppendRow", To: "MultiPolygon", diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_point.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_point.go index 9d3e1d34..c93a715a 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_point.go +++ 
b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_point.go @@ -18,6 +18,7 @@ package column import ( + "database/sql/driver" "fmt" "github.com/ClickHouse/ch-go/proto" "reflect" @@ -95,6 +96,18 @@ func (col *Point) Append(v any) (nulls []uint8, err error) { }) } default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "Point", + From: fmt.Sprintf("%T", v), + Hint: fmt.Sprintf("could not get driver.Valuer value, try using %s", col.Type()), + } + } + return col.Append(val) + } return nil, &ColumnConverterError{ Op: "Append", To: "Point", @@ -116,6 +129,18 @@ func (col *Point) AppendRow(v any) error { Y: v.Lat(), }) default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "Point", + From: fmt.Sprintf("%T", v), + Hint: fmt.Sprintf("could not get driver.Valuer value, try using %s", col.Type()), + } + } + return col.AppendRow(val) + } return &ColumnConverterError{ Op: "AppendRow", To: "Point", diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_polygon.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_polygon.go index accc14f8..54226081 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_polygon.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_polygon.go @@ -18,6 +18,7 @@ package column import ( + "database/sql/driver" "fmt" "github.com/ClickHouse/ch-go/proto" "reflect" @@ -91,6 +92,18 @@ func (col *Polygon) Append(v any) (nulls []uint8, err error) { } return col.set.Append(values) default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "Polygon", + From: fmt.Sprintf("%T", v), + Hint: fmt.Sprintf("could not get driver.Valuer value, try using %s", col.Type()), + } + } + return col.Append(val) + } return nil, &ColumnConverterError{ Op: "Append", To: "Polygon", @@ -106,6 +119,18 @@ func (col *Polygon) AppendRow(v any) error { case *orb.Polygon: return col.set.AppendRow([]orb.Ring(*v)) default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "Polygon", + From: fmt.Sprintf("%T", v), + Hint: fmt.Sprintf("could not get driver.Valuer value, try using %s", col.Type()), + } + } + return col.AppendRow(val) + } return &ColumnConverterError{ Op: "AppendRow", To: "Polygon", diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_ring.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_ring.go index 4580e41c..0f190a8e 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_ring.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/geo_ring.go @@ -18,6 +18,7 @@ package column import ( + "database/sql/driver" "fmt" "github.com/ClickHouse/ch-go/proto" "reflect" @@ -91,6 +92,18 @@ func (col *Ring) Append(v any) (nulls []uint8, err error) { } return col.set.Append(values) default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "Ring", + From: fmt.Sprintf("%T", v), + Hint: fmt.Sprintf("could not get driver.Valuer value, try using %s", col.Type()), + } + } + return col.Append(val) + } return nil, &ColumnConverterError{ Op: "Append", To: "Ring", @@ -106,6 +119,18 @@ func 
(col *Ring) AppendRow(v any) error { case *orb.Ring: return col.set.AppendRow([]orb.Point(*v)) default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "Ring", + From: fmt.Sprintf("%T", v), + Hint: fmt.Sprintf("could not get driver.Valuer value, try using %s", col.Type()), + } + } + return col.AppendRow(val) + } return &ColumnConverterError{ Op: "AppendRow", To: "Ring", diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/ipv4.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/ipv4.go index bf906304..a15f6d3e 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/ipv4.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/ipv4.go @@ -18,6 +18,7 @@ package column import ( + "database/sql/driver" "encoding/binary" "fmt" "github.com/ClickHouse/ch-go/proto" @@ -71,6 +72,11 @@ func (col *IPv4) ScanRow(dest any, row int) error { case **net.IP: *d = new(net.IP) **d = col.row(row) + case *netip.Addr: + *d = col.rowAddr(row) + case **netip.Addr: + *d = new(netip.Addr) + **d = col.rowAddr(row) case *uint32: ipV4 := col.row(row).To4() if ipV4 == nil { @@ -198,6 +204,18 @@ func (col *IPv4) Append(v any) (nulls []uint8, err error) { } } default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "IPv4", + From: fmt.Sprintf("%T", v), + Hint: fmt.Sprintf("could not get driver.Valuer value, try using %s", col.Type()), + } + } + return col.Append(val) + } return nil, &ColumnConverterError{ Op: "Append", To: "IPv4", @@ -230,9 +248,19 @@ func (col *IPv4) AppendRow(v any) (err error) { case netip.Addr: col.col.Append(proto.ToIPv4(v)) case *netip.Addr: - col.col.Append(proto.ToIPv4(*v)) + switch { + case v != nil: + col.col.Append(proto.ToIPv4(*v)) + default: + col.col.Append(0) + } case net.IP: - col.col.Append(proto.ToIPv4(netIPToNetIPAddr(v))) + switch { + case len(v) == 0: + col.col.Append(0) + default: + col.col.Append(proto.ToIPv4(netIPToNetIPAddr(v))) + } case *net.IP: switch { case v != nil: @@ -252,6 +280,18 @@ func (col *IPv4) AppendRow(v any) (err error) { col.col.Append(0) } default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "IPv4", + From: fmt.Sprintf("%T", v), + Hint: fmt.Sprintf("could not get driver.Valuer value, try using %s", col.Type()), + } + } + return col.AppendRow(val) + } return &ColumnConverterError{ Op: "AppendRow", To: "IPv4", @@ -277,6 +317,10 @@ func (col *IPv4) row(i int) net.IP { return net.IPv4(ip[0], ip[1], ip[2], ip[3]).To4() } +func (col *IPv4) rowAddr(i int) netip.Addr { + return col.col.Row(i).ToIP() +} + func netIPToNetIPAddr(ip net.IP) netip.Addr { switch len(ip) { case 4: diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/ipv6.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/ipv6.go index 188b9cc6..a67d17ab 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/ipv6.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/ipv6.go @@ -18,6 +18,7 @@ package column import ( + "database/sql/driver" "fmt" "github.com/ClickHouse/ch-go/proto" "net" @@ -70,6 +71,11 @@ func (col *IPv6) ScanRow(dest any, row int) error { case **net.IP: *d = new(net.IP) **d = col.row(row) + case *netip.Addr: + *d = col.rowAddr(row) + case **netip.Addr: + *d = new(netip.Addr) + **d = 
col.rowAddr(row) case *[]byte: *d = col.row(row) case **[]byte: @@ -226,6 +232,18 @@ func (col *IPv6) Append(v any) (nulls []uint8, err error) { } } default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "IPv6", + From: fmt.Sprintf("%T", v), + Hint: fmt.Sprintf("could not get driver.Valuer value, try using %s", col.Type()), + } + } + return col.Append(val) + } return nil, &ColumnConverterError{ Op: "Append", To: "IPv6", @@ -310,6 +328,18 @@ func (col *IPv6) AppendRow(v any) (err error) { case nil: col.col.Append([16]byte{}) default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "IPv6", + From: fmt.Sprintf("%T", v), + Hint: fmt.Sprintf("could not get driver.Valuer value, try using %s", col.Type()), + } + } + return col.AppendRow(val) + } return &ColumnConverterError{ Op: "AppendRow", To: "IPv6", @@ -328,6 +358,10 @@ func (col *IPv6) Encode(buffer *proto.Buffer) { } func IPv6ToBytes(ip net.IP) [16]byte { + if ip == nil { + return [16]byte{} + } + if len(ip) == 4 { ip = ip.To16() } @@ -340,4 +374,8 @@ func (col *IPv6) row(i int) net.IP { return src[:] } +func (col *IPv6) rowAddr(i int) netip.Addr { + return col.col.Row(i).ToIP() +} + var _ Interface = (*IPv6)(nil) diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/json.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/json.go index 9e0357e0..0978a0b0 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/json.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/json.go @@ -40,7 +40,7 @@ var kindMappings = map[reflect.Kind]string{ reflect.Uint64: "UInt64", reflect.Float32: "Float32", reflect.Float64: "Float64", - reflect.Bool: "Boolean", + reflect.Bool: "Bool", } // complex types for which a mapping exists - currently we map to String but could enhance in the future for other types @@ -501,6 +501,13 @@ func appendStructOrMap(jCol *JSONObject, data any) error { Err: fmt.Errorf("map keys must be string for column %s", jCol.Name()), } } + if jCol.columns == nil && vData.Len() == 0 { + // if map is empty, we need to create an empty Tuple to make sure subcolumns protocol is happy + // _dummy is a ClickHouse internal name for empty Tuple subcolumn + // it has the same effect as `INSERT INTO single_json_type_table VALUES ('{}');` + jCol.upsertValue("_dummy", "Int8") + return jCol.insertEmptyColumn("_dummy") + } return iterateMap(vData, jCol, 0) } return &UnsupportedColumnTypeError{ diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/map.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/map.go index c25f386e..85ccbe9e 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/map.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/map.go @@ -18,11 +18,13 @@ package column import ( + "database/sql/driver" "fmt" - "github.com/ClickHouse/ch-go/proto" "reflect" "strings" "time" + + "github.com/ClickHouse/ch-go/proto" ) // https://github.com/ClickHouse/ClickHouse/blob/master/src/Columns/ColumnMap.cpp @@ -41,6 +43,17 @@ type OrderedMap interface { Keys() <-chan any } +type MapIterator interface { + Next() bool + Key() any + Value() any +} + +type IterableOrderedMap interface { + Put(key any, value any) + Iterator() MapIterator +} + func (col *Map) Reset() { col.keys.Reset() col.values.Reset() @@ -93,6 +106,13 @@ func (col *Map) ScanRow(dest any, 
i int) error { value.Set(col.row(i)) return nil } + if om, ok := dest.(IterableOrderedMap); ok { + keys, values := col.orderedRow(i) + for i := range keys { + om.Put(keys[i], values[i]) + } + return nil + } if om, ok := dest.(OrderedMap); ok { keys, values := col.orderedRow(i) for i := range keys { @@ -111,6 +131,18 @@ func (col *Map) ScanRow(dest any, i int) error { func (col *Map) Append(v any) (nulls []uint8, err error) { value := reflect.Indirect(reflect.ValueOf(v)) if value.Kind() != reflect.Slice { + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: string(col.chType), + From: fmt.Sprintf("%T", v), + Hint: fmt.Sprintf("could not get driver.Valuer value, try using %s", col.scanType), + } + } + return col.Append(val) + } return nil, &ColumnConverterError{ Op: "Append", To: string(col.chType), @@ -150,6 +182,27 @@ func (col *Map) AppendRow(v any) error { return nil } + if orderedMap, ok := v.(IterableOrderedMap); ok { + var size int64 + iter := orderedMap.Iterator() + for iter.Next() { + key, value := iter.Key(), iter.Value() + size++ + if err := col.keys.AppendRow(key); err != nil { + return err + } + if err := col.values.AppendRow(value); err != nil { + return err + } + } + var prev int64 + if n := col.offsets.Rows(); n != 0 { + prev = col.offsets.col.Row(n - 1) + } + col.offsets.col.Append(prev + size) + return nil + } + if orderedMap, ok := v.(OrderedMap); ok { var size int64 for key := range orderedMap.Keys() { @@ -173,6 +226,19 @@ func (col *Map) AppendRow(v any) error { return nil } + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: string(col.chType), + From: fmt.Sprintf("%T", v), + Hint: fmt.Sprintf("could not get driver.Valuer value, try using %s", col.scanType), + } + } + return col.AppendRow(val) + } + return &ColumnConverterError{ Op: "AppendRow", To: string(col.chType), diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/string.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/string.go index 09085ad3..5ce480b0 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/string.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/string.go @@ -112,31 +112,21 @@ func (col *String) AppendRow(v any) error { col.col.Append("") } case []byte: - col.col.Append(string(v)) + col.col.AppendBytes(v) case nil: col.col.Append("") default: - if s, ok := v.(driver.Valuer); ok { - val, err := s.Value() + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() if err != nil { return &ColumnConverterError{ Op: "AppendRow", To: "String", - From: fmt.Sprintf("%T", s), + From: fmt.Sprintf("%T", v), Hint: "could not get driver.Valuer value", } } - - if s, ok := val.(string); ok { - return col.AppendRow(s) - } - - return &ColumnConverterError{ - Op: "AppendRow", - To: "String", - From: fmt.Sprintf("%T", v), - Hint: "driver.Valuer value is not a string", - } + return col.AppendRow(val) } if s, ok := v.(fmt.Stringer); ok { @@ -187,6 +177,19 @@ func (col *String) Append(v any) (nulls []uint8, err error) { col.col.Append(string(v[i])) } default: + + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "String", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } return nil, 
&ColumnConverterError{ Op: "Append", To: "String", diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/tuple.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/tuple.go index 8be24750..95e00db0 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/tuple.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/tuple.go @@ -18,14 +18,17 @@ package column import ( + "database/sql" + "database/sql/driver" "fmt" - "github.com/ClickHouse/ch-go/proto" - "github.com/google/uuid" - "github.com/shopspring/decimal" "net" "reflect" "strings" "time" + + "github.com/ClickHouse/ch-go/proto" + "github.com/google/uuid" + "github.com/shopspring/decimal" ) type Tuple struct { @@ -209,6 +212,15 @@ func setJSONFieldValue(field reflect.Value, value reflect.Value) error { return nil } + // check if our target implements sql.Scanner + sqlScanner := reflect.TypeOf((*sql.Scanner)(nil)).Elem() + if fieldAddr := field.Addr(); field.Kind() != reflect.Ptr && fieldAddr.Type().Implements(sqlScanner) { + returns := fieldAddr.MethodByName("Scan").Call([]reflect.Value{value}) + if len(returns) > 0 && returns[0].IsNil() { + return nil + } + } + return &ColumnConverterError{ Op: "ScanRow", To: fmt.Sprintf("%T", field.Interface()), @@ -480,6 +492,18 @@ func (col *Tuple) Append(v any) (nulls []uint8, err error) { } return nil, nil } + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: string(col.chType), + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } return nil, &ColumnConverterError{ Op: "Append", To: string(col.chType), @@ -542,6 +566,19 @@ func (col *Tuple) AppendRow(v any) error { return nil } + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: string(col.chType), + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } + return &ColumnConverterError{ Op: "AppendRow", To: string(col.chType), diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/uuid.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/uuid.go index 3ee88eff..bf2a1c48 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/uuid.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/column/uuid.go @@ -19,6 +19,7 @@ package column import ( "database/sql" + "database/sql/driver" "fmt" "github.com/ClickHouse/ch-go/proto" "reflect" @@ -130,6 +131,19 @@ func (col *UUID) Append(v any) (nulls []uint8, err error) { } } default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return nil, &ColumnConverterError{ + Op: "Append", + To: "UUID", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.Append(val) + } + return nil, &ColumnConverterError{ Op: "Append", To: "UUID", @@ -170,6 +184,18 @@ func (col *UUID) AppendRow(v any) error { case nil: col.col.Append(uuid.UUID{}) default: + if valuer, ok := v.(driver.Valuer); ok { + val, err := valuer.Value() + if err != nil { + return &ColumnConverterError{ + Op: "AppendRow", + To: "UUID", + From: fmt.Sprintf("%T", v), + Hint: "could not get driver.Valuer value", + } + } + return col.AppendRow(val) + } if s, ok := v.(fmt.Stringer); ok { return col.AppendRow(s.String()) } diff --git 
a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/driver/driver.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/driver/driver.go index 9b102460..f88bb43e 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/driver/driver.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/driver/driver.go @@ -54,9 +54,9 @@ type ( Select(ctx context.Context, dest any, query string, args ...any) error Query(ctx context.Context, query string, args ...any) (Rows, error) QueryRow(ctx context.Context, query string, args ...any) Row - PrepareBatch(ctx context.Context, query string) (Batch, error) + PrepareBatch(ctx context.Context, query string, opts ...PrepareBatchOption) (Batch, error) Exec(ctx context.Context, query string, args ...any) error - AsyncInsert(ctx context.Context, query string, wait bool) error + AsyncInsert(ctx context.Context, query string, wait bool, args ...any) error Ping(context.Context) error Stats() Stats Close() error @@ -84,6 +84,7 @@ type ( Flush() error Send() error IsSent() bool + Rows() int } BatchColumn interface { Append(any) error diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/driver/options.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/driver/options.go new file mode 100644 index 00000000..d81760c9 --- /dev/null +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/driver/options.go @@ -0,0 +1,13 @@ +package driver + +type PrepareBatchOptions struct { + ReleaseConnection bool +} + +type PrepareBatchOption func(options *PrepareBatchOptions) + +func WithReleaseConnection() PrepareBatchOption { + return func(options *PrepareBatchOptions) { + options.ReleaseConnection = true + } +} diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/block.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/block.go index c1b8d605..79a5f13f 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/block.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/block.go @@ -227,7 +227,7 @@ func (b *Block) Decode(reader *proto.Reader, revision uint64) (err error) { if hasCustom { return &BlockError{ Op: "Decode", - Err: errors.New(fmt.Sprintf("custom serialization for column %s. not supported", columnName)), + Err: errors.New(fmt.Sprintf("custom serialization for column %s. 
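Taken together, the column hunks above retrofit a driver.Valuer fallback into the default branch of every Append/AppendRow, while the driver interface gains variadic PrepareBatch options, placeholder arguments for AsyncInsert, and a Batch.Rows() counter. A minimal sketch of how these pieces combine (the table and the Masked type are hypothetical; WithReleaseConnection and Rows come straight from the hunks above):

package main

import (
	"context"
	"database/sql/driver"

	chdriver "github.com/ClickHouse/clickhouse-go/v2/lib/driver"
)

// Masked is a hypothetical custom type. Because it implements
// driver.Valuer, the new default branches unwrap it via Value()
// and retry the append with the underlying value.
type Masked string

func (m Masked) Value() (driver.Value, error) { return string(m), nil }

func insertExample(ctx context.Context, conn chdriver.Conn) error {
	// WithReleaseConnection (the new PrepareBatchOption in this patch)
	// returns the connection to the pool while the batch is filled.
	batch, err := conn.PrepareBatch(ctx, "INSERT INTO example (s)",
		chdriver.WithReleaseConnection())
	if err != nil {
		return err
	}
	if err := batch.Append(Masked("secret")); err != nil {
		return err
	}
	_ = batch.Rows() // new in this patch: number of appended rows
	return batch.Send()
}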
not supported by clickhouse-go driver", columnName)), } } } diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/const.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/const.go index 1e7321b9..cae29e9a 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/const.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/const.go @@ -19,24 +19,25 @@ package proto // see https://github.com/ClickHouse/ClickHouse/blob/master/src/Core/Protocol.h const ( - DBMS_MIN_REVISION_WITH_CLIENT_INFO = 54032 - DBMS_MIN_REVISION_WITH_SERVER_TIMEZONE = 54058 - DBMS_MIN_REVISION_WITH_QUOTA_KEY_IN_CLIENT_INFO = 54060 - DBMS_MIN_REVISION_WITH_SERVER_DISPLAY_NAME = 54372 - DBMS_MIN_REVISION_WITH_VERSION_PATCH = 54401 - DBMS_MIN_REVISION_WITH_CLIENT_WRITE_INFO = 54420 - DBMS_MIN_REVISION_WITH_SETTINGS_SERIALIZED_AS_STRINGS = 54429 - DBMS_MIN_REVISION_WITH_INTERSERVER_SECRET = 54441 - DBMS_MIN_REVISION_WITH_OPENTELEMETRY = 54442 - DBMS_MIN_PROTOCOL_VERSION_WITH_DISTRIBUTED_DEPTH = 54448 - DBMS_MIN_PROTOCOL_VERSION_WITH_INITIAL_QUERY_START_TIME = 54449 - DBMS_MIN_PROTOCOL_VERSION_WITH_INCREMENTAL_PROFILE_EVENTS = 54451 - DBMS_MIN_REVISION_WITH_PARALLEL_REPLICAS = 54453 - DBMS_MIN_REVISION_WITH_CUSTOM_SERIALIZATION = 54454 - DBMS_MIN_PROTOCOL_VERSION_WITH_ADDENDUM = 54458 - DBMS_MIN_PROTOCOL_VERSION_WITH_QUOTA_KEY = 54458 - DBMS_MIN_PROTOCOL_VERSION_WITH_PARAMETERS = 54459 - DBMS_TCP_PROTOCOL_VERSION = DBMS_MIN_PROTOCOL_VERSION_WITH_PARAMETERS + DBMS_MIN_REVISION_WITH_CLIENT_INFO = 54032 + DBMS_MIN_REVISION_WITH_SERVER_TIMEZONE = 54058 + DBMS_MIN_REVISION_WITH_QUOTA_KEY_IN_CLIENT_INFO = 54060 + DBMS_MIN_REVISION_WITH_SERVER_DISPLAY_NAME = 54372 + DBMS_MIN_REVISION_WITH_VERSION_PATCH = 54401 + DBMS_MIN_REVISION_WITH_CLIENT_WRITE_INFO = 54420 + DBMS_MIN_REVISION_WITH_SETTINGS_SERIALIZED_AS_STRINGS = 54429 + DBMS_MIN_REVISION_WITH_INTERSERVER_SECRET = 54441 + DBMS_MIN_REVISION_WITH_OPENTELEMETRY = 54442 + DBMS_MIN_PROTOCOL_VERSION_WITH_DISTRIBUTED_DEPTH = 54448 + DBMS_MIN_PROTOCOL_VERSION_WITH_INITIAL_QUERY_START_TIME = 54449 + DBMS_MIN_PROTOCOL_VERSION_WITH_INCREMENTAL_PROFILE_EVENTS = 54451 + DBMS_MIN_REVISION_WITH_PARALLEL_REPLICAS = 54453 + DBMS_MIN_REVISION_WITH_CUSTOM_SERIALIZATION = 54454 + DBMS_MIN_PROTOCOL_VERSION_WITH_ADDENDUM = 54458 + DBMS_MIN_PROTOCOL_VERSION_WITH_QUOTA_KEY = 54458 + DBMS_MIN_PROTOCOL_VERSION_WITH_PARAMETERS = 54459 + DBMS_MIN_PROTOCOL_VERSION_WITH_SERVER_QUERY_TIME_IN_PROGRES = 54460 + DBMS_TCP_PROTOCOL_VERSION = DBMS_MIN_PROTOCOL_VERSION_WITH_SERVER_QUERY_TIME_IN_PROGRES ) const ( diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/progress.go b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/progress.go index f7de0602..4bff94e6 100644 --- a/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/progress.go +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/lib/proto/progress.go @@ -19,6 +19,7 @@ package proto import ( "fmt" + "time" chproto "github.com/ClickHouse/ch-go/proto" ) @@ -29,6 +30,7 @@ type Progress struct { TotalRows uint64 WroteRows uint64 WroteBytes uint64 + Elapsed time.Duration withClient bool } @@ -51,18 +53,28 @@ func (p *Progress) Decode(reader *chproto.Reader, revision uint64) (err error) { return err } } + + if revision >= DBMS_MIN_PROTOCOL_VERSION_WITH_SERVER_QUERY_TIME_IN_PROGRES { + var n uint64 + if n, err = reader.UVarInt(); err != nil { + return err + } + p.Elapsed = time.Duration(n) * time.Nanosecond + } + return nil } func (p *Progress) String() string { if !p.withClient { - return 
fmt.Sprintf("rows=%d, bytes=%d, total rows=%d", p.Rows, p.Bytes, p.TotalRows) + return fmt.Sprintf("rows=%d, bytes=%d, total rows=%d, elapsed=%s", p.Rows, p.Bytes, p.TotalRows, p.Elapsed.String()) } - return fmt.Sprintf("rows=%d, bytes=%d, total rows=%d, wrote rows=%d wrote bytes=%d", + return fmt.Sprintf("rows=%d, bytes=%d, total rows=%d, wrote rows=%d wrote bytes=%d elapsed=%s", p.Rows, p.Bytes, p.TotalRows, p.WroteRows, p.WroteBytes, + p.Elapsed.String(), ) } diff --git a/vendor/github.com/ClickHouse/clickhouse-go/v2/main.tf b/vendor/github.com/ClickHouse/clickhouse-go/v2/main.tf new file mode 100644 index 00000000..95a0e7da --- /dev/null +++ b/vendor/github.com/ClickHouse/clickhouse-go/v2/main.tf @@ -0,0 +1,59 @@ +terraform { + required_providers { + clickhouse = { + source = "ClickHouse/clickhouse" + version = "~> 0.0.2" + } + } +} + +variable "organization_id" { + type = string +} + +variable "token_key" { + type = string +} + +variable "token_secret" { + type = string +} + +variable "service_name" { + type = string +} + +variable "service_password" { + type = string +} + +provider clickhouse { + environment = "production" + organization_id = var.organization_id + token_key = var.token_key + token_secret = var.token_secret +} + +resource "clickhouse_service" "service" { + name = var.service_name + cloud_provider = "aws" + region = "us-east-2" + tier = "development" + idle_scaling = true + password = var.service_password + + ip_access = [ + { + source = "0.0.0.0/0" + description = "Anywhere" + } + ] +} + +output "CLICKHOUSE_HOST" { + value = clickhouse_service.service.endpoints.0.host +} + +output "SERVICE_ID" { + value = clickhouse_service.service.id +} diff --git a/vendor/github.com/andybalholm/brotli/README.md b/vendor/github.com/andybalholm/brotli/README.md index 1ea7fdb7..00625211 100644 --- a/vendor/github.com/andybalholm/brotli/README.md +++ b/vendor/github.com/andybalholm/brotli/README.md @@ -2,6 +2,13 @@ This package is a brotli compressor and decompressor implemented in Go. It was translated from the reference implementation (https://github.com/google/brotli) with the `c2go` tool at https://github.com/andybalholm/c2go. +I have been working on new compression algorithms (not translated from C) +in the matchfinder package. +You can use them with the NewWriterV2 function. +Currently they give better results than the old implementation +(at least for compressing my test file, Newton’s *Opticks*) +on levels 2 to 6. + I am using it in production with https://github.com/andybalholm/redwood. API documentation is found at https://pkg.go.dev/github.com/andybalholm/brotli?tab=doc. diff --git a/vendor/github.com/andybalholm/brotli/bitwriter.go b/vendor/github.com/andybalholm/brotli/bitwriter.go new file mode 100644 index 00000000..dfc60360 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/bitwriter.go @@ -0,0 +1,56 @@ +package brotli + +/* Copyright 2010 Google Inc. All Rights Reserved. + + Distributed under MIT license. + See file LICENSE for detail or copy at https://opensource.org/licenses/MIT +*/ + +/* Write bits into a byte array. */ + +type bitWriter struct { + dst []byte + + // Data waiting to be written is the low nbits of bits. 
+ bits uint64 + nbits uint +} + +func (w *bitWriter) writeBits(nb uint, b uint64) { + w.bits |= b << w.nbits + w.nbits += nb + if w.nbits >= 32 { + bits := w.bits + w.bits >>= 32 + w.nbits -= 32 + w.dst = append(w.dst, + byte(bits), + byte(bits>>8), + byte(bits>>16), + byte(bits>>24), + ) + } +} + +func (w *bitWriter) writeSingleBit(bit bool) { + if bit { + w.writeBits(1, 1) + } else { + w.writeBits(1, 0) + } +} + +func (w *bitWriter) jumpToByteBoundary() { + dst := w.dst + for w.nbits != 0 { + dst = append(dst, byte(w.bits)) + w.bits >>= 8 + if w.nbits > 8 { // Avoid underflow + w.nbits -= 8 + } else { + w.nbits = 0 + } + } + w.bits = 0 + w.dst = dst +} diff --git a/vendor/github.com/andybalholm/brotli/brotli_bit_stream.go b/vendor/github.com/andybalholm/brotli/brotli_bit_stream.go index 7acfb180..ee655298 100644 --- a/vendor/github.com/andybalholm/brotli/brotli_bit_stream.go +++ b/vendor/github.com/andybalholm/brotli/brotli_bit_stream.go @@ -7,12 +7,18 @@ import ( const maxHuffmanTreeSize = (2*numCommandSymbols + 1) -/* The maximum size of Huffman dictionary for distances assuming that - NPOSTFIX = 0 and NDIRECT = 0. */ +/* +The maximum size of Huffman dictionary for distances assuming that + + NPOSTFIX = 0 and NDIRECT = 0. +*/ const maxSimpleDistanceAlphabetSize = 140 -/* Represents the range of values belonging to a prefix code: - [offset, offset + 2^nbits) */ +/* +Represents the range of values belonging to a prefix code: + + [offset, offset + 2^nbits) +*/ type prefixCodeRange struct { offset uint32 nbits uint32 @@ -96,9 +102,12 @@ func nextBlockTypeCode(calculator *blockTypeCodeCalculator, type_ byte) uint { return type_code } -/* |nibblesbits| represents the 2 bits to encode MNIBBLES (0-3) - REQUIRES: length > 0 - REQUIRES: length <= (1 << 24) */ +/* +|nibblesbits| represents the 2 bits to encode MNIBBLES (0-3) + + REQUIRES: length > 0 + REQUIRES: length <= (1 << 24) +*/ func encodeMlen(length uint, bits *uint64, numbits *uint, nibblesbits *uint64) { var lg uint if length == 1 { @@ -132,8 +141,11 @@ func storeCommandExtra(cmd *command, storage_ix *uint, storage []byte) { writeBits(uint(insnumextra+getCopyExtra(copycode)), bits, storage_ix, storage) } -/* Data structure that stores almost everything that is needed to encode each - block switch command. */ +/* +Data structure that stores almost everything that is needed to encode each + + block switch command. +*/ type blockSplitCode struct { type_code_calculator blockTypeCodeCalculator type_depths [maxBlockTypeSymbols]byte @@ -154,9 +166,12 @@ func storeVarLenUint8(n uint, storage_ix *uint, storage []byte) { } } -/* Stores the compressed meta-block header. - REQUIRES: length > 0 - REQUIRES: length <= (1 << 24) */ +/* +Stores the compressed meta-block header. + + REQUIRES: length > 0 + REQUIRES: length <= (1 << 24) +*/ func storeCompressedMetaBlockHeader(is_final_block bool, length uint, storage_ix *uint, storage []byte) { var lenbits uint64 var nlenbits uint @@ -186,9 +201,12 @@ func storeCompressedMetaBlockHeader(is_final_block bool, length uint, storage_ix } } -/* Stores the uncompressed meta-block header. - REQUIRES: length > 0 - REQUIRES: length <= (1 << 24) */ +/* +Stores the uncompressed meta-block header. 
+ + REQUIRES: length > 0 + REQUIRES: length <= (1 << 24) +*/ func storeUncompressedMetaBlockHeader(length uint, storage_ix *uint, storage []byte) { var lenbits uint64 var nlenbits uint @@ -312,8 +330,11 @@ func storeSimpleHuffmanTree(depths []byte, symbols []uint, num_symbols uint, max } } -/* num = alphabet size - depths = symbol depths */ +/* +num = alphabet size + + depths = symbol depths +*/ func storeHuffmanTree(depths []byte, num uint, tree []huffmanTree, storage_ix *uint, storage []byte) { var huffman_tree [numCommandSymbols]byte var huffman_tree_extra_bits [numCommandSymbols]byte @@ -367,8 +388,11 @@ func storeHuffmanTree(depths []byte, num uint, tree []huffmanTree, storage_ix *u storeHuffmanTreeToBitMask(huffman_tree_size, huffman_tree[:], huffman_tree_extra_bits[:], code_length_bitdepth[:], code_length_bitdepth_symbols[:], storage_ix, storage) } -/* Builds a Huffman tree from histogram[0:length] into depth[0:length] and - bits[0:length] and stores the encoded tree to the bit stream. */ +/* +Builds a Huffman tree from histogram[0:length] into depth[0:length] and + + bits[0:length] and stores the encoded tree to the bit stream. +*/ func buildAndStoreHuffmanTree(histogram []uint32, histogram_length uint, alphabet_size uint, tree []huffmanTree, depth []byte, bits []uint16, storage_ix *uint, storage []byte) { var count uint = 0 var s4 = [4]uint{0} @@ -623,6 +647,203 @@ func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_ } } +func buildAndStoreHuffmanTreeFastBW(histogram []uint32, histogram_total uint, max_bits uint, depth []byte, bits []uint16, bw *bitWriter) { + var count uint = 0 + var symbols = [4]uint{0} + var length uint = 0 + var total uint = histogram_total + for total != 0 { + if histogram[length] != 0 { + if count < 4 { + symbols[count] = length + } + + count++ + total -= uint(histogram[length]) + } + + length++ + } + + if count <= 1 { + bw.writeBits(4, 1) + bw.writeBits(max_bits, uint64(symbols[0])) + depth[symbols[0]] = 0 + bits[symbols[0]] = 0 + return + } + + for i := 0; i < int(length); i++ { + depth[i] = 0 + } + { + var max_tree_size uint = 2*length + 1 + tree, _ := huffmanTreePool.Get().(*[]huffmanTree) + if tree == nil || cap(*tree) < int(max_tree_size) { + tmp := make([]huffmanTree, max_tree_size) + tree = &tmp + } else { + *tree = (*tree)[:max_tree_size] + } + var count_limit uint32 + for count_limit = 1; ; count_limit *= 2 { + var node int = 0 + var l uint + for l = length; l != 0; { + l-- + if histogram[l] != 0 { + if histogram[l] >= count_limit { + initHuffmanTree(&(*tree)[node:][0], histogram[l], -1, int16(l)) + } else { + initHuffmanTree(&(*tree)[node:][0], count_limit, -1, int16(l)) + } + + node++ + } + } + { + var n int = node + /* Points to the next leaf node. */ /* Points to the next non-leaf node. */ + var sentinel huffmanTree + var i int = 0 + var j int = n + 1 + var k int + + sortHuffmanTreeItems(*tree, uint(n), huffmanTreeComparator(sortHuffmanTree1)) + + /* The nodes are: + [0, n): the sorted leaf nodes that we start with. + [n]: we add a sentinel here. + [n + 1, 2n): new parent nodes are added here, starting from + (n+1). These are naturally in ascending order. + [2n]: we add a sentinel at the end as well. + There will be (2n+1) elements at the end. 
*/ + initHuffmanTree(&sentinel, math.MaxUint32, -1, -1) + + (*tree)[node] = sentinel + node++ + (*tree)[node] = sentinel + node++ + + for k = n - 1; k > 0; k-- { + var left int + var right int + if (*tree)[i].total_count_ <= (*tree)[j].total_count_ { + left = i + i++ + } else { + left = j + j++ + } + + if (*tree)[i].total_count_ <= (*tree)[j].total_count_ { + right = i + i++ + } else { + right = j + j++ + } + + /* The sentinel node becomes the parent node. */ + (*tree)[node-1].total_count_ = (*tree)[left].total_count_ + (*tree)[right].total_count_ + + (*tree)[node-1].index_left_ = int16(left) + (*tree)[node-1].index_right_or_value_ = int16(right) + + /* Add back the last sentinel node. */ + (*tree)[node] = sentinel + node++ + } + + if setDepth(2*n-1, *tree, depth, 14) { + /* We need to pack the Huffman tree in 14 bits. If this was not + successful, add fake entities to the lowest values and retry. */ + break + } + } + } + + huffmanTreePool.Put(tree) + } + + convertBitDepthsToSymbols(depth, length, bits) + if count <= 4 { + var i uint + + /* value of 1 indicates a simple Huffman code */ + bw.writeBits(2, 1) + + bw.writeBits(2, uint64(count)-1) /* NSYM - 1 */ + + /* Sort */ + for i = 0; i < count; i++ { + var j uint + for j = i + 1; j < count; j++ { + if depth[symbols[j]] < depth[symbols[i]] { + var tmp uint = symbols[j] + symbols[j] = symbols[i] + symbols[i] = tmp + } + } + } + + if count == 2 { + bw.writeBits(max_bits, uint64(symbols[0])) + bw.writeBits(max_bits, uint64(symbols[1])) + } else if count == 3 { + bw.writeBits(max_bits, uint64(symbols[0])) + bw.writeBits(max_bits, uint64(symbols[1])) + bw.writeBits(max_bits, uint64(symbols[2])) + } else { + bw.writeBits(max_bits, uint64(symbols[0])) + bw.writeBits(max_bits, uint64(symbols[1])) + bw.writeBits(max_bits, uint64(symbols[2])) + bw.writeBits(max_bits, uint64(symbols[3])) + + /* tree-select */ + bw.writeSingleBit(depth[symbols[0]] == 1) + } + } else { + var previous_value byte = 8 + var i uint + + /* Complex Huffman Tree */ + storeStaticCodeLengthCodeBW(bw) + + /* Actual RLE coding. */ + for i = 0; i < length; { + var value byte = depth[i] + var reps uint = 1 + var k uint + for k = i + 1; k < length && depth[k] == value; k++ { + reps++ + } + + i += reps + if value == 0 { + bw.writeBits(uint(kZeroRepsDepth[reps]), kZeroRepsBits[reps]) + } else { + if previous_value != value { + bw.writeBits(uint(kCodeLengthDepth[value]), uint64(kCodeLengthBits[value])) + reps-- + } + + if reps < 3 { + for reps != 0 { + reps-- + bw.writeBits(uint(kCodeLengthDepth[value]), uint64(kCodeLengthBits[value])) + } + } else { + reps -= 3 + bw.writeBits(uint(kNonZeroRepsDepth[reps]), kNonZeroRepsBits[reps]) + } + + previous_value = value + } + } + } +} + func indexOf(v []byte, v_size uint, value byte) uint { var i uint = 0 for ; i < v_size; i++ { @@ -674,12 +895,15 @@ func moveToFrontTransform(v_in []uint32, v_size uint, v_out []uint32) { } } -/* Finds runs of zeros in v[0..in_size) and replaces them with a prefix code of - the run length plus extra bits (lower 9 bits is the prefix code and the rest - are the extra bits). Non-zero values in v[] are shifted by - *max_length_prefix. Will not create prefix codes bigger than the initial - value of *max_run_length_prefix. The prefix code of run length L is simply - Log2Floor(L) and the number of extra bits is the same as the prefix code. 
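For example, under this scheme a run of 27 zeros takes prefix code Log2Floor(27) = 4 and, assuming the extra bits carry the remainder L - 2^prefix, an extra value of 27 - 16 = 11 in 4 bits, so every run length from 16 to 31 shares prefix 4, while a single zero takes prefix 0 with no extra bits.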
*/ +/* +Finds runs of zeros in v[0..in_size) and replaces them with a prefix code of + + the run length plus extra bits (lower 9 bits is the prefix code and the rest + are the extra bits). Non-zero values in v[] are shifted by + *max_length_prefix. Will not create prefix codes bigger than the initial + value of *max_run_length_prefix. The prefix code of run length L is simply + Log2Floor(L) and the number of extra bits is the same as the prefix code. +*/ func runLengthCodeZeros(in_size uint, v []uint32, out_size *uint, max_run_length_prefix *uint32) { var max_reps uint32 = 0 var i uint @@ -799,8 +1023,11 @@ func storeBlockSwitch(code *blockSplitCode, block_len uint32, block_type byte, i writeBits(uint(len_nextra), uint64(len_extra), storage_ix, storage) } -/* Builds a BlockSplitCode data structure from the block split given by the - vector of block types and block lengths and stores it to the bit stream. */ +/* +Builds a BlockSplitCode data structure from the block split given by the + + vector of block types and block lengths and stores it to the bit stream. +*/ func buildAndStoreBlockSplitCode(types []byte, lengths []uint32, num_blocks uint, num_types uint, tree []huffmanTree, code *blockSplitCode, storage_ix *uint, storage []byte) { var type_histo [maxBlockTypeSymbols]uint32 var length_histo [numBlockLenSymbols]uint32 @@ -919,14 +1146,20 @@ func cleanupBlockEncoder(self *blockEncoder) { blockEncoderPool.Put(self) } -/* Creates entropy codes of block lengths and block types and stores them - to the bit stream. */ +/* +Creates entropy codes of block lengths and block types and stores them + + to the bit stream. +*/ func buildAndStoreBlockSwitchEntropyCodes(self *blockEncoder, tree []huffmanTree, storage_ix *uint, storage []byte) { buildAndStoreBlockSplitCode(self.block_types_, self.block_lengths_, self.num_blocks_, self.num_block_types_, tree, &self.block_split_code_, storage_ix, storage) } -/* Stores the next symbol with the entropy code of the current block type. - Updates the block type and block length at block boundaries. */ +/* +Stores the next symbol with the entropy code of the current block type. + + Updates the block type and block length at block boundaries. +*/ func storeSymbol(self *blockEncoder, symbol uint, storage_ix *uint, storage []byte) { if self.block_len_ == 0 { self.block_ix_++ @@ -945,9 +1178,12 @@ func storeSymbol(self *blockEncoder, symbol uint, storage_ix *uint, storage []by } } -/* Stores the next symbol with the entropy code of the current block type and - context value. - Updates the block type and block length at block boundaries. */ +/* +Stores the next symbol with the entropy code of the current block type and + + context value. + Updates the block type and block length at block boundaries. +*/ func storeSymbolWithContext(self *blockEncoder, symbol uint, context uint, context_map []uint32, storage_ix *uint, storage []byte, context_bits uint) { if self.block_len_ == 0 { self.block_ix_++ @@ -1268,8 +1504,11 @@ func storeMetaBlockFast(input []byte, start_pos uint, length uint, mask uint, is } } -/* This is for storing uncompressed blocks (simple raw storage of - bytes-as-bytes). */ +/* +This is for storing uncompressed blocks (simple raw storage of + + bytes-as-bytes). 
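The sentinel-array loop in buildAndStoreHuffmanTreeFastBW above is the classic two-queue Huffman construction: sorted leaves form one queue, parents (which are created in nondecreasing count order) form the other, and each step merges the two smallest fronts. A self-contained sketch of just that core, leaving out brotli's 14-bit depth cap and its count-clamping retry loop:

package main

import (
	"fmt"
	"sort"
)

type node struct {
	count       uint32
	left, right int // child indexes; -1 marks a leaf
	symbol      int
}

// codeLengths computes Huffman code lengths via the two-queue method.
func codeLengths(freq []uint32) []int {
	var nodes []node
	for s, f := range freq {
		if f > 0 {
			nodes = append(nodes, node{count: f, left: -1, right: -1, symbol: s})
		}
	}
	lengths := make([]int, len(freq))
	n := len(nodes)
	if n < 2 {
		return lengths // 0 or 1 symbols: brotli emits a trivial code instead
	}
	sort.Slice(nodes, func(i, j int) bool { return nodes[i].count < nodes[j].count })
	i, j := 0, n // i walks the leaf queue [0,n); j walks the parent queue [n,...)
	pop := func() int {
		if i < n && (j >= len(nodes) || nodes[i].count <= nodes[j].count) {
			i++
			return i - 1
		}
		j++
		return j - 1
	}
	for k := 0; k < n-1; k++ {
		l, r := pop(), pop()
		nodes = append(nodes, node{count: nodes[l].count + nodes[r].count, left: l, right: r})
	}
	var walk func(idx, depth int)
	walk = func(idx, depth int) {
		if nodes[idx].left < 0 {
			lengths[nodes[idx].symbol] = depth
			return
		}
		walk(nodes[idx].left, depth+1)
		walk(nodes[idx].right, depth+1)
	}
	walk(len(nodes)-1, 0) // the last node appended is the root
	return lengths
}

func main() {
	fmt.Println(codeLengths([]uint32{5, 1, 1, 2})) // prints [1 3 3 2]
}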
+*/ func storeUncompressedMetaBlock(is_final_block bool, input []byte, position uint, mask uint, len uint, storage_ix *uint, storage []byte) { var masked_pos uint = position & mask storeUncompressedMetaBlockHeader(uint(len), storage_ix, storage) diff --git a/vendor/github.com/andybalholm/brotli/compress_fragment_two_pass.go b/vendor/github.com/andybalholm/brotli/compress_fragment_two_pass.go index 172dc7f4..79f9c7fd 100644 --- a/vendor/github.com/andybalholm/brotli/compress_fragment_two_pass.go +++ b/vendor/github.com/andybalholm/brotli/compress_fragment_two_pass.go @@ -39,8 +39,11 @@ func isMatch1(p1 []byte, p2 []byte, length uint) bool { return p1[4] == p2[4] && p1[5] == p2[5] } -/* Builds a command and distance prefix code (each 64 symbols) into "depth" and - "bits" based on "histogram" and stores it into the bit stream. */ +/* +Builds a command and distance prefix code (each 64 symbols) into "depth" and + + "bits" based on "histogram" and stores it into the bit stream. +*/ func buildAndStoreCommandPrefixCode(histogram []uint32, depth []byte, bits []uint16, storage_ix *uint, storage []byte) { var tree [129]huffmanTree var cmd_depth = [numCommandSymbols]byte{0} @@ -216,6 +219,25 @@ func storeMetaBlockHeader(len uint, is_uncompressed bool, storage_ix *uint, stor writeSingleBit(is_uncompressed, storage_ix, storage) } +func storeMetaBlockHeaderBW(len uint, is_uncompressed bool, bw *bitWriter) { + var nibbles uint = 6 + + /* ISLAST */ + bw.writeBits(1, 0) + + if len <= 1<<16 { + nibbles = 4 + } else if len <= 1<<20 { + nibbles = 5 + } + + bw.writeBits(2, uint64(nibbles)-4) + bw.writeBits(nibbles*4, uint64(len)-1) + + /* ISUNCOMPRESSED */ + bw.writeSingleBit(is_uncompressed) +} + func createCommands(input []byte, block_size uint, input_size uint, base_ip_ptr []byte, table []int, table_bits uint, min_match uint, literals *[]byte, commands *[]uint32) { var ip int = 0 var shift uint = 64 - table_bits @@ -710,19 +732,22 @@ func compressFragmentTwoPassImpl(input []byte, input_size uint, is_last bool, co } } -/* Compresses "input" string to the "*storage" buffer as one or more complete - meta-blocks, and updates the "*storage_ix" bit position. +/* +Compresses "input" string to the "*storage" buffer as one or more complete - If "is_last" is 1, emits an additional empty last meta-block. + meta-blocks, and updates the "*storage_ix" bit position. - REQUIRES: "input_size" is greater than zero, or "is_last" is 1. - REQUIRES: "input_size" is less or equal to maximal metablock size (1 << 24). - REQUIRES: "command_buf" and "literal_buf" point to at least - kCompressFragmentTwoPassBlockSize long arrays. - REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero. - REQUIRES: "table_size" is a power of two - OUTPUT: maximal copy distance <= |input_size| - OUTPUT: maximal copy distance <= BROTLI_MAX_BACKWARD_LIMIT(18) */ + If "is_last" is 1, emits an additional empty last meta-block. + + REQUIRES: "input_size" is greater than zero, or "is_last" is 1. + REQUIRES: "input_size" is less or equal to maximal metablock size (1 << 24). + REQUIRES: "command_buf" and "literal_buf" point to at least + kCompressFragmentTwoPassBlockSize long arrays. + REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero. 
+ REQUIRES: "table_size" is a power of two + OUTPUT: maximal copy distance <= |input_size| + OUTPUT: maximal copy distance <= BROTLI_MAX_BACKWARD_LIMIT(18) +*/ func compressFragmentTwoPass(input []byte, input_size uint, is_last bool, command_buf []uint32, literal_buf []byte, table []int, table_size uint, storage_ix *uint, storage []byte) { var initial_storage_ix uint = *storage_ix var table_bits uint = uint(log2FloorNonZero(table_size)) diff --git a/vendor/github.com/andybalholm/brotli/encoder.go b/vendor/github.com/andybalholm/brotli/encoder.go new file mode 100644 index 00000000..650d1e42 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/encoder.go @@ -0,0 +1,168 @@ +package brotli + +import "github.com/andybalholm/brotli/matchfinder" + +// An Encoder implements the matchfinder.Encoder interface, writing in Brotli format. +type Encoder struct { + wroteHeader bool + bw bitWriter + distCache []distanceCode +} + +func (e *Encoder) Reset() { + e.wroteHeader = false + e.bw = bitWriter{} +} + +func (e *Encoder) Encode(dst []byte, src []byte, matches []matchfinder.Match, lastBlock bool) []byte { + e.bw.dst = dst + if !e.wroteHeader { + e.bw.writeBits(4, 15) + e.wroteHeader = true + } + + var literalHisto [256]uint32 + var commandHisto [704]uint32 + var distanceHisto [64]uint32 + literalCount := 0 + commandCount := 0 + distanceCount := 0 + + if len(e.distCache) < len(matches) { + e.distCache = make([]distanceCode, len(matches)) + } + + // first pass: build the histograms + pos := 0 + + // d is the ring buffer of the last 4 distances. + d := [4]int{-10, -10, -10, -10} + for i, m := range matches { + if m.Unmatched > 0 { + for _, c := range src[pos : pos+m.Unmatched] { + literalHisto[c]++ + } + literalCount += m.Unmatched + } + + insertCode := getInsertLengthCode(uint(m.Unmatched)) + copyCode := getCopyLengthCode(uint(m.Length)) + if m.Length == 0 { + // If the stream ends with unmatched bytes, we need a dummy copy length. + copyCode = 2 + } + command := combineLengthCodes(insertCode, copyCode, false) + commandHisto[command]++ + commandCount++ + + if command >= 128 && m.Length != 0 { + var distCode distanceCode + switch m.Distance { + case d[3]: + distCode.code = 0 + case d[2]: + distCode.code = 1 + case d[1]: + distCode.code = 2 + case d[0]: + distCode.code = 3 + case d[3] - 1: + distCode.code = 4 + case d[3] + 1: + distCode.code = 5 + case d[3] - 2: + distCode.code = 6 + case d[3] + 2: + distCode.code = 7 + case d[3] - 3: + distCode.code = 8 + case d[3] + 3: + distCode.code = 9 + + // In my testing, codes 10–15 actually reduced the compression ratio. 
+ + default: + distCode = getDistanceCode(m.Distance) + } + e.distCache[i] = distCode + distanceHisto[distCode.code]++ + distanceCount++ + if distCode.code != 0 { + d[0], d[1], d[2], d[3] = d[1], d[2], d[3], m.Distance + } + } + + pos += m.Unmatched + m.Length + } + + storeMetaBlockHeaderBW(uint(len(src)), false, &e.bw) + e.bw.writeBits(13, 0) + + var literalDepths [256]byte + var literalBits [256]uint16 + buildAndStoreHuffmanTreeFastBW(literalHisto[:], uint(literalCount), 8, literalDepths[:], literalBits[:], &e.bw) + + var commandDepths [704]byte + var commandBits [704]uint16 + buildAndStoreHuffmanTreeFastBW(commandHisto[:], uint(commandCount), 10, commandDepths[:], commandBits[:], &e.bw) + + var distanceDepths [64]byte + var distanceBits [64]uint16 + buildAndStoreHuffmanTreeFastBW(distanceHisto[:], uint(distanceCount), 6, distanceDepths[:], distanceBits[:], &e.bw) + + pos = 0 + for i, m := range matches { + insertCode := getInsertLengthCode(uint(m.Unmatched)) + copyCode := getCopyLengthCode(uint(m.Length)) + if m.Length == 0 { + // If the stream ends with unmatched bytes, we need a dummy copy length. + copyCode = 2 + } + command := combineLengthCodes(insertCode, copyCode, false) + e.bw.writeBits(uint(commandDepths[command]), uint64(commandBits[command])) + if kInsExtra[insertCode] > 0 { + e.bw.writeBits(uint(kInsExtra[insertCode]), uint64(m.Unmatched)-uint64(kInsBase[insertCode])) + } + if kCopyExtra[copyCode] > 0 { + e.bw.writeBits(uint(kCopyExtra[copyCode]), uint64(m.Length)-uint64(kCopyBase[copyCode])) + } + + if m.Unmatched > 0 { + for _, c := range src[pos : pos+m.Unmatched] { + e.bw.writeBits(uint(literalDepths[c]), uint64(literalBits[c])) + } + } + + if command >= 128 && m.Length != 0 { + distCode := e.distCache[i] + e.bw.writeBits(uint(distanceDepths[distCode.code]), uint64(distanceBits[distCode.code])) + if distCode.nExtra > 0 { + e.bw.writeBits(distCode.nExtra, distCode.extraBits) + } + } + + pos += m.Unmatched + m.Length + } + + if lastBlock { + e.bw.writeBits(2, 3) // islast + isempty + e.bw.jumpToByteBoundary() + } + return e.bw.dst +} + +type distanceCode struct { + code int + nExtra uint + extraBits uint64 +} + +func getDistanceCode(distance int) distanceCode { + d := distance + 3 + nbits := log2FloorNonZero(uint(d)) - 1 + prefix := (d >> nbits) & 1 + offset := (2 + prefix) << nbits + distcode := int(2*(nbits-1)) + prefix + 16 + extra := d - offset + return distanceCode{distcode, uint(nbits), uint64(extra)} +} diff --git a/vendor/github.com/andybalholm/brotli/entropy_encode_static.go b/vendor/github.com/andybalholm/brotli/entropy_encode_static.go index 5ddf3fcb..294aff4f 100644 --- a/vendor/github.com/andybalholm/brotli/entropy_encode_static.go +++ b/vendor/github.com/andybalholm/brotli/entropy_encode_static.go @@ -782,6 +782,11 @@ func storeStaticCodeLengthCode(storage_ix *uint, storage []byte) { writeBits(40, 0x0000FF55555554, storage_ix, storage) } +func storeStaticCodeLengthCodeBW(bw *bitWriter) { + bw.writeBits(32, 0x55555554) + bw.writeBits(8, 0xFF) +} + var kZeroRepsBits = [numCommandSymbols]uint64{ 0x00000000, 0x00000000, diff --git a/vendor/github.com/andybalholm/brotli/http.go b/vendor/github.com/andybalholm/brotli/http.go index 1e981963..3d3a8a06 100644 --- a/vendor/github.com/andybalholm/brotli/http.go +++ b/vendor/github.com/andybalholm/brotli/http.go @@ -11,15 +11,7 @@ import ( // the Accept-Encoding header, sets the Content-Encoding header, and returns a // WriteCloser that implements that compression. 
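With the Content-Type precondition removed, HTTPCompressor can be used unconditionally in a handler. A brief usage sketch (the payload and port are placeholders):

package main

import (
	"io"
	"net/http"
	"strings"

	"github.com/andybalholm/brotli"
)

func handler(w http.ResponseWriter, r *http.Request) {
	// Negotiates br or gzip from Accept-Encoding and sets
	// Content-Encoding; "br" now goes through the V2 writer per this patch.
	cw := brotli.HTTPCompressor(w, r)
	defer cw.Close()
	io.Copy(cw, strings.NewReader("hello, brotli"))
}

func main() {
	http.HandleFunc("/", handler)
	http.ListenAndServe(":8080", nil)
}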
The Close method must be called // before the current HTTP handler returns. -// -// Due to https://github.com/golang/go/issues/31753, the response will not be -// compressed unless you set a Content-Type header before you call -// HTTPCompressor. func HTTPCompressor(w http.ResponseWriter, r *http.Request) io.WriteCloser { - if w.Header().Get("Content-Type") == "" { - return nopCloser{w} - } - if w.Header().Get("Vary") == "" { w.Header().Set("Vary", "Accept-Encoding") } @@ -28,7 +20,7 @@ func HTTPCompressor(w http.ResponseWriter, r *http.Request) io.WriteCloser { switch encoding { case "br": w.Header().Set("Content-Encoding", "br") - return NewWriter(w) + return NewWriterV2(w, DefaultCompression) case "gzip": w.Header().Set("Content-Encoding", "gzip") return gzip.NewWriter(w) diff --git a/vendor/github.com/andybalholm/brotli/matchfinder/emitter.go b/vendor/github.com/andybalholm/brotli/matchfinder/emitter.go new file mode 100644 index 00000000..37ed8e13 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/matchfinder/emitter.go @@ -0,0 +1,45 @@ +package matchfinder + +// An absoluteMatch is like a Match, but it stores indexes into the byte +// stream instead of lengths. +type absoluteMatch struct { + // Start is the index of the first byte. + Start int + + // End is the index of the byte after the last byte + // (so that End - Start = Length). + End int + + // Match is the index of the previous data that matches + // (Start - Match = Distance). + Match int +} + +// A matchEmitter manages the output of matches for a MatchFinder. +type matchEmitter struct { + // Dst is the destination slice that Matches are added to. + Dst []Match + + // NextEmit is the index of the next byte to emit. + NextEmit int +} + +func (e *matchEmitter) emit(m absoluteMatch) { + e.Dst = append(e.Dst, Match{ + Unmatched: m.Start - e.NextEmit, + Length: m.End - m.Start, + Distance: m.Start - m.Match, + }) + e.NextEmit = m.End +} + +// trim shortens m if it extends past maxEnd. Then if the length is at least +// minLength, the match is emitted. +func (e *matchEmitter) trim(m absoluteMatch, maxEnd int, minLength int) { + if m.End > maxEnd { + m.End = maxEnd + } + if m.End-m.Start >= minLength { + e.emit(m) + } +} diff --git a/vendor/github.com/andybalholm/brotli/matchfinder/m0.go b/vendor/github.com/andybalholm/brotli/matchfinder/m0.go new file mode 100644 index 00000000..773b7c49 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/matchfinder/m0.go @@ -0,0 +1,169 @@ +package matchfinder + +import ( + "encoding/binary" +) + +// M0 is an implementation of the MatchFinder interface based +// on the algorithm used by snappy, but modified to be more like the algorithm +// used by compression level 0 of the brotli reference implementation. +// +// It has a maximum block size of 65536 bytes. +type M0 struct { + // Lazy turns on "lazy matching," for higher compression but less speed. + Lazy bool + + MaxDistance int + MaxLength int +} + +func (M0) Reset() {} + +const ( + m0HashLen = 5 + + m0TableBits = 14 + m0TableSize = 1 << m0TableBits + m0Shift = 32 - m0TableBits + // m0TableMask is redundant, but helps the compiler eliminate bounds + // checks. + m0TableMask = m0TableSize - 1 +) + +func (m M0) hash(data uint64) uint64 { + hash := (data << (64 - 8*m0HashLen)) * hashMul64 + return hash >> (64 - m0TableBits) +} + +// FindMatches looks for matches in src, appends them to dst, and returns dst. +// src must not be longer than 65536 bytes. 
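The Match representation introduced in emitter.go above (Unmatched literal bytes, then a Length/Distance copy) is easiest to see from the decoding side. A sketch, assuming the literal bytes of all matches are available as one concatenated buffer:

package main

import (
	"fmt"

	"github.com/andybalholm/brotli/matchfinder"
)

// applyMatches rebuilds a byte stream from a flat literal buffer and a
// match list, illustrating the Match{Unmatched, Length, Distance}
// fields used by the matchfinder package.
func applyMatches(literals []byte, matches []matchfinder.Match) []byte {
	var out []byte
	for _, m := range matches {
		out = append(out, literals[:m.Unmatched]...)
		literals = literals[m.Unmatched:]
		start := len(out) - m.Distance
		for i := 0; i < m.Length; i++ {
			// Byte-wise copy so a match may overlap its own output,
			// e.g. Distance 1 repeating the previous byte.
			out = append(out, out[start+i])
		}
	}
	return out
}

func main() {
	m := []matchfinder.Match{{Unmatched: 3, Length: 6, Distance: 3}}
	fmt.Printf("%s\n", applyMatches([]byte("abc"), m)) // abcabcabc
}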
+func (m M0) FindMatches(dst []Match, src []byte) []Match { + const inputMargin = 16 - 1 + const minNonLiteralBlockSize = 1 + 1 + inputMargin + + if len(src) < minNonLiteralBlockSize { + dst = append(dst, Match{ + Unmatched: len(src), + }) + return dst + } + if len(src) > 65536 { + panic("block too long") + } + + var table [m0TableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + nextHash := m.hash(binary.LittleEndian.Uint64(src[s:])) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&m0TableMask]) + table[nextHash&m0TableMask] = uint16(s) + nextHash = m.hash(binary.LittleEndian.Uint64(src[nextS:])) + if m.MaxDistance != 0 && s-candidate > m.MaxDistance { + continue + } + if binary.LittleEndian.Uint32(src[s:]) == binary.LittleEndian.Uint32(src[candidate:]) { + break + } + } + + // Invariant: we have a 4-byte match at s. + base := s + s = extendMatch(src, candidate+4, s+4) + + origBase := base + if m.Lazy && base+1 < sLimit { + newBase := base + 1 + h := m.hash(binary.LittleEndian.Uint64(src[newBase:])) + newCandidate := int(table[h&m0TableMask]) + table[h&m0TableMask] = uint16(newBase) + okDistance := true + if m.MaxDistance != 0 && newBase-newCandidate > m.MaxDistance { + okDistance = false + } + if okDistance && binary.LittleEndian.Uint32(src[newBase:]) == binary.LittleEndian.Uint32(src[newCandidate:]) { + newS := extendMatch(src, newCandidate+4, newBase+4) + if newS-newBase > s-base+1 { + s = newS + base = newBase + candidate = newCandidate + } + } + } + + if m.MaxLength != 0 && s-base > m.MaxLength { + s = base + m.MaxLength + } + dst = append(dst, Match{ + Unmatched: base - nextEmit, + Length: s - base, + Distance: base - candidate, + }) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + if m.Lazy { + // If lazy matching is enabled, we update the hash table for + // every byte in the match. 
+ for i := origBase + 2; i < s-1; i++ { + x := binary.LittleEndian.Uint64(src[i:]) + table[m.hash(x)&m0TableMask] = uint16(i) + } + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. + x := binary.LittleEndian.Uint64(src[s-1:]) + prevHash := m.hash(x >> 0) + table[prevHash&m0TableMask] = uint16(s - 1) + nextHash = m.hash(x >> 8) + } + +emitRemainder: + if nextEmit < len(src) { + dst = append(dst, Match{ + Unmatched: len(src) - nextEmit, + }) + } + return dst +} diff --git a/vendor/github.com/andybalholm/brotli/matchfinder/m4.go b/vendor/github.com/andybalholm/brotli/matchfinder/m4.go new file mode 100644 index 00000000..5b2acba2 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/matchfinder/m4.go @@ -0,0 +1,297 @@ +package matchfinder + +import ( + "encoding/binary" + "math/bits" + "runtime" ) + +// M4 is an implementation of the MatchFinder +// interface that uses a hash table to find matches, +// optional match chains, +// and the advanced parsing technique from +// https://fastcompression.blogspot.com/2011/12/advanced-parsing-strategies.html. +type M4 struct { + // MaxDistance is the maximum distance (in bytes) to look back for + // a match. The default is 65535. + MaxDistance int + + // MinLength is the length of the shortest match to return. + // The default is 4. + MinLength int + + // HashLen is the number of bytes to use to calculate the hashes. + // The maximum is 8 and the default is 6. + HashLen int + + // TableBits is the number of bits in the hash table indexes. + // The default is 17 (128K entries). + TableBits int + + // ChainLength is how many entries to search on the "match chain" of older + // locations with the same hash as the current location. + ChainLength int + + // DistanceBitCost is used when comparing two matches to see + // which is better. The comparison is primarily based on the length + // of the matches, but it can also take the distance into account, + // in terms of the number of bits needed to represent the distance. + // One byte of length is given a score of 256, so 32 (256/8) would + // be a reasonable first guess for the value of one bit. + // (The default is 0, which bases the comparison solely on length.) + DistanceBitCost int + + table []uint32 + chain []uint16 + + history []byte +} + +func (q *M4) Reset() { + for i := range q.table { + q.table[i] = 0 + } + q.history = q.history[:0] + q.chain = q.chain[:0] +} + +func (q *M4) score(m absoluteMatch) int { + return (m.End-m.Start)*256 + bits.LeadingZeros32(uint32(m.Start-m.Match))*q.DistanceBitCost +} + +func (q *M4) FindMatches(dst []Match, src []byte) []Match { + if q.MaxDistance == 0 { + q.MaxDistance = 65535 + } + if q.MinLength == 0 { + q.MinLength = 4 + } + if q.HashLen == 0 { + q.HashLen = 6 + } + if q.TableBits == 0 { + q.TableBits = 17 + } + if len(q.table) < 1<<q.TableBits { + q.table = make([]uint32, 1<<q.TableBits) + } + + e := matchEmitter{Dst: dst} + + if len(q.history) > q.MaxDistance*2 { + // Trim down the history buffer. + delta := len(q.history) - q.MaxDistance + copy(q.history, q.history[delta:]) + q.history = q.history[:q.MaxDistance] + if q.ChainLength > 0 { + q.chain = q.chain[:q.MaxDistance] + } + + for i, v := range q.table { + newV := int(v) - delta + if newV < 0 { + newV = 0 + } + q.table[i] = uint32(newV) + } + } + + // Append src to the history buffer. + e.NextEmit = len(q.history) + q.history = append(q.history, src...) + if q.ChainLength > 0 { + q.chain = append(q.chain, make([]uint16, len(src))...)
+ } + src = q.history + + // matches stores the matches that have been found but not emitted, + // in reverse order. (matches[0] is the most recent one.) + var matches [3]absoluteMatch + for i := e.NextEmit; i < len(src)-7; i++ { + if matches[0] != (absoluteMatch{}) && i >= matches[0].End { + // We have found some matches, and we're far enough along that we probably + // won't find overlapping matches, so we might as well emit them. + if matches[1] != (absoluteMatch{}) { + e.trim(matches[1], matches[0].Start, q.MinLength) + } + e.emit(matches[0]) + matches = [3]absoluteMatch{} + } + + // Calculate and store the hash. + h := ((binary.LittleEndian.Uint64(src[i:]) & (1<<(8*q.HashLen) - 1)) * hashMul64) >> (64 - q.TableBits) + candidate := int(q.table[h]) + q.table[h] = uint32(i) + if q.ChainLength > 0 && candidate != 0 { + delta := i - candidate + if delta < 1<<16 { + q.chain[i] = uint16(delta) + } + } + + if i < matches[0].End && i != matches[0].End+2-q.HashLen { + continue + } + if candidate == 0 || i-candidate > q.MaxDistance { + continue + } + + // Look for a match. + var currentMatch absoluteMatch + + if i-candidate != matches[0].Start-matches[0].Match { + if binary.LittleEndian.Uint32(src[candidate:]) == binary.LittleEndian.Uint32(src[i:]) { + m := extendMatch2(src, i, candidate, e.NextEmit) + if m.End-m.Start > q.MinLength { + currentMatch = m + } + } + } + + for j := 0; j < q.ChainLength; j++ { + delta := q.chain[candidate] + if delta == 0 { + break + } + candidate -= int(delta) + if candidate <= 0 || i-candidate > q.MaxDistance { + break + } + if i-candidate != matches[0].Start-matches[0].Match { + if binary.LittleEndian.Uint32(src[candidate:]) == binary.LittleEndian.Uint32(src[i:]) { + m := extendMatch2(src, i, candidate, e.NextEmit) + if m.End-m.Start > q.MinLength && q.score(m) > q.score(currentMatch) { + currentMatch = m + } + } + } + } + + if currentMatch.End-currentMatch.Start < q.MinLength { + continue + } + + overlapPenalty := 0 + if matches[0] != (absoluteMatch{}) { + overlapPenalty = 275 + if currentMatch.Start <= matches[1].End { + // This match would completely replace the previous match, + // so there is no penalty for overlap. + overlapPenalty = 0 + } + } + + if q.score(currentMatch) <= q.score(matches[0])+overlapPenalty { + continue + } + + matches = [3]absoluteMatch{ + currentMatch, + matches[0], + matches[1], + } + + if matches[2] == (absoluteMatch{}) { + continue + } + + // We have three matches, so it's time to emit one and/or eliminate one. + switch { + case matches[0].Start < matches[2].End: + // The first and third matches overlap; discard the one in between. + matches = [3]absoluteMatch{ + matches[0], + matches[2], + absoluteMatch{}, + } + + case matches[0].Start < matches[2].End+q.MinLength: + // The first and third matches don't overlap, but there's no room for + // another match between them. Emit the first match and discard the second. + e.emit(matches[2]) + matches = [3]absoluteMatch{ + matches[0], + absoluteMatch{}, + absoluteMatch{}, + } + + default: + // Emit the first match, shortening it if necessary to avoid overlap with the second. + e.trim(matches[2], matches[1].Start, q.MinLength) + matches[2] = absoluteMatch{} + } + } + + // We've found all the matches now; emit the remaining ones. 
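To make the DistanceBitCost trade-off in those score comparisons concrete: with DistanceBitCost = 32, a candidate at distance 100 (7 significant bits, so 25 leading zeros) earns 25 * 32 = 800 distance points, while one at distance 65000 (16 bits, 16 leading zeros) earns 16 * 32 = 512. That 288-point gap outweighs the 256 points of one byte of length, so the nearer match wins even if it is one byte shorter.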
+ if matches[1] != (absoluteMatch{}) { + e.trim(matches[1], matches[0].Start, q.MinLength) + } + if matches[0] != (absoluteMatch{}) { + e.emit(matches[0]) + } + + dst = e.Dst + if e.NextEmit < len(src) { + dst = append(dst, Match{ + Unmatched: len(src) - e.NextEmit, + }) + } + + return dst +} + +const hashMul64 = 0x1E35A7BD1E35A7BD + +// extendMatch returns the largest k such that k <= len(src) and that +// src[i:i+k-j] and src[j:k] have the same contents. +// +// It assumes that: +// +// 0 <= i && i < j && j <= len(src) +func extendMatch(src []byte, i, j int) int { + switch runtime.GOARCH { + case "amd64": + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + for j+8 < len(src) { + iBytes := binary.LittleEndian.Uint64(src[i:]) + jBytes := binary.LittleEndian.Uint64(src[j:]) + if iBytes != jBytes { + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + return j + bits.TrailingZeros64(iBytes^jBytes)>>3 + } + i, j = i+8, j+8 + } + case "386": + // On a 32-bit CPU, we do it 4 bytes at a time. + for j+4 < len(src) { + iBytes := binary.LittleEndian.Uint32(src[i:]) + jBytes := binary.LittleEndian.Uint32(src[j:]) + if iBytes != jBytes { + return j + bits.TrailingZeros32(iBytes^jBytes)>>3 + } + i, j = i+4, j+4 + } + } + for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { + } + return j +} + +// Given a 4-byte match at src[start] and src[candidate], extendMatch2 extends it +// upward as far as possible, and downward no farther than to min. +func extendMatch2(src []byte, start, candidate, min int) absoluteMatch { + end := extendMatch(src, candidate+4, start+4) + for start > min && candidate > 0 && src[start-1] == src[candidate-1] { + start-- + candidate-- + } + return absoluteMatch{ + Start: start, + End: end, + Match: candidate, + } +} diff --git a/vendor/github.com/andybalholm/brotli/matchfinder/matchfinder.go b/vendor/github.com/andybalholm/brotli/matchfinder/matchfinder.go new file mode 100644 index 00000000..f6bcfdb3 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/matchfinder/matchfinder.go @@ -0,0 +1,103 @@ +// The matchfinder package defines reusable components for data compression. +// +// Many compression libraries have two main parts: +// - Something that looks for repeated sequences of bytes +// - An encoder for the compressed data format (often an entropy coder) +// +// Although these are logically two separate steps, the implementations are +// usually closely tied together. You can't use flate's matcher with snappy's +// encoder, for example. This package defines interfaces and an intermediate +// representation to allow mixing and matching compression components. +package matchfinder + +import "io" + +// A Match is the basic unit of LZ77 compression. +type Match struct { + Unmatched int // the number of unmatched bytes since the previous match + Length int // the number of bytes in the matched string; it may be 0 at the end of the input + Distance int // how far back in the stream to copy from +} + +// A MatchFinder performs the LZ77 stage of compression, looking for matches. +type MatchFinder interface { + // FindMatches looks for matches in src, appends them to dst, and returns dst. 
+ FindMatches(dst []Match, src []byte) []Match + + // Reset clears any internal state, preparing the MatchFinder to be used with + // a new stream. + Reset() +} + +// An Encoder encodes the data in its final format. +type Encoder interface { + // Encode appends the encoded format of src to dst, using the match + // information from matches. + Encode(dst []byte, src []byte, matches []Match, lastBlock bool) []byte + + // Reset clears any internal state, preparing the Encoder to be used with + // a new stream. + Reset() +} + +// A Writer uses MatchFinder and Encoder to write compressed data to Dest. +type Writer struct { + Dest io.Writer + MatchFinder MatchFinder + Encoder Encoder + + // BlockSize is the number of bytes to compress at a time. If it is zero, + // each Write operation will be treated as one block. + BlockSize int + + err error + inBuf []byte + outBuf []byte + matches []Match +} + +func (w *Writer) Write(p []byte) (n int, err error) { + if w.err != nil { + return 0, w.err + } + + if w.BlockSize == 0 { + return w.writeBlock(p, false) + } + + w.inBuf = append(w.inBuf, p...) + var pos int + for pos = 0; pos+w.BlockSize <= len(w.inBuf) && w.err == nil; pos += w.BlockSize { + w.writeBlock(w.inBuf[pos:pos+w.BlockSize], false) + } + if pos > 0 { + n := copy(w.inBuf, w.inBuf[pos:]) + w.inBuf = w.inBuf[:n] + } + + return len(p), w.err +} + +func (w *Writer) writeBlock(p []byte, lastBlock bool) (n int, err error) { + w.outBuf = w.outBuf[:0] + w.matches = w.MatchFinder.FindMatches(w.matches[:0], p) + w.outBuf = w.Encoder.Encode(w.outBuf, p, w.matches, lastBlock) + _, w.err = w.Dest.Write(w.outBuf) + return len(p), w.err +} + +func (w *Writer) Close() error { + w.writeBlock(w.inBuf, true) + w.inBuf = w.inBuf[:0] + return w.err +} + +func (w *Writer) Reset(newDest io.Writer) { + w.MatchFinder.Reset() + w.Encoder.Reset() + w.err = nil + w.inBuf = w.inBuf[:0] + w.outBuf = w.outBuf[:0] + w.matches = w.matches[:0] + w.Dest = newDest +} diff --git a/vendor/github.com/andybalholm/brotli/matchfinder/textencoder.go b/vendor/github.com/andybalholm/brotli/matchfinder/textencoder.go new file mode 100644 index 00000000..75ecc590 --- /dev/null +++ b/vendor/github.com/andybalholm/brotli/matchfinder/textencoder.go @@ -0,0 +1,53 @@ +package matchfinder + +import "fmt" + +// A TextEncoder is an Encoder that produces a human-readable representation of +// the LZ77 compression. Matches are replaced with symbols. +type TextEncoder struct{} + +func (t TextEncoder) Reset() {} + +func (t TextEncoder) Encode(dst []byte, src []byte, matches []Match, lastBlock bool) []byte { + pos := 0 + for _, m := range matches { + if m.Unmatched > 0 { + dst = append(dst, src[pos:pos+m.Unmatched]...) + pos += m.Unmatched + } + if m.Length > 0 { + dst = append(dst, []byte(fmt.Sprintf("<%d,%d>", m.Length, m.Distance))...) + pos += m.Length + } + } + if pos < len(src) { + dst = append(dst, src[pos:]...) + } + return dst +} + +// A NoMatchFinder implements MatchFinder, but doesn't find any matches. +// It can be used to implement the equivalent of the standard library flate package's +// HuffmanOnly setting. +type NoMatchFinder struct{} + +func (n NoMatchFinder) Reset() {} + +func (n NoMatchFinder) FindMatches(dst []Match, src []byte) []Match { + return append(dst, Match{ + Unmatched: len(src), + }) +} + +// AutoReset wraps a MatchFinder that can return references to data in previous +// blocks, and calls Reset before each block. It is useful for (e.g.) using a +// snappy Encoder with a MatchFinder designed for flate. 
(Snappy doesn't +// support references between blocks.) +type AutoReset struct { + MatchFinder +} + +func (a AutoReset) FindMatches(dst []Match, src []byte) []Match { + a.Reset() + return a.MatchFinder.FindMatches(dst, src) +} diff --git a/vendor/github.com/andybalholm/brotli/writer.go b/vendor/github.com/andybalholm/brotli/writer.go index 39feaef5..8a688117 100644 --- a/vendor/github.com/andybalholm/brotli/writer.go +++ b/vendor/github.com/andybalholm/brotli/writer.go @@ -3,6 +3,8 @@ package brotli import ( "errors" "io" + + "github.com/andybalholm/brotli/matchfinder" ) const ( @@ -117,3 +119,44 @@ type nopCloser struct { } func (nopCloser) Close() error { return nil } + +// NewWriterV2 is like NewWriterLevel, but it uses the new implementation +// based on the matchfinder package. It currently supports up to level 7; +// if a higher level is specified, level 7 will be used. +func NewWriterV2(dst io.Writer, level int) *matchfinder.Writer { + var mf matchfinder.MatchFinder + if level < 2 { + mf = matchfinder.M0{Lazy: level == 1} + } else { + hashLen := 6 + if level >= 6 { + hashLen = 5 + } + chainLen := 64 + switch level { + case 2: + chainLen = 0 + case 3: + chainLen = 1 + case 4: + chainLen = 2 + case 5: + chainLen = 4 + case 6: + chainLen = 8 + } + mf = &matchfinder.M4{ + MaxDistance: 1 << 20, + ChainLength: chainLen, + HashLen: hashLen, + DistanceBitCost: 57, + } + } + + return &matchfinder.Writer{ + Dest: dst, + MatchFinder: mf, + Encoder: &Encoder{}, + BlockSize: 1 << 16, + } +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/list.go b/vendor/github.com/apache/arrow/go/v12/arrow/array/list.go deleted file mode 100644 index 168798f0..00000000 --- a/vendor/github.com/apache/arrow/go/v12/arrow/array/list.go +++ /dev/null @@ -1,589 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package array - -import ( - "bytes" - "fmt" - "strings" - "sync/atomic" - - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/arrow/internal/debug" - "github.com/apache/arrow/go/v12/arrow/memory" - "github.com/goccy/go-json" -) - -type ListLike interface { - arrow.Array - ListValues() arrow.Array - ValueOffsets(i int) (start, end int64) -} - -// List represents an immutable sequence of array values. -type List struct { - array - values arrow.Array - offsets []int32 -} - -// NewListData returns a new List array value, from data. 
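As elsewhere in Arrow, a List keeps all of its elements in one flat child array plus an int32 offsets buffer: the column [[1, 2], [], [3, 4, 5]] is stored as values [1 2 3 4 5] with offsets [0 2 2 5], so element i spans values[offsets[i]:offsets[i+1]], which is exactly the pair that ValueOffsets below returns.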
-func NewListData(data arrow.ArrayData) *List { - a := &List{} - a.refCount = 1 - a.setData(data.(*Data)) - return a -} - -func (a *List) ListValues() arrow.Array { return a.values } - -func (a *List) String() string { - o := new(strings.Builder) - o.WriteString("[") - for i := 0; i < a.Len(); i++ { - if i > 0 { - o.WriteString(" ") - } - if !a.IsValid(i) { - o.WriteString("(null)") - continue - } - sub := a.newListValue(i) - fmt.Fprintf(o, "%v", sub) - sub.Release() - } - o.WriteString("]") - return o.String() -} - -func (a *List) newListValue(i int) arrow.Array { - j := i + a.array.data.offset - beg := int64(a.offsets[j]) - end := int64(a.offsets[j+1]) - return NewSlice(a.values, beg, end) -} - -func (a *List) setData(data *Data) { - a.array.setData(data) - vals := data.buffers[1] - if vals != nil { - a.offsets = arrow.Int32Traits.CastFromBytes(vals.Bytes()) - } - a.values = MakeFromData(data.childData[0]) -} - -func (a *List) GetOneForMarshal(i int) interface{} { - if a.IsNull(i) { - return nil - } - - slice := a.newListValue(i) - defer slice.Release() - v, err := json.Marshal(slice) - if err != nil { - panic(err) - } - return json.RawMessage(v) -} - -func (a *List) MarshalJSON() ([]byte, error) { - var buf bytes.Buffer - enc := json.NewEncoder(&buf) - - buf.WriteByte('[') - for i := 0; i < a.Len(); i++ { - if i != 0 { - buf.WriteByte(',') - } - if err := enc.Encode(a.GetOneForMarshal(i)); err != nil { - return nil, err - } - } - buf.WriteByte(']') - return buf.Bytes(), nil -} - -func arrayEqualList(left, right *List) bool { - for i := 0; i < left.Len(); i++ { - if left.IsNull(i) { - continue - } - o := func() bool { - l := left.newListValue(i) - defer l.Release() - r := right.newListValue(i) - defer r.Release() - return Equal(l, r) - }() - if !o { - return false - } - } - return true -} - -// Len returns the number of elements in the array. -func (a *List) Len() int { return a.array.Len() } - -func (a *List) Offsets() []int32 { return a.offsets } - -func (a *List) Retain() { - a.array.Retain() - a.values.Retain() -} - -func (a *List) Release() { - a.array.Release() - a.values.Release() -} - -func (a *List) ValueOffsets(i int) (start, end int64) { - debug.Assert(i >= 0 && i < a.array.data.length, "index out of range") - start, end = int64(a.offsets[i+a.data.offset]), int64(a.offsets[i+a.data.offset+1]) - return -} - -// LargeList represents an immutable sequence of array values. -type LargeList struct { - array - values arrow.Array - offsets []int64 -} - -// NewLargeListData returns a new LargeList array value, from data. 
-func NewLargeListData(data arrow.ArrayData) *LargeList { - a := new(LargeList) - a.refCount = 1 - a.setData(data.(*Data)) - return a -} - -func (a *LargeList) ListValues() arrow.Array { return a.values } - -func (a *LargeList) String() string { - o := new(strings.Builder) - o.WriteString("[") - for i := 0; i < a.Len(); i++ { - if i > 0 { - o.WriteString(" ") - } - if !a.IsValid(i) { - o.WriteString("(null)") - continue - } - sub := a.newListValue(i) - fmt.Fprintf(o, "%v", sub) - sub.Release() - } - o.WriteString("]") - return o.String() -} - -func (a *LargeList) newListValue(i int) arrow.Array { - j := i + a.array.data.offset - beg := int64(a.offsets[j]) - end := int64(a.offsets[j+1]) - return NewSlice(a.values, beg, end) -} - -func (a *LargeList) setData(data *Data) { - a.array.setData(data) - vals := data.buffers[1] - if vals != nil { - a.offsets = arrow.Int64Traits.CastFromBytes(vals.Bytes()) - } - a.values = MakeFromData(data.childData[0]) -} - -func (a *LargeList) GetOneForMarshal(i int) interface{} { - if a.IsNull(i) { - return nil - } - - slice := a.newListValue(i) - defer slice.Release() - v, err := json.Marshal(slice) - if err != nil { - panic(err) - } - return json.RawMessage(v) -} - -func (a *LargeList) MarshalJSON() ([]byte, error) { - var buf bytes.Buffer - enc := json.NewEncoder(&buf) - - buf.WriteByte('[') - for i := 0; i < a.Len(); i++ { - if i != 0 { - buf.WriteByte(',') - } - if err := enc.Encode(a.GetOneForMarshal(i)); err != nil { - return nil, err - } - } - buf.WriteByte(']') - return buf.Bytes(), nil -} - -func arrayEqualLargeList(left, right *LargeList) bool { - for i := 0; i < left.Len(); i++ { - if left.IsNull(i) { - continue - } - o := func() bool { - l := left.newListValue(i) - defer l.Release() - r := right.newListValue(i) - defer r.Release() - return Equal(l, r) - }() - if !o { - return false - } - } - return true -} - -// Len returns the number of elements in the array. -func (a *LargeList) Len() int { return a.array.Len() } - -func (a *LargeList) Offsets() []int64 { return a.offsets } - -func (a *LargeList) ValueOffsets(i int) (start, end int64) { - debug.Assert(i >= 0 && i < a.array.data.length, "index out of range") - start, end = a.offsets[i], a.offsets[i+1] - return -} - -func (a *LargeList) Retain() { - a.array.Retain() - a.values.Retain() -} - -func (a *LargeList) Release() { - a.array.Release() - a.values.Release() -} - -type baseListBuilder struct { - builder - - values Builder // value builder for the list's elements. - offsets Builder - - // actual list type - dt arrow.DataType - appendOffsetVal func(int) -} - -type ListLikeBuilder interface { - Builder - ValueBuilder() Builder - Append(bool) -} - -type ListBuilder struct { - baseListBuilder -} - -type LargeListBuilder struct { - baseListBuilder -} - -// NewListBuilder returns a builder, using the provided memory allocator. -// The created list builder will create a list whose elements will be of type etype. -func NewListBuilder(mem memory.Allocator, etype arrow.DataType) *ListBuilder { - offsetBldr := NewInt32Builder(mem) - return &ListBuilder{ - baseListBuilder{ - builder: builder{refCount: 1, mem: mem}, - values: NewBuilder(mem, etype), - offsets: offsetBldr, - dt: arrow.ListOf(etype), - appendOffsetVal: func(o int) { offsetBldr.Append(int32(o)) }, - }, - } -} - -// NewListBuilderWithField takes a field to use for the child rather than just -// a datatype to allow for more customization. 
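Concretely, a list is built by alternating Append(true) or AppendNull() on the list builder with appends to its ValueBuilder; each Append records the current value count as the next offset. A short sketch against the v12 API that this patch drops from the vendor tree:

package main

import (
	"fmt"

	"github.com/apache/arrow/go/v12/arrow"
	"github.com/apache/arrow/go/v12/arrow/array"
	"github.com/apache/arrow/go/v12/arrow/memory"
)

func main() {
	mem := memory.NewGoAllocator()
	b := array.NewListBuilder(mem, arrow.PrimitiveTypes.Int64)
	defer b.Release()
	vb := b.ValueBuilder().(*array.Int64Builder)

	b.Append(true) // start [1, 2]
	vb.AppendValues([]int64{1, 2}, nil)
	b.AppendNull() // a null list
	b.Append(true) // start [3]
	vb.Append(3)

	arr := b.NewListArray()
	defer arr.Release()
	fmt.Println(arr) // prints something like [[1 2] (null) [3]]
}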
-func NewListBuilderWithField(mem memory.Allocator, field arrow.Field) *ListBuilder { - offsetBldr := NewInt32Builder(mem) - return &ListBuilder{ - baseListBuilder{ - builder: builder{refCount: 1, mem: mem}, - values: NewBuilder(mem, field.Type), - offsets: offsetBldr, - dt: arrow.ListOfField(field), - appendOffsetVal: func(o int) { offsetBldr.Append(int32(o)) }, - }, - } -} - -func (b *baseListBuilder) Type() arrow.DataType { - switch dt := b.dt.(type) { - case *arrow.ListType: - f := dt.ElemField() - f.Type = b.values.Type() - return arrow.ListOfField(f) - case *arrow.LargeListType: - f := dt.ElemField() - f.Type = b.values.Type() - return arrow.LargeListOfField(f) - } - return nil -} - -// NewLargeListBuilder returns a builder, using the provided memory allocator. -// The created list builder will create a list whose elements will be of type etype. -func NewLargeListBuilder(mem memory.Allocator, etype arrow.DataType) *LargeListBuilder { - offsetBldr := NewInt64Builder(mem) - return &LargeListBuilder{ - baseListBuilder{ - builder: builder{refCount: 1, mem: mem}, - values: NewBuilder(mem, etype), - offsets: offsetBldr, - dt: arrow.LargeListOf(etype), - appendOffsetVal: func(o int) { offsetBldr.Append(int64(o)) }, - }, - } -} - -// NewLargeListBuilderWithField takes a field rather than just an element type -// to allow for more customization of the final type of the LargeList Array -func NewLargeListBuilderWithField(mem memory.Allocator, field arrow.Field) *LargeListBuilder { - offsetBldr := NewInt64Builder(mem) - return &LargeListBuilder{ - baseListBuilder{ - builder: builder{refCount: 1, mem: mem}, - values: NewBuilder(mem, field.Type), - offsets: offsetBldr, - dt: arrow.LargeListOfField(field), - appendOffsetVal: func(o int) { offsetBldr.Append(int64(o)) }, - }, - } -} - -// Release decreases the reference count by 1. -// When the reference count goes to zero, the memory is freed. -func (b *baseListBuilder) Release() { - debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") - - if atomic.AddInt64(&b.refCount, -1) == 0 { - if b.nullBitmap != nil { - b.nullBitmap.Release() - b.nullBitmap = nil - } - b.values.Release() - b.offsets.Release() - } - -} - -func (b *baseListBuilder) appendNextOffset() { - b.appendOffsetVal(b.values.Len()) -} - -func (b *baseListBuilder) Append(v bool) { - b.Reserve(1) - b.unsafeAppendBoolToBitmap(v) - b.appendNextOffset() -} - -func (b *baseListBuilder) AppendNull() { - b.Reserve(1) - b.unsafeAppendBoolToBitmap(false) - b.appendNextOffset() -} - -func (b *baseListBuilder) AppendEmptyValue() { - b.Append(true) -} - -func (b *ListBuilder) AppendValues(offsets []int32, valid []bool) { - b.Reserve(len(valid)) - b.offsets.(*Int32Builder).AppendValues(offsets, nil) - b.builder.unsafeAppendBoolsToBitmap(valid, len(valid)) -} - -func (b *LargeListBuilder) AppendValues(offsets []int64, valid []bool) { - b.Reserve(len(valid)) - b.offsets.(*Int64Builder).AppendValues(offsets, nil) - b.builder.unsafeAppendBoolsToBitmap(valid, len(valid)) -} - -func (b *baseListBuilder) unsafeAppendBoolToBitmap(isValid bool) { - if isValid { - bitutil.SetBit(b.nullBitmap.Bytes(), b.length) - } else { - b.nulls++ - } - b.length++ -} - -func (b *baseListBuilder) init(capacity int) { - b.builder.init(capacity) - b.offsets.init(capacity + 1) -} - -// Reserve ensures there is enough space for appending n elements -// by checking the capacity and calling Resize if necessary. 
-func (b *baseListBuilder) Reserve(n int) { - b.builder.reserve(n, b.resizeHelper) - b.offsets.Reserve(n) -} - -// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), -// additional memory will be allocated. If n is smaller, the allocated memory may reduced. -func (b *baseListBuilder) Resize(n int) { - b.resizeHelper(n) - b.offsets.Resize(n) -} - -func (b *baseListBuilder) resizeHelper(n int) { - if n < minBuilderCapacity { - n = minBuilderCapacity - } - - if b.capacity == 0 { - b.init(n) - } else { - b.builder.resize(n, b.builder.init) - } -} - -func (b *baseListBuilder) ValueBuilder() Builder { - return b.values -} - -// NewArray creates a List array from the memory buffers used by the builder and resets the ListBuilder -// so it can be used to build a new array. -func (b *ListBuilder) NewArray() arrow.Array { - return b.NewListArray() -} - -// NewArray creates a LargeList array from the memory buffers used by the builder and resets the LargeListBuilder -// so it can be used to build a new array. -func (b *LargeListBuilder) NewArray() arrow.Array { - return b.NewLargeListArray() -} - -// NewListArray creates a List array from the memory buffers used by the builder and resets the ListBuilder -// so it can be used to build a new array. -func (b *ListBuilder) NewListArray() (a *List) { - data := b.newData() - a = NewListData(data) - data.Release() - return -} - -// NewLargeListArray creates a List array from the memory buffers used by the builder and resets the LargeListBuilder -// so it can be used to build a new array. -func (b *LargeListBuilder) NewLargeListArray() (a *LargeList) { - data := b.newData() - a = NewLargeListData(data) - data.Release() - return -} - -func (b *baseListBuilder) newData() (data *Data) { - if b.offsets.Len() != b.length+1 { - b.appendNextOffset() - } - values := b.values.NewArray() - defer values.Release() - - var offsets *memory.Buffer - if b.offsets != nil { - arr := b.offsets.NewArray() - defer arr.Release() - offsets = arr.Data().Buffers()[1] - } - - data = NewData( - b.Type(), b.length, - []*memory.Buffer{ - b.nullBitmap, - offsets, - }, - []arrow.ArrayData{values.Data()}, - b.nulls, - 0, - ) - b.reset() - - return -} - -func (b *baseListBuilder) UnmarshalOne(dec *json.Decoder) error { - t, err := dec.Token() - if err != nil { - return err - } - - switch t { - case json.Delim('['): - b.Append(true) - if err := b.values.Unmarshal(dec); err != nil { - return err - } - // consume ']' - _, err := dec.Token() - return err - case nil: - b.AppendNull() - default: - return &json.UnmarshalTypeError{ - Value: fmt.Sprint(t), - Struct: b.dt.String(), - } - } - - return nil -} - -func (b *baseListBuilder) Unmarshal(dec *json.Decoder) error { - for dec.More() { - if err := b.UnmarshalOne(dec); err != nil { - return err - } - } - return nil -} - -func (b *baseListBuilder) UnmarshalJSON(data []byte) error { - dec := json.NewDecoder(bytes.NewReader(data)) - t, err := dec.Token() - if err != nil { - return err - } - - if delim, ok := t.(json.Delim); !ok || delim != '[' { - return fmt.Errorf("list builder must unpack from json array, found %s", delim) - } - - return b.Unmarshal(dec) -} - -var ( - _ arrow.Array = (*List)(nil) - _ arrow.Array = (*LargeList)(nil) - _ Builder = (*ListBuilder)(nil) - _ Builder = (*LargeListBuilder)(nil) -) diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/.gitignore b/vendor/github.com/apache/arrow/go/v12/parquet/.gitignore deleted file mode 100644 index 4120c511..00000000 --- 
a/vendor/github.com/apache/arrow/go/v12/parquet/.gitignore +++ /dev/null @@ -1,31 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Binaries for programs and plugins -*.exe -*.exe~ -*.dll -*.so -*.dylib - -# Test binary, built with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Dependency directories (remove the comment below to include it) -# vendor/ diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/compress/brotli.go b/vendor/github.com/apache/arrow/go/v12/parquet/compress/brotli.go deleted file mode 100644 index a79a4f89..00000000 --- a/vendor/github.com/apache/arrow/go/v12/parquet/compress/brotli.go +++ /dev/null @@ -1,115 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package compress - -import ( - "bytes" - "io" - "io/ioutil" - - "github.com/andybalholm/brotli" - "github.com/apache/arrow/go/v12/parquet/internal/debug" -) - -type brotliCodec struct{} - -func (brotliCodec) NewReader(r io.Reader) io.ReadCloser { - return ioutil.NopCloser(brotli.NewReader(r)) -} - -func (b brotliCodec) EncodeLevel(dst, src []byte, level int) []byte { - if level == DefaultCompressionLevel { - level = brotli.DefaultCompression - } - - maxlen := int(b.CompressBound(int64(len(src)))) - if dst == nil || cap(dst) < maxlen { - dst = make([]byte, 0, maxlen) - } - buf := bytes.NewBuffer(dst[:0]) - w := brotli.NewWriterLevel(buf, level) - _, err := w.Write(src) - if err != nil { - panic(err) - } - if err := w.Close(); err != nil { - panic(err) - } - return buf.Bytes() -} - -func (b brotliCodec) Encode(dst, src []byte) []byte { - return b.EncodeLevel(dst, src, brotli.DefaultCompression) -} - -func (brotliCodec) Decode(dst, src []byte) []byte { - rdr := brotli.NewReader(bytes.NewReader(src)) - if dst != nil { - var ( - sofar = 0 - n = -1 - err error = nil - ) - for n != 0 && err == nil { - n, err = rdr.Read(dst[sofar:]) - sofar += n - } - if err != nil && err != io.EOF { - panic(err) - } - return dst[:sofar] - } - - dst, err := ioutil.ReadAll(rdr) - if err != nil { - panic(err) - } - - return dst -} - -// taken from brotli/enc/encode.c:1426 -// BrotliEncoderMaxCompressedSize -func (brotliCodec) CompressBound(len int64) int64 { - // [window bits / empty metadata] + N * [uncompressed] + [last empty] - debug.Assert(len > 0, "brotli compressbound should be > 0") - nlarge := len >> 14 - overhead := 2 + (4 * nlarge) + 3 + 1 - result := len + overhead - if len == 0 { - return 2 - } - if result < len { - return 0 - } - return len -} - -func (brotliCodec) NewWriter(w io.Writer) io.WriteCloser { - return brotli.NewWriter(w) -} - -func (brotliCodec) NewWriterLevel(w io.Writer, level int) (io.WriteCloser, error) { - if level == DefaultCompressionLevel { - level = brotli.DefaultCompression - } - return brotli.NewWriterLevel(w, level), nil -} - -func init() { - codecs[Codecs.Brotli] = brotliCodec{} -} diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/compress/compress.go b/vendor/github.com/apache/arrow/go/v12/parquet/compress/compress.go deleted file mode 100644 index 09282a31..00000000 --- a/vendor/github.com/apache/arrow/go/v12/parquet/compress/compress.go +++ /dev/null @@ -1,156 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package compress contains the interfaces and implementations for handling compression/decompression -// of parquet data at the column levels. 
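In practice the deleted package was consumed through GetCodec and the Codec interface defined below. A minimal round-trip sketch against this (now removed) v12 API:

package main

import (
	"fmt"

	"github.com/apache/arrow/go/v12/parquet/compress"
)

func main() {
	codec, err := compress.GetCodec(compress.Codecs.Snappy)
	if err != nil {
		panic(err)
	}

	src := []byte("parquet parquet parquet")
	// With a nil dst, Encode allocates a buffer itself
	// (sized up to CompressBound bytes).
	enc := codec.Encode(nil, src)

	// Decode needs dst to be nil or large enough for the uncompressed data.
	dec := codec.Decode(make([]byte, len(src)), enc)
	fmt.Println(string(dec))
}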
-package compress - -import ( - "compress/flate" - "fmt" - "io" - "io/ioutil" - - "github.com/apache/arrow/go/v12/parquet/internal/gen-go/parquet" -) - -// Compression is an alias to the thrift compression codec enum type for easy use -type Compression parquet.CompressionCodec - -func (c Compression) String() string { - return parquet.CompressionCodec(c).String() -} - -// DefaultCompressionLevel will use flate.DefaultCompression since many of the compression libraries -// use that to denote "use the default". -const DefaultCompressionLevel = flate.DefaultCompression - -// Codecs is a useful struct to provide namespaced enum values to use for specifying the compression type to use -// which make for easy internal swapping between them and the thrift enum since they are initialized to the same -// constant values. -var Codecs = struct { - Uncompressed Compression - Snappy Compression - Gzip Compression - // LZO is unsupported in this library since LZO license is incompatible with Apache License - Lzo Compression - Brotli Compression - // LZ4 unsupported in this library due to problematic issues between the Hadoop LZ4 spec vs regular lz4 - // see: http://mail-archives.apache.org/mod_mbox/arrow-dev/202007.mbox/%3CCAAri41v24xuA8MGHLDvgSnE+7AAgOhiEukemW_oPNHMvfMmrWw@mail.gmail.com%3E - Lz4 Compression - Zstd Compression -}{ - Uncompressed: Compression(parquet.CompressionCodec_UNCOMPRESSED), - Snappy: Compression(parquet.CompressionCodec_SNAPPY), - Gzip: Compression(parquet.CompressionCodec_GZIP), - Lzo: Compression(parquet.CompressionCodec_LZO), - Brotli: Compression(parquet.CompressionCodec_BROTLI), - Lz4: Compression(parquet.CompressionCodec_LZ4), - Zstd: Compression(parquet.CompressionCodec_ZSTD), -} - -// Codec is an interface which is implemented for each compression type in order to make the interactions easy to -// implement. Most consumers won't be calling GetCodec directly. -type Codec interface { - // NewReader provides a reader that wraps a stream with compressed data to stream the uncompressed data - NewReader(io.Reader) io.ReadCloser - // NewWriter provides a wrapper around a write stream to compress data before writing it. - NewWriter(io.Writer) io.WriteCloser - // NewWriterLevel is like NewWriter but allows specifying the compression level - NewWriterLevel(io.Writer, int) (io.WriteCloser, error) - // Encode encodes a block of data given by src and returns the compressed block. dst should be either nil - // or sized large enough to fit the compressed block (use CompressBound to allocate). dst and src should not - // overlap since some of the compression types don't allow it. - // - // The returned slice will be one of the following: - // 1. If dst was nil or dst was too small to fit the compressed data, it will be a newly allocated slice - // 2. If dst was large enough to fit the compressed data (depending on the compression algorithm it might - // be required to be at least CompressBound length) then it might be a slice of dst. - Encode(dst, src []byte) []byte - // EncodeLevel is like Encode, but specifies a particular encoding level instead of the default. - EncodeLevel(dst, src []byte, level int) []byte - // CompressBound returns the boundary of maximum size of compressed data under the chosen codec. - CompressBound(int64) int64 - // Decode is for decoding a single block rather than a stream, like with Encode, dst must be either nil or - // sized large enough to accommodate the uncompressed data and should not overlap with src. 
- // - // the returned slice *might* be a slice of dst. - Decode(dst, src []byte) []byte -} - -var codecs = map[Compression]Codec{} - -type nocodec struct{} - -func (nocodec) NewReader(r io.Reader) io.ReadCloser { - ret, ok := r.(io.ReadCloser) - if !ok { - return ioutil.NopCloser(r) - } - return ret -} - -func (nocodec) Decode(dst, src []byte) []byte { - if dst != nil { - copy(dst, src) - } - return dst -} - -type writerNopCloser struct { - io.Writer -} - -func (writerNopCloser) Close() error { - return nil -} - -func (nocodec) Encode(dst, src []byte) []byte { - copy(dst, src) - return dst -} - -func (nocodec) EncodeLevel(dst, src []byte, _ int) []byte { - copy(dst, src) - return dst -} - -func (nocodec) NewWriter(w io.Writer) io.WriteCloser { - ret, ok := w.(io.WriteCloser) - if !ok { - return writerNopCloser{w} - } - return ret -} - -func (n nocodec) NewWriterLevel(w io.Writer, _ int) (io.WriteCloser, error) { - return n.NewWriter(w), nil -} - -func (nocodec) CompressBound(len int64) int64 { return len } - -func init() { - codecs[Codecs.Uncompressed] = nocodec{} -} - -// GetCodec returns a Codec interface for the requested Compression type -func GetCodec(typ Compression) (Codec, error) { - ret, ok := codecs[typ] - if !ok { - return nil, fmt.Errorf("compression for %s unimplemented", typ.String()) - } - return ret, nil -} diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/compress/gzip.go b/vendor/github.com/apache/arrow/go/v12/parquet/compress/gzip.go deleted file mode 100644 index c7514380..00000000 --- a/vendor/github.com/apache/arrow/go/v12/parquet/compress/gzip.go +++ /dev/null @@ -1,98 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package compress - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - - "github.com/klauspost/compress/gzip" -) - -type gzipCodec struct{} - -func (gzipCodec) NewReader(r io.Reader) io.ReadCloser { - ret, err := gzip.NewReader(r) - if err != nil { - panic(fmt.Errorf("codec: gzip: %w", err)) - } - return ret -} - -func (gzipCodec) Decode(dst, src []byte) []byte { - rdr, err := gzip.NewReader(bytes.NewReader(src)) - if err != nil { - panic(err) - } - - if dst != nil { - n, err := io.ReadFull(rdr, dst) - if err != nil { - panic(err) - } - return dst[:n] - } - - dst, err = ioutil.ReadAll(rdr) - if err != nil { - panic(err) - } - - return dst -} - -func (g gzipCodec) EncodeLevel(dst, src []byte, level int) []byte { - maxlen := int(g.CompressBound(int64(len(src)))) - if dst == nil || cap(dst) < maxlen { - dst = make([]byte, 0, maxlen) - } - buf := bytes.NewBuffer(dst[:0]) - w, err := gzip.NewWriterLevel(buf, level) - if err != nil { - panic(err) - } - _, err = w.Write(src) - if err != nil { - panic(err) - } - if err := w.Close(); err != nil { - panic(err) - } - return buf.Bytes() -} - -func (g gzipCodec) Encode(dst, src []byte) []byte { - return g.EncodeLevel(dst, src, DefaultCompressionLevel) -} - -func (gzipCodec) CompressBound(len int64) int64 { - return len + ((len + 7) >> 3) + ((len + 63) >> 6) + 5 -} - -func (gzipCodec) NewWriter(w io.Writer) io.WriteCloser { - return gzip.NewWriter(w) -} - -func (gzipCodec) NewWriterLevel(w io.Writer, level int) (io.WriteCloser, error) { - return gzip.NewWriterLevel(w, level) -} - -func init() { - codecs[Codecs.Gzip] = gzipCodec{} -} diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/compress/snappy.go b/vendor/github.com/apache/arrow/go/v12/parquet/compress/snappy.go deleted file mode 100644 index 6468df78..00000000 --- a/vendor/github.com/apache/arrow/go/v12/parquet/compress/snappy.go +++ /dev/null @@ -1,62 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package compress - -import ( - "io" - "io/ioutil" - - "github.com/golang/snappy" -) - -type snappyCodec struct{} - -func (snappyCodec) Encode(dst, src []byte) []byte { - return snappy.Encode(dst, src) -} - -func (snappyCodec) EncodeLevel(dst, src []byte, _ int) []byte { - return snappy.Encode(dst, src) -} - -func (snappyCodec) Decode(dst, src []byte) []byte { - dst, err := snappy.Decode(dst, src) - if err != nil { - panic(err) - } - return dst -} - -func (snappyCodec) NewReader(r io.Reader) io.ReadCloser { - return ioutil.NopCloser(snappy.NewReader(r)) -} - -func (snappyCodec) CompressBound(len int64) int64 { - return int64(snappy.MaxEncodedLen(int(len))) -} - -func (snappyCodec) NewWriter(w io.Writer) io.WriteCloser { - return snappy.NewBufferedWriter(w) -} - -func (s snappyCodec) NewWriterLevel(w io.Writer, _ int) (io.WriteCloser, error) { - return s.NewWriter(w), nil -} - -func init() { - codecs[Codecs.Snappy] = snappyCodec{} -} diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/compress/zstd.go b/vendor/github.com/apache/arrow/go/v12/parquet/compress/zstd.go deleted file mode 100644 index 5db24f04..00000000 --- a/vendor/github.com/apache/arrow/go/v12/parquet/compress/zstd.go +++ /dev/null @@ -1,112 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package compress - -import ( - "io" - "sync" - - "github.com/apache/arrow/go/v12/parquet/internal/debug" - "github.com/klauspost/compress/zstd" -) - -type zstdCodec struct{} - -type zstdcloser struct { - *zstd.Decoder -} - -var ( - enc *zstd.Encoder - dec *zstd.Decoder - initEncoder sync.Once - initDecoder sync.Once -) - -func getencoder() *zstd.Encoder { - initEncoder.Do(func() { - enc, _ = zstd.NewWriter(nil, zstd.WithZeroFrames(true)) - }) - return enc -} - -func getdecoder() *zstd.Decoder { - initDecoder.Do(func() { - dec, _ = zstd.NewReader(nil) - }) - return dec -} - -func (zstdCodec) Decode(dst, src []byte) []byte { - dst, err := getdecoder().DecodeAll(src, dst[:0]) - if err != nil { - panic(err) - } - return dst -} - -func (z *zstdcloser) Close() error { - z.Decoder.Close() - return nil -} - -func (zstdCodec) NewReader(r io.Reader) io.ReadCloser { - ret, _ := zstd.NewReader(r) - return &zstdcloser{ret} -} - -func (zstdCodec) NewWriter(w io.Writer) io.WriteCloser { - ret, _ := zstd.NewWriter(w) - return ret -} - -func (zstdCodec) NewWriterLevel(w io.Writer, level int) (io.WriteCloser, error) { - var compressLevel zstd.EncoderLevel - if level == DefaultCompressionLevel { - compressLevel = zstd.SpeedDefault - } else { - compressLevel = zstd.EncoderLevelFromZstd(level) - } - return zstd.NewWriter(w, zstd.WithEncoderLevel(compressLevel)) -} - -func (z zstdCodec) Encode(dst, src []byte) []byte { - return getencoder().EncodeAll(src, dst[:0]) -} - -func (z zstdCodec) EncodeLevel(dst, src []byte, level int) []byte { - compressLevel := zstd.EncoderLevelFromZstd(level) - if level == DefaultCompressionLevel { - compressLevel = zstd.SpeedDefault - } - enc, _ := zstd.NewWriter(nil, zstd.WithZeroFrames(true), zstd.WithEncoderLevel(compressLevel)) - return enc.EncodeAll(src, dst[:0]) -} - -// from zstd.h, ZSTD_COMPRESSBOUND -func (zstdCodec) CompressBound(len int64) int64 { - debug.Assert(len > 0, "len for zstd CompressBound should be > 0") - extra := ((128 << 10) - len) >> 11 - if len >= (128 << 10) { - extra = 0 - } - return len + (len >> 8) + extra -} - -func init() { - codecs[Codecs.Zstd] = zstdCodec{} -} diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/doc.go b/vendor/github.com/apache/arrow/go/v12/parquet/doc.go deleted file mode 100644 index c3875996..00000000 --- a/vendor/github.com/apache/arrow/go/v12/parquet/doc.go +++ /dev/null @@ -1,70 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package parquet provides an implementation of Apache Parquet for Go. -// -// Apache Parquet is an open-source columnar data storage format using the record -// shredding and assembly algorithm to accommodate complex data structures which -// can then be used to efficiently store the data. 
-// -// While the go.mod states go1.18, everything here should be compatible -// with go versions 1.17 and 1.16. -// -// This implementation is a native go implementation for reading and writing the -// parquet file format. -// -// Install -// -// You can download the library and cli utilities via: -// go get -u github.com/apache/arrow/go/v12/parquet -// go install github.com/apache/arrow/go/v12/parquet/cmd/parquet_reader@latest -// go install github.com/apache/arrow/go/v12/parquet/cmd/parquet_schema@latest -// -// Modules -// -// This top level parquet package contains the basic common types and reader/writer -// properties along with some utilities that are used throughout the other modules. -// -// The file module contains the functions for directly reading/writing parquet files -// including Column Readers and Column Writers. -// -// The metadata module contains the types for managing the lower level file/rowgroup/column -// metadata inside of a ParquetFile including inspecting the statistics. -// -// The pqarrow module contains helper functions and types for converting directly -// between Parquet and Apache Arrow formats. -// -// The schema module contains the types for manipulating / inspecting / creating -// parquet file schemas. -// -// Primitive Types -// -// The Parquet Primitive Types and their corresponding Go types are Boolean (bool), -// Int32 (int32), Int64 (int64), Int96 (parquet.Int96), Float (float32), Double (float64), -// ByteArray (parquet.ByteArray) and FixedLenByteArray (parquet.FixedLenByteArray). -// -// Encodings -// -// The encoding types supported in this package are: -// Plain, Plain/RLE Dictionary, Delta Binary Packed (only integer types), Delta Byte Array -// (only ByteArray), Delta Length Byte Array (only ByteArray) -// -// Tip: Some platforms don't necessarily support all kinds of encodings. If you're not -// sure what to use, just use Plain and Dictionary encoding. -package parquet - -//go:generate go run golang.org/x/tools/cmd/stringer -type=Version -linecomment -//go:generate thrift -o internal -r --gen go ../../cpp/src/parquet/parquet.thrift diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/encryption_properties.go b/vendor/github.com/apache/arrow/go/v12/parquet/encryption_properties.go deleted file mode 100644 index 660ee786..00000000 --- a/vendor/github.com/apache/arrow/go/v12/parquet/encryption_properties.go +++ /dev/null @@ -1,711 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
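Per-column keys are configured with the option pattern shown below; keys are raw AES keys of 16, 24, or 32 bytes, and a key ID can be attached as retrieval metadata. An illustrative sketch of the v12 API being removed here:

package main

import (
	"fmt"

	"github.com/apache/arrow/go/v12/parquet"
)

func main() {
	// 16-byte key for AES-128; 24- and 32-byte keys are also accepted.
	colProps := parquet.NewColumnEncryptionProperties("ssn",
		parquet.WithKey("0123456789abcdef"),
		parquet.WithKeyID("kc1"),
	)
	// The column has its own key, so it is not encrypted with the footer key.
	fmt.Println(colProps.ColumnPath(), colProps.IsEncrypted(), colProps.IsEncryptedWithFooterKey())
}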
- -package parquet - -import ( - "crypto/rand" - "unicode/utf8" - - format "github.com/apache/arrow/go/v12/parquet/internal/gen-go/parquet" -) - -// Constants that will be used as the default values with encryption/decryption -const ( - // By default we'll use AesGCM as our encryption algorithm - DefaultEncryptionAlgorithm = AesGcm - MaximalAadMetadataLength int32 = 256 - // if encryption is turned on, we will default to also encrypting the footer - DefaultEncryptedFooter = true - DefaultCheckSignature = true - // by default if you set the file decryption properties, we will error - // on any plaintext files unless otherwise specified. - DefaultAllowPlaintextFiles = false - AadFileUniqueLength int32 = 8 -) - -// ColumnPathToDecryptionPropsMap maps column paths to decryption properties -type ColumnPathToDecryptionPropsMap map[string]*ColumnDecryptionProperties - -// ColumnPathToEncryptionPropsMap maps column paths to encryption properties -type ColumnPathToEncryptionPropsMap map[string]*ColumnEncryptionProperties - -// ColumnEncryptionProperties specifies how to encrypt a given column -type ColumnEncryptionProperties struct { - columnPath string - encrypted bool - encryptedWithFooterKey bool - key string - keyMetadata string - utilized bool -} - -// ColumnPath returns which column these properties are for -func (ce *ColumnEncryptionProperties) ColumnPath() string { - return ce.columnPath -} - -// IsEncrypted returns true if this column is encrypted. -func (ce *ColumnEncryptionProperties) IsEncrypted() bool { return ce.encrypted } - -// IsEncryptedWithFooterKey returns if this column was encrypted with the footer key itself, or false if a separate -// key was used for encrypting this column. -func (ce *ColumnEncryptionProperties) IsEncryptedWithFooterKey() bool { - return ce.encryptedWithFooterKey -} - -// Key returns the key used for encrypting this column if it isn't encrypted by the footer key -func (ce *ColumnEncryptionProperties) Key() string { return ce.key } - -// KeyMetadata returns the key identifier which is used with a KeyRetriever to get the key for this column if it is not -// encrypted using the footer key -func (ce *ColumnEncryptionProperties) KeyMetadata() string { return ce.keyMetadata } - -// WipeOutEncryptionKey Clears the encryption key, used after completion of file writing -func (ce *ColumnEncryptionProperties) WipeOutEncryptionKey() { ce.key = "" } - -// IsUtilized returns whether or not these properties have already been used, if the key is empty -// then this is always false -func (ce *ColumnEncryptionProperties) IsUtilized() bool { - if ce.key == "" { - return false - } - return ce.utilized -} - -// SetUtilized is used for marking it as utilized once it is used in FileEncryptionProperties -// as the encryption key will be wiped out on completion of writing -func (ce *ColumnEncryptionProperties) SetUtilized() { - ce.utilized = true -} - -// Clone returns a instance of ColumnEncryptionProperties with the same key and metadata -func (ce *ColumnEncryptionProperties) Clone() *ColumnEncryptionProperties { - copy := ce.key - return NewColumnEncryptionProperties(ce.columnPath, WithKey(copy), WithKeyMetadata(ce.keyMetadata)) -} - -type colEncryptConfig struct { - key string - keyMetadata string - encrypted bool -} - -// ColumnEncryptOption how to specify options to the the NewColumnEncryptionProperties function. -type ColumnEncryptOption func(*colEncryptConfig) - -// WithKey sets a column specific key. 
-// If key is not set on an encrypted column, the column will be encrypted with the footer key. -// key length must be either 16, 24, or 32 bytes -// the key is cloned and will be wiped out (array values set to 0) upon completion of file writing. -// Caller is responsible for wiping out input key array -func WithKey(key string) ColumnEncryptOption { - return func(c *colEncryptConfig) { - if key != "" { - c.key = key - } - } -} - -// WithKeyMetadata sets the key retrieval metadata, use either KeyMetadata or KeyID but not both -func WithKeyMetadata(keyMeta string) ColumnEncryptOption { - return func(c *colEncryptConfig) { - c.keyMetadata = keyMeta - } -} - -// WithKeyID is a convenience function to set the key metadata using a string id. -// Set a key retrieval metadata (converted from String). and use either KeyMetadata or KeyID, not both. -// KeyID will be converted to metadata (UTF-8 Array) -func WithKeyID(keyID string) ColumnEncryptOption { - if !utf8.ValidString(keyID) { - panic("parquet: key id should be UTF8 encoded") - } - return WithKeyMetadata(keyID) -} - -// NewColumnEncryptionProperties constructs properties for the provided column path, modified by the options provided -func NewColumnEncryptionProperties(name string, opts ...ColumnEncryptOption) *ColumnEncryptionProperties { - var cfg colEncryptConfig - cfg.encrypted = true - for _, o := range opts { - o(&cfg) - } - return &ColumnEncryptionProperties{ - utilized: false, - encrypted: cfg.encrypted, - encryptedWithFooterKey: cfg.encrypted && cfg.key == "", - keyMetadata: cfg.keyMetadata, - key: cfg.key, - columnPath: name, - } -} - -// ColumnDecryptionProperties are the specifications for how to decrypt a given column. -type ColumnDecryptionProperties struct { - columnPath string - key string - utilized bool -} - -// NewColumnDecryptionProperties constructs a new ColumnDecryptionProperties for the given column path, modified by -// the provided options -func NewColumnDecryptionProperties(column string, opts ...ColumnDecryptOption) *ColumnDecryptionProperties { - var cfg columnDecryptConfig - for _, o := range opts { - o(&cfg) - } - - return &ColumnDecryptionProperties{ - columnPath: column, - utilized: false, - key: cfg.key, - } -} - -// ColumnPath returns which column these properties describe how to decrypt -func (cd *ColumnDecryptionProperties) ColumnPath() string { return cd.columnPath } - -// Key returns the key specified to decrypt this column, or is empty if the Footer Key should be used. -func (cd *ColumnDecryptionProperties) Key() string { return cd.key } - -// IsUtilized returns whether or not these properties have been used for decryption already -func (cd *ColumnDecryptionProperties) IsUtilized() bool { return cd.utilized } - -// SetUtilized is used by the reader to specify when we've decrypted the column and have used the key so we know -// to wipe out the keys. -func (cd *ColumnDecryptionProperties) SetUtilized() { cd.utilized = true } - -// WipeOutDecryptionKey is called after decryption to ensure the key doesn't stick around and get re-used. 
-func (cd *ColumnDecryptionProperties) WipeOutDecryptionKey() { cd.key = "" }
-
-// Clone returns a new instance of ColumnDecryptionProperties with the same key and column
-func (cd *ColumnDecryptionProperties) Clone() *ColumnDecryptionProperties {
-	return NewColumnDecryptionProperties(cd.columnPath, WithDecryptKey(cd.key))
-}
-
-type columnDecryptConfig struct {
-	key string
-}
-
-// ColumnDecryptOption is the type of the options passed for constructing Decryption Properties
-type ColumnDecryptOption func(*columnDecryptConfig)
-
-// WithDecryptKey specifies the key to utilize for decryption
-func WithDecryptKey(key string) ColumnDecryptOption {
-	return func(cfg *columnDecryptConfig) {
-		if key != "" {
-			cfg.key = key
-		}
-	}
-}
-
-// AADPrefixVerifier is an interface for any object that can be used to verify the identity of the file being decrypted.
-// It should panic if the provided AAD identity is bad.
-//
-// In a data set, AAD Prefixes should be collected, and then checked for missing files.
-type AADPrefixVerifier interface {
-	// Verify identity of file. panic if bad
-	Verify(string)
-}
-
-// DecryptionKeyRetriever is an interface for getting the desired key for decryption from metadata. It should take in
-// some metadata identifier and return the actual key to use for decryption.
-type DecryptionKeyRetriever interface {
-	GetKey(keyMetadata []byte) string
-}
-
-// FileDecryptionProperties defines the file-level configuration for decrypting a parquet file. Once constructed it is
-// read only.
-type FileDecryptionProperties struct {
-	footerKey                     string
-	aadPrefix                     string
-	checkPlaintextFooterIntegrity bool
-	plaintextAllowed              bool
-	utilized                      bool
-	columnDecryptProps            ColumnPathToDecryptionPropsMap
-	Verifier                      AADPrefixVerifier
-	KeyRetriever                  DecryptionKeyRetriever
-}
-
-// NewFileDecryptionProperties constructs a new FileDecryptionProperties object from the provided options. With no
-// options it uses the default configuration, which checks the integrity of a plaintext footer for an encrypted file.
-// For unencrypted parquet files, the decryption properties should not be set.
-func NewFileDecryptionProperties(opts ...FileDecryptionOption) *FileDecryptionProperties {
-	var cfg fileDecryptConfig
-	cfg.checkFooterIntegrity = DefaultCheckSignature
-	cfg.plaintextAllowed = DefaultAllowPlaintextFiles
-	for _, o := range opts {
-		o(&cfg)
-	}
-	return &FileDecryptionProperties{
-		Verifier:                      cfg.verifier,
-		footerKey:                     cfg.footerKey,
-		checkPlaintextFooterIntegrity: cfg.checkFooterIntegrity,
-		KeyRetriever:                  cfg.retriever,
-		aadPrefix:                     cfg.aadPrefix,
-		columnDecryptProps:            cfg.colDecrypt,
-		plaintextAllowed:              cfg.plaintextAllowed,
-		utilized:                      false,
-	}
-}
-
-// ColumnKey returns the key to be used for decrypting the provided column.
-func (fd *FileDecryptionProperties) ColumnKey(path string) string {
-	if d, ok := fd.columnDecryptProps[path]; ok {
-		if d != nil {
-			return d.Key()
-		}
-	}
-	return ""
-}
-
-// FooterKey returns the key utilized for decrypting the footer if encrypted, and any columns that are encrypted with
-// the footer key.
-func (fd *FileDecryptionProperties) FooterKey() string { return fd.footerKey }
-
-// AadPrefix returns the prefix to be supplied for constructing the identification strings when decrypting
-func (fd *FileDecryptionProperties) AadPrefix() string { return fd.aadPrefix }
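-
-// The following is an editorial usage sketch, not part of the upstream file:
-// a minimal DecryptionKeyRetriever and file-level decryption properties built
-// from it. The key-id-to-key mapping is hypothetical.
-type exampleKeyRetriever struct{ keys map[string]string }
-
-// GetKey looks the key up by its metadata id, returning "" when it is unknown.
-func (r exampleKeyRetriever) GetKey(keyMetadata []byte) string {
-	return r.keys[string(keyMetadata)]
-}
-
-func exampleFileDecryption() *FileDecryptionProperties {
-	retriever := exampleKeyRetriever{keys: map[string]string{
-		"kf":  "0123456789012345", // hypothetical footer key
-		"kc1": "1234567890123450", // hypothetical column key
-	}}
-	return NewFileDecryptionProperties(WithKeyRetriever(retriever))
-}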
-
-// PlaintextFooterIntegrity returns whether or not an integrity check will be performed on a plaintext footer for an
-// encrypted file.
-func (fd *FileDecryptionProperties) PlaintextFooterIntegrity() bool {
-	return fd.checkPlaintextFooterIntegrity
-}
-
-// PlaintextFilesAllowed returns whether or not this instance of decryption properties is allowed on a plaintext file.
-func (fd *FileDecryptionProperties) PlaintextFilesAllowed() bool { return fd.plaintextAllowed }
-
-// SetUtilized is called to mark this instance as utilized once it is used to read a file. A single instance
-// can be used for reading one file only. Setting this ensures the keys will be wiped out upon completion of file reading.
-func (fd *FileDecryptionProperties) SetUtilized() { fd.utilized = true }
-
-// IsUtilized returns whether or not this instance has been used to decrypt a file. If the footer key and prefix are
-// empty and there are no column decryption properties, then this is always false.
-func (fd *FileDecryptionProperties) IsUtilized() bool {
-	if fd.footerKey == "" && len(fd.columnDecryptProps) == 0 && fd.aadPrefix == "" {
-		return false
-	}
-	return fd.utilized
-}
-
-// WipeOutDecryptionKeys will clear all the keys for this instance, including the column-level ones; this will be called
-// after this instance has been utilized.
-func (fd *FileDecryptionProperties) WipeOutDecryptionKeys() {
-	fd.footerKey = ""
-	for _, cd := range fd.columnDecryptProps {
-		cd.WipeOutDecryptionKey()
-	}
-}
-
-// Clone returns a new instance of these properties, changing the prefix if set (keeping the same prefix if left empty)
-func (fd *FileDecryptionProperties) Clone(newAadPrefix string) *FileDecryptionProperties {
-	keyCopy := fd.footerKey
-	colDecryptMapCopy := make(ColumnPathToDecryptionPropsMap)
-	for k, v := range fd.columnDecryptProps {
-		colDecryptMapCopy[k] = v.Clone()
-	}
-	if newAadPrefix == "" {
-		newAadPrefix = fd.aadPrefix
-	}
-	return &FileDecryptionProperties{
-		footerKey:                     keyCopy,
-		KeyRetriever:                  fd.KeyRetriever,
-		checkPlaintextFooterIntegrity: fd.checkPlaintextFooterIntegrity,
-		Verifier:                      fd.Verifier,
-		columnDecryptProps:            colDecryptMapCopy,
-		aadPrefix:                     newAadPrefix,
-		plaintextAllowed:              fd.plaintextAllowed,
-		utilized:                      false,
-	}
-}
-
-type fileDecryptConfig struct {
-	footerKey            string
-	aadPrefix            string
-	verifier             AADPrefixVerifier
-	colDecrypt           ColumnPathToDecryptionPropsMap
-	retriever            DecryptionKeyRetriever
-	checkFooterIntegrity bool
-	plaintextAllowed     bool
-}
-
-// FileDecryptionOption is how to supply options when constructing a new FileDecryptionProperties instance.
-type FileDecryptionOption func(*fileDecryptConfig)
-
-// WithFooterKey sets an explicit footer key. If applied on a file that contains footer key
-// metadata, the metadata will be ignored and the footer will be decrypted/verified with this key.
-//
-// If the explicit key is not set, the footer key will be fetched from the key retriever.
-// With explicit keys or an AAD prefix, a new properties object must be created for each
-// encrypted file.
-//
-// Explicit encryption keys (footer and column) are cloned.
-// Upon completion of file reading, the cloned encryption keys in the properties will be wiped out;
-// the caller is responsible for wiping out the input key array.
-// The footer key length must be either 16, 24, or 32 bytes.
-func WithFooterKey(key string) FileDecryptionOption {
-	return func(cfg *fileDecryptConfig) {
-		if key != "" {
-			cfg.footerKey = key
-		}
-	}
-}
-
-// WithPrefixVerifier supplies a verifier object to use for verifying the AAD Prefixes stored in the file.
-func WithPrefixVerifier(verifier AADPrefixVerifier) FileDecryptionOption {
-	return func(cfg *fileDecryptConfig) {
-		if verifier != nil {
-			cfg.verifier = verifier
-		}
-	}
-}
-
-// WithColumnKeys sets explicit column keys.
-//
-// It's also possible to set a key retriever on this property object.
-//
-// Upon file decryption, availability of explicit keys is checked before invocation
-// of the retriever callback.
-//
-// If an explicit key is available for a footer or a column, its key metadata will be ignored.
-func WithColumnKeys(decrypt ColumnPathToDecryptionPropsMap) FileDecryptionOption {
-	return func(cfg *fileDecryptConfig) {
-		if len(decrypt) == 0 {
-			return
-		}
-		if len(cfg.colDecrypt) != 0 {
-			panic("column properties already set")
-		}
-		for _, v := range decrypt {
-			if v.IsUtilized() {
-				panic("parquet: column properties utilized in another file")
-			}
-			v.SetUtilized()
-		}
-		cfg.colDecrypt = decrypt
-	}
-}
-
-// WithKeyRetriever sets a key retriever callback. It's also possible to set explicit footer or column keys.
-func WithKeyRetriever(retriever DecryptionKeyRetriever) FileDecryptionOption {
-	return func(cfg *fileDecryptConfig) {
-		if retriever != nil {
-			cfg.retriever = retriever
-		}
-	}
-}
-
-// DisableFooterSignatureVerification skips integrity verification of plaintext footers.
-//
-// If not called, the integrity of plaintext footers will be checked at runtime, and reading will panic
-// if the footer signing key is not available
-// or if the footer content and signature don't match.
-func DisableFooterSignatureVerification() FileDecryptionOption {
-	return func(cfg *fileDecryptConfig) {
-		cfg.checkFooterIntegrity = false
-	}
-}
-
-// WithPlaintextAllowed allows reading plaintext (unencrypted) files.
-//
-// By default, reading plaintext files is not allowed when using a decryptor,
-// in order to detect files that were left unencrypted by mistake.
-// This method overrides that default behavior.
-func WithPlaintextAllowed() FileDecryptionOption {
-	return func(cfg *fileDecryptConfig) {
-		cfg.plaintextAllowed = true
-	}
-}
-
-// WithDecryptAadPrefix explicitly supplies the file AAD prefix.
-//
-// This is required when a prefix was used for file encryption but is not stored in the file.
-func WithDecryptAadPrefix(prefix string) FileDecryptionOption {
-	return func(cfg *fileDecryptConfig) {
-		if prefix != "" {
-			cfg.aadPrefix = prefix
-		}
-	}
-}
-
-// Algorithm describes how something was encrypted, representing the EncryptionAlgorithm object from the
-// parquet.thrift file.
-type Algorithm struct {
-	Algo Cipher
-	Aad  struct {
-		AadPrefix       []byte
-		AadFileUnique   []byte
-		SupplyAadPrefix bool
-	}
-}
-
-// ToThrift returns an instance to be used for serializing when writing a file.
-func (e Algorithm) ToThrift() *format.EncryptionAlgorithm {
-	if e.Algo == AesGcm {
-		return &format.EncryptionAlgorithm{
-			AES_GCM_V1: &format.AesGcmV1{
-				AadPrefix:       e.Aad.AadPrefix,
-				AadFileUnique:   e.Aad.AadFileUnique,
-				SupplyAadPrefix: &e.Aad.SupplyAadPrefix,
-			},
-		}
-	}
-	return &format.EncryptionAlgorithm{
-		AES_GCM_CTR_V1: &format.AesGcmCtrV1{
-			AadPrefix:       e.Aad.AadPrefix,
-			AadFileUnique:   e.Aad.AadFileUnique,
-			SupplyAadPrefix: &e.Aad.SupplyAadPrefix,
-		},
-	}
-}
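-
-// The following is an editorial sketch, not part of the upstream file: it
-// round-trips an Algorithm through its thrift form, as happens between a
-// writer and a reader of the same file. The AAD bytes are hypothetical.
-func exampleAlgorithmRoundTrip() Algorithm {
-	alg := Algorithm{Algo: AesGcm}
-	alg.Aad.AadFileUnique = []byte("8bytesid") // hypothetical 8-byte unique id
-	alg.Aad.SupplyAadPrefix = false
-	// ToThrift chooses AES_GCM_V1 vs AES_GCM_CTR_V1 based on Algo.
-	return AlgorithmFromThrift(alg.ToThrift())
-}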
-
-// AlgorithmFromThrift converts the thrift object to the Algorithm struct for easier usage.
-func AlgorithmFromThrift(enc *format.EncryptionAlgorithm) (ret Algorithm) {
-	if enc.IsSetAES_GCM_V1() {
-		ret.Algo = AesGcm
-		ret.Aad.AadFileUnique = enc.AES_GCM_V1.AadFileUnique
-		ret.Aad.AadPrefix = enc.AES_GCM_V1.AadPrefix
-		ret.Aad.SupplyAadPrefix = *enc.AES_GCM_V1.SupplyAadPrefix
-		return
-	}
-	ret.Algo = AesCtr
-	ret.Aad.AadFileUnique = enc.AES_GCM_CTR_V1.AadFileUnique
-	ret.Aad.AadPrefix = enc.AES_GCM_CTR_V1.AadPrefix
-	ret.Aad.SupplyAadPrefix = *enc.AES_GCM_CTR_V1.SupplyAadPrefix
-	return
-}
-
-// FileEncryptionProperties describes how to encrypt a parquet file when writing data.
-type FileEncryptionProperties struct {
-	alg                  Algorithm
-	footerKey            string
-	footerKeyMetadata    string
-	encryptedFooter      bool
-	fileAad              string
-	utilized             bool
-	storeAadPrefixInFile bool
-	aadPrefix            string
-	encryptedCols        ColumnPathToEncryptionPropsMap
-}
-
-// EncryptedFooter returns whether the footer for this file should be encrypted or left in plaintext.
-func (fe *FileEncryptionProperties) EncryptedFooter() bool { return fe.encryptedFooter }
-
-// Algorithm returns the description of how the encryption will be performed: the algorithm, prefixes, and so on.
-func (fe *FileEncryptionProperties) Algorithm() Algorithm { return fe.alg }
-
-// FooterKey returns the actual key used to encrypt the footer if it is encrypted, or to encrypt any columns which
-// will be encrypted with it rather than their own keys.
-func (fe *FileEncryptionProperties) FooterKey() string { return fe.footerKey }
-
-// FooterKeyMetadata returns the metadata used with a key retriever in order to fetch the footer key
-func (fe *FileEncryptionProperties) FooterKeyMetadata() string { return fe.footerKeyMetadata }
-
-// FileAad returns the AAD identification to be used at the file level, which gets concatenated with the row and column
-// information for encrypting data.
-func (fe *FileEncryptionProperties) FileAad() string { return fe.fileAad }
-
-// IsUtilized returns whether or not this instance has been used to encrypt a file
-func (fe *FileEncryptionProperties) IsUtilized() bool { return fe.utilized }
-
-// SetUtilized is called after writing a file. A FileEncryptionProperties object can be used for writing one file only;
-// the encryption keys will be wiped out upon completion of writing the file.
-func (fe *FileEncryptionProperties) SetUtilized() { fe.utilized = true }
-
-// EncryptedColumns returns the mapping of column paths to column encryption properties
-func (fe *FileEncryptionProperties) EncryptedColumns() ColumnPathToEncryptionPropsMap {
-	return fe.encryptedCols
-}
-
-// ColumnEncryptionProperties returns the properties for encrypting a given column.
-//
-// This may be nil for columns that aren't encrypted or may be default properties.
-func (fe *FileEncryptionProperties) ColumnEncryptionProperties(path string) *ColumnEncryptionProperties {
-	if len(fe.encryptedCols) == 0 {
-		return NewColumnEncryptionProperties(path)
-	}
-	if c, ok := fe.encryptedCols[path]; ok {
-		return c
-	}
-	return nil
-}
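-
-// The following is an editorial usage sketch, not part of the upstream file:
-// file-level encryption properties with an encrypted footer and one explicitly
-// keyed column. The keys, ids, and AAD prefix are hypothetical.
-func exampleFileEncryption() *FileEncryptionProperties {
-	const footerKey = "0123456789012345" // hypothetical 16-byte AES key
-	cols := ColumnPathToEncryptionPropsMap{
-		"user.ssn": NewColumnEncryptionProperties("user.ssn",
-			WithKey("1234567890123450"), WithKeyID("kc1")),
-	}
-	return NewFileEncryptionProperties(footerKey,
-		WithFooterKeyID("kf"),           // footer key metadata as a UTF-8 id
-		WithEncryptedColumns(cols),      // columns not listed stay plaintext
-		WithAadPrefix("datasetA.file1")) // stored in the file by default
-}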
-
-// Clone returns an identical property setup for another file, with the option to update the aadPrefix
-// (if given the empty string, the current AAD prefix will be kept), since a single instance can only be used
-// to encrypt one file before the keys are wiped out.
-func (fe *FileEncryptionProperties) Clone(newAadPrefix string) *FileEncryptionProperties {
-	footerKeyCopy := fe.footerKey
-	encryptedColsCopy := make(ColumnPathToEncryptionPropsMap)
-	for k, v := range fe.encryptedCols {
-		encryptedColsCopy[k] = v.Clone()
-	}
-	if newAadPrefix == "" {
-		newAadPrefix = fe.aadPrefix
-	}
-
-	opts := []EncryptOption{
-		WithAlg(fe.alg.Algo), WithFooterKeyMetadata(fe.footerKeyMetadata),
-		WithAadPrefix(newAadPrefix), WithEncryptedColumns(encryptedColsCopy),
-	}
-	if !fe.encryptedFooter {
-		opts = append(opts, WithPlaintextFooter())
-	}
-	if !fe.storeAadPrefixInFile {
-		opts = append(opts, DisableAadPrefixStorage())
-	}
-	return NewFileEncryptionProperties(footerKeyCopy, opts...)
-}
-
-// WipeOutEncryptionKeys clears all of the encryption keys for this instance and its columns
-func (fe *FileEncryptionProperties) WipeOutEncryptionKeys() {
-	fe.footerKey = ""
-	for _, elem := range fe.encryptedCols {
-		elem.WipeOutEncryptionKey()
-	}
-}
-
-type configEncrypt struct {
-	cipher               Cipher
-	encryptFooter        bool
-	keyMetadata          string
-	aadprefix            string
-	storeAadPrefixInFile bool
-	encryptedCols        ColumnPathToEncryptionPropsMap
-}
-
-// EncryptOption is used for specifying values when building FileEncryptionProperties
-type EncryptOption func(*configEncrypt)
-
-// WithPlaintextFooter sets the writer to write the footer in plain text; otherwise the footer will be encrypted
-// too (which is the default behavior).
-func WithPlaintextFooter() EncryptOption {
-	return func(cfg *configEncrypt) {
-		cfg.encryptFooter = false
-	}
-}
-
-// WithAlg sets the encryption algorithm to utilize. (default is AesGcm)
-func WithAlg(cipher Cipher) EncryptOption {
-	return func(cfg *configEncrypt) {
-		cfg.cipher = cipher
-	}
-}
-
-// WithFooterKeyID sets a key retrieval metadata to use (converted from a string); this must be a UTF-8 string.
-//
-// Use either WithFooterKeyID or WithFooterKeyMetadata, not both.
-func WithFooterKeyID(key string) EncryptOption {
-	if !utf8.ValidString(key) {
-		panic("parquet: footer key id should be UTF8 encoded")
-	}
-	return WithFooterKeyMetadata(key)
-}
-
-// WithFooterKeyMetadata sets a key retrieval metadata to use for getting the key.
-//
-// Use either WithFooterKeyID or WithFooterKeyMetadata, not both.
-func WithFooterKeyMetadata(keyMeta string) EncryptOption {
-	return func(cfg *configEncrypt) {
-		if keyMeta != "" {
-			cfg.keyMetadata = keyMeta
-		}
-	}
-}
-
-// WithAadPrefix sets the AAD prefix to use for encryption, and by default will store it in the file
-func WithAadPrefix(aadPrefix string) EncryptOption {
-	return func(cfg *configEncrypt) {
-		if aadPrefix != "" {
-			cfg.aadprefix = aadPrefix
-			cfg.storeAadPrefixInFile = true
-		}
-	}
-}
-
-// DisableAadPrefixStorage will set the properties to not store the AadPrefix in the file. If this isn't called
-// and the AadPrefix is set, then it will be stored. This needs to appear in the options *after* WithAadPrefix to have an effect.
-func DisableAadPrefixStorage() EncryptOption {
-	return func(cfg *configEncrypt) {
-		cfg.storeAadPrefixInFile = false
-	}
-}
-
-// WithEncryptedColumns sets the map of columns and their properties (keys etc.). If not called, then all columns will
-// be encrypted with the footer key. If called, then columns not in the map will be left unencrypted.
-func WithEncryptedColumns(encrypted ColumnPathToEncryptionPropsMap) EncryptOption { - none := func(*configEncrypt) {} - if len(encrypted) == 0 { - return none - } - return func(cfg *configEncrypt) { - if len(cfg.encryptedCols) != 0 { - panic("column properties already set") - } - for _, v := range encrypted { - if v.IsUtilized() { - panic("column properties utilized in another file") - } - v.SetUtilized() - } - cfg.encryptedCols = encrypted - } -} - -// NewFileEncryptionProperties returns a new File Encryption description object using the options provided. -func NewFileEncryptionProperties(footerKey string, opts ...EncryptOption) *FileEncryptionProperties { - var cfg configEncrypt - cfg.cipher = DefaultEncryptionAlgorithm - cfg.encryptFooter = DefaultEncryptedFooter - for _, o := range opts { - o(&cfg) - } - - props := &FileEncryptionProperties{ - footerKey: footerKey, - footerKeyMetadata: cfg.keyMetadata, - encryptedFooter: cfg.encryptFooter, - aadPrefix: cfg.aadprefix, - storeAadPrefixInFile: cfg.storeAadPrefixInFile, - encryptedCols: cfg.encryptedCols, - utilized: false, - } - - aadFileUnique := [AadFileUniqueLength]uint8{} - _, err := rand.Read(aadFileUnique[:]) - if err != nil { - panic(err) - } - - supplyAadPrefix := false - if props.aadPrefix == "" { - props.fileAad = string(aadFileUnique[:]) - } else { - props.fileAad = props.aadPrefix + string(aadFileUnique[:]) - if !props.storeAadPrefixInFile { - supplyAadPrefix = true - } - } - props.alg.Algo = cfg.cipher - props.alg.Aad.AadFileUnique = aadFileUnique[:] - props.alg.Aad.SupplyAadPrefix = supplyAadPrefix - if cfg.aadprefix != "" && cfg.storeAadPrefixInFile { - props.alg.Aad.AadPrefix = []byte(props.aadPrefix) - } - return props -} diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/internal/debug/assert_off.go b/vendor/github.com/apache/arrow/go/v12/parquet/internal/debug/assert_off.go deleted file mode 100644 index 52b9a233..00000000 --- a/vendor/github.com/apache/arrow/go/v12/parquet/internal/debug/assert_off.go +++ /dev/null @@ -1,24 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !assert - -package debug - -// Assert will panic with msg if cond is false. -// -// msg must be a string, func() string or fmt.Stringer. -func Assert(cond bool, msg interface{}) {} diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/internal/debug/doc.go b/vendor/github.com/apache/arrow/go/v12/parquet/internal/debug/doc.go deleted file mode 100644 index 61684d62..00000000 --- a/vendor/github.com/apache/arrow/go/v12/parquet/internal/debug/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. 
See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package debug provides APIs for conditional runtime assertions and debug logging. -// -// Using Assert -// -// To enable runtime assertions, build with the assert tag. When the assert tag is omitted, -// the code for the assertion will be omitted from the binary. -package debug diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/internal/debug/log_off.go b/vendor/github.com/apache/arrow/go/v12/parquet/internal/debug/log_off.go deleted file mode 100644 index 23dcccd8..00000000 --- a/vendor/github.com/apache/arrow/go/v12/parquet/internal/debug/log_off.go +++ /dev/null @@ -1,24 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !debug - -package debug - -// use build tags in order to control the existence of this log function vs it getting -// optimized away as a noop without the debug build tag. - -func Log(interface{}) {} diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/internal/gen-go/parquet/GoUnusedProtection__.go b/vendor/github.com/apache/arrow/go/v12/parquet/internal/gen-go/parquet/GoUnusedProtection__.go deleted file mode 100644 index 1de0c8de..00000000 --- a/vendor/github.com/apache/arrow/go/v12/parquet/internal/gen-go/parquet/GoUnusedProtection__.go +++ /dev/null @@ -1,6 +0,0 @@ -// Code generated by Thrift Compiler (0.16.0). DO NOT EDIT. - -package parquet - -var GoUnusedProtection__ int; - diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/internal/gen-go/parquet/parquet-consts.go b/vendor/github.com/apache/arrow/go/v12/parquet/internal/gen-go/parquet/parquet-consts.go deleted file mode 100644 index d4a63b22..00000000 --- a/vendor/github.com/apache/arrow/go/v12/parquet/internal/gen-go/parquet/parquet-consts.go +++ /dev/null @@ -1,23 +0,0 @@ -// Code generated by Thrift Compiler (0.16.0). DO NOT EDIT. - -package parquet - -import ( - "bytes" - "context" - "fmt" - "time" - thrift "github.com/apache/thrift/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) 
-var _ = thrift.ZERO -var _ = fmt.Printf -var _ = context.Background -var _ = time.Now -var _ = bytes.Equal - - -func init() { -} - diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/internal/gen-go/parquet/parquet.go b/vendor/github.com/apache/arrow/go/v12/parquet/internal/gen-go/parquet/parquet.go deleted file mode 100644 index d4508f8e..00000000 --- a/vendor/github.com/apache/arrow/go/v12/parquet/internal/gen-go/parquet/parquet.go +++ /dev/null @@ -1,10967 +0,0 @@ -// Code generated by Thrift Compiler (0.16.0). DO NOT EDIT. - -package parquet - -import ( - "bytes" - "context" - "database/sql/driver" - "errors" - "fmt" - "time" - thrift "github.com/apache/thrift/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = context.Background -var _ = time.Now -var _ = bytes.Equal - -//Types supported by Parquet. These types are intended to be used in combination -//with the encodings to control the on disk storage format. -//For example INT16 is not included as a type since a good encoding of INT32 -//would handle this. -type Type int64 -const ( - Type_BOOLEAN Type = 0 - Type_INT32 Type = 1 - Type_INT64 Type = 2 - Type_INT96 Type = 3 - Type_FLOAT Type = 4 - Type_DOUBLE Type = 5 - Type_BYTE_ARRAY Type = 6 - Type_FIXED_LEN_BYTE_ARRAY Type = 7 -) - -func (p Type) String() string { - switch p { - case Type_BOOLEAN: return "BOOLEAN" - case Type_INT32: return "INT32" - case Type_INT64: return "INT64" - case Type_INT96: return "INT96" - case Type_FLOAT: return "FLOAT" - case Type_DOUBLE: return "DOUBLE" - case Type_BYTE_ARRAY: return "BYTE_ARRAY" - case Type_FIXED_LEN_BYTE_ARRAY: return "FIXED_LEN_BYTE_ARRAY" - } - return "" -} - -func TypeFromString(s string) (Type, error) { - switch s { - case "BOOLEAN": return Type_BOOLEAN, nil - case "INT32": return Type_INT32, nil - case "INT64": return Type_INT64, nil - case "INT96": return Type_INT96, nil - case "FLOAT": return Type_FLOAT, nil - case "DOUBLE": return Type_DOUBLE, nil - case "BYTE_ARRAY": return Type_BYTE_ARRAY, nil - case "FIXED_LEN_BYTE_ARRAY": return Type_FIXED_LEN_BYTE_ARRAY, nil - } - return Type(0), fmt.Errorf("not a valid Type string") -} - - -func TypePtr(v Type) *Type { return &v } - -func (p Type) MarshalText() ([]byte, error) { -return []byte(p.String()), nil -} - -func (p *Type) UnmarshalText(text []byte) error { -q, err := TypeFromString(string(text)) -if (err != nil) { -return err -} -*p = q -return nil -} - -func (p *Type) Scan(value interface{}) error { -v, ok := value.(int64) -if !ok { -return errors.New("Scan value is not int64") -} -*p = Type(v) -return nil -} - -func (p * Type) Value() (driver.Value, error) { - if p == nil { - return nil, nil - } -return int64(*p), nil -} -//Common types used by frameworks(e.g. hive, pig) using parquet. This helps map -//between types in those frameworks to the base types in parquet. This is only -//metadata and not needed to read or write the data. 
-type ConvertedType int64 -const ( - ConvertedType_UTF8 ConvertedType = 0 - ConvertedType_MAP ConvertedType = 1 - ConvertedType_MAP_KEY_VALUE ConvertedType = 2 - ConvertedType_LIST ConvertedType = 3 - ConvertedType_ENUM ConvertedType = 4 - ConvertedType_DECIMAL ConvertedType = 5 - ConvertedType_DATE ConvertedType = 6 - ConvertedType_TIME_MILLIS ConvertedType = 7 - ConvertedType_TIME_MICROS ConvertedType = 8 - ConvertedType_TIMESTAMP_MILLIS ConvertedType = 9 - ConvertedType_TIMESTAMP_MICROS ConvertedType = 10 - ConvertedType_UINT_8 ConvertedType = 11 - ConvertedType_UINT_16 ConvertedType = 12 - ConvertedType_UINT_32 ConvertedType = 13 - ConvertedType_UINT_64 ConvertedType = 14 - ConvertedType_INT_8 ConvertedType = 15 - ConvertedType_INT_16 ConvertedType = 16 - ConvertedType_INT_32 ConvertedType = 17 - ConvertedType_INT_64 ConvertedType = 18 - ConvertedType_JSON ConvertedType = 19 - ConvertedType_BSON ConvertedType = 20 - ConvertedType_INTERVAL ConvertedType = 21 -) - -func (p ConvertedType) String() string { - switch p { - case ConvertedType_UTF8: return "UTF8" - case ConvertedType_MAP: return "MAP" - case ConvertedType_MAP_KEY_VALUE: return "MAP_KEY_VALUE" - case ConvertedType_LIST: return "LIST" - case ConvertedType_ENUM: return "ENUM" - case ConvertedType_DECIMAL: return "DECIMAL" - case ConvertedType_DATE: return "DATE" - case ConvertedType_TIME_MILLIS: return "TIME_MILLIS" - case ConvertedType_TIME_MICROS: return "TIME_MICROS" - case ConvertedType_TIMESTAMP_MILLIS: return "TIMESTAMP_MILLIS" - case ConvertedType_TIMESTAMP_MICROS: return "TIMESTAMP_MICROS" - case ConvertedType_UINT_8: return "UINT_8" - case ConvertedType_UINT_16: return "UINT_16" - case ConvertedType_UINT_32: return "UINT_32" - case ConvertedType_UINT_64: return "UINT_64" - case ConvertedType_INT_8: return "INT_8" - case ConvertedType_INT_16: return "INT_16" - case ConvertedType_INT_32: return "INT_32" - case ConvertedType_INT_64: return "INT_64" - case ConvertedType_JSON: return "JSON" - case ConvertedType_BSON: return "BSON" - case ConvertedType_INTERVAL: return "INTERVAL" - } - return "" -} - -func ConvertedTypeFromString(s string) (ConvertedType, error) { - switch s { - case "UTF8": return ConvertedType_UTF8, nil - case "MAP": return ConvertedType_MAP, nil - case "MAP_KEY_VALUE": return ConvertedType_MAP_KEY_VALUE, nil - case "LIST": return ConvertedType_LIST, nil - case "ENUM": return ConvertedType_ENUM, nil - case "DECIMAL": return ConvertedType_DECIMAL, nil - case "DATE": return ConvertedType_DATE, nil - case "TIME_MILLIS": return ConvertedType_TIME_MILLIS, nil - case "TIME_MICROS": return ConvertedType_TIME_MICROS, nil - case "TIMESTAMP_MILLIS": return ConvertedType_TIMESTAMP_MILLIS, nil - case "TIMESTAMP_MICROS": return ConvertedType_TIMESTAMP_MICROS, nil - case "UINT_8": return ConvertedType_UINT_8, nil - case "UINT_16": return ConvertedType_UINT_16, nil - case "UINT_32": return ConvertedType_UINT_32, nil - case "UINT_64": return ConvertedType_UINT_64, nil - case "INT_8": return ConvertedType_INT_8, nil - case "INT_16": return ConvertedType_INT_16, nil - case "INT_32": return ConvertedType_INT_32, nil - case "INT_64": return ConvertedType_INT_64, nil - case "JSON": return ConvertedType_JSON, nil - case "BSON": return ConvertedType_BSON, nil - case "INTERVAL": return ConvertedType_INTERVAL, nil - } - return ConvertedType(0), fmt.Errorf("not a valid ConvertedType string") -} - - -func ConvertedTypePtr(v ConvertedType) *ConvertedType { return &v } - -func (p ConvertedType) MarshalText() ([]byte, error) { -return 
[]byte(p.String()), nil -} - -func (p *ConvertedType) UnmarshalText(text []byte) error { -q, err := ConvertedTypeFromString(string(text)) -if (err != nil) { -return err -} -*p = q -return nil -} - -func (p *ConvertedType) Scan(value interface{}) error { -v, ok := value.(int64) -if !ok { -return errors.New("Scan value is not int64") -} -*p = ConvertedType(v) -return nil -} - -func (p * ConvertedType) Value() (driver.Value, error) { - if p == nil { - return nil, nil - } -return int64(*p), nil -} -//Representation of Schemas -type FieldRepetitionType int64 -const ( - FieldRepetitionType_REQUIRED FieldRepetitionType = 0 - FieldRepetitionType_OPTIONAL FieldRepetitionType = 1 - FieldRepetitionType_REPEATED FieldRepetitionType = 2 -) - -func (p FieldRepetitionType) String() string { - switch p { - case FieldRepetitionType_REQUIRED: return "REQUIRED" - case FieldRepetitionType_OPTIONAL: return "OPTIONAL" - case FieldRepetitionType_REPEATED: return "REPEATED" - } - return "" -} - -func FieldRepetitionTypeFromString(s string) (FieldRepetitionType, error) { - switch s { - case "REQUIRED": return FieldRepetitionType_REQUIRED, nil - case "OPTIONAL": return FieldRepetitionType_OPTIONAL, nil - case "REPEATED": return FieldRepetitionType_REPEATED, nil - } - return FieldRepetitionType(0), fmt.Errorf("not a valid FieldRepetitionType string") -} - - -func FieldRepetitionTypePtr(v FieldRepetitionType) *FieldRepetitionType { return &v } - -func (p FieldRepetitionType) MarshalText() ([]byte, error) { -return []byte(p.String()), nil -} - -func (p *FieldRepetitionType) UnmarshalText(text []byte) error { -q, err := FieldRepetitionTypeFromString(string(text)) -if (err != nil) { -return err -} -*p = q -return nil -} - -func (p *FieldRepetitionType) Scan(value interface{}) error { -v, ok := value.(int64) -if !ok { -return errors.New("Scan value is not int64") -} -*p = FieldRepetitionType(v) -return nil -} - -func (p * FieldRepetitionType) Value() (driver.Value, error) { - if p == nil { - return nil, nil - } -return int64(*p), nil -} -//Encodings supported by Parquet. Not all encodings are valid for all types. These -//enums are also used to specify the encoding of definition and repetition levels. -//See the accompanying doc for the details of the more complicated encodings. 
-type Encoding int64 -const ( - Encoding_PLAIN Encoding = 0 - Encoding_PLAIN_DICTIONARY Encoding = 2 - Encoding_RLE Encoding = 3 - Encoding_BIT_PACKED Encoding = 4 - Encoding_DELTA_BINARY_PACKED Encoding = 5 - Encoding_DELTA_LENGTH_BYTE_ARRAY Encoding = 6 - Encoding_DELTA_BYTE_ARRAY Encoding = 7 - Encoding_RLE_DICTIONARY Encoding = 8 - Encoding_BYTE_STREAM_SPLIT Encoding = 9 -) - -func (p Encoding) String() string { - switch p { - case Encoding_PLAIN: return "PLAIN" - case Encoding_PLAIN_DICTIONARY: return "PLAIN_DICTIONARY" - case Encoding_RLE: return "RLE" - case Encoding_BIT_PACKED: return "BIT_PACKED" - case Encoding_DELTA_BINARY_PACKED: return "DELTA_BINARY_PACKED" - case Encoding_DELTA_LENGTH_BYTE_ARRAY: return "DELTA_LENGTH_BYTE_ARRAY" - case Encoding_DELTA_BYTE_ARRAY: return "DELTA_BYTE_ARRAY" - case Encoding_RLE_DICTIONARY: return "RLE_DICTIONARY" - case Encoding_BYTE_STREAM_SPLIT: return "BYTE_STREAM_SPLIT" - } - return "" -} - -func EncodingFromString(s string) (Encoding, error) { - switch s { - case "PLAIN": return Encoding_PLAIN, nil - case "PLAIN_DICTIONARY": return Encoding_PLAIN_DICTIONARY, nil - case "RLE": return Encoding_RLE, nil - case "BIT_PACKED": return Encoding_BIT_PACKED, nil - case "DELTA_BINARY_PACKED": return Encoding_DELTA_BINARY_PACKED, nil - case "DELTA_LENGTH_BYTE_ARRAY": return Encoding_DELTA_LENGTH_BYTE_ARRAY, nil - case "DELTA_BYTE_ARRAY": return Encoding_DELTA_BYTE_ARRAY, nil - case "RLE_DICTIONARY": return Encoding_RLE_DICTIONARY, nil - case "BYTE_STREAM_SPLIT": return Encoding_BYTE_STREAM_SPLIT, nil - } - return Encoding(0), fmt.Errorf("not a valid Encoding string") -} - - -func EncodingPtr(v Encoding) *Encoding { return &v } - -func (p Encoding) MarshalText() ([]byte, error) { -return []byte(p.String()), nil -} - -func (p *Encoding) UnmarshalText(text []byte) error { -q, err := EncodingFromString(string(text)) -if (err != nil) { -return err -} -*p = q -return nil -} - -func (p *Encoding) Scan(value interface{}) error { -v, ok := value.(int64) -if !ok { -return errors.New("Scan value is not int64") -} -*p = Encoding(v) -return nil -} - -func (p * Encoding) Value() (driver.Value, error) { - if p == nil { - return nil, nil - } -return int64(*p), nil -} -//Supported compression algorithms. -// -//Codecs added in format version X.Y can be read by readers based on X.Y and later. -//Codec support may vary between readers based on the format version and -//libraries available at runtime. -// -//See Compression.md for a detailed specification of these algorithms. 
-type CompressionCodec int64 -const ( - CompressionCodec_UNCOMPRESSED CompressionCodec = 0 - CompressionCodec_SNAPPY CompressionCodec = 1 - CompressionCodec_GZIP CompressionCodec = 2 - CompressionCodec_LZO CompressionCodec = 3 - CompressionCodec_BROTLI CompressionCodec = 4 - CompressionCodec_LZ4 CompressionCodec = 5 - CompressionCodec_ZSTD CompressionCodec = 6 - CompressionCodec_LZ4_RAW CompressionCodec = 7 -) - -func (p CompressionCodec) String() string { - switch p { - case CompressionCodec_UNCOMPRESSED: return "UNCOMPRESSED" - case CompressionCodec_SNAPPY: return "SNAPPY" - case CompressionCodec_GZIP: return "GZIP" - case CompressionCodec_LZO: return "LZO" - case CompressionCodec_BROTLI: return "BROTLI" - case CompressionCodec_LZ4: return "LZ4" - case CompressionCodec_ZSTD: return "ZSTD" - case CompressionCodec_LZ4_RAW: return "LZ4_RAW" - } - return "" -} - -func CompressionCodecFromString(s string) (CompressionCodec, error) { - switch s { - case "UNCOMPRESSED": return CompressionCodec_UNCOMPRESSED, nil - case "SNAPPY": return CompressionCodec_SNAPPY, nil - case "GZIP": return CompressionCodec_GZIP, nil - case "LZO": return CompressionCodec_LZO, nil - case "BROTLI": return CompressionCodec_BROTLI, nil - case "LZ4": return CompressionCodec_LZ4, nil - case "ZSTD": return CompressionCodec_ZSTD, nil - case "LZ4_RAW": return CompressionCodec_LZ4_RAW, nil - } - return CompressionCodec(0), fmt.Errorf("not a valid CompressionCodec string") -} - - -func CompressionCodecPtr(v CompressionCodec) *CompressionCodec { return &v } - -func (p CompressionCodec) MarshalText() ([]byte, error) { -return []byte(p.String()), nil -} - -func (p *CompressionCodec) UnmarshalText(text []byte) error { -q, err := CompressionCodecFromString(string(text)) -if (err != nil) { -return err -} -*p = q -return nil -} - -func (p *CompressionCodec) Scan(value interface{}) error { -v, ok := value.(int64) -if !ok { -return errors.New("Scan value is not int64") -} -*p = CompressionCodec(v) -return nil -} - -func (p * CompressionCodec) Value() (driver.Value, error) { - if p == nil { - return nil, nil - } -return int64(*p), nil -} -type PageType int64 -const ( - PageType_DATA_PAGE PageType = 0 - PageType_INDEX_PAGE PageType = 1 - PageType_DICTIONARY_PAGE PageType = 2 - PageType_DATA_PAGE_V2 PageType = 3 -) - -func (p PageType) String() string { - switch p { - case PageType_DATA_PAGE: return "DATA_PAGE" - case PageType_INDEX_PAGE: return "INDEX_PAGE" - case PageType_DICTIONARY_PAGE: return "DICTIONARY_PAGE" - case PageType_DATA_PAGE_V2: return "DATA_PAGE_V2" - } - return "" -} - -func PageTypeFromString(s string) (PageType, error) { - switch s { - case "DATA_PAGE": return PageType_DATA_PAGE, nil - case "INDEX_PAGE": return PageType_INDEX_PAGE, nil - case "DICTIONARY_PAGE": return PageType_DICTIONARY_PAGE, nil - case "DATA_PAGE_V2": return PageType_DATA_PAGE_V2, nil - } - return PageType(0), fmt.Errorf("not a valid PageType string") -} - - -func PageTypePtr(v PageType) *PageType { return &v } - -func (p PageType) MarshalText() ([]byte, error) { -return []byte(p.String()), nil -} - -func (p *PageType) UnmarshalText(text []byte) error { -q, err := PageTypeFromString(string(text)) -if (err != nil) { -return err -} -*p = q -return nil -} - -func (p *PageType) Scan(value interface{}) error { -v, ok := value.(int64) -if !ok { -return errors.New("Scan value is not int64") -} -*p = PageType(v) -return nil -} - -func (p * PageType) Value() (driver.Value, error) { - if p == nil { - return nil, nil - } -return int64(*p), nil -} -//Enum to 
annotate whether lists of min/max elements inside ColumnIndex -//are ordered and if so, in which direction. -type BoundaryOrder int64 -const ( - BoundaryOrder_UNORDERED BoundaryOrder = 0 - BoundaryOrder_ASCENDING BoundaryOrder = 1 - BoundaryOrder_DESCENDING BoundaryOrder = 2 -) - -func (p BoundaryOrder) String() string { - switch p { - case BoundaryOrder_UNORDERED: return "UNORDERED" - case BoundaryOrder_ASCENDING: return "ASCENDING" - case BoundaryOrder_DESCENDING: return "DESCENDING" - } - return "" -} - -func BoundaryOrderFromString(s string) (BoundaryOrder, error) { - switch s { - case "UNORDERED": return BoundaryOrder_UNORDERED, nil - case "ASCENDING": return BoundaryOrder_ASCENDING, nil - case "DESCENDING": return BoundaryOrder_DESCENDING, nil - } - return BoundaryOrder(0), fmt.Errorf("not a valid BoundaryOrder string") -} - - -func BoundaryOrderPtr(v BoundaryOrder) *BoundaryOrder { return &v } - -func (p BoundaryOrder) MarshalText() ([]byte, error) { -return []byte(p.String()), nil -} - -func (p *BoundaryOrder) UnmarshalText(text []byte) error { -q, err := BoundaryOrderFromString(string(text)) -if (err != nil) { -return err -} -*p = q -return nil -} - -func (p *BoundaryOrder) Scan(value interface{}) error { -v, ok := value.(int64) -if !ok { -return errors.New("Scan value is not int64") -} -*p = BoundaryOrder(v) -return nil -} - -func (p * BoundaryOrder) Value() (driver.Value, error) { - if p == nil { - return nil, nil - } -return int64(*p), nil -} -// Statistics per row group and per page -// All fields are optional. -// -// Attributes: -// - Max: DEPRECATED: min and max value of the column. Use min_value and max_value. -// -// Values are encoded using PLAIN encoding, except that variable-length byte -// arrays do not include a length prefix. -// -// These fields encode min and max values determined by signed comparison -// only. New files should use the correct order for a column's logical type -// and store the values in the min_value and max_value fields. -// -// To support older readers, these may be set when the column order is -// signed. -// - Min -// - NullCount: count of null value in the column -// - DistinctCount: count of distinct values occurring -// - MaxValue: Min and max values for the column, determined by its ColumnOrder. -// -// Values are encoded using PLAIN encoding, except that variable-length byte -// arrays do not include a length prefix. 
-// - MinValue -type Statistics struct { - Max []byte `thrift:"max,1" db:"max" json:"max,omitempty"` - Min []byte `thrift:"min,2" db:"min" json:"min,omitempty"` - NullCount *int64 `thrift:"null_count,3" db:"null_count" json:"null_count,omitempty"` - DistinctCount *int64 `thrift:"distinct_count,4" db:"distinct_count" json:"distinct_count,omitempty"` - MaxValue []byte `thrift:"max_value,5" db:"max_value" json:"max_value,omitempty"` - MinValue []byte `thrift:"min_value,6" db:"min_value" json:"min_value,omitempty"` -} - -func NewStatistics() *Statistics { - return &Statistics{} -} - -var Statistics_Max_DEFAULT []byte - -func (p *Statistics) GetMax() []byte { - return p.Max -} -var Statistics_Min_DEFAULT []byte - -func (p *Statistics) GetMin() []byte { - return p.Min -} -var Statistics_NullCount_DEFAULT int64 -func (p *Statistics) GetNullCount() int64 { - if !p.IsSetNullCount() { - return Statistics_NullCount_DEFAULT - } -return *p.NullCount -} -var Statistics_DistinctCount_DEFAULT int64 -func (p *Statistics) GetDistinctCount() int64 { - if !p.IsSetDistinctCount() { - return Statistics_DistinctCount_DEFAULT - } -return *p.DistinctCount -} -var Statistics_MaxValue_DEFAULT []byte - -func (p *Statistics) GetMaxValue() []byte { - return p.MaxValue -} -var Statistics_MinValue_DEFAULT []byte - -func (p *Statistics) GetMinValue() []byte { - return p.MinValue -} -func (p *Statistics) IsSetMax() bool { - return p.Max != nil -} - -func (p *Statistics) IsSetMin() bool { - return p.Min != nil -} - -func (p *Statistics) IsSetNullCount() bool { - return p.NullCount != nil -} - -func (p *Statistics) IsSetDistinctCount() bool { - return p.DistinctCount != nil -} - -func (p *Statistics) IsSetMaxValue() bool { - return p.MaxValue != nil -} - -func (p *Statistics) IsSetMinValue() bool { - return p.MinValue != nil -} - -func (p *Statistics) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.STRING { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.I64 { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.I64 { - if err := p.ReadField4(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 5: - if fieldTypeId == thrift.STRING { - if err := p.ReadField5(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 6: - if fieldTypeId == thrift.STRING { - if err := p.ReadField6(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return 
err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *Statistics) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.Max = v -} - return nil -} - -func (p *Statistics) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(ctx); err != nil { - return thrift.PrependError("error reading field 2: ", err) -} else { - p.Min = v -} - return nil -} - -func (p *Statistics) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 3: ", err) -} else { - p.NullCount = &v -} - return nil -} - -func (p *Statistics) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 4: ", err) -} else { - p.DistinctCount = &v -} - return nil -} - -func (p *Statistics) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(ctx); err != nil { - return thrift.PrependError("error reading field 5: ", err) -} else { - p.MaxValue = v -} - return nil -} - -func (p *Statistics) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(ctx); err != nil { - return thrift.PrependError("error reading field 6: ", err) -} else { - p.MinValue = v -} - return nil -} - -func (p *Statistics) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "Statistics"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - if err := p.writeField2(ctx, oprot); err != nil { return err } - if err := p.writeField3(ctx, oprot); err != nil { return err } - if err := p.writeField4(ctx, oprot); err != nil { return err } - if err := p.writeField5(ctx, oprot); err != nil { return err } - if err := p.writeField6(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *Statistics) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetMax() { - if err := oprot.WriteFieldBegin(ctx, "max", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:max: ", p), err) } - if err := oprot.WriteBinary(ctx, p.Max); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.max (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:max: ", p), err) } - } - return err -} - -func (p *Statistics) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetMin() { - if err := oprot.WriteFieldBegin(ctx, "min", thrift.STRING, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:min: ", p), err) } - if err := oprot.WriteBinary(ctx, p.Min); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T.min (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:min: ", p), err) } - } - return err -} - -func (p *Statistics) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetNullCount() { - if err := oprot.WriteFieldBegin(ctx, "null_count", thrift.I64, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:null_count: ", p), err) } - if err := oprot.WriteI64(ctx, int64(*p.NullCount)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.null_count (3) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:null_count: ", p), err) } - } - return err -} - -func (p *Statistics) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetDistinctCount() { - if err := oprot.WriteFieldBegin(ctx, "distinct_count", thrift.I64, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:distinct_count: ", p), err) } - if err := oprot.WriteI64(ctx, int64(*p.DistinctCount)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.distinct_count (4) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:distinct_count: ", p), err) } - } - return err -} - -func (p *Statistics) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetMaxValue() { - if err := oprot.WriteFieldBegin(ctx, "max_value", thrift.STRING, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:max_value: ", p), err) } - if err := oprot.WriteBinary(ctx, p.MaxValue); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.max_value (5) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:max_value: ", p), err) } - } - return err -} - -func (p *Statistics) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetMinValue() { - if err := oprot.WriteFieldBegin(ctx, "min_value", thrift.STRING, 6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:min_value: ", p), err) } - if err := oprot.WriteBinary(ctx, p.MinValue); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.min_value (6) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 6:min_value: ", p), err) } - } - return err -} - -func (p *Statistics) Equals(other *Statistics) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if bytes.Compare(p.Max, other.Max) != 0 { return false } - if bytes.Compare(p.Min, other.Min) != 0 { return false } - if p.NullCount != other.NullCount { - if p.NullCount == nil || other.NullCount == nil { - return false - } - if (*p.NullCount) != (*other.NullCount) { return false } - } - if p.DistinctCount != other.DistinctCount { - if p.DistinctCount == nil || other.DistinctCount == nil { - return false - } - if (*p.DistinctCount) != (*other.DistinctCount) { return false } - } - if bytes.Compare(p.MaxValue, other.MaxValue) != 0 { return false } - if bytes.Compare(p.MinValue, other.MinValue) != 0 { return false } - return true -} - -func (p 
*Statistics) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Statistics(%+v)", *p) -} - -// Empty structs to use as logical type annotations -type StringType struct { -} - -func NewStringType() *StringType { - return &StringType{} -} - -func (p *StringType) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *StringType) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "StringType"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *StringType) Equals(other *StringType) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - return true -} - -func (p *StringType) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("StringType(%+v)", *p) -} - -type UUIDType struct { -} - -func NewUUIDType() *UUIDType { - return &UUIDType{} -} - -func (p *UUIDType) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *UUIDType) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "UUIDType"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *UUIDType) Equals(other *UUIDType) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - return true -} - -func (p *UUIDType) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("UUIDType(%+v)", *p) -} - -type MapType struct { -} - -func NewMapType() *MapType { - return &MapType{} -} - -func (p *MapType) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := 
iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *MapType) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "MapType"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *MapType) Equals(other *MapType) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - return true -} - -func (p *MapType) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("MapType(%+v)", *p) -} - -type ListType struct { -} - -func NewListType() *ListType { - return &ListType{} -} - -func (p *ListType) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *ListType) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "ListType"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *ListType) Equals(other *ListType) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - return true -} - -func (p *ListType) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("ListType(%+v)", *p) -} - -type EnumType struct { -} - -func NewEnumType() *EnumType { - return &EnumType{} -} - -func (p *EnumType) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - if err := iprot.Skip(ctx, 
fieldTypeId); err != nil { - return err - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *EnumType) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "EnumType"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *EnumType) Equals(other *EnumType) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - return true -} - -func (p *EnumType) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("EnumType(%+v)", *p) -} - -type DateType struct { -} - -func NewDateType() *DateType { - return &DateType{} -} - -func (p *DateType) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *DateType) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "DateType"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *DateType) Equals(other *DateType) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - return true -} - -func (p *DateType) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("DateType(%+v)", *p) -} - -// Logical type to annotate a column that is always null. -// -// Sometimes when discovering the schema of existing data, values are always -// null and the physical type can't be determined. This annotation signals -// the case where the physical type was guessed from all null values. 
-type NullType struct { -} - -func NewNullType() *NullType { - return &NullType{} -} - -func (p *NullType) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *NullType) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "NullType"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *NullType) Equals(other *NullType) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - return true -} - -func (p *NullType) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("NullType(%+v)", *p) -} - -// Decimal logical type annotation -// -// To maintain forward-compatibility in v1, implementations using this logical -// type must also set scale and precision on the annotated SchemaElement. 
-// -// Allowed for physical types: INT32, INT64, FIXED, and BINARY -// -// Attributes: -// - Scale -// - Precision -type DecimalType struct { - Scale int32 `thrift:"scale,1,required" db:"scale" json:"scale"` - Precision int32 `thrift:"precision,2,required" db:"precision" json:"precision"` -} - -func NewDecimalType() *DecimalType { - return &DecimalType{} -} - - -func (p *DecimalType) GetScale() int32 { - return p.Scale -} - -func (p *DecimalType) GetPrecision() int32 { - return p.Precision -} -func (p *DecimalType) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetScale bool = false; - var issetPrecision bool = false; - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.I32 { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetScale = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.I32 { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - issetPrecision = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetScale{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Scale is not set")); - } - if !issetPrecision{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Precision is not set")); - } - return nil -} - -func (p *DecimalType) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.Scale = v -} - return nil -} - -func (p *DecimalType) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 2: ", err) -} else { - p.Precision = v -} - return nil -} - -func (p *DecimalType) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "DecimalType"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - if err := p.writeField2(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *DecimalType) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "scale", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:scale: ", p), err) } - if err := oprot.WriteI32(ctx, int32(p.Scale)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.scale (1) 
field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:scale: ", p), err) } - return err -} - -func (p *DecimalType) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "precision", thrift.I32, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:precision: ", p), err) } - if err := oprot.WriteI32(ctx, int32(p.Precision)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.precision (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:precision: ", p), err) } - return err -} - -func (p *DecimalType) Equals(other *DecimalType) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.Scale != other.Scale { return false } - if p.Precision != other.Precision { return false } - return true -} - -func (p *DecimalType) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("DecimalType(%+v)", *p) -} - -// Time units for logical types -type MilliSeconds struct { -} - -func NewMilliSeconds() *MilliSeconds { - return &MilliSeconds{} -} - -func (p *MilliSeconds) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *MilliSeconds) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "MilliSeconds"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *MilliSeconds) Equals(other *MilliSeconds) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - return true -} - -func (p *MilliSeconds) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("MilliSeconds(%+v)", *p) -} - -type MicroSeconds struct { -} - -func NewMicroSeconds() *MicroSeconds { - return &MicroSeconds{} -} - -func (p *MicroSeconds) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := 
iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *MicroSeconds) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "MicroSeconds"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *MicroSeconds) Equals(other *MicroSeconds) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - return true -} - -func (p *MicroSeconds) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("MicroSeconds(%+v)", *p) -} - -type NanoSeconds struct { -} - -func NewNanoSeconds() *NanoSeconds { - return &NanoSeconds{} -} - -func (p *NanoSeconds) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *NanoSeconds) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "NanoSeconds"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *NanoSeconds) Equals(other *NanoSeconds) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - return true -} - -func (p *NanoSeconds) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("NanoSeconds(%+v)", *p) -} - -// Attributes: -// - MILLIS -// - MICROS -// - NANOS -type TimeUnit struct { - MILLIS *MilliSeconds `thrift:"MILLIS,1" db:"MILLIS" json:"MILLIS,omitempty"` - MICROS *MicroSeconds `thrift:"MICROS,2" db:"MICROS" json:"MICROS,omitempty"` - NANOS *NanoSeconds `thrift:"NANOS,3" db:"NANOS" json:"NANOS,omitempty"` -} - -func NewTimeUnit() *TimeUnit { - return &TimeUnit{} -} - -var TimeUnit_MILLIS_DEFAULT *MilliSeconds -func (p *TimeUnit) GetMILLIS() *MilliSeconds { - if !p.IsSetMILLIS() { - return TimeUnit_MILLIS_DEFAULT - } -return p.MILLIS -} -var TimeUnit_MICROS_DEFAULT *MicroSeconds -func (p *TimeUnit) GetMICROS() *MicroSeconds { - if !p.IsSetMICROS() { - return TimeUnit_MICROS_DEFAULT - } -return p.MICROS -} -var TimeUnit_NANOS_DEFAULT *NanoSeconds -func (p *TimeUnit) GetNANOS() *NanoSeconds { - if !p.IsSetNANOS() { - return TimeUnit_NANOS_DEFAULT - } -return p.NANOS -} -func (p *TimeUnit) CountSetFieldsTimeUnit() int { - count := 0 - if (p.IsSetMILLIS()) { - count++ - } - 
if (p.IsSetMICROS()) { - count++ - } - if (p.IsSetNANOS()) { - count++ - } - return count - -} - -func (p *TimeUnit) IsSetMILLIS() bool { - return p.MILLIS != nil -} - -func (p *TimeUnit) IsSetMICROS() bool { - return p.MICROS != nil -} - -func (p *TimeUnit) IsSetNANOS() bool { - return p.NANOS != nil -} - -func (p *TimeUnit) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *TimeUnit) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - p.MILLIS = &MilliSeconds{} - if err := p.MILLIS.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.MILLIS), err) - } - return nil -} - -func (p *TimeUnit) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - p.MICROS = &MicroSeconds{} - if err := p.MICROS.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.MICROS), err) - } - return nil -} - -func (p *TimeUnit) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { - p.NANOS = &NanoSeconds{} - if err := p.NANOS.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.NANOS), err) - } - return nil -} - -func (p *TimeUnit) Write(ctx context.Context, oprot thrift.TProtocol) error { - if c := p.CountSetFieldsTimeUnit(); c != 1 { - return fmt.Errorf("%T write union: exactly one field must be set (%d set)", p, c) - } - if err := oprot.WriteStructBegin(ctx, "TimeUnit"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - if err := p.writeField2(ctx, oprot); err != nil { return err } - if err := p.writeField3(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *TimeUnit) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetMILLIS() { - if err := oprot.WriteFieldBegin(ctx, "MILLIS", thrift.STRUCT, 1); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T write field begin error 1:MILLIS: ", p), err) } - if err := p.MILLIS.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.MILLIS), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:MILLIS: ", p), err) } - } - return err -} - -func (p *TimeUnit) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetMICROS() { - if err := oprot.WriteFieldBegin(ctx, "MICROS", thrift.STRUCT, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:MICROS: ", p), err) } - if err := p.MICROS.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.MICROS), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:MICROS: ", p), err) } - } - return err -} - -func (p *TimeUnit) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetNANOS() { - if err := oprot.WriteFieldBegin(ctx, "NANOS", thrift.STRUCT, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:NANOS: ", p), err) } - if err := p.NANOS.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.NANOS), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:NANOS: ", p), err) } - } - return err -} - -func (p *TimeUnit) Equals(other *TimeUnit) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if !p.MILLIS.Equals(other.MILLIS) { return false } - if !p.MICROS.Equals(other.MICROS) { return false } - if !p.NANOS.Equals(other.NANOS) { return false } - return true -} - -func (p *TimeUnit) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("TimeUnit(%+v)", *p) -} - -// Timestamp logical type annotation -// -// Allowed for physical types: INT64 -// -// Attributes: -// - IsAdjustedToUTC -// - Unit -type TimestampType struct { - IsAdjustedToUTC bool `thrift:"isAdjustedToUTC,1,required" db:"isAdjustedToUTC" json:"isAdjustedToUTC"` - Unit *TimeUnit `thrift:"unit,2,required" db:"unit" json:"unit"` -} - -func NewTimestampType() *TimestampType { - return &TimestampType{} -} - - -func (p *TimestampType) GetIsAdjustedToUTC() bool { - return p.IsAdjustedToUTC -} -var TimestampType_Unit_DEFAULT *TimeUnit -func (p *TimestampType) GetUnit() *TimeUnit { - if !p.IsSetUnit() { - return TimestampType_Unit_DEFAULT - } -return p.Unit -} -func (p *TimestampType) IsSetUnit() bool { - return p.Unit != nil -} - -func (p *TimestampType) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetIsAdjustedToUTC bool = false; - var issetUnit bool = false; - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetIsAdjustedToUTC = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.STRUCT { - if 
err := p.ReadField2(ctx, iprot); err != nil { - return err - } - issetUnit = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetIsAdjustedToUTC{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field IsAdjustedToUTC is not set")); - } - if !issetUnit{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Unit is not set")); - } - return nil -} - -func (p *TimestampType) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.IsAdjustedToUTC = v -} - return nil -} - -func (p *TimestampType) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - p.Unit = &TimeUnit{} - if err := p.Unit.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Unit), err) - } - return nil -} - -func (p *TimestampType) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "TimestampType"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - if err := p.writeField2(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *TimestampType) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "isAdjustedToUTC", thrift.BOOL, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:isAdjustedToUTC: ", p), err) } - if err := oprot.WriteBool(ctx, bool(p.IsAdjustedToUTC)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.isAdjustedToUTC (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:isAdjustedToUTC: ", p), err) } - return err -} - -func (p *TimestampType) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "unit", thrift.STRUCT, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:unit: ", p), err) } - if err := p.Unit.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Unit), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:unit: ", p), err) } - return err -} - -func (p *TimestampType) Equals(other *TimestampType) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.IsAdjustedToUTC != other.IsAdjustedToUTC { return false } - if !p.Unit.Equals(other.Unit) { return false } - return true -} - -func (p *TimestampType) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("TimestampType(%+v)", *p) -} - -// Time logical 
type annotation -// -// Allowed for physical types: INT32 (millis), INT64 (micros, nanos) -// -// Attributes: -// - IsAdjustedToUTC -// - Unit -type TimeType struct { - IsAdjustedToUTC bool `thrift:"isAdjustedToUTC,1,required" db:"isAdjustedToUTC" json:"isAdjustedToUTC"` - Unit *TimeUnit `thrift:"unit,2,required" db:"unit" json:"unit"` -} - -func NewTimeType() *TimeType { - return &TimeType{} -} - - -func (p *TimeType) GetIsAdjustedToUTC() bool { - return p.IsAdjustedToUTC -} -var TimeType_Unit_DEFAULT *TimeUnit -func (p *TimeType) GetUnit() *TimeUnit { - if !p.IsSetUnit() { - return TimeType_Unit_DEFAULT - } -return p.Unit -} -func (p *TimeType) IsSetUnit() bool { - return p.Unit != nil -} - -func (p *TimeType) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetIsAdjustedToUTC bool = false; - var issetUnit bool = false; - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetIsAdjustedToUTC = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - issetUnit = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetIsAdjustedToUTC{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field IsAdjustedToUTC is not set")); - } - if !issetUnit{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Unit is not set")); - } - return nil -} - -func (p *TimeType) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.IsAdjustedToUTC = v -} - return nil -} - -func (p *TimeType) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - p.Unit = &TimeUnit{} - if err := p.Unit.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Unit), err) - } - return nil -} - -func (p *TimeType) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "TimeType"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - if err := p.writeField2(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *TimeType) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, 
"isAdjustedToUTC", thrift.BOOL, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:isAdjustedToUTC: ", p), err) } - if err := oprot.WriteBool(ctx, bool(p.IsAdjustedToUTC)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.isAdjustedToUTC (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:isAdjustedToUTC: ", p), err) } - return err -} - -func (p *TimeType) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "unit", thrift.STRUCT, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:unit: ", p), err) } - if err := p.Unit.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Unit), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:unit: ", p), err) } - return err -} - -func (p *TimeType) Equals(other *TimeType) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.IsAdjustedToUTC != other.IsAdjustedToUTC { return false } - if !p.Unit.Equals(other.Unit) { return false } - return true -} - -func (p *TimeType) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("TimeType(%+v)", *p) -} - -// Integer logical type annotation -// -// bitWidth must be 8, 16, 32, or 64. -// -// Allowed for physical types: INT32, INT64 -// -// Attributes: -// - BitWidth -// - IsSigned -type IntType struct { - BitWidth int8 `thrift:"bitWidth,1,required" db:"bitWidth" json:"bitWidth"` - IsSigned bool `thrift:"isSigned,2,required" db:"isSigned" json:"isSigned"` -} - -func NewIntType() *IntType { - return &IntType{} -} - - -func (p *IntType) GetBitWidth() int8 { - return p.BitWidth -} - -func (p *IntType) GetIsSigned() bool { - return p.IsSigned -} -func (p *IntType) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetBitWidth bool = false; - var issetIsSigned bool = false; - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.BYTE { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetBitWidth = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - issetIsSigned = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetBitWidth{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field BitWidth is not set")); - } - if !issetIsSigned{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field IsSigned is not set")); - } - return nil -} - -func (p 
*IntType) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadByte(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - temp := int8(v) - p.BitWidth = temp -} - return nil -} - -func (p *IntType) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(ctx); err != nil { - return thrift.PrependError("error reading field 2: ", err) -} else { - p.IsSigned = v -} - return nil -} - -func (p *IntType) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "IntType"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - if err := p.writeField2(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *IntType) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "bitWidth", thrift.BYTE, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:bitWidth: ", p), err) } - if err := oprot.WriteByte(ctx, int8(p.BitWidth)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.bitWidth (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:bitWidth: ", p), err) } - return err -} - -func (p *IntType) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "isSigned", thrift.BOOL, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:isSigned: ", p), err) } - if err := oprot.WriteBool(ctx, bool(p.IsSigned)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.isSigned (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:isSigned: ", p), err) } - return err -} - -func (p *IntType) Equals(other *IntType) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.BitWidth != other.BitWidth { return false } - if p.IsSigned != other.IsSigned { return false } - return true -} - -func (p *IntType) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("IntType(%+v)", *p) -} - -// Embedded JSON logical type annotation -// -// Allowed for physical types: BINARY -type JsonType struct { -} - -func NewJsonType() *JsonType { - return &JsonType{} -} - -func (p *JsonType) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - 
return nil -} - -func (p *JsonType) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "JsonType"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *JsonType) Equals(other *JsonType) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - return true -} - -func (p *JsonType) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("JsonType(%+v)", *p) -} - -// Embedded BSON logical type annotation -// -// Allowed for physical types: BINARY -type BsonType struct { -} - -func NewBsonType() *BsonType { - return &BsonType{} -} - -func (p *BsonType) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *BsonType) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "BsonType"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *BsonType) Equals(other *BsonType) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - return true -} - -func (p *BsonType) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("BsonType(%+v)", *p) -} - -// LogicalType annotations to replace ConvertedType. -// -// To maintain compatibility, implementations using LogicalType for a -// SchemaElement must also set the corresponding ConvertedType from the -// following table. 
-// -// Attributes: -// - STRING -// - MAP -// - LIST -// - ENUM -// - DECIMAL -// - DATE -// - TIME -// - TIMESTAMP -// - INTEGER -// - UNKNOWN -// - JSON -// - BSON -// - UUID -type LogicalType struct { - STRING *StringType `thrift:"STRING,1" db:"STRING" json:"STRING,omitempty"` - MAP *MapType `thrift:"MAP,2" db:"MAP" json:"MAP,omitempty"` - LIST *ListType `thrift:"LIST,3" db:"LIST" json:"LIST,omitempty"` - ENUM *EnumType `thrift:"ENUM,4" db:"ENUM" json:"ENUM,omitempty"` - DECIMAL *DecimalType `thrift:"DECIMAL,5" db:"DECIMAL" json:"DECIMAL,omitempty"` - DATE *DateType `thrift:"DATE,6" db:"DATE" json:"DATE,omitempty"` - TIME *TimeType `thrift:"TIME,7" db:"TIME" json:"TIME,omitempty"` - TIMESTAMP *TimestampType `thrift:"TIMESTAMP,8" db:"TIMESTAMP" json:"TIMESTAMP,omitempty"` - // unused field # 9 - INTEGER *IntType `thrift:"INTEGER,10" db:"INTEGER" json:"INTEGER,omitempty"` - UNKNOWN *NullType `thrift:"UNKNOWN,11" db:"UNKNOWN" json:"UNKNOWN,omitempty"` - JSON *JsonType `thrift:"JSON,12" db:"JSON" json:"JSON,omitempty"` - BSON *BsonType `thrift:"BSON,13" db:"BSON" json:"BSON,omitempty"` - UUID *UUIDType `thrift:"UUID,14" db:"UUID" json:"UUID,omitempty"` -} - -func NewLogicalType() *LogicalType { - return &LogicalType{} -} - -var LogicalType_STRING_DEFAULT *StringType -func (p *LogicalType) GetSTRING() *StringType { - if !p.IsSetSTRING() { - return LogicalType_STRING_DEFAULT - } -return p.STRING -} -var LogicalType_MAP_DEFAULT *MapType -func (p *LogicalType) GetMAP() *MapType { - if !p.IsSetMAP() { - return LogicalType_MAP_DEFAULT - } -return p.MAP -} -var LogicalType_LIST_DEFAULT *ListType -func (p *LogicalType) GetLIST() *ListType { - if !p.IsSetLIST() { - return LogicalType_LIST_DEFAULT - } -return p.LIST -} -var LogicalType_ENUM_DEFAULT *EnumType -func (p *LogicalType) GetENUM() *EnumType { - if !p.IsSetENUM() { - return LogicalType_ENUM_DEFAULT - } -return p.ENUM -} -var LogicalType_DECIMAL_DEFAULT *DecimalType -func (p *LogicalType) GetDECIMAL() *DecimalType { - if !p.IsSetDECIMAL() { - return LogicalType_DECIMAL_DEFAULT - } -return p.DECIMAL -} -var LogicalType_DATE_DEFAULT *DateType -func (p *LogicalType) GetDATE() *DateType { - if !p.IsSetDATE() { - return LogicalType_DATE_DEFAULT - } -return p.DATE -} -var LogicalType_TIME_DEFAULT *TimeType -func (p *LogicalType) GetTIME() *TimeType { - if !p.IsSetTIME() { - return LogicalType_TIME_DEFAULT - } -return p.TIME -} -var LogicalType_TIMESTAMP_DEFAULT *TimestampType -func (p *LogicalType) GetTIMESTAMP() *TimestampType { - if !p.IsSetTIMESTAMP() { - return LogicalType_TIMESTAMP_DEFAULT - } -return p.TIMESTAMP -} -var LogicalType_INTEGER_DEFAULT *IntType -func (p *LogicalType) GetINTEGER() *IntType { - if !p.IsSetINTEGER() { - return LogicalType_INTEGER_DEFAULT - } -return p.INTEGER -} -var LogicalType_UNKNOWN_DEFAULT *NullType -func (p *LogicalType) GetUNKNOWN() *NullType { - if !p.IsSetUNKNOWN() { - return LogicalType_UNKNOWN_DEFAULT - } -return p.UNKNOWN -} -var LogicalType_JSON_DEFAULT *JsonType -func (p *LogicalType) GetJSON() *JsonType { - if !p.IsSetJSON() { - return LogicalType_JSON_DEFAULT - } -return p.JSON -} -var LogicalType_BSON_DEFAULT *BsonType -func (p *LogicalType) GetBSON() *BsonType { - if !p.IsSetBSON() { - return LogicalType_BSON_DEFAULT - } -return p.BSON -} -var LogicalType_UUID_DEFAULT *UUIDType -func (p *LogicalType) GetUUID() *UUIDType { - if !p.IsSetUUID() { - return LogicalType_UUID_DEFAULT - } -return p.UUID -} -func (p *LogicalType) CountSetFieldsLogicalType() int { - count := 0 - if (p.IsSetSTRING()) { - 
count++ - } - if (p.IsSetMAP()) { - count++ - } - if (p.IsSetLIST()) { - count++ - } - if (p.IsSetENUM()) { - count++ - } - if (p.IsSetDECIMAL()) { - count++ - } - if (p.IsSetDATE()) { - count++ - } - if (p.IsSetTIME()) { - count++ - } - if (p.IsSetTIMESTAMP()) { - count++ - } - if (p.IsSetINTEGER()) { - count++ - } - if (p.IsSetUNKNOWN()) { - count++ - } - if (p.IsSetJSON()) { - count++ - } - if (p.IsSetBSON()) { - count++ - } - if (p.IsSetUUID()) { - count++ - } - return count - -} - -func (p *LogicalType) IsSetSTRING() bool { - return p.STRING != nil -} - -func (p *LogicalType) IsSetMAP() bool { - return p.MAP != nil -} - -func (p *LogicalType) IsSetLIST() bool { - return p.LIST != nil -} - -func (p *LogicalType) IsSetENUM() bool { - return p.ENUM != nil -} - -func (p *LogicalType) IsSetDECIMAL() bool { - return p.DECIMAL != nil -} - -func (p *LogicalType) IsSetDATE() bool { - return p.DATE != nil -} - -func (p *LogicalType) IsSetTIME() bool { - return p.TIME != nil -} - -func (p *LogicalType) IsSetTIMESTAMP() bool { - return p.TIMESTAMP != nil -} - -func (p *LogicalType) IsSetINTEGER() bool { - return p.INTEGER != nil -} - -func (p *LogicalType) IsSetUNKNOWN() bool { - return p.UNKNOWN != nil -} - -func (p *LogicalType) IsSetJSON() bool { - return p.JSON != nil -} - -func (p *LogicalType) IsSetBSON() bool { - return p.BSON != nil -} - -func (p *LogicalType) IsSetUUID() bool { - return p.UUID != nil -} - -func (p *LogicalType) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField4(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 5: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField5(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 6: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField6(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 7: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField7(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 8: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField8(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 10: - if fieldTypeId == thrift.STRUCT { - if err := 
p.ReadField10(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 11: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField11(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 12: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField12(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 13: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField13(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 14: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField14(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *LogicalType) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - p.STRING = &StringType{} - if err := p.STRING.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.STRING), err) - } - return nil -} - -func (p *LogicalType) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - p.MAP = &MapType{} - if err := p.MAP.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.MAP), err) - } - return nil -} - -func (p *LogicalType) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { - p.LIST = &ListType{} - if err := p.LIST.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.LIST), err) - } - return nil -} - -func (p *LogicalType) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { - p.ENUM = &EnumType{} - if err := p.ENUM.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ENUM), err) - } - return nil -} - -func (p *LogicalType) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { - p.DECIMAL = &DecimalType{} - if err := p.DECIMAL.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.DECIMAL), err) - } - return nil -} - -func (p *LogicalType) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { - p.DATE = &DateType{} - if err := p.DATE.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.DATE), err) - } - return nil -} - -func (p *LogicalType) ReadField7(ctx context.Context, iprot thrift.TProtocol) error { - p.TIME = &TimeType{} - if err := p.TIME.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.TIME), err) - } - return nil -} - -func (p *LogicalType) ReadField8(ctx context.Context, iprot thrift.TProtocol) error { - p.TIMESTAMP = &TimestampType{} - if err := p.TIMESTAMP.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.TIMESTAMP), err) - } - return nil -} - -func (p *LogicalType) ReadField10(ctx context.Context, iprot thrift.TProtocol) error { - p.INTEGER = &IntType{} - if err 
:= p.INTEGER.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.INTEGER), err) - } - return nil -} - -func (p *LogicalType) ReadField11(ctx context.Context, iprot thrift.TProtocol) error { - p.UNKNOWN = &NullType{} - if err := p.UNKNOWN.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.UNKNOWN), err) - } - return nil -} - -func (p *LogicalType) ReadField12(ctx context.Context, iprot thrift.TProtocol) error { - p.JSON = &JsonType{} - if err := p.JSON.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.JSON), err) - } - return nil -} - -func (p *LogicalType) ReadField13(ctx context.Context, iprot thrift.TProtocol) error { - p.BSON = &BsonType{} - if err := p.BSON.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.BSON), err) - } - return nil -} - -func (p *LogicalType) ReadField14(ctx context.Context, iprot thrift.TProtocol) error { - p.UUID = &UUIDType{} - if err := p.UUID.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.UUID), err) - } - return nil -} - -func (p *LogicalType) Write(ctx context.Context, oprot thrift.TProtocol) error { - if c := p.CountSetFieldsLogicalType(); c != 1 { - return fmt.Errorf("%T write union: exactly one field must be set (%d set)", p, c) - } - if err := oprot.WriteStructBegin(ctx, "LogicalType"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - if err := p.writeField2(ctx, oprot); err != nil { return err } - if err := p.writeField3(ctx, oprot); err != nil { return err } - if err := p.writeField4(ctx, oprot); err != nil { return err } - if err := p.writeField5(ctx, oprot); err != nil { return err } - if err := p.writeField6(ctx, oprot); err != nil { return err } - if err := p.writeField7(ctx, oprot); err != nil { return err } - if err := p.writeField8(ctx, oprot); err != nil { return err } - if err := p.writeField10(ctx, oprot); err != nil { return err } - if err := p.writeField11(ctx, oprot); err != nil { return err } - if err := p.writeField12(ctx, oprot); err != nil { return err } - if err := p.writeField13(ctx, oprot); err != nil { return err } - if err := p.writeField14(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *LogicalType) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetSTRING() { - if err := oprot.WriteFieldBegin(ctx, "STRING", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:STRING: ", p), err) } - if err := p.STRING.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.STRING), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:STRING: ", p), err) } - } - return err -} - -func (p *LogicalType) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetMAP() { - if err := oprot.WriteFieldBegin(ctx, "MAP", thrift.STRUCT, 2); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T write field begin error 2:MAP: ", p), err) } - if err := p.MAP.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.MAP), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:MAP: ", p), err) } - } - return err -} - -func (p *LogicalType) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetLIST() { - if err := oprot.WriteFieldBegin(ctx, "LIST", thrift.STRUCT, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:LIST: ", p), err) } - if err := p.LIST.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.LIST), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:LIST: ", p), err) } - } - return err -} - -func (p *LogicalType) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetENUM() { - if err := oprot.WriteFieldBegin(ctx, "ENUM", thrift.STRUCT, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:ENUM: ", p), err) } - if err := p.ENUM.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ENUM), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:ENUM: ", p), err) } - } - return err -} - -func (p *LogicalType) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetDECIMAL() { - if err := oprot.WriteFieldBegin(ctx, "DECIMAL", thrift.STRUCT, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:DECIMAL: ", p), err) } - if err := p.DECIMAL.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.DECIMAL), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:DECIMAL: ", p), err) } - } - return err -} - -func (p *LogicalType) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetDATE() { - if err := oprot.WriteFieldBegin(ctx, "DATE", thrift.STRUCT, 6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:DATE: ", p), err) } - if err := p.DATE.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.DATE), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 6:DATE: ", p), err) } - } - return err -} - -func (p *LogicalType) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetTIME() { - if err := oprot.WriteFieldBegin(ctx, "TIME", thrift.STRUCT, 7); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:TIME: ", p), err) } - if err := p.TIME.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.TIME), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 7:TIME: ", p), err) } - } - return err -} - -func (p *LogicalType) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetTIMESTAMP() { - if err := oprot.WriteFieldBegin(ctx, "TIMESTAMP", thrift.STRUCT, 8); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T write field begin error 8:TIMESTAMP: ", p), err) } - if err := p.TIMESTAMP.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.TIMESTAMP), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 8:TIMESTAMP: ", p), err) } - } - return err -} - -func (p *LogicalType) writeField10(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetINTEGER() { - if err := oprot.WriteFieldBegin(ctx, "INTEGER", thrift.STRUCT, 10); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:INTEGER: ", p), err) } - if err := p.INTEGER.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.INTEGER), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 10:INTEGER: ", p), err) } - } - return err -} - -func (p *LogicalType) writeField11(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetUNKNOWN() { - if err := oprot.WriteFieldBegin(ctx, "UNKNOWN", thrift.STRUCT, 11); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:UNKNOWN: ", p), err) } - if err := p.UNKNOWN.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.UNKNOWN), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 11:UNKNOWN: ", p), err) } - } - return err -} - -func (p *LogicalType) writeField12(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetJSON() { - if err := oprot.WriteFieldBegin(ctx, "JSON", thrift.STRUCT, 12); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 12:JSON: ", p), err) } - if err := p.JSON.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.JSON), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 12:JSON: ", p), err) } - } - return err -} - -func (p *LogicalType) writeField13(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetBSON() { - if err := oprot.WriteFieldBegin(ctx, "BSON", thrift.STRUCT, 13); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 13:BSON: ", p), err) } - if err := p.BSON.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.BSON), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 13:BSON: ", p), err) } - } - return err -} - -func (p *LogicalType) writeField14(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetUUID() { - if err := oprot.WriteFieldBegin(ctx, "UUID", thrift.STRUCT, 14); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 14:UUID: ", p), err) } - if err := p.UUID.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.UUID), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 14:UUID: ", p), err) } - } - return err -} - -func (p *LogicalType) Equals(other *LogicalType) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if 
!p.STRING.Equals(other.STRING) { return false } - if !p.MAP.Equals(other.MAP) { return false } - if !p.LIST.Equals(other.LIST) { return false } - if !p.ENUM.Equals(other.ENUM) { return false } - if !p.DECIMAL.Equals(other.DECIMAL) { return false } - if !p.DATE.Equals(other.DATE) { return false } - if !p.TIME.Equals(other.TIME) { return false } - if !p.TIMESTAMP.Equals(other.TIMESTAMP) { return false } - if !p.INTEGER.Equals(other.INTEGER) { return false } - if !p.UNKNOWN.Equals(other.UNKNOWN) { return false } - if !p.JSON.Equals(other.JSON) { return false } - if !p.BSON.Equals(other.BSON) { return false } - if !p.UUID.Equals(other.UUID) { return false } - return true -} - -func (p *LogicalType) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("LogicalType(%+v)", *p) -} - -// Represents a element inside a schema definition. -// - if it is a group (inner node) then type is undefined and num_children is defined -// - if it is a primitive type (leaf) then type is defined and num_children is undefined -// the nodes are listed in depth first traversal order. -// -// Attributes: -// - Type: Data type for this field. Not set if the current element is a non-leaf node -// - TypeLength: If type is FIXED_LEN_BYTE_ARRAY, this is the byte length of the vales. -// Otherwise, if specified, this is the maximum bit length to store any of the values. -// (e.g. a low cardinality INT col could have this set to 3). Note that this is -// in the schema, and therefore fixed for the entire file. -// - RepetitionType: repetition of the field. The root of the schema does not have a repetition_type. -// All other nodes must have one -// - Name: Name of the field in the schema -// - NumChildren: Nested fields. Since thrift does not support nested fields, -// the nesting is flattened to a single list by a depth-first traversal. -// The children count is used to construct the nested relationship. -// This field is not set when the element is a primitive type -// - ConvertedType: When the schema is the result of a conversion from another model -// Used to record the original type to help with cross conversion. -// - Scale: Used when this column contains decimal data. -// See the DECIMAL converted type for more details. -// - Precision -// - FieldID: When the original schema supports field ids, this will save the -// original field id in the parquet schema -// - LogicalType: The logical type of this SchemaElement -// -// LogicalType replaces ConvertedType, but ConvertedType is still required -// for some logical types to ensure forward-compatibility in format v1. 
-type SchemaElement struct { - Type *Type `thrift:"type,1" db:"type" json:"type,omitempty"` - TypeLength *int32 `thrift:"type_length,2" db:"type_length" json:"type_length,omitempty"` - RepetitionType *FieldRepetitionType `thrift:"repetition_type,3" db:"repetition_type" json:"repetition_type,omitempty"` - Name string `thrift:"name,4,required" db:"name" json:"name"` - NumChildren *int32 `thrift:"num_children,5" db:"num_children" json:"num_children,omitempty"` - ConvertedType *ConvertedType `thrift:"converted_type,6" db:"converted_type" json:"converted_type,omitempty"` - Scale *int32 `thrift:"scale,7" db:"scale" json:"scale,omitempty"` - Precision *int32 `thrift:"precision,8" db:"precision" json:"precision,omitempty"` - FieldID *int32 `thrift:"field_id,9" db:"field_id" json:"field_id,omitempty"` - LogicalType *LogicalType `thrift:"logicalType,10" db:"logicalType" json:"logicalType,omitempty"` -} - -func NewSchemaElement() *SchemaElement { - return &SchemaElement{} -} - -var SchemaElement_Type_DEFAULT Type -func (p *SchemaElement) GetType() Type { - if !p.IsSetType() { - return SchemaElement_Type_DEFAULT - } -return *p.Type -} -var SchemaElement_TypeLength_DEFAULT int32 -func (p *SchemaElement) GetTypeLength() int32 { - if !p.IsSetTypeLength() { - return SchemaElement_TypeLength_DEFAULT - } -return *p.TypeLength -} -var SchemaElement_RepetitionType_DEFAULT FieldRepetitionType -func (p *SchemaElement) GetRepetitionType() FieldRepetitionType { - if !p.IsSetRepetitionType() { - return SchemaElement_RepetitionType_DEFAULT - } -return *p.RepetitionType -} - -func (p *SchemaElement) GetName() string { - return p.Name -} -var SchemaElement_NumChildren_DEFAULT int32 -func (p *SchemaElement) GetNumChildren() int32 { - if !p.IsSetNumChildren() { - return SchemaElement_NumChildren_DEFAULT - } -return *p.NumChildren -} -var SchemaElement_ConvertedType_DEFAULT ConvertedType -func (p *SchemaElement) GetConvertedType() ConvertedType { - if !p.IsSetConvertedType() { - return SchemaElement_ConvertedType_DEFAULT - } -return *p.ConvertedType -} -var SchemaElement_Scale_DEFAULT int32 -func (p *SchemaElement) GetScale() int32 { - if !p.IsSetScale() { - return SchemaElement_Scale_DEFAULT - } -return *p.Scale -} -var SchemaElement_Precision_DEFAULT int32 -func (p *SchemaElement) GetPrecision() int32 { - if !p.IsSetPrecision() { - return SchemaElement_Precision_DEFAULT - } -return *p.Precision -} -var SchemaElement_FieldID_DEFAULT int32 -func (p *SchemaElement) GetFieldID() int32 { - if !p.IsSetFieldID() { - return SchemaElement_FieldID_DEFAULT - } -return *p.FieldID -} -var SchemaElement_LogicalType_DEFAULT *LogicalType -func (p *SchemaElement) GetLogicalType() *LogicalType { - if !p.IsSetLogicalType() { - return SchemaElement_LogicalType_DEFAULT - } -return p.LogicalType -} -func (p *SchemaElement) IsSetType() bool { - return p.Type != nil -} - -func (p *SchemaElement) IsSetTypeLength() bool { - return p.TypeLength != nil -} - -func (p *SchemaElement) IsSetRepetitionType() bool { - return p.RepetitionType != nil -} - -func (p *SchemaElement) IsSetNumChildren() bool { - return p.NumChildren != nil -} - -func (p *SchemaElement) IsSetConvertedType() bool { - return p.ConvertedType != nil -} - -func (p *SchemaElement) IsSetScale() bool { - return p.Scale != nil -} - -func (p *SchemaElement) IsSetPrecision() bool { - return p.Precision != nil -} - -func (p *SchemaElement) IsSetFieldID() bool { - return p.FieldID != nil -} - -func (p *SchemaElement) IsSetLogicalType() bool { - return p.LogicalType != nil -} - -func (p 
*SchemaElement) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetName bool = false; - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.I32 { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.I32 { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.I32 { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.STRING { - if err := p.ReadField4(ctx, iprot); err != nil { - return err - } - issetName = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 5: - if fieldTypeId == thrift.I32 { - if err := p.ReadField5(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 6: - if fieldTypeId == thrift.I32 { - if err := p.ReadField6(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 7: - if fieldTypeId == thrift.I32 { - if err := p.ReadField7(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 8: - if fieldTypeId == thrift.I32 { - if err := p.ReadField8(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 9: - if fieldTypeId == thrift.I32 { - if err := p.ReadField9(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 10: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField10(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetName{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Name is not set")); - } - return nil -} - -func (p *SchemaElement) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - temp := Type(v) - p.Type = &temp -} - return nil -} - -func (p *SchemaElement) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 2: ", err) -} else { - p.TypeLength = &v -} - return nil -} - -func (p *SchemaElement) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := 
iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 3: ", err) -} else { - temp := FieldRepetitionType(v) - p.RepetitionType = &temp -} - return nil -} - -func (p *SchemaElement) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(ctx); err != nil { - return thrift.PrependError("error reading field 4: ", err) -} else { - p.Name = v -} - return nil -} - -func (p *SchemaElement) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 5: ", err) -} else { - p.NumChildren = &v -} - return nil -} - -func (p *SchemaElement) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 6: ", err) -} else { - temp := ConvertedType(v) - p.ConvertedType = &temp -} - return nil -} - -func (p *SchemaElement) ReadField7(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 7: ", err) -} else { - p.Scale = &v -} - return nil -} - -func (p *SchemaElement) ReadField8(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 8: ", err) -} else { - p.Precision = &v -} - return nil -} - -func (p *SchemaElement) ReadField9(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 9: ", err) -} else { - p.FieldID = &v -} - return nil -} - -func (p *SchemaElement) ReadField10(ctx context.Context, iprot thrift.TProtocol) error { - p.LogicalType = &LogicalType{} - if err := p.LogicalType.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.LogicalType), err) - } - return nil -} - -func (p *SchemaElement) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "SchemaElement"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - if err := p.writeField2(ctx, oprot); err != nil { return err } - if err := p.writeField3(ctx, oprot); err != nil { return err } - if err := p.writeField4(ctx, oprot); err != nil { return err } - if err := p.writeField5(ctx, oprot); err != nil { return err } - if err := p.writeField6(ctx, oprot); err != nil { return err } - if err := p.writeField7(ctx, oprot); err != nil { return err } - if err := p.writeField8(ctx, oprot); err != nil { return err } - if err := p.writeField9(ctx, oprot); err != nil { return err } - if err := p.writeField10(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *SchemaElement) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetType() { - if err := oprot.WriteFieldBegin(ctx, "type", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:type: ", p), err) } - if err := oprot.WriteI32(ctx, int32(*p.Type)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.type (1) 
field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:type: ", p), err) } - } - return err -} - -func (p *SchemaElement) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetTypeLength() { - if err := oprot.WriteFieldBegin(ctx, "type_length", thrift.I32, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:type_length: ", p), err) } - if err := oprot.WriteI32(ctx, int32(*p.TypeLength)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.type_length (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:type_length: ", p), err) } - } - return err -} - -func (p *SchemaElement) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetRepetitionType() { - if err := oprot.WriteFieldBegin(ctx, "repetition_type", thrift.I32, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:repetition_type: ", p), err) } - if err := oprot.WriteI32(ctx, int32(*p.RepetitionType)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.repetition_type (3) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:repetition_type: ", p), err) } - } - return err -} - -func (p *SchemaElement) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "name", thrift.STRING, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:name: ", p), err) } - if err := oprot.WriteString(ctx, string(p.Name)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.name (4) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:name: ", p), err) } - return err -} - -func (p *SchemaElement) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetNumChildren() { - if err := oprot.WriteFieldBegin(ctx, "num_children", thrift.I32, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:num_children: ", p), err) } - if err := oprot.WriteI32(ctx, int32(*p.NumChildren)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.num_children (5) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:num_children: ", p), err) } - } - return err -} - -func (p *SchemaElement) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetConvertedType() { - if err := oprot.WriteFieldBegin(ctx, "converted_type", thrift.I32, 6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:converted_type: ", p), err) } - if err := oprot.WriteI32(ctx, int32(*p.ConvertedType)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.converted_type (6) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 6:converted_type: ", p), err) } - } - return err -} - -func (p *SchemaElement) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetScale() { - if err := oprot.WriteFieldBegin(ctx, "scale", thrift.I32, 7); err != nil { 
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:scale: ", p), err) } - if err := oprot.WriteI32(ctx, int32(*p.Scale)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.scale (7) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 7:scale: ", p), err) } - } - return err -} - -func (p *SchemaElement) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetPrecision() { - if err := oprot.WriteFieldBegin(ctx, "precision", thrift.I32, 8); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:precision: ", p), err) } - if err := oprot.WriteI32(ctx, int32(*p.Precision)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.precision (8) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 8:precision: ", p), err) } - } - return err -} - -func (p *SchemaElement) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetFieldID() { - if err := oprot.WriteFieldBegin(ctx, "field_id", thrift.I32, 9); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:field_id: ", p), err) } - if err := oprot.WriteI32(ctx, int32(*p.FieldID)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.field_id (9) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 9:field_id: ", p), err) } - } - return err -} - -func (p *SchemaElement) writeField10(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetLogicalType() { - if err := oprot.WriteFieldBegin(ctx, "logicalType", thrift.STRUCT, 10); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:logicalType: ", p), err) } - if err := p.LogicalType.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.LogicalType), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 10:logicalType: ", p), err) } - } - return err -} - -func (p *SchemaElement) Equals(other *SchemaElement) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.Type != other.Type { - if p.Type == nil || other.Type == nil { - return false - } - if (*p.Type) != (*other.Type) { return false } - } - if p.TypeLength != other.TypeLength { - if p.TypeLength == nil || other.TypeLength == nil { - return false - } - if (*p.TypeLength) != (*other.TypeLength) { return false } - } - if p.RepetitionType != other.RepetitionType { - if p.RepetitionType == nil || other.RepetitionType == nil { - return false - } - if (*p.RepetitionType) != (*other.RepetitionType) { return false } - } - if p.Name != other.Name { return false } - if p.NumChildren != other.NumChildren { - if p.NumChildren == nil || other.NumChildren == nil { - return false - } - if (*p.NumChildren) != (*other.NumChildren) { return false } - } - if p.ConvertedType != other.ConvertedType { - if p.ConvertedType == nil || other.ConvertedType == nil { - return false - } - if (*p.ConvertedType) != (*other.ConvertedType) { return false } - } - if p.Scale != other.Scale { - if p.Scale == nil || other.Scale == nil { - return false - } - if (*p.Scale) != (*other.Scale) { return false } - } - if p.Precision != 
other.Precision { - if p.Precision == nil || other.Precision == nil { - return false - } - if (*p.Precision) != (*other.Precision) { return false } - } - if p.FieldID != other.FieldID { - if p.FieldID == nil || other.FieldID == nil { - return false - } - if (*p.FieldID) != (*other.FieldID) { return false } - } - if !p.LogicalType.Equals(other.LogicalType) { return false } - return true -} - -func (p *SchemaElement) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("SchemaElement(%+v)", *p) -} - -// Data page header -// -// Attributes: -// - NumValues: Number of values, including NULLs, in this data page. * -// - Encoding: Encoding used for this data page * -// - DefinitionLevelEncoding: Encoding used for definition levels * -// - RepetitionLevelEncoding: Encoding used for repetition levels * -// - Statistics: Optional statistics for the data in this page* -type DataPageHeader struct { - NumValues int32 `thrift:"num_values,1,required" db:"num_values" json:"num_values"` - Encoding Encoding `thrift:"encoding,2,required" db:"encoding" json:"encoding"` - DefinitionLevelEncoding Encoding `thrift:"definition_level_encoding,3,required" db:"definition_level_encoding" json:"definition_level_encoding"` - RepetitionLevelEncoding Encoding `thrift:"repetition_level_encoding,4,required" db:"repetition_level_encoding" json:"repetition_level_encoding"` - Statistics *Statistics `thrift:"statistics,5" db:"statistics" json:"statistics,omitempty"` -} - -func NewDataPageHeader() *DataPageHeader { - return &DataPageHeader{} -} - - -func (p *DataPageHeader) GetNumValues() int32 { - return p.NumValues -} - -func (p *DataPageHeader) GetEncoding() Encoding { - return p.Encoding -} - -func (p *DataPageHeader) GetDefinitionLevelEncoding() Encoding { - return p.DefinitionLevelEncoding -} - -func (p *DataPageHeader) GetRepetitionLevelEncoding() Encoding { - return p.RepetitionLevelEncoding -} -var DataPageHeader_Statistics_DEFAULT *Statistics -func (p *DataPageHeader) GetStatistics() *Statistics { - if !p.IsSetStatistics() { - return DataPageHeader_Statistics_DEFAULT - } -return p.Statistics -} -func (p *DataPageHeader) IsSetStatistics() bool { - return p.Statistics != nil -} - -func (p *DataPageHeader) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetNumValues bool = false; - var issetEncoding bool = false; - var issetDefinitionLevelEncoding bool = false; - var issetRepetitionLevelEncoding bool = false; - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.I32 { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetNumValues = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.I32 { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - issetEncoding = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.I32 { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - issetDefinitionLevelEncoding = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.I32 { - 
if err := p.ReadField4(ctx, iprot); err != nil { - return err - } - issetRepetitionLevelEncoding = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 5: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField5(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetNumValues{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field NumValues is not set")); - } - if !issetEncoding{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Encoding is not set")); - } - if !issetDefinitionLevelEncoding{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field DefinitionLevelEncoding is not set")); - } - if !issetRepetitionLevelEncoding{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field RepetitionLevelEncoding is not set")); - } - return nil -} - -func (p *DataPageHeader) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.NumValues = v -} - return nil -} - -func (p *DataPageHeader) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 2: ", err) -} else { - temp := Encoding(v) - p.Encoding = temp -} - return nil -} - -func (p *DataPageHeader) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 3: ", err) -} else { - temp := Encoding(v) - p.DefinitionLevelEncoding = temp -} - return nil -} - -func (p *DataPageHeader) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 4: ", err) -} else { - temp := Encoding(v) - p.RepetitionLevelEncoding = temp -} - return nil -} - -func (p *DataPageHeader) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { - p.Statistics = &Statistics{} - if err := p.Statistics.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Statistics), err) - } - return nil -} - -func (p *DataPageHeader) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "DataPageHeader"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - if err := p.writeField2(ctx, oprot); err != nil { return err } - if err := p.writeField3(ctx, oprot); err != nil { return err } - if err := p.writeField4(ctx, oprot); err != nil { return err } - if err := p.writeField5(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil 
-} - -func (p *DataPageHeader) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "num_values", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:num_values: ", p), err) } - if err := oprot.WriteI32(ctx, int32(p.NumValues)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.num_values (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:num_values: ", p), err) } - return err -} - -func (p *DataPageHeader) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "encoding", thrift.I32, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:encoding: ", p), err) } - if err := oprot.WriteI32(ctx, int32(p.Encoding)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.encoding (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:encoding: ", p), err) } - return err -} - -func (p *DataPageHeader) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "definition_level_encoding", thrift.I32, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:definition_level_encoding: ", p), err) } - if err := oprot.WriteI32(ctx, int32(p.DefinitionLevelEncoding)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.definition_level_encoding (3) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:definition_level_encoding: ", p), err) } - return err -} - -func (p *DataPageHeader) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "repetition_level_encoding", thrift.I32, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:repetition_level_encoding: ", p), err) } - if err := oprot.WriteI32(ctx, int32(p.RepetitionLevelEncoding)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.repetition_level_encoding (4) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:repetition_level_encoding: ", p), err) } - return err -} - -func (p *DataPageHeader) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetStatistics() { - if err := oprot.WriteFieldBegin(ctx, "statistics", thrift.STRUCT, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:statistics: ", p), err) } - if err := p.Statistics.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Statistics), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:statistics: ", p), err) } - } - return err -} - -func (p *DataPageHeader) Equals(other *DataPageHeader) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.NumValues != other.NumValues { return false } - if p.Encoding != other.Encoding { return false } - if p.DefinitionLevelEncoding != other.DefinitionLevelEncoding { return false } - if p.RepetitionLevelEncoding != 
other.RepetitionLevelEncoding { return false } - if !p.Statistics.Equals(other.Statistics) { return false } - return true -} - -func (p *DataPageHeader) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("DataPageHeader(%+v)", *p) -} - -type IndexPageHeader struct { -} - -func NewIndexPageHeader() *IndexPageHeader { - return &IndexPageHeader{} -} - -func (p *IndexPageHeader) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *IndexPageHeader) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "IndexPageHeader"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *IndexPageHeader) Equals(other *IndexPageHeader) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - return true -} - -func (p *IndexPageHeader) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("IndexPageHeader(%+v)", *p) -} - -// Attributes: -// - NumValues: Number of values in the dictionary * -// - Encoding: Encoding using this dictionary page * -// - IsSorted: If true, the entries in the dictionary are sorted in ascending order * -type DictionaryPageHeader struct { - NumValues int32 `thrift:"num_values,1,required" db:"num_values" json:"num_values"` - Encoding Encoding `thrift:"encoding,2,required" db:"encoding" json:"encoding"` - IsSorted *bool `thrift:"is_sorted,3" db:"is_sorted" json:"is_sorted,omitempty"` -} - -func NewDictionaryPageHeader() *DictionaryPageHeader { - return &DictionaryPageHeader{} -} - - -func (p *DictionaryPageHeader) GetNumValues() int32 { - return p.NumValues -} - -func (p *DictionaryPageHeader) GetEncoding() Encoding { - return p.Encoding -} -var DictionaryPageHeader_IsSorted_DEFAULT bool -func (p *DictionaryPageHeader) GetIsSorted() bool { - if !p.IsSetIsSorted() { - return DictionaryPageHeader_IsSorted_DEFAULT - } -return *p.IsSorted -} -func (p *DictionaryPageHeader) IsSetIsSorted() bool { - return p.IsSorted != nil -} - -func (p *DictionaryPageHeader) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetNumValues bool = false; - var issetEncoding bool = false; - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.I32 { - if err 
:= p.ReadField1(ctx, iprot); err != nil { - return err - } - issetNumValues = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.I32 { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - issetEncoding = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetNumValues{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field NumValues is not set")); - } - if !issetEncoding{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Encoding is not set")); - } - return nil -} - -func (p *DictionaryPageHeader) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.NumValues = v -} - return nil -} - -func (p *DictionaryPageHeader) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 2: ", err) -} else { - temp := Encoding(v) - p.Encoding = temp -} - return nil -} - -func (p *DictionaryPageHeader) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(ctx); err != nil { - return thrift.PrependError("error reading field 3: ", err) -} else { - p.IsSorted = &v -} - return nil -} - -func (p *DictionaryPageHeader) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "DictionaryPageHeader"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - if err := p.writeField2(ctx, oprot); err != nil { return err } - if err := p.writeField3(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *DictionaryPageHeader) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "num_values", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:num_values: ", p), err) } - if err := oprot.WriteI32(ctx, int32(p.NumValues)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.num_values (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:num_values: ", p), err) } - return err -} - -func (p *DictionaryPageHeader) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "encoding", thrift.I32, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 
2:encoding: ", p), err) } - if err := oprot.WriteI32(ctx, int32(p.Encoding)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.encoding (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:encoding: ", p), err) } - return err -} - -func (p *DictionaryPageHeader) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetIsSorted() { - if err := oprot.WriteFieldBegin(ctx, "is_sorted", thrift.BOOL, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:is_sorted: ", p), err) } - if err := oprot.WriteBool(ctx, bool(*p.IsSorted)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.is_sorted (3) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:is_sorted: ", p), err) } - } - return err -} - -func (p *DictionaryPageHeader) Equals(other *DictionaryPageHeader) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.NumValues != other.NumValues { return false } - if p.Encoding != other.Encoding { return false } - if p.IsSorted != other.IsSorted { - if p.IsSorted == nil || other.IsSorted == nil { - return false - } - if (*p.IsSorted) != (*other.IsSorted) { return false } - } - return true -} - -func (p *DictionaryPageHeader) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("DictionaryPageHeader(%+v)", *p) -} - -// New page format allowing reading levels without decompressing the data -// Repetition and definition levels are uncompressed -// The remaining section containing the data is compressed if is_compressed is true -// -// -// Attributes: -// - NumValues: Number of values, including NULLs, in this data page. * -// - NumNulls: Number of NULL values, in this data page. -// Number of non-null = num_values - num_nulls which is also the number of values in the data section * -// - NumRows: Number of rows in this data page. which means pages change on record boundaries (r = 0) * -// - Encoding: Encoding used for data in this page * -// - DefinitionLevelsByteLength: length of the definition levels -// - RepetitionLevelsByteLength: length of the repetition levels -// - IsCompressed: whether the values are compressed. -// Which means the section of the page between -// definition_levels_byte_length + repetition_levels_byte_length + 1 and compressed_page_size (included) -// is compressed with the compression_codec. 
-// If missing it is considered compressed -// - Statistics: optional statistics for the data in this page * -type DataPageHeaderV2 struct { - NumValues int32 `thrift:"num_values,1,required" db:"num_values" json:"num_values"` - NumNulls int32 `thrift:"num_nulls,2,required" db:"num_nulls" json:"num_nulls"` - NumRows int32 `thrift:"num_rows,3,required" db:"num_rows" json:"num_rows"` - Encoding Encoding `thrift:"encoding,4,required" db:"encoding" json:"encoding"` - DefinitionLevelsByteLength int32 `thrift:"definition_levels_byte_length,5,required" db:"definition_levels_byte_length" json:"definition_levels_byte_length"` - RepetitionLevelsByteLength int32 `thrift:"repetition_levels_byte_length,6,required" db:"repetition_levels_byte_length" json:"repetition_levels_byte_length"` - IsCompressed bool `thrift:"is_compressed,7" db:"is_compressed" json:"is_compressed"` - Statistics *Statistics `thrift:"statistics,8" db:"statistics" json:"statistics,omitempty"` -} - -func NewDataPageHeaderV2() *DataPageHeaderV2 { - return &DataPageHeaderV2{ -IsCompressed: true, -} -} - - -func (p *DataPageHeaderV2) GetNumValues() int32 { - return p.NumValues -} - -func (p *DataPageHeaderV2) GetNumNulls() int32 { - return p.NumNulls -} - -func (p *DataPageHeaderV2) GetNumRows() int32 { - return p.NumRows -} - -func (p *DataPageHeaderV2) GetEncoding() Encoding { - return p.Encoding -} - -func (p *DataPageHeaderV2) GetDefinitionLevelsByteLength() int32 { - return p.DefinitionLevelsByteLength -} - -func (p *DataPageHeaderV2) GetRepetitionLevelsByteLength() int32 { - return p.RepetitionLevelsByteLength -} -var DataPageHeaderV2_IsCompressed_DEFAULT bool = true - -func (p *DataPageHeaderV2) GetIsCompressed() bool { - return p.IsCompressed -} -var DataPageHeaderV2_Statistics_DEFAULT *Statistics -func (p *DataPageHeaderV2) GetStatistics() *Statistics { - if !p.IsSetStatistics() { - return DataPageHeaderV2_Statistics_DEFAULT - } -return p.Statistics -} -func (p *DataPageHeaderV2) IsSetIsCompressed() bool { - return p.IsCompressed != DataPageHeaderV2_IsCompressed_DEFAULT -} - -func (p *DataPageHeaderV2) IsSetStatistics() bool { - return p.Statistics != nil -} - -func (p *DataPageHeaderV2) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetNumValues bool = false; - var issetNumNulls bool = false; - var issetNumRows bool = false; - var issetEncoding bool = false; - var issetDefinitionLevelsByteLength bool = false; - var issetRepetitionLevelsByteLength bool = false; - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.I32 { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetNumValues = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.I32 { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - issetNumNulls = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.I32 { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - issetNumRows = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == 
thrift.I32 { - if err := p.ReadField4(ctx, iprot); err != nil { - return err - } - issetEncoding = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 5: - if fieldTypeId == thrift.I32 { - if err := p.ReadField5(ctx, iprot); err != nil { - return err - } - issetDefinitionLevelsByteLength = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 6: - if fieldTypeId == thrift.I32 { - if err := p.ReadField6(ctx, iprot); err != nil { - return err - } - issetRepetitionLevelsByteLength = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 7: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField7(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 8: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField8(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetNumValues{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field NumValues is not set")); - } - if !issetNumNulls{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field NumNulls is not set")); - } - if !issetNumRows{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field NumRows is not set")); - } - if !issetEncoding{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Encoding is not set")); - } - if !issetDefinitionLevelsByteLength{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field DefinitionLevelsByteLength is not set")); - } - if !issetRepetitionLevelsByteLength{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field RepetitionLevelsByteLength is not set")); - } - return nil -} - -func (p *DataPageHeaderV2) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.NumValues = v -} - return nil -} - -func (p *DataPageHeaderV2) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 2: ", err) -} else { - p.NumNulls = v -} - return nil -} - -func (p *DataPageHeaderV2) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 3: ", err) -} else { - p.NumRows = v -} - return nil -} - -func (p *DataPageHeaderV2) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 4: ", err) -} else { - temp := Encoding(v) - p.Encoding = temp -} - return nil -} - -func (p *DataPageHeaderV2) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 5: ", err) -} else { - 
p.DefinitionLevelsByteLength = v -} - return nil -} - -func (p *DataPageHeaderV2) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 6: ", err) -} else { - p.RepetitionLevelsByteLength = v -} - return nil -} - -func (p *DataPageHeaderV2) ReadField7(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(ctx); err != nil { - return thrift.PrependError("error reading field 7: ", err) -} else { - p.IsCompressed = v -} - return nil -} - -func (p *DataPageHeaderV2) ReadField8(ctx context.Context, iprot thrift.TProtocol) error { - p.Statistics = &Statistics{} - if err := p.Statistics.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Statistics), err) - } - return nil -} - -func (p *DataPageHeaderV2) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "DataPageHeaderV2"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - if err := p.writeField2(ctx, oprot); err != nil { return err } - if err := p.writeField3(ctx, oprot); err != nil { return err } - if err := p.writeField4(ctx, oprot); err != nil { return err } - if err := p.writeField5(ctx, oprot); err != nil { return err } - if err := p.writeField6(ctx, oprot); err != nil { return err } - if err := p.writeField7(ctx, oprot); err != nil { return err } - if err := p.writeField8(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *DataPageHeaderV2) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "num_values", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:num_values: ", p), err) } - if err := oprot.WriteI32(ctx, int32(p.NumValues)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.num_values (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:num_values: ", p), err) } - return err -} - -func (p *DataPageHeaderV2) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "num_nulls", thrift.I32, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:num_nulls: ", p), err) } - if err := oprot.WriteI32(ctx, int32(p.NumNulls)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.num_nulls (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:num_nulls: ", p), err) } - return err -} - -func (p *DataPageHeaderV2) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "num_rows", thrift.I32, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:num_rows: ", p), err) } - if err := oprot.WriteI32(ctx, int32(p.NumRows)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.num_rows (3) field write error: ", p), err) } - if err := 
oprot.WriteFieldEnd(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field end error 3:num_rows: ", p), err) }
-  return err
-}
-
-func (p *DataPageHeaderV2) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if err := oprot.WriteFieldBegin(ctx, "encoding", thrift.I32, 4); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:encoding: ", p), err) }
-  if err := oprot.WriteI32(ctx, int32(p.Encoding)); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T.encoding (4) field write error: ", p), err) }
-  if err := oprot.WriteFieldEnd(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field end error 4:encoding: ", p), err) }
-  return err
-}
-
-func (p *DataPageHeaderV2) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if err := oprot.WriteFieldBegin(ctx, "definition_levels_byte_length", thrift.I32, 5); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:definition_levels_byte_length: ", p), err) }
-  if err := oprot.WriteI32(ctx, int32(p.DefinitionLevelsByteLength)); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T.definition_levels_byte_length (5) field write error: ", p), err) }
-  if err := oprot.WriteFieldEnd(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field end error 5:definition_levels_byte_length: ", p), err) }
-  return err
-}
-
-func (p *DataPageHeaderV2) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if err := oprot.WriteFieldBegin(ctx, "repetition_levels_byte_length", thrift.I32, 6); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:repetition_levels_byte_length: ", p), err) }
-  if err := oprot.WriteI32(ctx, int32(p.RepetitionLevelsByteLength)); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T.repetition_levels_byte_length (6) field write error: ", p), err) }
-  if err := oprot.WriteFieldEnd(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field end error 6:repetition_levels_byte_length: ", p), err) }
-  return err
-}
-
-func (p *DataPageHeaderV2) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if p.IsSetIsCompressed() {
-    if err := oprot.WriteFieldBegin(ctx, "is_compressed", thrift.BOOL, 7); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:is_compressed: ", p), err) }
-    if err := oprot.WriteBool(ctx, bool(p.IsCompressed)); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T.is_compressed (7) field write error: ", p), err) }
-    if err := oprot.WriteFieldEnd(ctx); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field end error 7:is_compressed: ", p), err) }
-  }
-  return err
-}
-
-func (p *DataPageHeaderV2) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if p.IsSetStatistics() {
-    if err := oprot.WriteFieldBegin(ctx, "statistics", thrift.STRUCT, 8); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:statistics: ", p), err) }
-    if err := p.Statistics.Write(ctx, oprot); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Statistics), err)
-    }
-    if err := oprot.WriteFieldEnd(ctx); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field end error 8:statistics: ", p), err) }
-  }
-  return err
-}
-
-func (p *DataPageHeaderV2) Equals(other *DataPageHeaderV2) bool {
-  if p == other {
-    return true
-  } else if p == nil || other == nil {
-    return false
-  }
-  if p.NumValues != other.NumValues { return false }
-  if p.NumNulls != other.NumNulls { return false }
-  if p.NumRows != other.NumRows { return false }
-  if p.Encoding != other.Encoding { return false }
-  if p.DefinitionLevelsByteLength != other.DefinitionLevelsByteLength { return false }
-  if p.RepetitionLevelsByteLength != other.RepetitionLevelsByteLength { return false }
-  if p.IsCompressed != other.IsCompressed { return false }
-  if !p.Statistics.Equals(other.Statistics) { return false }
-  return true
-}
-
-func (p *DataPageHeaderV2) String() string {
-  if p == nil {
-    return "<nil>"
-  }
-  return fmt.Sprintf("DataPageHeaderV2(%+v)", *p)
-}
-
-// Block-based algorithm type annotation. *
-type SplitBlockAlgorithm struct {
-}
-
-func NewSplitBlockAlgorithm() *SplitBlockAlgorithm {
-  return &SplitBlockAlgorithm{}
-}
-
-func (p *SplitBlockAlgorithm) Read(ctx context.Context, iprot thrift.TProtocol) error {
-  if _, err := iprot.ReadStructBegin(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-  }
-
-
-  for {
-    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
-    if err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-    }
-    if fieldTypeId == thrift.STOP { break; }
-    if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-      return err
-    }
-    if err := iprot.ReadFieldEnd(ctx); err != nil {
-      return err
-    }
-  }
-  if err := iprot.ReadStructEnd(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-  }
-  return nil
-}
-
-func (p *SplitBlockAlgorithm) Write(ctx context.Context, oprot thrift.TProtocol) error {
-  if err := oprot.WriteStructBegin(ctx, "SplitBlockAlgorithm"); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
-  if p != nil {
-  }
-  if err := oprot.WriteFieldStop(ctx); err != nil {
-    return thrift.PrependError("write field stop error: ", err) }
-  if err := oprot.WriteStructEnd(ctx); err != nil {
-    return thrift.PrependError("write struct stop error: ", err) }
-  return nil
-}
-
-func (p *SplitBlockAlgorithm) Equals(other *SplitBlockAlgorithm) bool {
-  if p == other {
-    return true
-  } else if p == nil || other == nil {
-    return false
-  }
-  return true
-}
-
-func (p *SplitBlockAlgorithm) String() string {
-  if p == nil {
-    return "<nil>"
-  }
-  return fmt.Sprintf("SplitBlockAlgorithm(%+v)", *p)
-}
-
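Editor's note: everything removed in this hunk is Thrift-compiler boilerplate, so each struct can be exercised the same way. A minimal round-trip sketch, assuming the generated file lives in a package imported here as parquet, that the vendored thrift version provides NewTCompactProtocolConf, and that Encoding_PLAIN is an enum constant defined earlier in this file:

    package main

    import (
        "context"
        "fmt"

        "github.com/apache/thrift/lib/go/thrift"

        "example.com/vendor/parquet" // assumed import path for this generated file
    )

    func main() {
        ctx := context.Background()
        buf := thrift.NewTMemoryBuffer()
        proto := thrift.NewTCompactProtocolConf(buf, nil) // Parquet metadata uses the compact protocol

        in := parquet.NewDataPageHeaderV2()
        in.NumValues, in.NumRows = 128, 16
        in.Encoding = parquet.Encoding_PLAIN // assumed constant from earlier in the file

        if err := in.Write(ctx, proto); err != nil {
            panic(err)
        }
        out := parquet.NewDataPageHeaderV2()
        if err := out.Read(ctx, proto); err != nil {
            panic(err)
        }
        fmt.Println(in.Equals(out)) // true
    }
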
-// The algorithm used in Bloom filter. *
-//
-// Attributes:
-//  - BLOCK: Block-based Bloom filter. *
-type BloomFilterAlgorithm struct {
-  BLOCK *SplitBlockAlgorithm `thrift:"BLOCK,1" db:"BLOCK" json:"BLOCK,omitempty"`
-}
-
-func NewBloomFilterAlgorithm() *BloomFilterAlgorithm {
-  return &BloomFilterAlgorithm{}
-}
-
-var BloomFilterAlgorithm_BLOCK_DEFAULT *SplitBlockAlgorithm
-func (p *BloomFilterAlgorithm) GetBLOCK() *SplitBlockAlgorithm {
-  if !p.IsSetBLOCK() {
-    return BloomFilterAlgorithm_BLOCK_DEFAULT
-  }
-return p.BLOCK
-}
-func (p *BloomFilterAlgorithm) CountSetFieldsBloomFilterAlgorithm() int {
-  count := 0
-  if (p.IsSetBLOCK()) {
-    count++
-  }
-  return count
-
-}
-
-func (p *BloomFilterAlgorithm) IsSetBLOCK() bool {
-  return p.BLOCK != nil
-}
-
-func (p *BloomFilterAlgorithm) Read(ctx context.Context, iprot thrift.TProtocol) error {
-  if _, err := iprot.ReadStructBegin(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-  }
-
-
-  for {
-    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
-    if err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-    }
-    if fieldTypeId == thrift.STOP { break; }
-    switch fieldId {
-    case 1:
-      if fieldTypeId == thrift.STRUCT {
-        if err := p.ReadField1(ctx, iprot); err != nil {
-          return err
-        }
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    default:
-      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-        return err
-      }
-    }
-    if err := iprot.ReadFieldEnd(ctx); err != nil {
-      return err
-    }
-  }
-  if err := iprot.ReadStructEnd(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-  }
-  return nil
-}
-
-func (p *BloomFilterAlgorithm) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
-  p.BLOCK = &SplitBlockAlgorithm{}
-  if err := p.BLOCK.Read(ctx, iprot); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.BLOCK), err)
-  }
-  return nil
-}
-
-func (p *BloomFilterAlgorithm) Write(ctx context.Context, oprot thrift.TProtocol) error {
-  if c := p.CountSetFieldsBloomFilterAlgorithm(); c != 1 {
-    return fmt.Errorf("%T write union: exactly one field must be set (%d set)", p, c)
-  }
-  if err := oprot.WriteStructBegin(ctx, "BloomFilterAlgorithm"); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
-  if p != nil {
-    if err := p.writeField1(ctx, oprot); err != nil { return err }
-  }
-  if err := oprot.WriteFieldStop(ctx); err != nil {
-    return thrift.PrependError("write field stop error: ", err) }
-  if err := oprot.WriteStructEnd(ctx); err != nil {
-    return thrift.PrependError("write struct stop error: ", err) }
-  return nil
-}
-
-func (p *BloomFilterAlgorithm) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if p.IsSetBLOCK() {
-    if err := oprot.WriteFieldBegin(ctx, "BLOCK", thrift.STRUCT, 1); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:BLOCK: ", p), err) }
-    if err := p.BLOCK.Write(ctx, oprot); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.BLOCK), err)
-    }
-    if err := oprot.WriteFieldEnd(ctx); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field end error 1:BLOCK: ", p), err) }
-  }
-  return err
-}
-
-func (p *BloomFilterAlgorithm) Equals(other *BloomFilterAlgorithm) bool {
-  if p == other {
-    return true
-  } else if p == nil || other == nil {
-    return false
-  }
-  if !p.BLOCK.Equals(other.BLOCK) { return false }
-  return true
-}
-
-func (p *BloomFilterAlgorithm) String() string {
-  if p == nil {
-    return "<nil>"
-  }
-  return fmt.Sprintf("BloomFilterAlgorithm(%+v)", *p)
-}
-
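The CountSetFieldsBloomFilterAlgorithm check in Write above is what gives this generated union its exactly-one-member-set semantics. A short sketch of both paths, reusing ctx, proto, and the assumed parquet package from the previous sketch:

    alg := parquet.NewBloomFilterAlgorithm()
    if err := alg.Write(ctx, proto); err != nil {
        fmt.Println(err) // write union: exactly one field must be set (0 set)
    }
    alg.BLOCK = parquet.NewSplitBlockAlgorithm() // set exactly one member
    if err := alg.Write(ctx, proto); err == nil {
        fmt.Println("union serialized")
    }
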
-// Hash strategy type annotation. xxHash is an extremely fast non-cryptographic hash
-// algorithm. It uses the 64-bit version of xxHash.
-//
-type XxHash struct {
-}
-
-func NewXxHash() *XxHash {
-  return &XxHash{}
-}
-
-func (p *XxHash) Read(ctx context.Context, iprot thrift.TProtocol) error {
-  if _, err := iprot.ReadStructBegin(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-  }
-
-
-  for {
-    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
-    if err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-    }
-    if fieldTypeId == thrift.STOP { break; }
-    if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-      return err
-    }
-    if err := iprot.ReadFieldEnd(ctx); err != nil {
-      return err
-    }
-  }
-  if err := iprot.ReadStructEnd(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-  }
-  return nil
-}
-
-func (p *XxHash) Write(ctx context.Context, oprot thrift.TProtocol) error {
-  if err := oprot.WriteStructBegin(ctx, "XxHash"); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
-  if p != nil {
-  }
-  if err := oprot.WriteFieldStop(ctx); err != nil {
-    return thrift.PrependError("write field stop error: ", err) }
-  if err := oprot.WriteStructEnd(ctx); err != nil {
-    return thrift.PrependError("write struct stop error: ", err) }
-  return nil
-}
-
-func (p *XxHash) Equals(other *XxHash) bool {
-  if p == other {
-    return true
-  } else if p == nil || other == nil {
-    return false
-  }
-  return true
-}
-
-func (p *XxHash) String() string {
-  if p == nil {
-    return "<nil>"
-  }
-  return fmt.Sprintf("XxHash(%+v)", *p)
-}
-
-// The hash function used in Bloom filter. This function takes the hash of a column value
-// using plain encoding.
-//
-//
-// Attributes:
-//  - XXHASH: xxHash Strategy. *
-type BloomFilterHash struct {
-  XXHASH *XxHash `thrift:"XXHASH,1" db:"XXHASH" json:"XXHASH,omitempty"`
-}
-
-func NewBloomFilterHash() *BloomFilterHash {
-  return &BloomFilterHash{}
-}
-
-var BloomFilterHash_XXHASH_DEFAULT *XxHash
-func (p *BloomFilterHash) GetXXHASH() *XxHash {
-  if !p.IsSetXXHASH() {
-    return BloomFilterHash_XXHASH_DEFAULT
-  }
-return p.XXHASH
-}
-func (p *BloomFilterHash) CountSetFieldsBloomFilterHash() int {
-  count := 0
-  if (p.IsSetXXHASH()) {
-    count++
-  }
-  return count
-
-}
-
-func (p *BloomFilterHash) IsSetXXHASH() bool {
-  return p.XXHASH != nil
-}
-
-func (p *BloomFilterHash) Read(ctx context.Context, iprot thrift.TProtocol) error {
-  if _, err := iprot.ReadStructBegin(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-  }
-
-
-  for {
-    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
-    if err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-    }
-    if fieldTypeId == thrift.STOP { break; }
-    switch fieldId {
-    case 1:
-      if fieldTypeId == thrift.STRUCT {
-        if err := p.ReadField1(ctx, iprot); err != nil {
-          return err
-        }
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    default:
-      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-        return err
-      }
-    }
-    if err := iprot.ReadFieldEnd(ctx); err != nil {
-      return err
-    }
-  }
-  if err := iprot.ReadStructEnd(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-  }
-  return nil
-}
-
-func (p *BloomFilterHash) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
-  p.XXHASH = &XxHash{}
-  if err := p.XXHASH.Read(ctx, iprot); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.XXHASH), err)
-  }
-  return nil
-}
-
-func (p *BloomFilterHash) Write(ctx context.Context, oprot thrift.TProtocol) error {
-  if c := p.CountSetFieldsBloomFilterHash(); c != 1 {
-    return fmt.Errorf("%T write union: exactly one field must be set (%d set)", p, c)
-  }
-  if err := oprot.WriteStructBegin(ctx, "BloomFilterHash"); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
-  if p != nil {
-    if err := p.writeField1(ctx, oprot); err != nil { return err }
-  }
-  if err := oprot.WriteFieldStop(ctx); err != nil {
-    return thrift.PrependError("write field stop error: ", err) }
-  if err := oprot.WriteStructEnd(ctx); err != nil {
-    return thrift.PrependError("write struct stop error: ", err) }
-  return nil
-}
-
-func (p *BloomFilterHash) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if p.IsSetXXHASH() {
-    if err := oprot.WriteFieldBegin(ctx, "XXHASH", thrift.STRUCT, 1); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:XXHASH: ", p), err) }
-    if err := p.XXHASH.Write(ctx, oprot); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.XXHASH), err)
-    }
-    if err := oprot.WriteFieldEnd(ctx); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field end error 1:XXHASH: ", p), err) }
-  }
-  return err
-}
-
-func (p *BloomFilterHash) Equals(other *BloomFilterHash) bool {
-  if p == other {
-    return true
-  } else if p == nil || other == nil {
-    return false
-  }
-  if !p.XXHASH.Equals(other.XXHASH) { return false }
-  return true
-}
-
-func (p *BloomFilterHash) String() string {
-  if p == nil {
-    return "<nil>"
-  }
-  return fmt.Sprintf("BloomFilterHash(%+v)", *p)
-}
-
-// The compression used in the Bloom filter.
-//
-type Uncompressed struct {
-}
-
-func NewUncompressed() *Uncompressed {
-  return &Uncompressed{}
-}
-
-func (p *Uncompressed) Read(ctx context.Context, iprot thrift.TProtocol) error {
-  if _, err := iprot.ReadStructBegin(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-  }
-
-
-  for {
-    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
-    if err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-    }
-    if fieldTypeId == thrift.STOP { break; }
-    if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-      return err
-    }
-    if err := iprot.ReadFieldEnd(ctx); err != nil {
-      return err
-    }
-  }
-  if err := iprot.ReadStructEnd(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-  }
-  return nil
-}
-
-func (p *Uncompressed) Write(ctx context.Context, oprot thrift.TProtocol) error {
-  if err := oprot.WriteStructBegin(ctx, "Uncompressed"); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
-  if p != nil {
-  }
-  if err := oprot.WriteFieldStop(ctx); err != nil {
-    return thrift.PrependError("write field stop error: ", err) }
-  if err := oprot.WriteStructEnd(ctx); err != nil {
-    return thrift.PrependError("write struct stop error: ", err) }
-  return nil
-}
-
-func (p *Uncompressed) Equals(other *Uncompressed) bool {
-  if p == other {
-    return true
-  } else if p == nil || other == nil {
-    return false
-  }
-  return true
-}
-
-func (p *Uncompressed) String() string {
-  if p == nil {
-    return "<nil>"
-  }
-  return fmt.Sprintf("Uncompressed(%+v)", *p)
-}
-
-// Attributes:
-//  - UNCOMPRESSED
-type BloomFilterCompression struct {
-  UNCOMPRESSED *Uncompressed `thrift:"UNCOMPRESSED,1" db:"UNCOMPRESSED" json:"UNCOMPRESSED,omitempty"`
-}
-
-func NewBloomFilterCompression() *BloomFilterCompression {
-  return &BloomFilterCompression{}
-}
-
-var BloomFilterCompression_UNCOMPRESSED_DEFAULT *Uncompressed
-func (p *BloomFilterCompression) GetUNCOMPRESSED() *Uncompressed {
-  if !p.IsSetUNCOMPRESSED() {
-    return BloomFilterCompression_UNCOMPRESSED_DEFAULT
-  }
-return p.UNCOMPRESSED
-}
-func (p *BloomFilterCompression) CountSetFieldsBloomFilterCompression() int {
-  count := 0
-  if (p.IsSetUNCOMPRESSED()) {
-    count++
-  }
-  return count
-
-}
-
-func (p *BloomFilterCompression) IsSetUNCOMPRESSED() bool {
-  return p.UNCOMPRESSED != nil
-}
-
-func (p *BloomFilterCompression) Read(ctx context.Context, iprot thrift.TProtocol) error {
-  if _, err := iprot.ReadStructBegin(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-  }
-
-
-  for {
-    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
-    if err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-    }
-    if fieldTypeId == thrift.STOP { break; }
-    switch fieldId {
-    case 1:
-      if fieldTypeId == thrift.STRUCT {
-        if err := p.ReadField1(ctx, iprot); err != nil {
-          return err
-        }
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    default:
-      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-        return err
-      }
-    }
-    if err := iprot.ReadFieldEnd(ctx); err != nil {
-      return err
-    }
-  }
-  if err := iprot.ReadStructEnd(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-  }
-  return nil
-}
-
-func (p *BloomFilterCompression) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
-  p.UNCOMPRESSED = &Uncompressed{}
-  if err := p.UNCOMPRESSED.Read(ctx, iprot); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.UNCOMPRESSED), err)
-  }
-  return nil
-}
-
-func (p *BloomFilterCompression) Write(ctx context.Context, oprot thrift.TProtocol) error {
-  if c := p.CountSetFieldsBloomFilterCompression(); c != 1 {
-    return fmt.Errorf("%T write union: exactly one field must be set (%d set)", p, c)
-  }
-  if err := oprot.WriteStructBegin(ctx, "BloomFilterCompression"); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
-  if p != nil {
-    if err := p.writeField1(ctx, oprot); err != nil { return err }
-  }
-  if err := oprot.WriteFieldStop(ctx); err != nil {
-    return thrift.PrependError("write field stop error: ", err) }
-  if err := oprot.WriteStructEnd(ctx); err != nil {
-    return thrift.PrependError("write struct stop error: ", err) }
-  return nil
-}
-
-func (p *BloomFilterCompression) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if p.IsSetUNCOMPRESSED() {
-    if err := oprot.WriteFieldBegin(ctx, "UNCOMPRESSED", thrift.STRUCT, 1); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:UNCOMPRESSED: ", p), err) }
-    if err := p.UNCOMPRESSED.Write(ctx, oprot); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.UNCOMPRESSED), err)
-    }
-    if err := oprot.WriteFieldEnd(ctx); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field end error 1:UNCOMPRESSED: ", p), err) }
-  }
-  return err
-}
-
-func (p *BloomFilterCompression) Equals(other *BloomFilterCompression) bool {
-  if p == other {
-    return true
-  } else if p == nil || other == nil {
-    return false
-  }
-  if !p.UNCOMPRESSED.Equals(other.UNCOMPRESSED) { return false }
-  return true
-}
-
-func (p *BloomFilterCompression) String() string {
-  if p == nil {
-    return "<nil>"
-  }
-  return fmt.Sprintf("BloomFilterCompression(%+v)", *p)
-}
-
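BloomFilterHash only records which function produced the filter; callers compute the hash themselves, over the plain-encoded value bytes, per the comment above. A sketch using the third-party github.com/cespare/xxhash/v2 package (an assumption; any XXH64 implementation would do) for an INT64 column value:

    import (
        "encoding/binary"

        "github.com/cespare/xxhash/v2" // assumed XXH64 implementation
    )

    // hashInt64 hashes the plain-encoded (little-endian) bytes of an INT64
    // value, which is what the XxHash strategy above refers to.
    func hashInt64(v int64) uint64 {
        var buf [8]byte
        binary.LittleEndian.PutUint64(buf[:], uint64(v))
        return xxhash.Sum64(buf[:])
    }
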
-// Bloom filter header is stored at beginning of Bloom filter data of each column
-// and followed by its bitset.
-//
-//
-// Attributes:
-//  - NumBytes: The size of bitset in bytes *
-//  - Algorithm: The algorithm for setting bits. *
-//  - Hash: The hash function used for Bloom filter. *
-//  - Compression: The compression used in the Bloom filter *
-type BloomFilterHeader struct {
-  NumBytes int32 `thrift:"numBytes,1,required" db:"numBytes" json:"numBytes"`
-  Algorithm *BloomFilterAlgorithm `thrift:"algorithm,2,required" db:"algorithm" json:"algorithm"`
-  Hash *BloomFilterHash `thrift:"hash,3,required" db:"hash" json:"hash"`
-  Compression *BloomFilterCompression `thrift:"compression,4,required" db:"compression" json:"compression"`
-}
-
-func NewBloomFilterHeader() *BloomFilterHeader {
-  return &BloomFilterHeader{}
-}
-
-
-func (p *BloomFilterHeader) GetNumBytes() int32 {
-  return p.NumBytes
-}
-var BloomFilterHeader_Algorithm_DEFAULT *BloomFilterAlgorithm
-func (p *BloomFilterHeader) GetAlgorithm() *BloomFilterAlgorithm {
-  if !p.IsSetAlgorithm() {
-    return BloomFilterHeader_Algorithm_DEFAULT
-  }
-return p.Algorithm
-}
-var BloomFilterHeader_Hash_DEFAULT *BloomFilterHash
-func (p *BloomFilterHeader) GetHash() *BloomFilterHash {
-  if !p.IsSetHash() {
-    return BloomFilterHeader_Hash_DEFAULT
-  }
-return p.Hash
-}
-var BloomFilterHeader_Compression_DEFAULT *BloomFilterCompression
-func (p *BloomFilterHeader) GetCompression() *BloomFilterCompression {
-  if !p.IsSetCompression() {
-    return BloomFilterHeader_Compression_DEFAULT
-  }
-return p.Compression
-}
-func (p *BloomFilterHeader) IsSetAlgorithm() bool {
-  return p.Algorithm != nil
-}
-
-func (p *BloomFilterHeader) IsSetHash() bool {
-  return p.Hash != nil
-}
-
-func (p *BloomFilterHeader) IsSetCompression() bool {
-  return p.Compression != nil
-}
-
-func (p *BloomFilterHeader) Read(ctx context.Context, iprot thrift.TProtocol) error {
-  if _, err := iprot.ReadStructBegin(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-  }
-
-  var issetNumBytes bool = false;
-  var issetAlgorithm bool = false;
-  var issetHash bool = false;
-  var issetCompression bool = false;
-
-  for {
-    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
-    if err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-    }
-    if fieldTypeId == thrift.STOP { break; }
-    switch fieldId {
-    case 1:
-      if fieldTypeId == thrift.I32 {
-        if err := p.ReadField1(ctx, iprot); err != nil {
-          return err
-        }
-        issetNumBytes = true
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    case 2:
-      if fieldTypeId == thrift.STRUCT {
-        if err := p.ReadField2(ctx, iprot); err != nil {
-          return err
-        }
-        issetAlgorithm = true
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    case 3:
-      if fieldTypeId == thrift.STRUCT {
-        if err := p.ReadField3(ctx, iprot); err != nil {
-          return err
-        }
-        issetHash = true
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    case 4:
-      if fieldTypeId == thrift.STRUCT {
-        if err := p.ReadField4(ctx, iprot); err != nil {
-          return err
-        }
-        issetCompression = true
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    default:
-      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-        return err
-      }
-    }
-    if err := iprot.ReadFieldEnd(ctx); err != nil {
-      return err
-    }
-  }
-  if err := iprot.ReadStructEnd(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-  }
-  if !issetNumBytes{
-    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field NumBytes is not set"));
-  }
-  if !issetAlgorithm{
-    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Algorithm is not set"));
-  }
-  if !issetHash{
-    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Hash is not set"));
-  }
-  if !issetCompression{
-    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Compression is not set"));
-  }
-  return nil
-}
-
-func (p *BloomFilterHeader) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
-  if v, err := iprot.ReadI32(ctx); err != nil {
-    return thrift.PrependError("error reading field 1: ", err)
-} else {
-    p.NumBytes = v
-}
-  return nil
-}
-
-func (p *BloomFilterHeader) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
-  p.Algorithm = &BloomFilterAlgorithm{}
-  if err := p.Algorithm.Read(ctx, iprot); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Algorithm), err)
-  }
-  return nil
-}
-
-func (p *BloomFilterHeader) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
-  p.Hash = &BloomFilterHash{}
-  if err := p.Hash.Read(ctx, iprot); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Hash), err)
-  }
-  return nil
-}
-
-func (p *BloomFilterHeader) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
-  p.Compression = &BloomFilterCompression{}
-  if err := p.Compression.Read(ctx, iprot); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Compression), err)
-  }
-  return nil
-}
-
-func (p *BloomFilterHeader) Write(ctx context.Context, oprot thrift.TProtocol) error {
-  if err := oprot.WriteStructBegin(ctx, "BloomFilterHeader"); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
-  if p != nil {
-    if err := p.writeField1(ctx, oprot); err != nil { return err }
-    if err := p.writeField2(ctx, oprot); err != nil { return err }
-    if err := p.writeField3(ctx, oprot); err != nil { return err }
-    if err := p.writeField4(ctx, oprot); err != nil { return err }
-  }
-  if err := oprot.WriteFieldStop(ctx); err != nil {
-    return thrift.PrependError("write field stop error: ", err) }
-  if err := oprot.WriteStructEnd(ctx); err != nil {
-    return thrift.PrependError("write struct stop error: ", err) }
-  return nil
-}
-
-func (p *BloomFilterHeader) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if err := oprot.WriteFieldBegin(ctx, "numBytes", thrift.I32, 1); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:numBytes: ", p), err) }
-  if err := oprot.WriteI32(ctx, int32(p.NumBytes)); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T.numBytes (1) field write error: ", p), err) }
-  if err := oprot.WriteFieldEnd(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:numBytes: ", p), err) }
-  return err
-}
-
-func (p *BloomFilterHeader) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if err := oprot.WriteFieldBegin(ctx, "algorithm", thrift.STRUCT, 2); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:algorithm: ", p), err) }
-  if err := p.Algorithm.Write(ctx, oprot); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Algorithm), err)
-  }
-  if err := oprot.WriteFieldEnd(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:algorithm: ", p), err) }
-  return err
-}
-
-func (p *BloomFilterHeader) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if err := oprot.WriteFieldBegin(ctx, "hash", thrift.STRUCT, 3); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:hash: ", p), err) }
-  if err := p.Hash.Write(ctx, oprot); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Hash), err)
-  }
-  if err := oprot.WriteFieldEnd(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field end error 3:hash: ", p), err) }
-  return err
-}
-
-func (p *BloomFilterHeader) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if err := oprot.WriteFieldBegin(ctx, "compression", thrift.STRUCT, 4); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:compression: ", p), err) }
-  if err := p.Compression.Write(ctx, oprot); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Compression), err)
-  }
-  if err := oprot.WriteFieldEnd(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field end error 4:compression: ", p), err) }
-  return err
-}
-
-func (p *BloomFilterHeader) Equals(other *BloomFilterHeader) bool {
-  if p == other {
-    return true
-  } else if p == nil || other == nil {
-    return false
-  }
-  if p.NumBytes != other.NumBytes { return false }
-  if !p.Algorithm.Equals(other.Algorithm) { return false }
-  if !p.Hash.Equals(other.Hash) { return false }
-  if !p.Compression.Equals(other.Compression) { return false }
-  return true
-}
-
-func (p *BloomFilterHeader) String() string {
-  if p == nil {
-    return "<nil>"
-  }
-  return fmt.Sprintf("BloomFilterHeader(%+v)", *p)
-}
-
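Putting the four structs above together: a hedged sketch of decoding a BloomFilterHeader from bytes already read from the file (the compact protocol and the parquet package name are the same assumptions as earlier):

    // readBloomFilterHeader decodes the thrift-encoded header at the start of
    // a column's bloom filter data; raw is assumed to have been read from the
    // file at ColumnMetaData.BloomFilterOffset.
    func readBloomFilterHeader(ctx context.Context, raw []byte) (*parquet.BloomFilterHeader, error) {
        buf := thrift.NewTMemoryBuffer()
        if _, err := buf.Write(raw); err != nil {
            return nil, err
        }
        hdr := parquet.NewBloomFilterHeader()
        if err := hdr.Read(ctx, thrift.NewTCompactProtocolConf(buf, nil)); err != nil {
            return nil, err
        }
        return hdr, nil // hdr.NumBytes bytes of bitset follow the header
    }
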
-// Attributes:
-//  - Type: the type of the page: indicates which of the *_header fields is set *
-//  - UncompressedPageSize: Uncompressed page size in bytes (not including this header) *
-//  - CompressedPageSize: Compressed (and potentially encrypted) page size in bytes, not including this header *
-//  - Crc: The 32bit CRC for the page, to be calculated as follows:
-//      - Using the standard CRC32 algorithm
-//      - On the data only, i.e. this header should not be included. 'Data'
-//        hereby refers to the concatenation of the repetition levels, the
-//        definition levels and the column value, in this exact order.
-//      - On the encoded versions of the repetition levels, definition levels and
-//        column values
-//      - On the compressed versions of the repetition levels, definition levels
-//        and column values where possible;
-//          - For v1 data pages, the repetition levels, definition levels and column
-//            values are always compressed together. If a compression scheme is
-//            specified, the CRC shall be calculated on the compressed version of
-//            this concatenation. If no compression scheme is specified, the CRC
-//            shall be calculated on the uncompressed version of this concatenation.
-//          - For v2 data pages, the repetition levels and definition levels are
-//            handled separately from the data and are never compressed (only
-//            encoded). If a compression scheme is specified, the CRC shall be
-//            calculated on the concatenation of the uncompressed repetition levels,
-//            uncompressed definition levels and the compressed column values.
-//            If no compression scheme is specified, the CRC shall be calculated on
-//            the uncompressed concatenation.
-//      - In encrypted columns, CRC is calculated after page encryption; the
-//        encryption itself is performed after page compression (if compressed)
-//    If enabled, this allows for disabling checksumming in HDFS if only a few
-//    pages need to be read.
-//
-//  - DataPageHeader
-//  - IndexPageHeader
-//  - DictionaryPageHeader
-//  - DataPageHeaderV2
-type PageHeader struct {
-  Type PageType `thrift:"type,1,required" db:"type" json:"type"`
-  UncompressedPageSize int32 `thrift:"uncompressed_page_size,2,required" db:"uncompressed_page_size" json:"uncompressed_page_size"`
-  CompressedPageSize int32 `thrift:"compressed_page_size,3,required" db:"compressed_page_size" json:"compressed_page_size"`
-  Crc *int32 `thrift:"crc,4" db:"crc" json:"crc,omitempty"`
-  DataPageHeader *DataPageHeader `thrift:"data_page_header,5" db:"data_page_header" json:"data_page_header,omitempty"`
-  IndexPageHeader *IndexPageHeader `thrift:"index_page_header,6" db:"index_page_header" json:"index_page_header,omitempty"`
-  DictionaryPageHeader *DictionaryPageHeader `thrift:"dictionary_page_header,7" db:"dictionary_page_header" json:"dictionary_page_header,omitempty"`
-  DataPageHeaderV2 *DataPageHeaderV2 `thrift:"data_page_header_v2,8" db:"data_page_header_v2" json:"data_page_header_v2,omitempty"`
-}
-
-func NewPageHeader() *PageHeader {
-  return &PageHeader{}
-}
-
-
-func (p *PageHeader) GetType() PageType {
-  return p.Type
-}
-
-func (p *PageHeader) GetUncompressedPageSize() int32 {
-  return p.UncompressedPageSize
-}
-
-func (p *PageHeader) GetCompressedPageSize() int32 {
-  return p.CompressedPageSize
-}
-var PageHeader_Crc_DEFAULT int32
-func (p *PageHeader) GetCrc() int32 {
-  if !p.IsSetCrc() {
-    return PageHeader_Crc_DEFAULT
-  }
-return *p.Crc
-}
-var PageHeader_DataPageHeader_DEFAULT *DataPageHeader
-func (p *PageHeader) GetDataPageHeader() *DataPageHeader {
-  if !p.IsSetDataPageHeader() {
-    return PageHeader_DataPageHeader_DEFAULT
-  }
-return p.DataPageHeader
-}
-var PageHeader_IndexPageHeader_DEFAULT *IndexPageHeader
-func (p *PageHeader) GetIndexPageHeader() *IndexPageHeader {
-  if !p.IsSetIndexPageHeader() {
-    return PageHeader_IndexPageHeader_DEFAULT
-  }
-return p.IndexPageHeader
-}
-var PageHeader_DictionaryPageHeader_DEFAULT *DictionaryPageHeader
-func (p *PageHeader) GetDictionaryPageHeader() *DictionaryPageHeader {
-  if !p.IsSetDictionaryPageHeader() {
-    return PageHeader_DictionaryPageHeader_DEFAULT
-  }
-return p.DictionaryPageHeader
-}
-var PageHeader_DataPageHeaderV2_DEFAULT *DataPageHeaderV2
-func (p *PageHeader) GetDataPageHeaderV2() *DataPageHeaderV2 {
-  if !p.IsSetDataPageHeaderV2() {
-    return PageHeader_DataPageHeaderV2_DEFAULT
-  }
-return p.DataPageHeaderV2
-}
-func (p *PageHeader) IsSetCrc() bool {
-  return p.Crc != nil
-}
-
-func (p *PageHeader) IsSetDataPageHeader() bool {
-  return p.DataPageHeader != nil
-}
-
-func (p *PageHeader) IsSetIndexPageHeader() bool {
-  return p.IndexPageHeader != nil
-}
-
-func (p *PageHeader) IsSetDictionaryPageHeader() bool {
-  return p.DictionaryPageHeader != nil
-}
-
-func (p *PageHeader) IsSetDataPageHeaderV2() bool {
-  return p.DataPageHeaderV2 != nil
-}
-
-func (p *PageHeader) Read(ctx context.Context, iprot thrift.TProtocol) error {
-  if _, err := iprot.ReadStructBegin(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-  }
-
-  var issetType bool = false;
-  var issetUncompressedPageSize bool = false;
-  var issetCompressedPageSize bool = false;
-
-  for {
-    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
-    if err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-    }
-    if fieldTypeId == thrift.STOP { break; }
-    switch fieldId {
-    case 1:
-      if fieldTypeId == thrift.I32 {
-        if err := p.ReadField1(ctx, iprot); err != nil {
-          return err
-        }
-        issetType = true
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    case 2:
-      if fieldTypeId == thrift.I32 {
-        if err := p.ReadField2(ctx, iprot); err != nil {
-          return err
-        }
-        issetUncompressedPageSize = true
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    case 3:
-      if fieldTypeId == thrift.I32 {
-        if err := p.ReadField3(ctx, iprot); err != nil {
-          return err
-        }
-        issetCompressedPageSize = true
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    case 4:
-      if fieldTypeId == thrift.I32 {
-        if err := p.ReadField4(ctx, iprot); err != nil {
-          return err
-        }
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    case 5:
-      if fieldTypeId == thrift.STRUCT {
-        if err := p.ReadField5(ctx, iprot); err != nil {
-          return err
-        }
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    case 6:
-      if fieldTypeId == thrift.STRUCT {
-        if err := p.ReadField6(ctx, iprot); err != nil {
-          return err
-        }
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    case 7:
-      if fieldTypeId == thrift.STRUCT {
-        if err := p.ReadField7(ctx, iprot); err != nil {
-          return err
-        }
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    case 8:
-      if fieldTypeId == thrift.STRUCT {
-        if err := p.ReadField8(ctx, iprot); err != nil {
-          return err
-        }
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    default:
-      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-        return err
-      }
-    }
-    if err := iprot.ReadFieldEnd(ctx); err != nil {
-      return err
-    }
-  }
-  if err := iprot.ReadStructEnd(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-  }
-  if !issetType{
-    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Type is not set"));
-  }
-  if !issetUncompressedPageSize{
-    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field UncompressedPageSize is not set"));
-  }
-  if !issetCompressedPageSize{
-    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field CompressedPageSize is not set"));
-  }
-  return nil
-}
-
-func (p *PageHeader) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
-  if v, err := iprot.ReadI32(ctx); err != nil {
-    return thrift.PrependError("error reading field 1: ", err)
-} else {
-    temp := PageType(v)
-    p.Type = temp
-}
-  return nil
-}
-
-func (p *PageHeader) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
-  if v, err := iprot.ReadI32(ctx); err != nil {
-    return thrift.PrependError("error reading field 2: ", err)
-} else {
-    p.UncompressedPageSize = v
-}
-  return nil
-}
-
-func (p *PageHeader) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
-  if v, err := iprot.ReadI32(ctx); err != nil {
-    return thrift.PrependError("error reading field 3: ", err)
-} else {
-    p.CompressedPageSize = v
-}
-  return nil
-}
-
-func (p *PageHeader) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
-  if v, err := iprot.ReadI32(ctx); err != nil {
-    return thrift.PrependError("error reading field 4: ", err)
-} else {
-    p.Crc = &v
-}
-  return nil
-}
-
-func (p *PageHeader) ReadField5(ctx context.Context, iprot thrift.TProtocol) error {
-  p.DataPageHeader = &DataPageHeader{}
-  if err := p.DataPageHeader.Read(ctx, iprot); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.DataPageHeader), err)
-  }
-  return nil
-}
-
-func (p *PageHeader) ReadField6(ctx context.Context, iprot thrift.TProtocol) error {
-  p.IndexPageHeader = &IndexPageHeader{}
-  if err := p.IndexPageHeader.Read(ctx, iprot); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.IndexPageHeader), err)
-  }
-  return nil
-}
-
-func (p *PageHeader) ReadField7(ctx context.Context, iprot thrift.TProtocol) error {
-  p.DictionaryPageHeader = &DictionaryPageHeader{}
-  if err := p.DictionaryPageHeader.Read(ctx, iprot); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.DictionaryPageHeader), err)
-  }
-  return nil
-}
-
-func (p *PageHeader) ReadField8(ctx context.Context, iprot thrift.TProtocol) error {
-  p.DataPageHeaderV2 = &DataPageHeaderV2{
-    IsCompressed: true,
-}
-  if err := p.DataPageHeaderV2.Read(ctx, iprot); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.DataPageHeaderV2), err)
-  }
-  return nil
-}
-
-func (p *PageHeader) Write(ctx context.Context, oprot thrift.TProtocol) error {
-  if err := oprot.WriteStructBegin(ctx, "PageHeader"); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
-  if p != nil {
-    if err := p.writeField1(ctx, oprot); err != nil { return err }
-    if err := p.writeField2(ctx, oprot); err != nil { return err }
-    if err := p.writeField3(ctx, oprot); err != nil { return err }
-    if err := p.writeField4(ctx, oprot); err != nil { return err }
-    if err := p.writeField5(ctx, oprot); err != nil { return err }
-    if err := p.writeField6(ctx, oprot); err != nil { return err }
-    if err := p.writeField7(ctx, oprot); err != nil { return err }
-    if err := p.writeField8(ctx, oprot); err != nil { return err }
-  }
-  if err := oprot.WriteFieldStop(ctx); err != nil {
-    return thrift.PrependError("write field stop error: ", err) }
-  if err := oprot.WriteStructEnd(ctx); err != nil {
-    return thrift.PrependError("write struct stop error: ", err) }
-  return nil
-}
-
-func (p *PageHeader) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if err := oprot.WriteFieldBegin(ctx, "type", thrift.I32, 1); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:type: ", p), err) }
-  if err := oprot.WriteI32(ctx, int32(p.Type)); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T.type (1) field write error: ", p), err) }
-  if err := oprot.WriteFieldEnd(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:type: ", p), err) }
-  return err
-}
-
-func (p *PageHeader) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if err := oprot.WriteFieldBegin(ctx, "uncompressed_page_size", thrift.I32, 2); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:uncompressed_page_size: ", p), err) }
-  if err := oprot.WriteI32(ctx, int32(p.UncompressedPageSize)); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T.uncompressed_page_size (2) field write error: ", p), err) }
-  if err := oprot.WriteFieldEnd(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:uncompressed_page_size: ", p), err) }
-  return err
-}
-
-func (p *PageHeader) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if err := oprot.WriteFieldBegin(ctx, "compressed_page_size", thrift.I32, 3); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:compressed_page_size: ", p), err) }
-  if err := oprot.WriteI32(ctx, int32(p.CompressedPageSize)); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T.compressed_page_size (3) field write error: ", p), err) }
-  if err := oprot.WriteFieldEnd(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field end error 3:compressed_page_size: ", p), err) }
-  return err
-}
-
-func (p *PageHeader) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if p.IsSetCrc() {
-    if err := oprot.WriteFieldBegin(ctx, "crc", thrift.I32, 4); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:crc: ", p), err) }
-    if err := oprot.WriteI32(ctx, int32(*p.Crc)); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T.crc (4) field write error: ", p), err) }
-    if err := oprot.WriteFieldEnd(ctx); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field end error 4:crc: ", p), err) }
-  }
-  return err
-}
-
-func (p *PageHeader) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if p.IsSetDataPageHeader() {
-    if err := oprot.WriteFieldBegin(ctx, "data_page_header", thrift.STRUCT, 5); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:data_page_header: ", p), err) }
-    if err := p.DataPageHeader.Write(ctx, oprot); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.DataPageHeader), err)
-    }
-    if err := oprot.WriteFieldEnd(ctx); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field end error 5:data_page_header: ", p), err) }
-  }
-  return err
-}
-
-func (p *PageHeader) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if p.IsSetIndexPageHeader() {
-    if err := oprot.WriteFieldBegin(ctx, "index_page_header", thrift.STRUCT, 6); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:index_page_header: ", p), err) }
-    if err := p.IndexPageHeader.Write(ctx, oprot); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.IndexPageHeader), err)
-    }
-    if err := oprot.WriteFieldEnd(ctx); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field end error 6:index_page_header: ", p), err) }
-  }
-  return err
-}
-
-func (p *PageHeader) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if p.IsSetDictionaryPageHeader() {
-    if err := oprot.WriteFieldBegin(ctx, "dictionary_page_header", thrift.STRUCT, 7); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:dictionary_page_header: ", p), err) }
-    if err := p.DictionaryPageHeader.Write(ctx, oprot); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.DictionaryPageHeader), err)
-    }
-    if err := oprot.WriteFieldEnd(ctx); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field end error 7:dictionary_page_header: ", p), err) }
-  }
-  return err
-}
-
-func (p *PageHeader) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if p.IsSetDataPageHeaderV2() {
-    if err := oprot.WriteFieldBegin(ctx, "data_page_header_v2", thrift.STRUCT, 8); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:data_page_header_v2: ", p), err) }
-    if err := p.DataPageHeaderV2.Write(ctx, oprot); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.DataPageHeaderV2), err)
-    }
-    if err := oprot.WriteFieldEnd(ctx); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field end error 8:data_page_header_v2: ", p), err) }
-  }
-  return err
-}
-
-func (p *PageHeader) Equals(other *PageHeader) bool {
-  if p == other {
-    return true
-  } else if p == nil || other == nil {
-    return false
-  }
-  if p.Type != other.Type { return false }
-  if p.UncompressedPageSize != other.UncompressedPageSize { return false }
-  if p.CompressedPageSize != other.CompressedPageSize { return false }
-  if p.Crc != other.Crc {
-    if p.Crc == nil || other.Crc == nil {
-      return false
-    }
-    if (*p.Crc) != (*other.Crc) { return false }
-  }
-  if !p.DataPageHeader.Equals(other.DataPageHeader) { return false }
-  if !p.IndexPageHeader.Equals(other.IndexPageHeader) { return false }
-  if !p.DictionaryPageHeader.Equals(other.DictionaryPageHeader) { return false }
-  if !p.DataPageHeaderV2.Equals(other.DataPageHeaderV2) { return false }
-  return true
-}
-
-func (p *PageHeader) String() string {
-  if p == nil {
-    return "<nil>"
-  }
-  return fmt.Sprintf("PageHeader(%+v)", *p)
-}
-
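The optional Crc field follows the rules quoted in the comment above. For the simplest case, a v1 data page, the checksum covers the (possibly compressed) page body that follows the header, so verification reduces to hash/crc32; a sketch (v2 and encrypted pages need the more elaborate concatenation rules above):

    import "hash/crc32"

    // pageChecksumOK verifies a v1 data page: the CRC is the standard CRC32
    // over the page body exactly as stored (compressed if a codec is set).
    func pageChecksumOK(hdr *parquet.PageHeader, pageBody []byte) bool {
        if !hdr.IsSetCrc() {
            return true // checksum not written; nothing to verify
        }
        return crc32.ChecksumIEEE(pageBody) == uint32(hdr.GetCrc())
    }
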
err := oprot.WriteStructBegin(ctx, "KeyValue"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - if err := p.writeField2(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *KeyValue) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "key", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) } - if err := oprot.WriteString(ctx, string(p.Key)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) } - return err -} - -func (p *KeyValue) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetValue() { - if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRING, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) } - if err := oprot.WriteString(ctx, string(*p.Value)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) } - } - return err -} - -func (p *KeyValue) Equals(other *KeyValue) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.Key != other.Key { return false } - if p.Value != other.Value { - if p.Value == nil || other.Value == nil { - return false - } - if (*p.Value) != (*other.Value) { return false } - } - return true -} - -func (p *KeyValue) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("KeyValue(%+v)", *p) -} - -// Wrapper struct to specify sort order -// -// Attributes: -// - ColumnIdx: The column index (in this row group) * -// - Descending: If true, indicates this column is sorted in descending order. * -// - NullsFirst: If true, nulls will come before non-null values, otherwise, -// nulls go at the end. 
-// Wrapper struct to specify sort order
-//
-// Attributes:
-//  - ColumnIdx: The column index (in this row group) *
-//  - Descending: If true, indicates this column is sorted in descending order. *
-//  - NullsFirst: If true, nulls will come before non-null values, otherwise,
-//    nulls go at the end.
-type SortingColumn struct {
-  ColumnIdx int32 `thrift:"column_idx,1,required" db:"column_idx" json:"column_idx"`
-  Descending bool `thrift:"descending,2,required" db:"descending" json:"descending"`
-  NullsFirst bool `thrift:"nulls_first,3,required" db:"nulls_first" json:"nulls_first"`
-}
-
-func NewSortingColumn() *SortingColumn {
-  return &SortingColumn{}
-}
-
-
-func (p *SortingColumn) GetColumnIdx() int32 {
-  return p.ColumnIdx
-}
-
-func (p *SortingColumn) GetDescending() bool {
-  return p.Descending
-}
-
-func (p *SortingColumn) GetNullsFirst() bool {
-  return p.NullsFirst
-}
-func (p *SortingColumn) Read(ctx context.Context, iprot thrift.TProtocol) error {
-  if _, err := iprot.ReadStructBegin(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-  }
-
-  var issetColumnIdx bool = false;
-  var issetDescending bool = false;
-  var issetNullsFirst bool = false;
-
-  for {
-    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
-    if err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-    }
-    if fieldTypeId == thrift.STOP { break; }
-    switch fieldId {
-    case 1:
-      if fieldTypeId == thrift.I32 {
-        if err := p.ReadField1(ctx, iprot); err != nil {
-          return err
-        }
-        issetColumnIdx = true
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    case 2:
-      if fieldTypeId == thrift.BOOL {
-        if err := p.ReadField2(ctx, iprot); err != nil {
-          return err
-        }
-        issetDescending = true
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    case 3:
-      if fieldTypeId == thrift.BOOL {
-        if err := p.ReadField3(ctx, iprot); err != nil {
-          return err
-        }
-        issetNullsFirst = true
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    default:
-      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-        return err
-      }
-    }
-    if err := iprot.ReadFieldEnd(ctx); err != nil {
-      return err
-    }
-  }
-  if err := iprot.ReadStructEnd(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-  }
-  if !issetColumnIdx{
-    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ColumnIdx is not set"));
-  }
-  if !issetDescending{
-    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Descending is not set"));
-  }
-  if !issetNullsFirst{
-    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field NullsFirst is not set"));
-  }
-  return nil
-}
-
-func (p *SortingColumn) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
-  if v, err := iprot.ReadI32(ctx); err != nil {
-    return thrift.PrependError("error reading field 1: ", err)
-} else {
-    p.ColumnIdx = v
-}
-  return nil
-}
-
-func (p *SortingColumn) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
-  if v, err := iprot.ReadBool(ctx); err != nil {
-    return thrift.PrependError("error reading field 2: ", err)
-} else {
-    p.Descending = v
-}
-  return nil
-}
-
-func (p *SortingColumn) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
-  if v, err := iprot.ReadBool(ctx); err != nil {
-    return thrift.PrependError("error reading field 3: ", err)
-} else {
-    p.NullsFirst = v
-}
-  return nil
-}
-
-func (p *SortingColumn) Write(ctx context.Context, oprot thrift.TProtocol) error {
-  if err := oprot.WriteStructBegin(ctx, "SortingColumn"); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
-  if p != nil {
-    if err := p.writeField1(ctx, oprot); err != nil { return err }
-    if err := p.writeField2(ctx, oprot); err != nil { return err }
-    if err := p.writeField3(ctx, oprot); err != nil { return err }
-  }
-  if err := oprot.WriteFieldStop(ctx); err != nil {
-    return thrift.PrependError("write field stop error: ", err) }
-  if err := oprot.WriteStructEnd(ctx); err != nil {
-    return thrift.PrependError("write struct stop error: ", err) }
-  return nil
-}
-
-func (p *SortingColumn) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if err := oprot.WriteFieldBegin(ctx, "column_idx", thrift.I32, 1); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:column_idx: ", p), err) }
-  if err := oprot.WriteI32(ctx, int32(p.ColumnIdx)); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T.column_idx (1) field write error: ", p), err) }
-  if err := oprot.WriteFieldEnd(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:column_idx: ", p), err) }
-  return err
-}
-
-func (p *SortingColumn) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if err := oprot.WriteFieldBegin(ctx, "descending", thrift.BOOL, 2); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:descending: ", p), err) }
-  if err := oprot.WriteBool(ctx, bool(p.Descending)); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T.descending (2) field write error: ", p), err) }
-  if err := oprot.WriteFieldEnd(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:descending: ", p), err) }
-  return err
-}
-
-func (p *SortingColumn) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if err := oprot.WriteFieldBegin(ctx, "nulls_first", thrift.BOOL, 3); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:nulls_first: ", p), err) }
-  if err := oprot.WriteBool(ctx, bool(p.NullsFirst)); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T.nulls_first (3) field write error: ", p), err) }
-  if err := oprot.WriteFieldEnd(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field end error 3:nulls_first: ", p), err) }
-  return err
-}
-
-func (p *SortingColumn) Equals(other *SortingColumn) bool {
-  if p == other {
-    return true
-  } else if p == nil || other == nil {
-    return false
-  }
-  if p.ColumnIdx != other.ColumnIdx { return false }
-  if p.Descending != other.Descending { return false }
-  if p.NullsFirst != other.NullsFirst { return false }
-  return true
-}
-
-func (p *SortingColumn) String() string {
-  if p == nil {
-    return "<nil>"
-  }
-  return fmt.Sprintf("SortingColumn(%+v)", *p)
-}
-
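SortingColumn only records the ordering that was applied; honoring it is left to readers. A sketch of a comparator over nullable int64 sort keys (nil standing in for null) that respects both flags:

    // less orders two nullable sort keys according to a SortingColumn.
    func less(a, b *int64, sc *parquet.SortingColumn) bool {
        switch {
        case a == nil && b == nil:
            return false // equal
        case a == nil:
            return sc.NullsFirst
        case b == nil:
            return !sc.NullsFirst
        case sc.Descending:
            return *a > *b
        default:
            return *a < *b
        }
    }
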
* -// - Encoding: encoding of the page * -// - Count: number of pages of this type with this encoding * -type PageEncodingStats struct { - PageType PageType `thrift:"page_type,1,required" db:"page_type" json:"page_type"` - Encoding Encoding `thrift:"encoding,2,required" db:"encoding" json:"encoding"` - Count int32 `thrift:"count,3,required" db:"count" json:"count"` -} - -func NewPageEncodingStats() *PageEncodingStats { - return &PageEncodingStats{} -} - - -func (p *PageEncodingStats) GetPageType() PageType { - return p.PageType -} - -func (p *PageEncodingStats) GetEncoding() Encoding { - return p.Encoding -} - -func (p *PageEncodingStats) GetCount() int32 { - return p.Count -} -func (p *PageEncodingStats) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetPageType bool = false; - var issetEncoding bool = false; - var issetCount bool = false; - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.I32 { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetPageType = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.I32 { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - issetEncoding = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.I32 { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - issetCount = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetPageType{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field PageType is not set")); - } - if !issetEncoding{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Encoding is not set")); - } - if !issetCount{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Count is not set")); - } - return nil -} - -func (p *PageEncodingStats) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - temp := PageType(v) - p.PageType = temp -} - return nil -} - -func (p *PageEncodingStats) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 2: ", err) -} else { - temp := Encoding(v) - p.Encoding = temp -} - return nil -} - -func (p *PageEncodingStats) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 3: ", err) -} else { - p.Count = v -} - return nil -} - -func (p *PageEncodingStats) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := 
oprot.WriteStructBegin(ctx, "PageEncodingStats"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - if err := p.writeField2(ctx, oprot); err != nil { return err } - if err := p.writeField3(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *PageEncodingStats) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "page_type", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:page_type: ", p), err) } - if err := oprot.WriteI32(ctx, int32(p.PageType)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.page_type (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:page_type: ", p), err) } - return err -} - -func (p *PageEncodingStats) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "encoding", thrift.I32, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:encoding: ", p), err) } - if err := oprot.WriteI32(ctx, int32(p.Encoding)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.encoding (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:encoding: ", p), err) } - return err -} - -func (p *PageEncodingStats) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "count", thrift.I32, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:count: ", p), err) } - if err := oprot.WriteI32(ctx, int32(p.Count)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.count (3) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:count: ", p), err) } - return err -} - -func (p *PageEncodingStats) Equals(other *PageEncodingStats) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.PageType != other.PageType { return false } - if p.Encoding != other.Encoding { return false } - if p.Count != other.Count { return false } - return true -} - -func (p *PageEncodingStats) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("PageEncodingStats(%+v)", *p) -} - -// Description for column metadata -// -// Attributes: -// - Type: Type of this column * -// - Encodings: Set of all encodings used for this column. The purpose is to validate -// whether we can decode those pages. 
-
-// Description for column metadata
-// 
-// Attributes:
-//  - Type: Type of this column *
-//  - Encodings: Set of all encodings used for this column. The purpose is to validate
-// whether we can decode those pages. *
-//  - PathInSchema: Path in schema *
-//  - Codec: Compression codec *
-//  - NumValues: Number of values in this column *
-//  - TotalUncompressedSize: total byte size of all uncompressed pages in this column chunk (including the headers) *
-//  - TotalCompressedSize: total byte size of all compressed, and potentially encrypted, pages
-// in this column chunk (including the headers) *
-//  - KeyValueMetadata: Optional key/value metadata *
-//  - DataPageOffset: Byte offset from beginning of file to first data page *
-//  - IndexPageOffset: Byte offset from beginning of file to root index page *
-//  - DictionaryPageOffset: Byte offset from the beginning of file to first (only) dictionary page *
-//  - Statistics: optional statistics for this column chunk
-//  - EncodingStats: Set of all encodings used for pages in this column chunk.
-// This information can be used to determine if all data pages are
-// dictionary encoded for example *
-//  - BloomFilterOffset: Byte offset from beginning of file to Bloom filter data. *
-type ColumnMetaData struct {
-  Type Type `thrift:"type,1,required" db:"type" json:"type"`
-  Encodings []Encoding `thrift:"encodings,2,required" db:"encodings" json:"encodings"`
-  PathInSchema []string `thrift:"path_in_schema,3,required" db:"path_in_schema" json:"path_in_schema"`
-  Codec CompressionCodec `thrift:"codec,4,required" db:"codec" json:"codec"`
-  NumValues int64 `thrift:"num_values,5,required" db:"num_values" json:"num_values"`
-  TotalUncompressedSize int64 `thrift:"total_uncompressed_size,6,required" db:"total_uncompressed_size" json:"total_uncompressed_size"`
-  TotalCompressedSize int64 `thrift:"total_compressed_size,7,required" db:"total_compressed_size" json:"total_compressed_size"`
-  KeyValueMetadata []*KeyValue `thrift:"key_value_metadata,8" db:"key_value_metadata" json:"key_value_metadata,omitempty"`
-  DataPageOffset int64 `thrift:"data_page_offset,9,required" db:"data_page_offset" json:"data_page_offset"`
-  IndexPageOffset *int64 `thrift:"index_page_offset,10" db:"index_page_offset" json:"index_page_offset,omitempty"`
-  DictionaryPageOffset *int64 `thrift:"dictionary_page_offset,11" db:"dictionary_page_offset" json:"dictionary_page_offset,omitempty"`
-  Statistics *Statistics `thrift:"statistics,12" db:"statistics" json:"statistics,omitempty"`
-  EncodingStats []*PageEncodingStats `thrift:"encoding_stats,13" db:"encoding_stats" json:"encoding_stats,omitempty"`
-  BloomFilterOffset *int64 `thrift:"bloom_filter_offset,14" db:"bloom_filter_offset" json:"bloom_filter_offset,omitempty"`
-}
-
-func NewColumnMetaData() *ColumnMetaData {
-  return &ColumnMetaData{}
-}
-
-
-func (p *ColumnMetaData) GetType() Type {
-  return p.Type
-}
-
-func (p *ColumnMetaData) GetEncodings() []Encoding {
-  return p.Encodings
-}
-
-func (p *ColumnMetaData) GetPathInSchema() []string {
-  return p.PathInSchema
-}
-
-func (p *ColumnMetaData) GetCodec() CompressionCodec {
-  return p.Codec
-}
-
-func (p *ColumnMetaData) GetNumValues() int64 {
-  return p.NumValues
-}
-
-func (p *ColumnMetaData) GetTotalUncompressedSize() int64 {
-  return p.TotalUncompressedSize
-}
-
-func (p *ColumnMetaData) GetTotalCompressedSize() int64 {
-  return p.TotalCompressedSize
-}
-var ColumnMetaData_KeyValueMetadata_DEFAULT []*KeyValue
-
-func (p *ColumnMetaData) GetKeyValueMetadata() []*KeyValue {
-  return p.KeyValueMetadata
-}
-
-func (p *ColumnMetaData) GetDataPageOffset() int64 {
-  return p.DataPageOffset
-}
-var ColumnMetaData_IndexPageOffset_DEFAULT int64
-func (p *ColumnMetaData) GetIndexPageOffset() int64 {
-  if !p.IsSetIndexPageOffset() {
-    return ColumnMetaData_IndexPageOffset_DEFAULT
-  }
-return *p.IndexPageOffset
-}
-var ColumnMetaData_DictionaryPageOffset_DEFAULT int64
-func (p *ColumnMetaData) GetDictionaryPageOffset() int64 {
-  if !p.IsSetDictionaryPageOffset() {
-    return ColumnMetaData_DictionaryPageOffset_DEFAULT
-  }
-return *p.DictionaryPageOffset
-}
-var ColumnMetaData_Statistics_DEFAULT *Statistics
-func (p *ColumnMetaData) GetStatistics() *Statistics {
-  if !p.IsSetStatistics() {
-    return ColumnMetaData_Statistics_DEFAULT
-  }
-return p.Statistics
-}
-var ColumnMetaData_EncodingStats_DEFAULT []*PageEncodingStats
-
-func (p *ColumnMetaData) GetEncodingStats() []*PageEncodingStats {
-  return p.EncodingStats
-}
-var ColumnMetaData_BloomFilterOffset_DEFAULT int64
-func (p *ColumnMetaData) GetBloomFilterOffset() int64 {
-  if !p.IsSetBloomFilterOffset() {
-    return ColumnMetaData_BloomFilterOffset_DEFAULT
-  }
-return *p.BloomFilterOffset
-}
-func (p *ColumnMetaData) IsSetKeyValueMetadata() bool {
-  return p.KeyValueMetadata != nil
-}
-
-func (p *ColumnMetaData) IsSetIndexPageOffset() bool {
-  return p.IndexPageOffset != nil
-}
-
-func (p *ColumnMetaData) IsSetDictionaryPageOffset() bool {
-  return p.DictionaryPageOffset != nil
-}
-
-func (p *ColumnMetaData) IsSetStatistics() bool {
-  return p.Statistics != nil
-}
-
-func (p *ColumnMetaData) IsSetEncodingStats() bool {
-  return p.EncodingStats != nil
-}
-
-func (p *ColumnMetaData) IsSetBloomFilterOffset() bool {
-  return p.BloomFilterOffset != nil
-}
-
-func (p *ColumnMetaData) Read(ctx context.Context, iprot thrift.TProtocol) error {
-  if _, err := iprot.ReadStructBegin(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-  }
-
-  var issetType bool = false;
-  var issetEncodings bool = false;
-  var issetPathInSchema bool = false;
-  var issetCodec bool = false;
-  var issetNumValues bool = false;
-  var issetTotalUncompressedSize bool = false;
-  var issetTotalCompressedSize bool = false;
-  var issetDataPageOffset bool = false;
-
-  for {
-    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
-    if err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-    }
-    if fieldTypeId == thrift.STOP { break; }
-    switch fieldId {
-    case 1:
-      if fieldTypeId == thrift.I32 {
-        if err := p.ReadField1(ctx, iprot); err != nil {
-          return err
-        }
-        issetType = true
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    case 2:
-      if fieldTypeId == thrift.LIST {
-        if err := p.ReadField2(ctx, iprot); err != nil {
-          return err
-        }
-        issetEncodings = true
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    case 3:
-      if fieldTypeId == thrift.LIST {
-        if err := p.ReadField3(ctx, iprot); err != nil {
-          return err
-        }
-        issetPathInSchema = true
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    case 4:
-      if fieldTypeId == thrift.I32 {
-        if err := p.ReadField4(ctx, iprot); err != nil {
-          return err
-        }
-        issetCodec = true
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    case 5:
-      if fieldTypeId == thrift.I64 {
-        if err := p.ReadField5(ctx, iprot); err != nil {
-          return err
-        }
-        issetNumValues = true
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    case 6:
-      if fieldTypeId == thrift.I64 {
-        if err := p.ReadField6(ctx, iprot); err != nil {
-          return err
-        }
-        issetTotalUncompressedSize = true
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    case 7:
-      if fieldTypeId == thrift.I64 {
-        if err := p.ReadField7(ctx, iprot); err != nil {
-          return err
-        }
-        issetTotalCompressedSize = true
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    case 8:
-      if fieldTypeId == thrift.LIST {
-        if err := p.ReadField8(ctx, iprot); err != nil {
-          return err
-        }
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    case 9:
-      if fieldTypeId == thrift.I64 {
-        if err := p.ReadField9(ctx, iprot); err != nil {
-          return err
-        }
-        issetDataPageOffset = true
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    case 10:
-      if fieldTypeId == thrift.I64 {
-        if err := p.ReadField10(ctx, iprot); err != nil {
-          return err
-        }
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    case 11:
-      if fieldTypeId == thrift.I64 {
-        if err := p.ReadField11(ctx, iprot); err != nil {
-          return err
-        }
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    case 12:
-      if fieldTypeId == thrift.STRUCT {
-        if err := p.ReadField12(ctx, iprot); err != nil {
-          return err
-        }
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    case 13:
-      if fieldTypeId == thrift.LIST {
-        if err := p.ReadField13(ctx, iprot); err != nil {
-          return err
-        }
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    case 14:
-      if fieldTypeId == thrift.I64 {
-        if err := p.ReadField14(ctx, iprot); err != nil {
-          return err
-        }
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    default:
-      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-        return err
-      }
-    }
-    if err := iprot.ReadFieldEnd(ctx); err != nil {
-      return err
-    }
-  }
-  if err := iprot.ReadStructEnd(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-  }
-  if !issetType{
-    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Type is not set"));
-  }
-  if !issetEncodings{
-    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Encodings is not set"));
-  }
-  if !issetPathInSchema{
-    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field PathInSchema is not set"));
-  }
-  if !issetCodec{
-    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Codec is not set"));
-  }
-  if !issetNumValues{
-    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field NumValues is not set"));
-  }
-  if !issetTotalUncompressedSize{
-    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TotalUncompressedSize is not set"));
-  }
-  if !issetTotalCompressedSize{
-    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TotalCompressedSize is not set"));
-  }
-  if !issetDataPageOffset{
-    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field DataPageOffset is not set"));
-  }
-  return nil
-}
-
-func (p *ColumnMetaData) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
-  if v, err := iprot.ReadI32(ctx); err != nil {
-  return thrift.PrependError("error reading field 1: ", err)
-} else {
-  temp := Type(v)
-  p.Type = temp
-}
-  return nil
-}
-
-func (p *ColumnMetaData) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
-  _, size, err := iprot.ReadListBegin(ctx)
-  if err != nil {
-    return thrift.PrependError("error reading list begin: ", err)
-  }
-  tSlice := make([]Encoding, 0, size)
-  p.Encodings = tSlice
-  for i := 0; i < size; i ++ {
-var _elem0 Encoding
-    if v, err := iprot.ReadI32(ctx); err != nil {
-    return thrift.PrependError("error reading field 0: ", err)
-} else {
-    temp := Encoding(v)
-    _elem0 = temp
-}
-    p.Encodings = append(p.Encodings, _elem0)
-  }
-  if err := iprot.ReadListEnd(ctx); err != nil {
-    return thrift.PrependError("error reading list end: ", err)
-  }
-  return nil
-}
-
-func (p *ColumnMetaData) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
-  _, size, err := iprot.ReadListBegin(ctx)
-  if err != nil {
-    return thrift.PrependError("error reading list begin: ", err)
-  }
-  tSlice := make([]string, 0, size)
-  p.PathInSchema = tSlice
-  for i := 0; i < size; i ++ {
-var _elem1 string
-    if v, err := iprot.ReadString(ctx); err != nil {
-    return thrift.PrependError("error reading field 0: ", err)
-} else {
-    _elem1 = v
-}
-    p.PathInSchema = append(p.PathInSchema, _elem1)
-  }
-  if err := iprot.ReadListEnd(ctx); err != nil {
-    return thrift.PrependError("error reading list end: ", err)
-  }
-  return nil
-}
-
-func (p *ColumnMetaData) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
-  if v, err := iprot.ReadI32(ctx); err != nil {
-  return thrift.PrependError("error reading field 4: ", err)
-} else {
-  temp := CompressionCodec(v)
-  p.Codec = temp
-}
-  return nil
-}
-
-func (p *ColumnMetaData) ReadField5(ctx context.Context, iprot thrift.TProtocol) error {
-  if v, err := iprot.ReadI64(ctx); err != nil {
-  return thrift.PrependError("error reading field 5: ", err)
-} else {
-  p.NumValues = v
-}
-  return nil
-}
-
-func (p *ColumnMetaData) ReadField6(ctx context.Context, iprot thrift.TProtocol) error {
-  if v, err := iprot.ReadI64(ctx); err != nil {
-  return thrift.PrependError("error reading field 6: ", err)
-} else {
-  p.TotalUncompressedSize = v
-}
-  return nil
-}
-
-func (p *ColumnMetaData) ReadField7(ctx context.Context, iprot thrift.TProtocol) error {
-  if v, err := iprot.ReadI64(ctx); err != nil {
-  return thrift.PrependError("error reading field 7: ", err)
-} else {
-  p.TotalCompressedSize = v
-}
-  return nil
-}
-
-func (p *ColumnMetaData) ReadField8(ctx context.Context, iprot thrift.TProtocol) error {
-  _, size, err := iprot.ReadListBegin(ctx)
-  if err != nil {
-    return thrift.PrependError("error reading list begin: ", err)
-  }
-  tSlice := make([]*KeyValue, 0, size)
-  p.KeyValueMetadata = tSlice
-  for i := 0; i < size; i ++ {
-    _elem2 := &KeyValue{}
-    if err := _elem2.Read(ctx, iprot); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err)
-    }
-    p.KeyValueMetadata = append(p.KeyValueMetadata, _elem2)
-  }
-  if err := iprot.ReadListEnd(ctx); err != nil {
-    return thrift.PrependError("error reading list end: ", err)
-  }
-  return nil
-}
-
-func (p *ColumnMetaData) ReadField9(ctx context.Context, iprot thrift.TProtocol) error {
-  if v, err := iprot.ReadI64(ctx); err != nil {
-  return thrift.PrependError("error reading field 9: ", err)
-} else {
-  p.DataPageOffset = v
-}
-  return nil
-}
-
-func (p *ColumnMetaData) ReadField10(ctx context.Context, iprot thrift.TProtocol) error {
-  if v, err := iprot.ReadI64(ctx); err != nil {
-  return thrift.PrependError("error reading field 10: ", err)
-} else {
-  p.IndexPageOffset = &v
-}
-  return nil
-}
-
-func (p *ColumnMetaData) ReadField11(ctx context.Context, iprot thrift.TProtocol) error {
-  if v, err := iprot.ReadI64(ctx); err != nil {
-  return thrift.PrependError("error reading field 11: ", err)
-} else {
-  p.DictionaryPageOffset = &v
-}
-  return nil
-}
-
-func (p *ColumnMetaData) ReadField12(ctx context.Context, iprot thrift.TProtocol) error {
-  p.Statistics = &Statistics{}
-  if err := p.Statistics.Read(ctx, iprot); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Statistics), err)
-  }
-  return nil
-}
-
-func (p *ColumnMetaData) ReadField13(ctx context.Context, iprot thrift.TProtocol) error {
-  _, size, err := iprot.ReadListBegin(ctx)
-  if err != nil {
-    return thrift.PrependError("error reading list begin: ", err)
-  }
-  tSlice := make([]*PageEncodingStats, 0, size)
-  p.EncodingStats = tSlice
-  for i := 0; i < size; i ++ {
-    _elem3 := &PageEncodingStats{}
-    if err := _elem3.Read(ctx, iprot); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem3), err)
-    }
-    p.EncodingStats = append(p.EncodingStats, _elem3)
-  }
-  if err := iprot.ReadListEnd(ctx); err != nil {
-    return thrift.PrependError("error reading list end: ", err)
-  }
-  return nil
-}
-
-func (p *ColumnMetaData) ReadField14(ctx context.Context, iprot thrift.TProtocol) error {
-  if v, err := iprot.ReadI64(ctx); err != nil {
-  return thrift.PrependError("error reading field 14: ", err)
-} else {
-  p.BloomFilterOffset = &v
-}
-  return nil
-}
-
-func (p *ColumnMetaData) Write(ctx context.Context, oprot thrift.TProtocol) error {
-  if err := oprot.WriteStructBegin(ctx, "ColumnMetaData"); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
-  if p != nil {
-    if err := p.writeField1(ctx, oprot); err != nil { return err }
-    if err := p.writeField2(ctx, oprot); err != nil { return err }
-    if err := p.writeField3(ctx, oprot); err != nil { return err }
-    if err := p.writeField4(ctx, oprot); err != nil { return err }
-    if err := p.writeField5(ctx, oprot); err != nil { return err }
-    if err := p.writeField6(ctx, oprot); err != nil { return err }
-    if err := p.writeField7(ctx, oprot); err != nil { return err }
-    if err := p.writeField8(ctx, oprot); err != nil { return err }
-    if err := p.writeField9(ctx, oprot); err != nil { return err }
-    if err := p.writeField10(ctx, oprot); err != nil { return err }
-    if err := p.writeField11(ctx, oprot); err != nil { return err }
-    if err := p.writeField12(ctx, oprot); err != nil { return err }
-    if err := p.writeField13(ctx, oprot); err != nil { return err }
-    if err := p.writeField14(ctx, oprot); err != nil { return err }
-  }
-  if err := oprot.WriteFieldStop(ctx); err != nil {
-    return thrift.PrependError("write field stop error: ", err) }
-  if err := oprot.WriteStructEnd(ctx); err != nil {
-    return thrift.PrependError("write struct stop error: ", err) }
-  return nil
-}
-
-func (p *ColumnMetaData) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if err := oprot.WriteFieldBegin(ctx, "type", thrift.I32, 1); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:type: ", p), err) }
-  if err := oprot.WriteI32(ctx, int32(p.Type)); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T.type (1) field write error: ", p), err) }
-  if err := oprot.WriteFieldEnd(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:type: ", p), err) }
-  return err
-}
-
-func (p *ColumnMetaData) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if err := oprot.WriteFieldBegin(ctx, "encodings", thrift.LIST, 2); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:encodings: ", p), err) }
-  if err := oprot.WriteListBegin(ctx, thrift.I32, len(p.Encodings)); err != nil {
-    return thrift.PrependError("error writing list begin: ", err)
-  }
-  for _, v := range p.Encodings {
-    if err := oprot.WriteI32(ctx, int32(v)); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) }
-  }
-  if err := oprot.WriteListEnd(ctx); err != nil {
-    return thrift.PrependError("error writing list end: ", err)
-  }
-  if err := oprot.WriteFieldEnd(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:encodings: ", p), err) }
-  return err
-}
-
-func (p *ColumnMetaData) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if err := oprot.WriteFieldBegin(ctx, "path_in_schema", thrift.LIST, 3); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:path_in_schema: ", p), err) }
-  if err := oprot.WriteListBegin(ctx, thrift.STRING, len(p.PathInSchema)); err != nil {
-    return thrift.PrependError("error writing list begin: ", err)
-  }
-  for _, v := range p.PathInSchema {
-    if err := oprot.WriteString(ctx, string(v)); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) }
-  }
-  if err := oprot.WriteListEnd(ctx); err != nil {
-    return thrift.PrependError("error writing list end: ", err)
-  }
-  if err := oprot.WriteFieldEnd(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field end error 3:path_in_schema: ", p), err) }
-  return err
-}
-
-func (p *ColumnMetaData) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if err := oprot.WriteFieldBegin(ctx, "codec", thrift.I32, 4); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:codec: ", p), err) }
-  if err := oprot.WriteI32(ctx, int32(p.Codec)); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T.codec (4) field write error: ", p), err) }
-  if err := oprot.WriteFieldEnd(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field end error 4:codec: ", p), err) }
-  return err
-}
-
-func (p *ColumnMetaData) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if err := oprot.WriteFieldBegin(ctx, "num_values", thrift.I64, 5); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:num_values: ", p), err) }
-  if err := oprot.WriteI64(ctx, int64(p.NumValues)); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T.num_values (5) field write error: ", p), err) }
-  if err := oprot.WriteFieldEnd(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field end error 5:num_values: ", p), err) }
-  return err
-}
-
-func (p *ColumnMetaData) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if err := oprot.WriteFieldBegin(ctx, "total_uncompressed_size", thrift.I64, 6); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:total_uncompressed_size: ", p), err) }
-  if err := oprot.WriteI64(ctx, int64(p.TotalUncompressedSize)); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T.total_uncompressed_size (6) field write error: ", p), err) }
-  if err := oprot.WriteFieldEnd(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field end error 6:total_uncompressed_size: ", p), err) }
-  return err
-}
-
-func (p *ColumnMetaData) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if err := oprot.WriteFieldBegin(ctx, "total_compressed_size", thrift.I64, 7); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:total_compressed_size: ", p), err) }
-  if err := oprot.WriteI64(ctx, int64(p.TotalCompressedSize)); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T.total_compressed_size (7) field write error: ", p), err) }
-  if err := oprot.WriteFieldEnd(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field end error 7:total_compressed_size: ", p), err) }
-  return err
-}
-
-func (p *ColumnMetaData) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if p.IsSetKeyValueMetadata() {
-    if err := oprot.WriteFieldBegin(ctx, "key_value_metadata", thrift.LIST, 8); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:key_value_metadata: ", p), err) }
-    if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.KeyValueMetadata)); err != nil {
-      return thrift.PrependError("error writing list begin: ", err)
-    }
-    for _, v := range p.KeyValueMetadata {
-      if err := v.Write(ctx, oprot); err != nil {
-        return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
-      }
-    }
-    if err := oprot.WriteListEnd(ctx); err != nil {
-      return thrift.PrependError("error writing list end: ", err)
-    }
-    if err := oprot.WriteFieldEnd(ctx); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field end error 8:key_value_metadata: ", p), err) }
-  }
-  return err
-}
-
-func (p *ColumnMetaData) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if err := oprot.WriteFieldBegin(ctx, "data_page_offset", thrift.I64, 9); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:data_page_offset: ", p), err) }
-  if err := oprot.WriteI64(ctx, int64(p.DataPageOffset)); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T.data_page_offset (9) field write error: ", p), err) }
-  if err := oprot.WriteFieldEnd(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field end error 9:data_page_offset: ", p), err) }
-  return err
-}
-
-func (p *ColumnMetaData) writeField10(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if p.IsSetIndexPageOffset() {
-    if err := oprot.WriteFieldBegin(ctx, "index_page_offset", thrift.I64, 10); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:index_page_offset: ", p), err) }
-    if err := oprot.WriteI64(ctx, int64(*p.IndexPageOffset)); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T.index_page_offset (10) field write error: ", p), err) }
-    if err := oprot.WriteFieldEnd(ctx); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field end error 10:index_page_offset: ", p), err) }
-  }
-  return err
-}
-
-func (p *ColumnMetaData) writeField11(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if p.IsSetDictionaryPageOffset() {
-    if err := oprot.WriteFieldBegin(ctx, "dictionary_page_offset", thrift.I64, 11); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:dictionary_page_offset: ", p), err) }
-    if err := oprot.WriteI64(ctx, int64(*p.DictionaryPageOffset)); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T.dictionary_page_offset (11) field write error: ", p), err) }
-    if err := oprot.WriteFieldEnd(ctx); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field end error 11:dictionary_page_offset: ", p), err) }
-  }
-  return err
-}
-
-func (p *ColumnMetaData) writeField12(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if p.IsSetStatistics() {
-    if err := oprot.WriteFieldBegin(ctx, "statistics", thrift.STRUCT, 12); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field begin error 12:statistics: ", p), err) }
-    if err := p.Statistics.Write(ctx, oprot); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Statistics), err)
-    }
-    if err := oprot.WriteFieldEnd(ctx); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field end error 12:statistics: ", p), err) }
-  }
-  return err
-}
-
-func (p *ColumnMetaData) writeField13(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if p.IsSetEncodingStats() {
-    if err := oprot.WriteFieldBegin(ctx, "encoding_stats", thrift.LIST, 13); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field begin error 13:encoding_stats: ", p), err) }
-    if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.EncodingStats)); err != nil {
-      return thrift.PrependError("error writing list begin: ", err)
-    }
-    for _, v := range p.EncodingStats {
-      if err := v.Write(ctx, oprot); err != nil {
-        return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
-      }
-    }
-    if err := oprot.WriteListEnd(ctx); err != nil {
-      return thrift.PrependError("error writing list end: ", err)
-    }
-    if err := oprot.WriteFieldEnd(ctx); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field end error 13:encoding_stats: ", p), err) }
-  }
-  return err
-}
-
-func (p *ColumnMetaData) writeField14(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if p.IsSetBloomFilterOffset() {
-    if err := oprot.WriteFieldBegin(ctx, "bloom_filter_offset", thrift.I64, 14); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field begin error 14:bloom_filter_offset: ", p), err) }
-    if err := oprot.WriteI64(ctx, int64(*p.BloomFilterOffset)); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T.bloom_filter_offset (14) field write error: ", p), err) }
-    if err := oprot.WriteFieldEnd(ctx); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field end error 14:bloom_filter_offset: ", p), err) }
-  }
-  return err
-}
-
-func (p *ColumnMetaData) Equals(other *ColumnMetaData) bool {
-  if p == other {
-    return true
-  } else if p == nil || other == nil {
-    return false
-  }
-  if p.Type != other.Type { return false }
-  if len(p.Encodings) != len(other.Encodings) { return false }
-  for i, _tgt := range p.Encodings {
-    _src4 := other.Encodings[i]
-    if _tgt != _src4 { return false }
-  }
-  if len(p.PathInSchema) != len(other.PathInSchema) { return false }
-  for i, _tgt := range p.PathInSchema {
-    _src5 := other.PathInSchema[i]
-    if _tgt != _src5 { return false }
-  }
-  if p.Codec != other.Codec { return false }
-  if p.NumValues != other.NumValues { return false }
-  if p.TotalUncompressedSize != other.TotalUncompressedSize { return false }
-  if p.TotalCompressedSize != other.TotalCompressedSize { return false }
-  if len(p.KeyValueMetadata) != len(other.KeyValueMetadata) { return false }
-  for i, _tgt := range p.KeyValueMetadata {
-    _src6 := other.KeyValueMetadata[i]
-    if !_tgt.Equals(_src6) { return false }
-  }
-  if p.DataPageOffset != other.DataPageOffset { return false }
-  if p.IndexPageOffset != other.IndexPageOffset {
-    if p.IndexPageOffset == nil || other.IndexPageOffset == nil {
-      return false
-    }
-    if (*p.IndexPageOffset) != (*other.IndexPageOffset) { return false }
-  }
-  if p.DictionaryPageOffset != other.DictionaryPageOffset {
-    if p.DictionaryPageOffset == nil || other.DictionaryPageOffset == nil {
-      return false
-    }
-    if (*p.DictionaryPageOffset) != (*other.DictionaryPageOffset) { return false }
-  }
-  if !p.Statistics.Equals(other.Statistics) { return false }
-  if len(p.EncodingStats) != len(other.EncodingStats) { return false }
-  for i, _tgt := range p.EncodingStats {
-    _src7 := other.EncodingStats[i]
-    if !_tgt.Equals(_src7) { return false }
-  }
-  if p.BloomFilterOffset != other.BloomFilterOffset {
-    if p.BloomFilterOffset == nil || other.BloomFilterOffset == nil {
-      return false
-    }
-    if (*p.BloomFilterOffset) != (*other.BloomFilterOffset) { return false }
-  }
-  return true
-}
-
-func (p *ColumnMetaData) String() string {
-  if p == nil {
-    return "<nil>"
-  }
-  return fmt.Sprintf("ColumnMetaData(%+v)", *p)
-}
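The EncodingStats doc comment above mentions the intended use: deciding whether every data page in a chunk is dictionary encoded. A sketch of that check, under the assumption that the PageType and Encoding enum constants elsewhere in this generated file follow the generator's usual EnumType_VALUE naming (PageType_DATA_PAGE, Encoding_RLE_DICTIONARY, and so on); allDictionaryEncoded itself is a hypothetical helper, not part of the file:

    // allDictionaryEncoded reports whether every data page recorded in
    // EncodingStats used a dictionary encoding. Without stats we cannot tell,
    // so the conservative answer is false.
    func allDictionaryEncoded(md *ColumnMetaData) bool {
        if !md.IsSetEncodingStats() {
            return false
        }
        for _, s := range md.EncodingStats {
            // Dictionary pages and index pages are not data pages; skip them.
            if s.PageType != PageType_DATA_PAGE && s.PageType != PageType_DATA_PAGE_V2 {
                continue
            }
            if s.Encoding != Encoding_PLAIN_DICTIONARY && s.Encoding != Encoding_RLE_DICTIONARY {
                return false
            }
        }
        return true
    }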
-
-type EncryptionWithFooterKey struct {
-}
-
-func NewEncryptionWithFooterKey() *EncryptionWithFooterKey {
-  return &EncryptionWithFooterKey{}
-}
-
-func (p *EncryptionWithFooterKey) Read(ctx context.Context, iprot thrift.TProtocol) error {
-  if _, err := iprot.ReadStructBegin(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-  }
-
-
-  for {
-    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
-    if err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-    }
-    if fieldTypeId == thrift.STOP { break; }
-    if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-      return err
-    }
-    if err := iprot.ReadFieldEnd(ctx); err != nil {
-      return err
-    }
-  }
-  if err := iprot.ReadStructEnd(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-  }
-  return nil
-}
-
-func (p *EncryptionWithFooterKey) Write(ctx context.Context, oprot thrift.TProtocol) error {
-  if err := oprot.WriteStructBegin(ctx, "EncryptionWithFooterKey"); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
-  if p != nil {
-  }
-  if err := oprot.WriteFieldStop(ctx); err != nil {
-    return thrift.PrependError("write field stop error: ", err) }
-  if err := oprot.WriteStructEnd(ctx); err != nil {
-    return thrift.PrependError("write struct stop error: ", err) }
-  return nil
-}
-
-func (p *EncryptionWithFooterKey) Equals(other *EncryptionWithFooterKey) bool {
-  if p == other {
-    return true
-  } else if p == nil || other == nil {
-    return false
-  }
-  return true
-}
-
-func (p *EncryptionWithFooterKey) String() string {
-  if p == nil {
-    return "<nil>"
-  }
-  return fmt.Sprintf("EncryptionWithFooterKey(%+v)", *p)
-}
-
-// Attributes:
-//  - PathInSchema: Column path in schema *
-//  - KeyMetadata: Retrieval metadata of column encryption key *
-type EncryptionWithColumnKey struct {
-  PathInSchema []string `thrift:"path_in_schema,1,required" db:"path_in_schema" json:"path_in_schema"`
-  KeyMetadata []byte `thrift:"key_metadata,2" db:"key_metadata" json:"key_metadata,omitempty"`
-}
-
-func NewEncryptionWithColumnKey() *EncryptionWithColumnKey {
-  return &EncryptionWithColumnKey{}
-}
-
-
-func (p *EncryptionWithColumnKey) GetPathInSchema() []string {
-  return p.PathInSchema
-}
-var EncryptionWithColumnKey_KeyMetadata_DEFAULT []byte
-
-func (p *EncryptionWithColumnKey) GetKeyMetadata() []byte {
-  return p.KeyMetadata
-}
-func (p *EncryptionWithColumnKey) IsSetKeyMetadata() bool {
-  return p.KeyMetadata != nil
-}
-
-func (p *EncryptionWithColumnKey) Read(ctx context.Context, iprot thrift.TProtocol) error {
-  if _, err := iprot.ReadStructBegin(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-  }
-
-  var issetPathInSchema bool = false;
-
-  for {
-    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
-    if err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-    }
-    if fieldTypeId == thrift.STOP { break; }
-    switch fieldId {
-    case 1:
-      if fieldTypeId == thrift.LIST {
-        if err := p.ReadField1(ctx, iprot); err != nil {
-          return err
-        }
-        issetPathInSchema = true
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    case 2:
-      if fieldTypeId == thrift.STRING {
-        if err := p.ReadField2(ctx, iprot); err != nil {
-          return err
-        }
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    default:
-      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-        return err
-      }
-    }
-    if err := iprot.ReadFieldEnd(ctx); err != nil {
-      return err
-    }
-  }
-  if err := iprot.ReadStructEnd(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-  }
-  if !issetPathInSchema{
-    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field PathInSchema is not set"));
-  }
-  return nil
-}
-
-func (p *EncryptionWithColumnKey) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
-  _, size, err := iprot.ReadListBegin(ctx)
-  if err != nil {
-    return thrift.PrependError("error reading list begin: ", err)
-  }
-  tSlice := make([]string, 0, size)
-  p.PathInSchema = tSlice
-  for i := 0; i < size; i ++ {
-var _elem8 string
-    if v, err := iprot.ReadString(ctx); err != nil {
-    return thrift.PrependError("error reading field 0: ", err)
-} else {
-    _elem8 = v
-}
-    p.PathInSchema = append(p.PathInSchema, _elem8)
-  }
-  if err := iprot.ReadListEnd(ctx); err != nil {
-    return thrift.PrependError("error reading list end: ", err)
-  }
-  return nil
-}
-
-func (p *EncryptionWithColumnKey) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
-  if v, err := iprot.ReadBinary(ctx); err != nil {
-  return thrift.PrependError("error reading field 2: ", err)
-} else {
-  p.KeyMetadata = v
-}
-  return nil
-}
-
-func (p *EncryptionWithColumnKey) Write(ctx context.Context, oprot thrift.TProtocol) error {
-  if err := oprot.WriteStructBegin(ctx, "EncryptionWithColumnKey"); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
-  if p != nil {
-    if err := p.writeField1(ctx, oprot); err != nil { return err }
-    if err := p.writeField2(ctx, oprot); err != nil { return err }
-  }
-  if err := oprot.WriteFieldStop(ctx); err != nil {
-    return thrift.PrependError("write field stop error: ", err) }
-  if err := oprot.WriteStructEnd(ctx); err != nil {
-    return thrift.PrependError("write struct stop error: ", err) }
-  return nil
-}
-
-func (p *EncryptionWithColumnKey) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if err := oprot.WriteFieldBegin(ctx, "path_in_schema", thrift.LIST, 1); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:path_in_schema: ", p), err) }
-  if err := oprot.WriteListBegin(ctx, thrift.STRING, len(p.PathInSchema)); err != nil {
-    return thrift.PrependError("error writing list begin: ", err)
-  }
-  for _, v := range p.PathInSchema {
-    if err := oprot.WriteString(ctx, string(v)); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) }
-  }
-  if err := oprot.WriteListEnd(ctx); err != nil {
-    return thrift.PrependError("error writing list end: ", err)
-  }
-  if err := oprot.WriteFieldEnd(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:path_in_schema: ", p), err) }
-  return err
-}
-
-func (p *EncryptionWithColumnKey) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if p.IsSetKeyMetadata() {
-    if err := oprot.WriteFieldBegin(ctx, "key_metadata", thrift.STRING, 2); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:key_metadata: ", p), err) }
-    if err := oprot.WriteBinary(ctx, p.KeyMetadata); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T.key_metadata (2) field write error: ", p), err) }
-    if err := oprot.WriteFieldEnd(ctx); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field end error 2:key_metadata: ", p), err) }
-  }
-  return err
-}
-
-func (p *EncryptionWithColumnKey) Equals(other *EncryptionWithColumnKey) bool {
-  if p == other {
-    return true
-  } else if p == nil || other == nil {
-    return false
-  }
-  if len(p.PathInSchema) != len(other.PathInSchema) { return false }
-  for i, _tgt := range p.PathInSchema {
-    _src9 := other.PathInSchema[i]
-    if _tgt != _src9 { return false }
-  }
-  if bytes.Compare(p.KeyMetadata, other.KeyMetadata) != 0 { return false }
-  return true
-}
-
-func (p *EncryptionWithColumnKey) String() string {
-  if p == nil {
-    return "<nil>"
-  }
-  return fmt.Sprintf("EncryptionWithColumnKey(%+v)", *p)
-}
-
-// Attributes:
-//  - ENCRYPTION_WITH_FOOTER_KEY
-//  - ENCRYPTION_WITH_COLUMN_KEY
-type ColumnCryptoMetaData struct {
-  ENCRYPTION_WITH_FOOTER_KEY *EncryptionWithFooterKey `thrift:"ENCRYPTION_WITH_FOOTER_KEY,1" db:"ENCRYPTION_WITH_FOOTER_KEY" json:"ENCRYPTION_WITH_FOOTER_KEY,omitempty"`
-  ENCRYPTION_WITH_COLUMN_KEY *EncryptionWithColumnKey `thrift:"ENCRYPTION_WITH_COLUMN_KEY,2" db:"ENCRYPTION_WITH_COLUMN_KEY" json:"ENCRYPTION_WITH_COLUMN_KEY,omitempty"`
-}
-
-func NewColumnCryptoMetaData() *ColumnCryptoMetaData {
-  return &ColumnCryptoMetaData{}
-}
-
-var ColumnCryptoMetaData_ENCRYPTION_WITH_FOOTER_KEY_DEFAULT *EncryptionWithFooterKey
-func (p *ColumnCryptoMetaData) GetENCRYPTION_WITH_FOOTER_KEY() *EncryptionWithFooterKey {
-  if !p.IsSetENCRYPTION_WITH_FOOTER_KEY() {
-    return ColumnCryptoMetaData_ENCRYPTION_WITH_FOOTER_KEY_DEFAULT
-  }
-return p.ENCRYPTION_WITH_FOOTER_KEY
-}
-var ColumnCryptoMetaData_ENCRYPTION_WITH_COLUMN_KEY_DEFAULT *EncryptionWithColumnKey
-func (p *ColumnCryptoMetaData) GetENCRYPTION_WITH_COLUMN_KEY() *EncryptionWithColumnKey {
-  if !p.IsSetENCRYPTION_WITH_COLUMN_KEY() {
-    return ColumnCryptoMetaData_ENCRYPTION_WITH_COLUMN_KEY_DEFAULT
-  }
-return p.ENCRYPTION_WITH_COLUMN_KEY
-}
-func (p *ColumnCryptoMetaData) CountSetFieldsColumnCryptoMetaData() int {
-  count := 0
-  if (p.IsSetENCRYPTION_WITH_FOOTER_KEY()) {
-    count++
-  }
-  if (p.IsSetENCRYPTION_WITH_COLUMN_KEY()) {
-    count++
-  }
-  return count
-
-}
-
-func (p *ColumnCryptoMetaData) IsSetENCRYPTION_WITH_FOOTER_KEY() bool {
-  return p.ENCRYPTION_WITH_FOOTER_KEY != nil
-}
-
-func (p *ColumnCryptoMetaData) IsSetENCRYPTION_WITH_COLUMN_KEY() bool {
-  return p.ENCRYPTION_WITH_COLUMN_KEY != nil
-}
-
-func (p *ColumnCryptoMetaData) Read(ctx context.Context, iprot thrift.TProtocol) error {
-  if _, err := iprot.ReadStructBegin(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-  }
-
-
-  for {
-    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
-    if err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-    }
-    if fieldTypeId == thrift.STOP { break; }
-    switch fieldId {
-    case 1:
-      if fieldTypeId == thrift.STRUCT {
-        if err := p.ReadField1(ctx, iprot); err != nil {
-          return err
-        }
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    case 2:
-      if fieldTypeId == thrift.STRUCT {
-        if err := p.ReadField2(ctx, iprot); err != nil {
-          return err
-        }
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    default:
-      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-        return err
-      }
-    }
-    if err := iprot.ReadFieldEnd(ctx); err != nil {
-      return err
-    }
-  }
-  if err := iprot.ReadStructEnd(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-  }
-  return nil
-}
-
-func (p *ColumnCryptoMetaData) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
-  p.ENCRYPTION_WITH_FOOTER_KEY = &EncryptionWithFooterKey{}
-  if err := p.ENCRYPTION_WITH_FOOTER_KEY.Read(ctx, iprot); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ENCRYPTION_WITH_FOOTER_KEY), err)
-  }
-  return nil
-}
-
-func (p *ColumnCryptoMetaData) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
-  p.ENCRYPTION_WITH_COLUMN_KEY = &EncryptionWithColumnKey{}
-  if err := p.ENCRYPTION_WITH_COLUMN_KEY.Read(ctx, iprot); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ENCRYPTION_WITH_COLUMN_KEY), err)
-  }
-  return nil
-}
-
-func (p *ColumnCryptoMetaData) Write(ctx context.Context, oprot thrift.TProtocol) error {
-  if c := p.CountSetFieldsColumnCryptoMetaData(); c != 1 {
-    return fmt.Errorf("%T write union: exactly one field must be set (%d set)", p, c)
-  }
-  if err := oprot.WriteStructBegin(ctx, "ColumnCryptoMetaData"); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
-  if p != nil {
-    if err := p.writeField1(ctx, oprot); err != nil { return err }
-    if err := p.writeField2(ctx, oprot); err != nil { return err }
-  }
-  if err := oprot.WriteFieldStop(ctx); err != nil {
-    return thrift.PrependError("write field stop error: ", err) }
-  if err := oprot.WriteStructEnd(ctx); err != nil {
-    return thrift.PrependError("write struct stop error: ", err) }
-  return nil
-}
-
-func (p *ColumnCryptoMetaData) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if p.IsSetENCRYPTION_WITH_FOOTER_KEY() {
-    if err := oprot.WriteFieldBegin(ctx, "ENCRYPTION_WITH_FOOTER_KEY", thrift.STRUCT, 1); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ENCRYPTION_WITH_FOOTER_KEY: ", p), err) }
-    if err := p.ENCRYPTION_WITH_FOOTER_KEY.Write(ctx, oprot); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ENCRYPTION_WITH_FOOTER_KEY), err)
-    }
-    if err := oprot.WriteFieldEnd(ctx); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ENCRYPTION_WITH_FOOTER_KEY: ", p), err) }
-  }
-  return err
-}
-
-func (p *ColumnCryptoMetaData) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if p.IsSetENCRYPTION_WITH_COLUMN_KEY() {
-    if err := oprot.WriteFieldBegin(ctx, "ENCRYPTION_WITH_COLUMN_KEY", thrift.STRUCT, 2); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:ENCRYPTION_WITH_COLUMN_KEY: ", p), err) }
-    if err := p.ENCRYPTION_WITH_COLUMN_KEY.Write(ctx, oprot); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ENCRYPTION_WITH_COLUMN_KEY), err)
-    }
-    if err := oprot.WriteFieldEnd(ctx); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field end error 2:ENCRYPTION_WITH_COLUMN_KEY: ", p), err) }
-  }
-  return err
-}
-
-func (p *ColumnCryptoMetaData) Equals(other *ColumnCryptoMetaData) bool {
-  if p == other {
-    return true
-  } else if p == nil || other == nil {
-    return false
-  }
-  if !p.ENCRYPTION_WITH_FOOTER_KEY.Equals(other.ENCRYPTION_WITH_FOOTER_KEY) { return false }
-  if !p.ENCRYPTION_WITH_COLUMN_KEY.Equals(other.ENCRYPTION_WITH_COLUMN_KEY) { return false }
-  return true
-}
-
-func (p *ColumnCryptoMetaData) String() string {
-  if p == nil {
-    return "<nil>"
-  }
-  return fmt.Sprintf("ColumnCryptoMetaData(%+v)", *p)
-}
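ColumnCryptoMetaData is a Thrift union, and the generated Write enforces the exactly-one-branch invariant up front via CountSetFieldsColumnCryptoMetaData. A sketch, assuming the same thrift runtime helpers as in the earlier examples:

    ctx := context.Background()
    crypto := NewColumnCryptoMetaData()
    crypto.ENCRYPTION_WITH_FOOTER_KEY = NewEncryptionWithFooterKey()

    // One branch set: the union serializes normally.
    buf := thrift.NewTMemoryBuffer()
    if err := crypto.Write(ctx, thrift.NewTCompactProtocolConf(buf, nil)); err != nil {
        panic(err)
    }

    // Setting the second branch as well makes the count 2, so the next
    // Write fails before any bytes are emitted:
    // "... write union: exactly one field must be set (2 set)"
    crypto.ENCRYPTION_WITH_COLUMN_KEY = NewEncryptionWithColumnKey()
    fmt.Println(crypto.Write(ctx, thrift.NewTCompactProtocolConf(buf, nil)))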
return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ENCRYPTION_WITH_COLUMN_KEY), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:ENCRYPTION_WITH_COLUMN_KEY: ", p), err) } - } - return err -} - -func (p *ColumnCryptoMetaData) Equals(other *ColumnCryptoMetaData) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if !p.ENCRYPTION_WITH_FOOTER_KEY.Equals(other.ENCRYPTION_WITH_FOOTER_KEY) { return false } - if !p.ENCRYPTION_WITH_COLUMN_KEY.Equals(other.ENCRYPTION_WITH_COLUMN_KEY) { return false } - return true -} - -func (p *ColumnCryptoMetaData) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("ColumnCryptoMetaData(%+v)", *p) -} - -// Attributes: -// - FilePath: File where column data is stored. If not set, assumed to be same file as -// metadata. This path is relative to the current file. -// -// - FileOffset: Byte offset in file_path to the ColumnMetaData * -// - MetaData: Column metadata for this chunk. This is the same content as what is at -// file_path/file_offset. Having it here has it replicated in the file -// metadata. -// -// - OffsetIndexOffset: File offset of ColumnChunk's OffsetIndex * -// - OffsetIndexLength: Size of ColumnChunk's OffsetIndex, in bytes * -// - ColumnIndexOffset: File offset of ColumnChunk's ColumnIndex * -// - ColumnIndexLength: Size of ColumnChunk's ColumnIndex, in bytes * -// - CryptoMetadata: Crypto metadata of encrypted columns * -// - EncryptedColumnMetadata: Encrypted column metadata for this chunk * -type ColumnChunk struct { - FilePath *string `thrift:"file_path,1" db:"file_path" json:"file_path,omitempty"` - FileOffset int64 `thrift:"file_offset,2,required" db:"file_offset" json:"file_offset"` - MetaData *ColumnMetaData `thrift:"meta_data,3" db:"meta_data" json:"meta_data,omitempty"` - OffsetIndexOffset *int64 `thrift:"offset_index_offset,4" db:"offset_index_offset" json:"offset_index_offset,omitempty"` - OffsetIndexLength *int32 `thrift:"offset_index_length,5" db:"offset_index_length" json:"offset_index_length,omitempty"` - ColumnIndexOffset *int64 `thrift:"column_index_offset,6" db:"column_index_offset" json:"column_index_offset,omitempty"` - ColumnIndexLength *int32 `thrift:"column_index_length,7" db:"column_index_length" json:"column_index_length,omitempty"` - CryptoMetadata *ColumnCryptoMetaData `thrift:"crypto_metadata,8" db:"crypto_metadata" json:"crypto_metadata,omitempty"` - EncryptedColumnMetadata []byte `thrift:"encrypted_column_metadata,9" db:"encrypted_column_metadata" json:"encrypted_column_metadata,omitempty"` -} - -func NewColumnChunk() *ColumnChunk { - return &ColumnChunk{} -} - -var ColumnChunk_FilePath_DEFAULT string -func (p *ColumnChunk) GetFilePath() string { - if !p.IsSetFilePath() { - return ColumnChunk_FilePath_DEFAULT - } -return *p.FilePath -} - -func (p *ColumnChunk) GetFileOffset() int64 { - return p.FileOffset -} -var ColumnChunk_MetaData_DEFAULT *ColumnMetaData -func (p *ColumnChunk) GetMetaData() *ColumnMetaData { - if !p.IsSetMetaData() { - return ColumnChunk_MetaData_DEFAULT - } -return p.MetaData -} -var ColumnChunk_OffsetIndexOffset_DEFAULT int64 -func (p *ColumnChunk) GetOffsetIndexOffset() int64 { - if !p.IsSetOffsetIndexOffset() { - return ColumnChunk_OffsetIndexOffset_DEFAULT - } -return *p.OffsetIndexOffset -} -var ColumnChunk_OffsetIndexLength_DEFAULT int32 -func (p *ColumnChunk) GetOffsetIndexLength() int32 { - if !p.IsSetOffsetIndexLength() { - 
-func (p *ColumnChunk) GetOffsetIndexLength() int32 {
-  if !p.IsSetOffsetIndexLength() {
-    return ColumnChunk_OffsetIndexLength_DEFAULT
-  }
-return *p.OffsetIndexLength
-}
-var ColumnChunk_ColumnIndexOffset_DEFAULT int64
-func (p *ColumnChunk) GetColumnIndexOffset() int64 {
-  if !p.IsSetColumnIndexOffset() {
-    return ColumnChunk_ColumnIndexOffset_DEFAULT
-  }
-return *p.ColumnIndexOffset
-}
-var ColumnChunk_ColumnIndexLength_DEFAULT int32
-func (p *ColumnChunk) GetColumnIndexLength() int32 {
-  if !p.IsSetColumnIndexLength() {
-    return ColumnChunk_ColumnIndexLength_DEFAULT
-  }
-return *p.ColumnIndexLength
-}
-var ColumnChunk_CryptoMetadata_DEFAULT *ColumnCryptoMetaData
-func (p *ColumnChunk) GetCryptoMetadata() *ColumnCryptoMetaData {
-  if !p.IsSetCryptoMetadata() {
-    return ColumnChunk_CryptoMetadata_DEFAULT
-  }
-return p.CryptoMetadata
-}
-var ColumnChunk_EncryptedColumnMetadata_DEFAULT []byte
-
-func (p *ColumnChunk) GetEncryptedColumnMetadata() []byte {
-  return p.EncryptedColumnMetadata
-}
-func (p *ColumnChunk) IsSetFilePath() bool {
-  return p.FilePath != nil
-}
-
-func (p *ColumnChunk) IsSetMetaData() bool {
-  return p.MetaData != nil
-}
-
-func (p *ColumnChunk) IsSetOffsetIndexOffset() bool {
-  return p.OffsetIndexOffset != nil
-}
-
-func (p *ColumnChunk) IsSetOffsetIndexLength() bool {
-  return p.OffsetIndexLength != nil
-}
-
-func (p *ColumnChunk) IsSetColumnIndexOffset() bool {
-  return p.ColumnIndexOffset != nil
-}
-
-func (p *ColumnChunk) IsSetColumnIndexLength() bool {
-  return p.ColumnIndexLength != nil
-}
-
-func (p *ColumnChunk) IsSetCryptoMetadata() bool {
-  return p.CryptoMetadata != nil
-}
-
-func (p *ColumnChunk) IsSetEncryptedColumnMetadata() bool {
-  return p.EncryptedColumnMetadata != nil
-}
-
-func (p *ColumnChunk) Read(ctx context.Context, iprot thrift.TProtocol) error {
-  if _, err := iprot.ReadStructBegin(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-  }
-
-  var issetFileOffset bool = false;
-
-  for {
-    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
-    if err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-    }
-    if fieldTypeId == thrift.STOP { break; }
-    switch fieldId {
-    case 1:
-      if fieldTypeId == thrift.STRING {
-        if err := p.ReadField1(ctx, iprot); err != nil {
-          return err
-        }
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    case 2:
-      if fieldTypeId == thrift.I64 {
-        if err := p.ReadField2(ctx, iprot); err != nil {
-          return err
-        }
-        issetFileOffset = true
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    case 3:
-      if fieldTypeId == thrift.STRUCT {
-        if err := p.ReadField3(ctx, iprot); err != nil {
-          return err
-        }
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    case 4:
-      if fieldTypeId == thrift.I64 {
-        if err := p.ReadField4(ctx, iprot); err != nil {
-          return err
-        }
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    case 5:
-      if fieldTypeId == thrift.I32 {
-        if err := p.ReadField5(ctx, iprot); err != nil {
-          return err
-        }
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    case 6:
-      if fieldTypeId == thrift.I64 {
-        if err := p.ReadField6(ctx, iprot); err != nil {
-          return err
-        }
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    case 7:
-      if fieldTypeId == thrift.I32 {
-        if err := p.ReadField7(ctx, iprot); err != nil {
-          return err
-        }
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    case 8:
-      if fieldTypeId == thrift.STRUCT {
-        if err := p.ReadField8(ctx, iprot); err != nil {
-          return err
-        }
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    case 9:
-      if fieldTypeId == thrift.STRING {
-        if err := p.ReadField9(ctx, iprot); err != nil {
-          return err
-        }
-      } else {
-        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-          return err
-        }
-      }
-    default:
-      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-        return err
-      }
-    }
-    if err := iprot.ReadFieldEnd(ctx); err != nil {
-      return err
-    }
-  }
-  if err := iprot.ReadStructEnd(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-  }
-  if !issetFileOffset{
-    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FileOffset is not set"));
-  }
-  return nil
-}
-
-func (p *ColumnChunk) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
-  if v, err := iprot.ReadString(ctx); err != nil {
-  return thrift.PrependError("error reading field 1: ", err)
-} else {
-  p.FilePath = &v
-}
-  return nil
-}
-
-func (p *ColumnChunk) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
-  if v, err := iprot.ReadI64(ctx); err != nil {
-  return thrift.PrependError("error reading field 2: ", err)
-} else {
-  p.FileOffset = v
-}
-  return nil
-}
-
-func (p *ColumnChunk) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
-  p.MetaData = &ColumnMetaData{}
-  if err := p.MetaData.Read(ctx, iprot); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.MetaData), err)
-  }
-  return nil
-}
-
-func (p *ColumnChunk) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
-  if v, err := iprot.ReadI64(ctx); err != nil {
-  return thrift.PrependError("error reading field 4: ", err)
-} else {
-  p.OffsetIndexOffset = &v
-}
-  return nil
-}
-
-func (p *ColumnChunk) ReadField5(ctx context.Context, iprot thrift.TProtocol) error {
-  if v, err := iprot.ReadI32(ctx); err != nil {
-  return thrift.PrependError("error reading field 5: ", err)
-} else {
-  p.OffsetIndexLength = &v
-}
-  return nil
-}
-
-func (p *ColumnChunk) ReadField6(ctx context.Context, iprot thrift.TProtocol) error {
-  if v, err := iprot.ReadI64(ctx); err != nil {
-  return thrift.PrependError("error reading field 6: ", err)
-} else {
-  p.ColumnIndexOffset = &v
-}
-  return nil
-}
-
-func (p *ColumnChunk) ReadField7(ctx context.Context, iprot thrift.TProtocol) error {
-  if v, err := iprot.ReadI32(ctx); err != nil {
-  return thrift.PrependError("error reading field 7: ", err)
-} else {
-  p.ColumnIndexLength = &v
-}
-  return nil
-}
-
-func (p *ColumnChunk) ReadField8(ctx context.Context, iprot thrift.TProtocol) error {
-  p.CryptoMetadata = &ColumnCryptoMetaData{}
-  if err := p.CryptoMetadata.Read(ctx, iprot); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.CryptoMetadata), err)
-  }
-  return nil
-}
-
-func (p *ColumnChunk) ReadField9(ctx context.Context, iprot thrift.TProtocol) error {
-  if v, err := iprot.ReadBinary(ctx); err != nil {
-  return thrift.PrependError("error reading field 9: ", err)
-} else {
-  p.EncryptedColumnMetadata = v
-}
-  return nil
-}
-
-func (p *ColumnChunk) Write(ctx context.Context, oprot thrift.TProtocol) error {
-  if err := oprot.WriteStructBegin(ctx, "ColumnChunk"); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
-  if p != nil {
-    if err := p.writeField1(ctx, oprot); err != nil { return err }
-    if err := p.writeField2(ctx, oprot); err != nil { return err }
-    if err := p.writeField3(ctx, oprot); err != nil { return err }
-    if err := p.writeField4(ctx, oprot); err != nil { return err }
-    if err := p.writeField5(ctx, oprot); err != nil { return err }
-    if err := p.writeField6(ctx, oprot); err != nil { return err }
-    if err := p.writeField7(ctx, oprot); err != nil { return err }
-    if err := p.writeField8(ctx, oprot); err != nil { return err }
-    if err := p.writeField9(ctx, oprot); err != nil { return err }
-  }
-  if err := oprot.WriteFieldStop(ctx); err != nil {
-    return thrift.PrependError("write field stop error: ", err) }
-  if err := oprot.WriteStructEnd(ctx); err != nil {
-    return thrift.PrependError("write struct stop error: ", err) }
-  return nil
-}
-
-func (p *ColumnChunk) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if p.IsSetFilePath() {
-    if err := oprot.WriteFieldBegin(ctx, "file_path", thrift.STRING, 1); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:file_path: ", p), err) }
-    if err := oprot.WriteString(ctx, string(*p.FilePath)); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T.file_path (1) field write error: ", p), err) }
-    if err := oprot.WriteFieldEnd(ctx); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field end error 1:file_path: ", p), err) }
-  }
-  return err
-}
-
-func (p *ColumnChunk) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if err := oprot.WriteFieldBegin(ctx, "file_offset", thrift.I64, 2); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:file_offset: ", p), err) }
-  if err := oprot.WriteI64(ctx, int64(p.FileOffset)); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T.file_offset (2) field write error: ", p), err) }
-  if err := oprot.WriteFieldEnd(ctx); err != nil {
-    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:file_offset: ", p), err) }
-  return err
-}
-
-func (p *ColumnChunk) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if p.IsSetMetaData() {
-    if err := oprot.WriteFieldBegin(ctx, "meta_data", thrift.STRUCT, 3); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:meta_data: ", p), err) }
-    if err := p.MetaData.Write(ctx, oprot); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.MetaData), err)
-    }
-    if err := oprot.WriteFieldEnd(ctx); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field end error 3:meta_data: ", p), err) }
-  }
-  return err
-}
-
-func (p *ColumnChunk) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if p.IsSetOffsetIndexOffset() {
-    if err := oprot.WriteFieldBegin(ctx, "offset_index_offset", thrift.I64, 4); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:offset_index_offset: ", p), err) }
-    if err := oprot.WriteI64(ctx, int64(*p.OffsetIndexOffset)); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T.offset_index_offset (4) field write error: ", p), err) }
-    if err := oprot.WriteFieldEnd(ctx); err != nil {
-      return thrift.PrependError(fmt.Sprintf("%T write field end error 4:offset_index_offset: ", p), err) }
-  }
-  return err
-}
-
-func (p *ColumnChunk) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) {
-  if p.IsSetOffsetIndexLength() {
-    if err := oprot.WriteFieldBegin(ctx, "offset_index_length", thrift.I32, 5); err != nil {
write field begin error 5:offset_index_length: ", p), err) } - if err := oprot.WriteI32(ctx, int32(*p.OffsetIndexLength)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.offset_index_length (5) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:offset_index_length: ", p), err) } - } - return err -} - -func (p *ColumnChunk) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetColumnIndexOffset() { - if err := oprot.WriteFieldBegin(ctx, "column_index_offset", thrift.I64, 6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:column_index_offset: ", p), err) } - if err := oprot.WriteI64(ctx, int64(*p.ColumnIndexOffset)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.column_index_offset (6) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 6:column_index_offset: ", p), err) } - } - return err -} - -func (p *ColumnChunk) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetColumnIndexLength() { - if err := oprot.WriteFieldBegin(ctx, "column_index_length", thrift.I32, 7); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:column_index_length: ", p), err) } - if err := oprot.WriteI32(ctx, int32(*p.ColumnIndexLength)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.column_index_length (7) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 7:column_index_length: ", p), err) } - } - return err -} - -func (p *ColumnChunk) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetCryptoMetadata() { - if err := oprot.WriteFieldBegin(ctx, "crypto_metadata", thrift.STRUCT, 8); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:crypto_metadata: ", p), err) } - if err := p.CryptoMetadata.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.CryptoMetadata), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 8:crypto_metadata: ", p), err) } - } - return err -} - -func (p *ColumnChunk) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetEncryptedColumnMetadata() { - if err := oprot.WriteFieldBegin(ctx, "encrypted_column_metadata", thrift.STRING, 9); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:encrypted_column_metadata: ", p), err) } - if err := oprot.WriteBinary(ctx, p.EncryptedColumnMetadata); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.encrypted_column_metadata (9) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 9:encrypted_column_metadata: ", p), err) } - } - return err -} - -func (p *ColumnChunk) Equals(other *ColumnChunk) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.FilePath != other.FilePath { - if p.FilePath == nil || other.FilePath == nil { - return false - } - if (*p.FilePath) != (*other.FilePath) { return false } - } - if p.FileOffset != other.FileOffset { return false } - if !p.MetaData.Equals(other.MetaData) { return 
false } - if p.OffsetIndexOffset != other.OffsetIndexOffset { - if p.OffsetIndexOffset == nil || other.OffsetIndexOffset == nil { - return false - } - if (*p.OffsetIndexOffset) != (*other.OffsetIndexOffset) { return false } - } - if p.OffsetIndexLength != other.OffsetIndexLength { - if p.OffsetIndexLength == nil || other.OffsetIndexLength == nil { - return false - } - if (*p.OffsetIndexLength) != (*other.OffsetIndexLength) { return false } - } - if p.ColumnIndexOffset != other.ColumnIndexOffset { - if p.ColumnIndexOffset == nil || other.ColumnIndexOffset == nil { - return false - } - if (*p.ColumnIndexOffset) != (*other.ColumnIndexOffset) { return false } - } - if p.ColumnIndexLength != other.ColumnIndexLength { - if p.ColumnIndexLength == nil || other.ColumnIndexLength == nil { - return false - } - if (*p.ColumnIndexLength) != (*other.ColumnIndexLength) { return false } - } - if !p.CryptoMetadata.Equals(other.CryptoMetadata) { return false } - if bytes.Compare(p.EncryptedColumnMetadata, other.EncryptedColumnMetadata) != 0 { return false } - return true -} - -func (p *ColumnChunk) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("ColumnChunk(%+v)", *p) -} - -// Attributes: -// - Columns: Metadata for each column chunk in this row group. -// This list must have the same order as the SchemaElement list in FileMetaData. -// -// - TotalByteSize: Total byte size of all the uncompressed column data in this row group * -// - NumRows: Number of rows in this row group * -// - SortingColumns: If set, specifies a sort ordering of the rows in this RowGroup. -// The sorting columns can be a subset of all the columns. -// - FileOffset: Byte offset from beginning of file to first page (data or dictionary) -// in this row group * -// - TotalCompressedSize: Total byte size of all compressed (and potentially encrypted) column data -// in this row group * -// - Ordinal: Row group ordinal in the file * -type RowGroup struct { - Columns []*ColumnChunk `thrift:"columns,1,required" db:"columns" json:"columns"` - TotalByteSize int64 `thrift:"total_byte_size,2,required" db:"total_byte_size" json:"total_byte_size"` - NumRows int64 `thrift:"num_rows,3,required" db:"num_rows" json:"num_rows"` - SortingColumns []*SortingColumn `thrift:"sorting_columns,4" db:"sorting_columns" json:"sorting_columns,omitempty"` - FileOffset *int64 `thrift:"file_offset,5" db:"file_offset" json:"file_offset,omitempty"` - TotalCompressedSize *int64 `thrift:"total_compressed_size,6" db:"total_compressed_size" json:"total_compressed_size,omitempty"` - Ordinal *int16 `thrift:"ordinal,7" db:"ordinal" json:"ordinal,omitempty"` -} - -func NewRowGroup() *RowGroup { - return &RowGroup{} -} - - -func (p *RowGroup) GetColumns() []*ColumnChunk { - return p.Columns -} - -func (p *RowGroup) GetTotalByteSize() int64 { - return p.TotalByteSize -} - -func (p *RowGroup) GetNumRows() int64 { - return p.NumRows -} -var RowGroup_SortingColumns_DEFAULT []*SortingColumn - -func (p *RowGroup) GetSortingColumns() []*SortingColumn { - return p.SortingColumns -} -var RowGroup_FileOffset_DEFAULT int64 -func (p *RowGroup) GetFileOffset() int64 { - if !p.IsSetFileOffset() { - return RowGroup_FileOffset_DEFAULT - } -return *p.FileOffset -} -var RowGroup_TotalCompressedSize_DEFAULT int64 -func (p *RowGroup) GetTotalCompressedSize() int64 { - if !p.IsSetTotalCompressedSize() { - return RowGroup_TotalCompressedSize_DEFAULT - } -return *p.TotalCompressedSize -} -var RowGroup_Ordinal_DEFAULT int16 -func (p *RowGroup) GetOrdinal() int16 { - if 
!p.IsSetOrdinal() { - return RowGroup_Ordinal_DEFAULT - } -return *p.Ordinal -} -func (p *RowGroup) IsSetSortingColumns() bool { - return p.SortingColumns != nil -} - -func (p *RowGroup) IsSetFileOffset() bool { - return p.FileOffset != nil -} - -func (p *RowGroup) IsSetTotalCompressedSize() bool { - return p.TotalCompressedSize != nil -} - -func (p *RowGroup) IsSetOrdinal() bool { - return p.Ordinal != nil -} - -func (p *RowGroup) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetColumns bool = false; - var issetTotalByteSize bool = false; - var issetNumRows bool = false; - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.LIST { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetColumns = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.I64 { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - issetTotalByteSize = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.I64 { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - issetNumRows = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.LIST { - if err := p.ReadField4(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 5: - if fieldTypeId == thrift.I64 { - if err := p.ReadField5(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 6: - if fieldTypeId == thrift.I64 { - if err := p.ReadField6(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 7: - if fieldTypeId == thrift.I16 { - if err := p.ReadField7(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetColumns{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Columns is not set")); - } - if !issetTotalByteSize{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TotalByteSize is not set")); - } - if !issetNumRows{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field NumRows is not set")); - } - return nil -} - -func (p *RowGroup) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*ColumnChunk, 0, size) - p.Columns = tSlice - for i := 0; i < size; i ++ { - _elem10 := &ColumnChunk{} - if err := _elem10.Read(ctx, iprot); err 
!= nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem10), err) - } - p.Columns = append(p.Columns, _elem10) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *RowGroup) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 2: ", err) -} else { - p.TotalByteSize = v -} - return nil -} - -func (p *RowGroup) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 3: ", err) -} else { - p.NumRows = v -} - return nil -} - -func (p *RowGroup) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*SortingColumn, 0, size) - p.SortingColumns = tSlice - for i := 0; i < size; i ++ { - _elem11 := &SortingColumn{} - if err := _elem11.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem11), err) - } - p.SortingColumns = append(p.SortingColumns, _elem11) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *RowGroup) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 5: ", err) -} else { - p.FileOffset = &v -} - return nil -} - -func (p *RowGroup) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 6: ", err) -} else { - p.TotalCompressedSize = &v -} - return nil -} - -func (p *RowGroup) ReadField7(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI16(ctx); err != nil { - return thrift.PrependError("error reading field 7: ", err) -} else { - p.Ordinal = &v -} - return nil -} - -func (p *RowGroup) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "RowGroup"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - if err := p.writeField2(ctx, oprot); err != nil { return err } - if err := p.writeField3(ctx, oprot); err != nil { return err } - if err := p.writeField4(ctx, oprot); err != nil { return err } - if err := p.writeField5(ctx, oprot); err != nil { return err } - if err := p.writeField6(ctx, oprot); err != nil { return err } - if err := p.writeField7(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *RowGroup) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "columns", thrift.LIST, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:columns: ", p), err) } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Columns)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - 
for _, v := range p.Columns { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:columns: ", p), err) } - return err -} - -func (p *RowGroup) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "total_byte_size", thrift.I64, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:total_byte_size: ", p), err) } - if err := oprot.WriteI64(ctx, int64(p.TotalByteSize)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.total_byte_size (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:total_byte_size: ", p), err) } - return err -} - -func (p *RowGroup) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "num_rows", thrift.I64, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:num_rows: ", p), err) } - if err := oprot.WriteI64(ctx, int64(p.NumRows)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.num_rows (3) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:num_rows: ", p), err) } - return err -} - -func (p *RowGroup) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetSortingColumns() { - if err := oprot.WriteFieldBegin(ctx, "sorting_columns", thrift.LIST, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:sorting_columns: ", p), err) } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.SortingColumns)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.SortingColumns { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:sorting_columns: ", p), err) } - } - return err -} - -func (p *RowGroup) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetFileOffset() { - if err := oprot.WriteFieldBegin(ctx, "file_offset", thrift.I64, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:file_offset: ", p), err) } - if err := oprot.WriteI64(ctx, int64(*p.FileOffset)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.file_offset (5) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:file_offset: ", p), err) } - } - return err -} - -func (p *RowGroup) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetTotalCompressedSize() { - if err := oprot.WriteFieldBegin(ctx, "total_compressed_size", thrift.I64, 6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:total_compressed_size: ", p), err) } - if err := 
oprot.WriteI64(ctx, int64(*p.TotalCompressedSize)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.total_compressed_size (6) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 6:total_compressed_size: ", p), err) } - } - return err -} - -func (p *RowGroup) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetOrdinal() { - if err := oprot.WriteFieldBegin(ctx, "ordinal", thrift.I16, 7); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:ordinal: ", p), err) } - if err := oprot.WriteI16(ctx, int16(*p.Ordinal)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.ordinal (7) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 7:ordinal: ", p), err) } - } - return err -} - -func (p *RowGroup) Equals(other *RowGroup) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if len(p.Columns) != len(other.Columns) { return false } - for i, _tgt := range p.Columns { - _src12 := other.Columns[i] - if !_tgt.Equals(_src12) { return false } - } - if p.TotalByteSize != other.TotalByteSize { return false } - if p.NumRows != other.NumRows { return false } - if len(p.SortingColumns) != len(other.SortingColumns) { return false } - for i, _tgt := range p.SortingColumns { - _src13 := other.SortingColumns[i] - if !_tgt.Equals(_src13) { return false } - } - if p.FileOffset != other.FileOffset { - if p.FileOffset == nil || other.FileOffset == nil { - return false - } - if (*p.FileOffset) != (*other.FileOffset) { return false } - } - if p.TotalCompressedSize != other.TotalCompressedSize { - if p.TotalCompressedSize == nil || other.TotalCompressedSize == nil { - return false - } - if (*p.TotalCompressedSize) != (*other.TotalCompressedSize) { return false } - } - if p.Ordinal != other.Ordinal { - if p.Ordinal == nil || other.Ordinal == nil { - return false - } - if (*p.Ordinal) != (*other.Ordinal) { return false } - } - return true -} - -func (p *RowGroup) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("RowGroup(%+v)", *p) -} - -// Empty struct to signal the order defined by the physical or logical type -type TypeDefinedOrder struct { -} - -func NewTypeDefinedOrder() *TypeDefinedOrder { - return &TypeDefinedOrder{} -} - -func (p *TypeDefinedOrder) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *TypeDefinedOrder) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "TypeDefinedOrder"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return 
thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *TypeDefinedOrder) Equals(other *TypeDefinedOrder) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - return true -} - -func (p *TypeDefinedOrder) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("TypeDefinedOrder(%+v)", *p) -} - -// Union to specify the order used for the min_value and max_value fields for a -// column. This union takes the role of an enhanced enum that allows rich -// elements (which will be needed for a collation-based ordering in the future). -// -// Possible values are: -// * TypeDefinedOrder - the column uses the order defined by its logical or -// physical type (if there is no logical type). -// -// If the reader does not support the value of this union, min and max stats -// for this column should be ignored. -// -// Attributes: -// - TYPE_ORDER: The sort orders for logical types are: -// UTF8 - unsigned byte-wise comparison -// INT8 - signed comparison -// INT16 - signed comparison -// INT32 - signed comparison -// INT64 - signed comparison -// UINT8 - unsigned comparison -// UINT16 - unsigned comparison -// UINT32 - unsigned comparison -// UINT64 - unsigned comparison -// DECIMAL - signed comparison of the represented value -// DATE - signed comparison -// TIME_MILLIS - signed comparison -// TIME_MICROS - signed comparison -// TIMESTAMP_MILLIS - signed comparison -// TIMESTAMP_MICROS - signed comparison -// INTERVAL - unsigned comparison -// JSON - unsigned byte-wise comparison -// BSON - unsigned byte-wise comparison -// ENUM - unsigned byte-wise comparison -// LIST - undefined -// MAP - undefined -// -// In the absence of logical types, the sort order is determined by the physical type: -// BOOLEAN - false, true -// INT32 - signed comparison -// INT64 - signed comparison -// INT96 (only used for legacy timestamps) - undefined -// FLOAT - signed comparison of the represented value (*) -// DOUBLE - signed comparison of the represented value (*) -// BYTE_ARRAY - unsigned byte-wise comparison -// FIXED_LEN_BYTE_ARRAY - unsigned byte-wise comparison -// -// (*) Because the sorting order is not specified properly for floating -// point values (relations vs. total ordering) the following -// compatibility rules should be applied when reading statistics: -// - If the min is a NaN, it should be ignored. -// - If the max is a NaN, it should be ignored. -// - If the min is +0, the row group may contain -0 values as well. -// - If the max is -0, the row group may contain +0 values as well. -// - When looking for NaN values, min and max should be ignored. 
-type ColumnOrder struct { - TYPE_ORDER *TypeDefinedOrder `thrift:"TYPE_ORDER,1" db:"TYPE_ORDER" json:"TYPE_ORDER,omitempty"` -} - -func NewColumnOrder() *ColumnOrder { - return &ColumnOrder{} -} - -var ColumnOrder_TYPE_ORDER_DEFAULT *TypeDefinedOrder -func (p *ColumnOrder) GetTYPE_ORDER() *TypeDefinedOrder { - if !p.IsSetTYPE_ORDER() { - return ColumnOrder_TYPE_ORDER_DEFAULT - } -return p.TYPE_ORDER -} -func (p *ColumnOrder) CountSetFieldsColumnOrder() int { - count := 0 - if (p.IsSetTYPE_ORDER()) { - count++ - } - return count - -} - -func (p *ColumnOrder) IsSetTYPE_ORDER() bool { - return p.TYPE_ORDER != nil -} - -func (p *ColumnOrder) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *ColumnOrder) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - p.TYPE_ORDER = &TypeDefinedOrder{} - if err := p.TYPE_ORDER.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.TYPE_ORDER), err) - } - return nil -} - -func (p *ColumnOrder) Write(ctx context.Context, oprot thrift.TProtocol) error { - if c := p.CountSetFieldsColumnOrder(); c != 1 { - return fmt.Errorf("%T write union: exactly one field must be set (%d set)", p, c) - } - if err := oprot.WriteStructBegin(ctx, "ColumnOrder"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *ColumnOrder) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetTYPE_ORDER() { - if err := oprot.WriteFieldBegin(ctx, "TYPE_ORDER", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:TYPE_ORDER: ", p), err) } - if err := p.TYPE_ORDER.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.TYPE_ORDER), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:TYPE_ORDER: ", p), err) } - } - return err -} - -func (p *ColumnOrder) Equals(other *ColumnOrder) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if !p.TYPE_ORDER.Equals(other.TYPE_ORDER) { return false } - return true -} - -func (p *ColumnOrder) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("ColumnOrder(%+v)", 
*p) -} - -// Attributes: -// - Offset: Offset of the page in the file * -// - CompressedPageSize: Size of the page, including header. Sum of compressed_page_size and header -// length -// - FirstRowIndex: Index within the RowGroup of the first row of the page; this means pages -// change on record boundaries (r = 0). -type PageLocation struct { - Offset int64 `thrift:"offset,1,required" db:"offset" json:"offset"` - CompressedPageSize int32 `thrift:"compressed_page_size,2,required" db:"compressed_page_size" json:"compressed_page_size"` - FirstRowIndex int64 `thrift:"first_row_index,3,required" db:"first_row_index" json:"first_row_index"` -} - -func NewPageLocation() *PageLocation { - return &PageLocation{} -} - - -func (p *PageLocation) GetOffset() int64 { - return p.Offset -} - -func (p *PageLocation) GetCompressedPageSize() int32 { - return p.CompressedPageSize -} - -func (p *PageLocation) GetFirstRowIndex() int64 { - return p.FirstRowIndex -} -func (p *PageLocation) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetOffset bool = false; - var issetCompressedPageSize bool = false; - var issetFirstRowIndex bool = false; - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.I64 { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetOffset = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.I32 { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - issetCompressedPageSize = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.I64 { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - issetFirstRowIndex = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetOffset{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Offset is not set")); - } - if !issetCompressedPageSize{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field CompressedPageSize is not set")); - } - if !issetFirstRowIndex{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FirstRowIndex is not set")); - } - return nil -} - -func (p *PageLocation) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.Offset = v -} - return nil -} - -func (p *PageLocation) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 2: ", err) -} else { - p.CompressedPageSize = v -} - return nil -} - -func (p *PageLocation) ReadField3(ctx context.Context, iprot thrift.TProtocol) error 
{ - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 3: ", err) -} else { - p.FirstRowIndex = v -} - return nil -} - -func (p *PageLocation) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "PageLocation"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - if err := p.writeField2(ctx, oprot); err != nil { return err } - if err := p.writeField3(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *PageLocation) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "offset", thrift.I64, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:offset: ", p), err) } - if err := oprot.WriteI64(ctx, int64(p.Offset)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.offset (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:offset: ", p), err) } - return err -} - -func (p *PageLocation) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "compressed_page_size", thrift.I32, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:compressed_page_size: ", p), err) } - if err := oprot.WriteI32(ctx, int32(p.CompressedPageSize)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.compressed_page_size (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:compressed_page_size: ", p), err) } - return err -} - -func (p *PageLocation) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "first_row_index", thrift.I64, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:first_row_index: ", p), err) } - if err := oprot.WriteI64(ctx, int64(p.FirstRowIndex)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.first_row_index (3) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:first_row_index: ", p), err) } - return err -} - -func (p *PageLocation) Equals(other *PageLocation) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.Offset != other.Offset { return false } - if p.CompressedPageSize != other.CompressedPageSize { return false } - if p.FirstRowIndex != other.FirstRowIndex { return false } - return true -} - -func (p *PageLocation) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("PageLocation(%+v)", *p) -} - -// Attributes: -// - PageLocations: PageLocations, ordered by increasing PageLocation.offset. It is required -// that page_locations[i].first_row_index < page_locations[i+1].first_row_index. 
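Given the ordering invariant just stated for page_locations, a reader can locate the page holding a particular row with a binary search over the OffsetIndex type declared below. A minimal self-contained sketch, with stand-ins for the generated types so it runs on its own (pageForRow is an illustrative helper, not part of this file):

package main

import (
	"fmt"
	"sort"
)

// Minimal stand-ins for the generated PageLocation/OffsetIndex types.
type PageLocation struct {
	Offset             int64
	CompressedPageSize int32
	FirstRowIndex      int64
}

type OffsetIndex struct {
	PageLocations []*PageLocation
}

// pageForRow returns the index of the page containing row, relying on
// FirstRowIndex being strictly increasing across PageLocations.
func pageForRow(idx *OffsetIndex, row int64) int {
	// sort.Search finds the first page whose FirstRowIndex exceeds row;
	// the page containing row is the one just before it.
	n := sort.Search(len(idx.PageLocations), func(i int) bool {
		return idx.PageLocations[i].FirstRowIndex > row
	})
	return n - 1 // -1 when row precedes the first page (malformed index)
}

func main() {
	idx := &OffsetIndex{PageLocations: []*PageLocation{
		{FirstRowIndex: 0}, {FirstRowIndex: 100}, {FirstRowIndex: 250},
	}}
	fmt.Println(pageForRow(idx, 120)) // 1: rows 100-249 live in page 1
}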
-type OffsetIndex struct { - PageLocations []*PageLocation `thrift:"page_locations,1,required" db:"page_locations" json:"page_locations"` -} - -func NewOffsetIndex() *OffsetIndex { - return &OffsetIndex{} -} - - -func (p *OffsetIndex) GetPageLocations() []*PageLocation { - return p.PageLocations -} -func (p *OffsetIndex) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetPageLocations bool = false; - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.LIST { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetPageLocations = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetPageLocations{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field PageLocations is not set")); - } - return nil -} - -func (p *OffsetIndex) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*PageLocation, 0, size) - p.PageLocations = tSlice - for i := 0; i < size; i ++ { - _elem14 := &PageLocation{} - if err := _elem14.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem14), err) - } - p.PageLocations = append(p.PageLocations, _elem14) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *OffsetIndex) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "OffsetIndex"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *OffsetIndex) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "page_locations", thrift.LIST, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:page_locations: ", p), err) } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.PageLocations)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.PageLocations { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T 
write field end error 1:page_locations: ", p), err) } - return err -} - -func (p *OffsetIndex) Equals(other *OffsetIndex) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if len(p.PageLocations) != len(other.PageLocations) { return false } - for i, _tgt := range p.PageLocations { - _src15 := other.PageLocations[i] - if !_tgt.Equals(_src15) { return false } - } - return true -} - -func (p *OffsetIndex) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("OffsetIndex(%+v)", *p) -} - -// Description for ColumnIndex. -// Each [i] refers to the page at OffsetIndex.page_locations[i] -// -// Attributes: -// - NullPages: A list of Boolean values to determine the validity of the corresponding -// min and max values. If true, a page contains only null values, and writers -// have to set the corresponding entries in min_values and max_values to -// byte[0], so that all lists have the same length. If false, the -// corresponding entries in min_values and max_values must be valid. -// - MinValues: Two lists containing lower and upper bounds for the values of each page. -// These may be the actual minimum and maximum values found on a page, but -// can also be (more compact) values that do not exist on a page. For -// example, instead of storing "Blart Versenwald III", a writer may set -// min_values[i]="B", max_values[i]="C". Such more compact values must still -// be valid values within the column's logical type. Readers must make sure -// that list entries are populated before using them by inspecting null_pages. -// - MaxValues -// - BoundaryOrder: Stores whether both min_values and max_values are ordered and if so, in -// which direction. This allows readers to perform binary searches in both -// lists. Readers cannot assume that max_values[i] <= min_values[i+1], even -// if the lists are ordered. 
-// - NullCounts: A list containing the number of null values for each page * -type ColumnIndex struct { - NullPages []bool `thrift:"null_pages,1,required" db:"null_pages" json:"null_pages"` - MinValues [][]byte `thrift:"min_values,2,required" db:"min_values" json:"min_values"` - MaxValues [][]byte `thrift:"max_values,3,required" db:"max_values" json:"max_values"` - BoundaryOrder BoundaryOrder `thrift:"boundary_order,4,required" db:"boundary_order" json:"boundary_order"` - NullCounts []int64 `thrift:"null_counts,5" db:"null_counts" json:"null_counts,omitempty"` -} - -func NewColumnIndex() *ColumnIndex { - return &ColumnIndex{} -} - - -func (p *ColumnIndex) GetNullPages() []bool { - return p.NullPages -} - -func (p *ColumnIndex) GetMinValues() [][]byte { - return p.MinValues -} - -func (p *ColumnIndex) GetMaxValues() [][]byte { - return p.MaxValues -} - -func (p *ColumnIndex) GetBoundaryOrder() BoundaryOrder { - return p.BoundaryOrder -} -var ColumnIndex_NullCounts_DEFAULT []int64 - -func (p *ColumnIndex) GetNullCounts() []int64 { - return p.NullCounts -} -func (p *ColumnIndex) IsSetNullCounts() bool { - return p.NullCounts != nil -} - -func (p *ColumnIndex) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetNullPages bool = false; - var issetMinValues bool = false; - var issetMaxValues bool = false; - var issetBoundaryOrder bool = false; - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.LIST { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetNullPages = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.LIST { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - issetMinValues = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.LIST { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - issetMaxValues = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.I32 { - if err := p.ReadField4(ctx, iprot); err != nil { - return err - } - issetBoundaryOrder = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 5: - if fieldTypeId == thrift.LIST { - if err := p.ReadField5(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetNullPages{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field NullPages is not set")); - } - if !issetMinValues{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MinValues is not set")); - } - if !issetMaxValues{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MaxValues 
is not set")); - } - if !issetBoundaryOrder{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field BoundaryOrder is not set")); - } - return nil -} - -func (p *ColumnIndex) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]bool, 0, size) - p.NullPages = tSlice - for i := 0; i < size; i ++ { -var _elem16 bool - if v, err := iprot.ReadBool(ctx); err != nil { - return thrift.PrependError("error reading field 0: ", err) -} else { - _elem16 = v -} - p.NullPages = append(p.NullPages, _elem16) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *ColumnIndex) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([][]byte, 0, size) - p.MinValues = tSlice - for i := 0; i < size; i ++ { -var _elem17 []byte - if v, err := iprot.ReadBinary(ctx); err != nil { - return thrift.PrependError("error reading field 0: ", err) -} else { - _elem17 = v -} - p.MinValues = append(p.MinValues, _elem17) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *ColumnIndex) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([][]byte, 0, size) - p.MaxValues = tSlice - for i := 0; i < size; i ++ { -var _elem18 []byte - if v, err := iprot.ReadBinary(ctx); err != nil { - return thrift.PrependError("error reading field 0: ", err) -} else { - _elem18 = v -} - p.MaxValues = append(p.MaxValues, _elem18) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *ColumnIndex) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 4: ", err) -} else { - temp := BoundaryOrder(v) - p.BoundaryOrder = temp -} - return nil -} - -func (p *ColumnIndex) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]int64, 0, size) - p.NullCounts = tSlice - for i := 0; i < size; i ++ { -var _elem19 int64 - if v, err := iprot.ReadI64(ctx); err != nil { - return thrift.PrependError("error reading field 0: ", err) -} else { - _elem19 = v -} - p.NullCounts = append(p.NullCounts, _elem19) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *ColumnIndex) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "ColumnIndex"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - if err := p.writeField2(ctx, oprot); err != nil { return err } - if err := p.writeField3(ctx, oprot); err != nil { return err } - if err := p.writeField4(ctx, oprot); err != nil { return 
err } - if err := p.writeField5(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *ColumnIndex) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "null_pages", thrift.LIST, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:null_pages: ", p), err) } - if err := oprot.WriteListBegin(ctx, thrift.BOOL, len(p.NullPages)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.NullPages { - if err := oprot.WriteBool(ctx, bool(v)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:null_pages: ", p), err) } - return err -} - -func (p *ColumnIndex) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "min_values", thrift.LIST, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:min_values: ", p), err) } - if err := oprot.WriteListBegin(ctx, thrift.STRING, len(p.MinValues)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.MinValues { - if err := oprot.WriteBinary(ctx, v); err != nil { - return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:min_values: ", p), err) } - return err -} - -func (p *ColumnIndex) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "max_values", thrift.LIST, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:max_values: ", p), err) } - if err := oprot.WriteListBegin(ctx, thrift.STRING, len(p.MaxValues)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.MaxValues { - if err := oprot.WriteBinary(ctx, v); err != nil { - return thrift.PrependError(fmt.Sprintf("%T. 
(0) field write error: ", p), err) } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:max_values: ", p), err) } - return err -} - -func (p *ColumnIndex) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "boundary_order", thrift.I32, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:boundary_order: ", p), err) } - if err := oprot.WriteI32(ctx, int32(p.BoundaryOrder)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.boundary_order (4) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:boundary_order: ", p), err) } - return err -} - -func (p *ColumnIndex) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetNullCounts() { - if err := oprot.WriteFieldBegin(ctx, "null_counts", thrift.LIST, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:null_counts: ", p), err) } - if err := oprot.WriteListBegin(ctx, thrift.I64, len(p.NullCounts)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.NullCounts { - if err := oprot.WriteI64(ctx, int64(v)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:null_counts: ", p), err) } - } - return err -} - -func (p *ColumnIndex) Equals(other *ColumnIndex) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if len(p.NullPages) != len(other.NullPages) { return false } - for i, _tgt := range p.NullPages { - _src20 := other.NullPages[i] - if _tgt != _src20 { return false } - } - if len(p.MinValues) != len(other.MinValues) { return false } - for i, _tgt := range p.MinValues { - _src21 := other.MinValues[i] - if bytes.Compare(_tgt, _src21) != 0 { return false } - } - if len(p.MaxValues) != len(other.MaxValues) { return false } - for i, _tgt := range p.MaxValues { - _src22 := other.MaxValues[i] - if bytes.Compare(_tgt, _src22) != 0 { return false } - } - if p.BoundaryOrder != other.BoundaryOrder { return false } - if len(p.NullCounts) != len(other.NullCounts) { return false } - for i, _tgt := range p.NullCounts { - _src23 := other.NullCounts[i] - if _tgt != _src23 { return false } - } - return true -} - -func (p *ColumnIndex) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("ColumnIndex(%+v)", *p) -} - -// Attributes: -// - AadPrefix: AAD prefix * -// - AadFileUnique: Unique file identifier part of AAD suffix * -// - SupplyAadPrefix: In files encrypted with AAD prefix without storing it, -// readers must supply the prefix * -type AesGcmV1 struct { - AadPrefix []byte `thrift:"aad_prefix,1" db:"aad_prefix" json:"aad_prefix,omitempty"` - AadFileUnique []byte `thrift:"aad_file_unique,2" db:"aad_file_unique" json:"aad_file_unique,omitempty"` - SupplyAadPrefix *bool `thrift:"supply_aad_prefix,3" db:"supply_aad_prefix" json:"supply_aad_prefix,omitempty"` -} - -func NewAesGcmV1() *AesGcmV1 { - return &AesGcmV1{} -} - 
-var AesGcmV1_AadPrefix_DEFAULT []byte - -func (p *AesGcmV1) GetAadPrefix() []byte { - return p.AadPrefix -} -var AesGcmV1_AadFileUnique_DEFAULT []byte - -func (p *AesGcmV1) GetAadFileUnique() []byte { - return p.AadFileUnique -} -var AesGcmV1_SupplyAadPrefix_DEFAULT bool -func (p *AesGcmV1) GetSupplyAadPrefix() bool { - if !p.IsSetSupplyAadPrefix() { - return AesGcmV1_SupplyAadPrefix_DEFAULT - } -return *p.SupplyAadPrefix -} -func (p *AesGcmV1) IsSetAadPrefix() bool { - return p.AadPrefix != nil -} - -func (p *AesGcmV1) IsSetAadFileUnique() bool { - return p.AadFileUnique != nil -} - -func (p *AesGcmV1) IsSetSupplyAadPrefix() bool { - return p.SupplyAadPrefix != nil -} - -func (p *AesGcmV1) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.STRING { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *AesGcmV1) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.AadPrefix = v -} - return nil -} - -func (p *AesGcmV1) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(ctx); err != nil { - return thrift.PrependError("error reading field 2: ", err) -} else { - p.AadFileUnique = v -} - return nil -} - -func (p *AesGcmV1) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(ctx); err != nil { - return thrift.PrependError("error reading field 3: ", err) -} else { - p.SupplyAadPrefix = &v -} - return nil -} - -func (p *AesGcmV1) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "AesGcmV1"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - if err := p.writeField2(ctx, oprot); err != nil { return err } - if err := p.writeField3(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *AesGcmV1) writeField1(ctx context.Context, oprot 
thrift.TProtocol) (err error) { - if p.IsSetAadPrefix() { - if err := oprot.WriteFieldBegin(ctx, "aad_prefix", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:aad_prefix: ", p), err) } - if err := oprot.WriteBinary(ctx, p.AadPrefix); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.aad_prefix (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:aad_prefix: ", p), err) } - } - return err -} - -func (p *AesGcmV1) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetAadFileUnique() { - if err := oprot.WriteFieldBegin(ctx, "aad_file_unique", thrift.STRING, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:aad_file_unique: ", p), err) } - if err := oprot.WriteBinary(ctx, p.AadFileUnique); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.aad_file_unique (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:aad_file_unique: ", p), err) } - } - return err -} - -func (p *AesGcmV1) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetSupplyAadPrefix() { - if err := oprot.WriteFieldBegin(ctx, "supply_aad_prefix", thrift.BOOL, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:supply_aad_prefix: ", p), err) } - if err := oprot.WriteBool(ctx, bool(*p.SupplyAadPrefix)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.supply_aad_prefix (3) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:supply_aad_prefix: ", p), err) } - } - return err -} - -func (p *AesGcmV1) Equals(other *AesGcmV1) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if bytes.Compare(p.AadPrefix, other.AadPrefix) != 0 { return false } - if bytes.Compare(p.AadFileUnique, other.AadFileUnique) != 0 { return false } - if p.SupplyAadPrefix != other.SupplyAadPrefix { - if p.SupplyAadPrefix == nil || other.SupplyAadPrefix == nil { - return false - } - if (*p.SupplyAadPrefix) != (*other.SupplyAadPrefix) { return false } - } - return true -} - -func (p *AesGcmV1) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("AesGcmV1(%+v)", *p) -} - -// Attributes: -// - AadPrefix: AAD prefix * -// - AadFileUnique: Unique file identifier part of AAD suffix * -// - SupplyAadPrefix: In files encrypted with AAD prefix without storing it, -// readers must supply the prefix * -type AesGcmCtrV1 struct { - AadPrefix []byte `thrift:"aad_prefix,1" db:"aad_prefix" json:"aad_prefix,omitempty"` - AadFileUnique []byte `thrift:"aad_file_unique,2" db:"aad_file_unique" json:"aad_file_unique,omitempty"` - SupplyAadPrefix *bool `thrift:"supply_aad_prefix,3" db:"supply_aad_prefix" json:"supply_aad_prefix,omitempty"` -} - -func NewAesGcmCtrV1() *AesGcmCtrV1 { - return &AesGcmCtrV1{} -} - -var AesGcmCtrV1_AadPrefix_DEFAULT []byte - -func (p *AesGcmCtrV1) GetAadPrefix() []byte { - return p.AadPrefix -} -var AesGcmCtrV1_AadFileUnique_DEFAULT []byte - -func (p *AesGcmCtrV1) GetAadFileUnique() []byte { - return p.AadFileUnique -} -var AesGcmCtrV1_SupplyAadPrefix_DEFAULT bool -func (p *AesGcmCtrV1) GetSupplyAadPrefix() bool { - if !p.IsSetSupplyAadPrefix() { - return 
AesGcmCtrV1_SupplyAadPrefix_DEFAULT - } -return *p.SupplyAadPrefix -} -func (p *AesGcmCtrV1) IsSetAadPrefix() bool { - return p.AadPrefix != nil -} - -func (p *AesGcmCtrV1) IsSetAadFileUnique() bool { - return p.AadFileUnique != nil -} - -func (p *AesGcmCtrV1) IsSetSupplyAadPrefix() bool { - return p.SupplyAadPrefix != nil -} - -func (p *AesGcmCtrV1) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.STRING { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *AesGcmCtrV1) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.AadPrefix = v -} - return nil -} - -func (p *AesGcmCtrV1) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(ctx); err != nil { - return thrift.PrependError("error reading field 2: ", err) -} else { - p.AadFileUnique = v -} - return nil -} - -func (p *AesGcmCtrV1) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(ctx); err != nil { - return thrift.PrependError("error reading field 3: ", err) -} else { - p.SupplyAadPrefix = &v -} - return nil -} - -func (p *AesGcmCtrV1) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "AesGcmCtrV1"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - if err := p.writeField2(ctx, oprot); err != nil { return err } - if err := p.writeField3(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *AesGcmCtrV1) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetAadPrefix() { - if err := oprot.WriteFieldBegin(ctx, "aad_prefix", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:aad_prefix: ", p), err) } - if err := oprot.WriteBinary(ctx, p.AadPrefix); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T.aad_prefix (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:aad_prefix: ", p), err) } - } - return err -} - -func (p *AesGcmCtrV1) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetAadFileUnique() { - if err := oprot.WriteFieldBegin(ctx, "aad_file_unique", thrift.STRING, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:aad_file_unique: ", p), err) } - if err := oprot.WriteBinary(ctx, p.AadFileUnique); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.aad_file_unique (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:aad_file_unique: ", p), err) } - } - return err -} - -func (p *AesGcmCtrV1) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetSupplyAadPrefix() { - if err := oprot.WriteFieldBegin(ctx, "supply_aad_prefix", thrift.BOOL, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:supply_aad_prefix: ", p), err) } - if err := oprot.WriteBool(ctx, bool(*p.SupplyAadPrefix)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.supply_aad_prefix (3) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:supply_aad_prefix: ", p), err) } - } - return err -} - -func (p *AesGcmCtrV1) Equals(other *AesGcmCtrV1) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if bytes.Compare(p.AadPrefix, other.AadPrefix) != 0 { return false } - if bytes.Compare(p.AadFileUnique, other.AadFileUnique) != 0 { return false } - if p.SupplyAadPrefix != other.SupplyAadPrefix { - if p.SupplyAadPrefix == nil || other.SupplyAadPrefix == nil { - return false - } - if (*p.SupplyAadPrefix) != (*other.SupplyAadPrefix) { return false } - } - return true -} - -func (p *AesGcmCtrV1) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("AesGcmCtrV1(%+v)", *p) -} - -// Attributes: -// - AES_GCM_V1 -// - AES_GCM_CTR_V1 -type EncryptionAlgorithm struct { - AES_GCM_V1 *AesGcmV1 `thrift:"AES_GCM_V1,1" db:"AES_GCM_V1" json:"AES_GCM_V1,omitempty"` - AES_GCM_CTR_V1 *AesGcmCtrV1 `thrift:"AES_GCM_CTR_V1,2" db:"AES_GCM_CTR_V1" json:"AES_GCM_CTR_V1,omitempty"` -} - -func NewEncryptionAlgorithm() *EncryptionAlgorithm { - return &EncryptionAlgorithm{} -} - -var EncryptionAlgorithm_AES_GCM_V1_DEFAULT *AesGcmV1 -func (p *EncryptionAlgorithm) GetAES_GCM_V1() *AesGcmV1 { - if !p.IsSetAES_GCM_V1() { - return EncryptionAlgorithm_AES_GCM_V1_DEFAULT - } -return p.AES_GCM_V1 -} -var EncryptionAlgorithm_AES_GCM_CTR_V1_DEFAULT *AesGcmCtrV1 -func (p *EncryptionAlgorithm) GetAES_GCM_CTR_V1() *AesGcmCtrV1 { - if !p.IsSetAES_GCM_CTR_V1() { - return EncryptionAlgorithm_AES_GCM_CTR_V1_DEFAULT - } -return p.AES_GCM_CTR_V1 -} -func (p *EncryptionAlgorithm) CountSetFieldsEncryptionAlgorithm() int { - count := 0 - if (p.IsSetAES_GCM_V1()) { - count++ - } - if (p.IsSetAES_GCM_CTR_V1()) { - count++ - } - return count - -} - -func (p *EncryptionAlgorithm) IsSetAES_GCM_V1() bool { - return p.AES_GCM_V1 != nil -} - -func (p *EncryptionAlgorithm) IsSetAES_GCM_CTR_V1() bool { - return p.AES_GCM_CTR_V1 != nil -} - -func (p *EncryptionAlgorithm) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err 
:= iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *EncryptionAlgorithm) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - p.AES_GCM_V1 = &AesGcmV1{} - if err := p.AES_GCM_V1.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.AES_GCM_V1), err) - } - return nil -} - -func (p *EncryptionAlgorithm) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - p.AES_GCM_CTR_V1 = &AesGcmCtrV1{} - if err := p.AES_GCM_CTR_V1.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.AES_GCM_CTR_V1), err) - } - return nil -} - -func (p *EncryptionAlgorithm) Write(ctx context.Context, oprot thrift.TProtocol) error { - if c := p.CountSetFieldsEncryptionAlgorithm(); c != 1 { - return fmt.Errorf("%T write union: exactly one field must be set (%d set)", p, c) - } - if err := oprot.WriteStructBegin(ctx, "EncryptionAlgorithm"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - if err := p.writeField2(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *EncryptionAlgorithm) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetAES_GCM_V1() { - if err := oprot.WriteFieldBegin(ctx, "AES_GCM_V1", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:AES_GCM_V1: ", p), err) } - if err := p.AES_GCM_V1.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.AES_GCM_V1), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:AES_GCM_V1: ", p), err) } - } - return err -} - -func (p *EncryptionAlgorithm) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetAES_GCM_CTR_V1() { - if err := oprot.WriteFieldBegin(ctx, "AES_GCM_CTR_V1", thrift.STRUCT, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:AES_GCM_CTR_V1: ", p), err) } - if err := p.AES_GCM_CTR_V1.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", 
p.AES_GCM_CTR_V1), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:AES_GCM_CTR_V1: ", p), err) } - } - return err -} - -func (p *EncryptionAlgorithm) Equals(other *EncryptionAlgorithm) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if !p.AES_GCM_V1.Equals(other.AES_GCM_V1) { return false } - if !p.AES_GCM_CTR_V1.Equals(other.AES_GCM_CTR_V1) { return false } - return true -} - -func (p *EncryptionAlgorithm) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("EncryptionAlgorithm(%+v)", *p) -} - -// Description for file metadata -// -// Attributes: -// - Version: Version of this file * -// - Schema: Parquet schema for this file. This schema contains metadata for all the columns. -// The schema is represented as a tree with a single root. The nodes of the tree -// are flattened to a list by doing a depth-first traversal. -// The column metadata contains the path in the schema for that column which can be -// used to map columns to nodes in the schema. -// The first element is the root * -// - NumRows: Number of rows in this file * -// - RowGroups: Row groups in this file * -// - KeyValueMetadata: Optional key/value metadata * -// - CreatedBy: String for application that wrote this file. This should be in the format -// <Application> version <App Version> (build <App Build Hash>). -// e.g. impala version 1.0 (build 6cf94d29b2b7115df4de2c06e2ab4326d721eb55) -// -// - ColumnOrders: Sort order used for the min_value and max_value fields of each column in -// this file. Sort orders are listed in the order matching the columns in the -// schema. The indexes are not necessarily the same though, because only leaf -// nodes of the schema are represented in the list of sort orders. -// -// Without column_orders, the meaning of the min_value and max_value fields is -// undefined. To ensure well-defined behaviour, if min_value and max_value are -// written to a Parquet file, column_orders must be written as well. -// -// The obsolete min and max fields are always sorted by signed comparison -// regardless of column_orders. -// - EncryptionAlgorithm: Encryption algorithm. This field is set only in encrypted files -// with plaintext footer. Files with encrypted footer store algorithm id -// in FileCryptoMetaData structure. -// - FooterSigningKeyMetadata: Retrieval metadata of key used for signing the footer. -// Used only in encrypted files with plaintext footer.
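A hedged aside for reviewers before the generated struct below: this sketch shows roughly how such a footer struct is deserialized with the Apache Thrift Go runtime. decodeFooter is a hypothetical helper written for illustration, not code from this patch; it assumes the footer bytes have already been sliced out of the file and that the Thrift compact protocol (which Parquet uses for its footer) is available via github.com/apache/thrift/lib/go/thrift.

import (
	"bytes"
	"context"

	"github.com/apache/thrift/lib/go/thrift"
)

// decodeFooter deserializes raw Parquet footer bytes into the generated
// FileMetaData struct defined below. Hypothetical helper, for illustration.
func decodeFooter(raw []byte) (*FileMetaData, error) {
	transport := thrift.NewStreamTransportR(bytes.NewReader(raw))
	proto := thrift.NewTCompactProtocolConf(transport, &thrift.TConfiguration{})
	fm := NewFileMetaData()
	if err := fm.Read(context.Background(), proto); err != nil {
		return nil, err
	}
	return fm, nil
}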
-type FileMetaData struct { - Version int32 `thrift:"version,1,required" db:"version" json:"version"` - Schema []*SchemaElement `thrift:"schema,2,required" db:"schema" json:"schema"` - NumRows int64 `thrift:"num_rows,3,required" db:"num_rows" json:"num_rows"` - RowGroups []*RowGroup `thrift:"row_groups,4,required" db:"row_groups" json:"row_groups"` - KeyValueMetadata []*KeyValue `thrift:"key_value_metadata,5" db:"key_value_metadata" json:"key_value_metadata,omitempty"` - CreatedBy *string `thrift:"created_by,6" db:"created_by" json:"created_by,omitempty"` - ColumnOrders []*ColumnOrder `thrift:"column_orders,7" db:"column_orders" json:"column_orders,omitempty"` - EncryptionAlgorithm *EncryptionAlgorithm `thrift:"encryption_algorithm,8" db:"encryption_algorithm" json:"encryption_algorithm,omitempty"` - FooterSigningKeyMetadata []byte `thrift:"footer_signing_key_metadata,9" db:"footer_signing_key_metadata" json:"footer_signing_key_metadata,omitempty"` -} - -func NewFileMetaData() *FileMetaData { - return &FileMetaData{} -} - - -func (p *FileMetaData) GetVersion() int32 { - return p.Version -} - -func (p *FileMetaData) GetSchema() []*SchemaElement { - return p.Schema -} - -func (p *FileMetaData) GetNumRows() int64 { - return p.NumRows -} - -func (p *FileMetaData) GetRowGroups() []*RowGroup { - return p.RowGroups -} -var FileMetaData_KeyValueMetadata_DEFAULT []*KeyValue - -func (p *FileMetaData) GetKeyValueMetadata() []*KeyValue { - return p.KeyValueMetadata -} -var FileMetaData_CreatedBy_DEFAULT string -func (p *FileMetaData) GetCreatedBy() string { - if !p.IsSetCreatedBy() { - return FileMetaData_CreatedBy_DEFAULT - } -return *p.CreatedBy -} -var FileMetaData_ColumnOrders_DEFAULT []*ColumnOrder - -func (p *FileMetaData) GetColumnOrders() []*ColumnOrder { - return p.ColumnOrders -} -var FileMetaData_EncryptionAlgorithm_DEFAULT *EncryptionAlgorithm -func (p *FileMetaData) GetEncryptionAlgorithm() *EncryptionAlgorithm { - if !p.IsSetEncryptionAlgorithm() { - return FileMetaData_EncryptionAlgorithm_DEFAULT - } -return p.EncryptionAlgorithm -} -var FileMetaData_FooterSigningKeyMetadata_DEFAULT []byte - -func (p *FileMetaData) GetFooterSigningKeyMetadata() []byte { - return p.FooterSigningKeyMetadata -} -func (p *FileMetaData) IsSetKeyValueMetadata() bool { - return p.KeyValueMetadata != nil -} - -func (p *FileMetaData) IsSetCreatedBy() bool { - return p.CreatedBy != nil -} - -func (p *FileMetaData) IsSetColumnOrders() bool { - return p.ColumnOrders != nil -} - -func (p *FileMetaData) IsSetEncryptionAlgorithm() bool { - return p.EncryptionAlgorithm != nil -} - -func (p *FileMetaData) IsSetFooterSigningKeyMetadata() bool { - return p.FooterSigningKeyMetadata != nil -} - -func (p *FileMetaData) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetVersion bool = false; - var issetSchema bool = false; - var issetNumRows bool = false; - var issetRowGroups bool = false; - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.I32 { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetVersion = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == 
thrift.LIST { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - issetSchema = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 3: - if fieldTypeId == thrift.I64 { - if err := p.ReadField3(ctx, iprot); err != nil { - return err - } - issetNumRows = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.LIST { - if err := p.ReadField4(ctx, iprot); err != nil { - return err - } - issetRowGroups = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 5: - if fieldTypeId == thrift.LIST { - if err := p.ReadField5(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 6: - if fieldTypeId == thrift.STRING { - if err := p.ReadField6(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 7: - if fieldTypeId == thrift.LIST { - if err := p.ReadField7(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 8: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField8(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 9: - if fieldTypeId == thrift.STRING { - if err := p.ReadField9(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetVersion{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Version is not set")); - } - if !issetSchema{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Schema is not set")); - } - if !issetNumRows{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field NumRows is not set")); - } - if !issetRowGroups{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field RowGroups is not set")); - } - return nil -} - -func (p *FileMetaData) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(ctx); err != nil { - return thrift.PrependError("error reading field 1: ", err) -} else { - p.Version = v -} - return nil -} - -func (p *FileMetaData) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*SchemaElement, 0, size) - p.Schema = tSlice - for i := 0; i < size; i ++ { - _elem24 := &SchemaElement{} - if err := _elem24.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem24), err) - } - p.Schema = append(p.Schema, _elem24) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *FileMetaData) ReadField3(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(ctx); err != nil { - 
return thrift.PrependError("error reading field 3: ", err) -} else { - p.NumRows = v -} - return nil -} - -func (p *FileMetaData) ReadField4(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*RowGroup, 0, size) - p.RowGroups = tSlice - for i := 0; i < size; i ++ { - _elem25 := &RowGroup{} - if err := _elem25.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem25), err) - } - p.RowGroups = append(p.RowGroups, _elem25) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *FileMetaData) ReadField5(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*KeyValue, 0, size) - p.KeyValueMetadata = tSlice - for i := 0; i < size; i ++ { - _elem26 := &KeyValue{} - if err := _elem26.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem26), err) - } - p.KeyValueMetadata = append(p.KeyValueMetadata, _elem26) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *FileMetaData) ReadField6(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(ctx); err != nil { - return thrift.PrependError("error reading field 6: ", err) -} else { - p.CreatedBy = &v -} - return nil -} - -func (p *FileMetaData) ReadField7(ctx context.Context, iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin(ctx) - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*ColumnOrder, 0, size) - p.ColumnOrders = tSlice - for i := 0; i < size; i ++ { - _elem27 := &ColumnOrder{} - if err := _elem27.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem27), err) - } - p.ColumnOrders = append(p.ColumnOrders, _elem27) - } - if err := iprot.ReadListEnd(ctx); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *FileMetaData) ReadField8(ctx context.Context, iprot thrift.TProtocol) error { - p.EncryptionAlgorithm = &EncryptionAlgorithm{} - if err := p.EncryptionAlgorithm.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.EncryptionAlgorithm), err) - } - return nil -} - -func (p *FileMetaData) ReadField9(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(ctx); err != nil { - return thrift.PrependError("error reading field 9: ", err) -} else { - p.FooterSigningKeyMetadata = v -} - return nil -} - -func (p *FileMetaData) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "FileMetaData"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - if err := p.writeField2(ctx, oprot); err != nil { return err } - if err := p.writeField3(ctx, oprot); err != nil { return err } - if err := p.writeField4(ctx, oprot); err != nil { return err } - if err := p.writeField5(ctx, oprot); err != nil { return err } - if err := 
p.writeField6(ctx, oprot); err != nil { return err } - if err := p.writeField7(ctx, oprot); err != nil { return err } - if err := p.writeField8(ctx, oprot); err != nil { return err } - if err := p.writeField9(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *FileMetaData) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "version", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:version: ", p), err) } - if err := oprot.WriteI32(ctx, int32(p.Version)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.version (1) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:version: ", p), err) } - return err -} - -func (p *FileMetaData) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "schema", thrift.LIST, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:schema: ", p), err) } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Schema)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Schema { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:schema: ", p), err) } - return err -} - -func (p *FileMetaData) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "num_rows", thrift.I64, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:num_rows: ", p), err) } - if err := oprot.WriteI64(ctx, int64(p.NumRows)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.num_rows (3) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:num_rows: ", p), err) } - return err -} - -func (p *FileMetaData) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "row_groups", thrift.LIST, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:row_groups: ", p), err) } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.RowGroups)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.RowGroups { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:row_groups: ", p), err) } - return err -} - -func (p *FileMetaData) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetKeyValueMetadata() { - if err := 
oprot.WriteFieldBegin(ctx, "key_value_metadata", thrift.LIST, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:key_value_metadata: ", p), err) } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.KeyValueMetadata)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.KeyValueMetadata { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:key_value_metadata: ", p), err) } - } - return err -} - -func (p *FileMetaData) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetCreatedBy() { - if err := oprot.WriteFieldBegin(ctx, "created_by", thrift.STRING, 6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:created_by: ", p), err) } - if err := oprot.WriteString(ctx, string(*p.CreatedBy)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.created_by (6) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 6:created_by: ", p), err) } - } - return err -} - -func (p *FileMetaData) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetColumnOrders() { - if err := oprot.WriteFieldBegin(ctx, "column_orders", thrift.LIST, 7); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:column_orders: ", p), err) } - if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.ColumnOrders)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.ColumnOrders { - if err := v.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(ctx); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 7:column_orders: ", p), err) } - } - return err -} - -func (p *FileMetaData) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetEncryptionAlgorithm() { - if err := oprot.WriteFieldBegin(ctx, "encryption_algorithm", thrift.STRUCT, 8); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:encryption_algorithm: ", p), err) } - if err := p.EncryptionAlgorithm.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.EncryptionAlgorithm), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 8:encryption_algorithm: ", p), err) } - } - return err -} - -func (p *FileMetaData) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetFooterSigningKeyMetadata() { - if err := oprot.WriteFieldBegin(ctx, "footer_signing_key_metadata", thrift.STRING, 9); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:footer_signing_key_metadata: ", p), err) } - if err := oprot.WriteBinary(ctx, p.FooterSigningKeyMetadata); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.footer_signing_key_metadata (9) 
field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 9:footer_signing_key_metadata: ", p), err) } - } - return err -} - -func (p *FileMetaData) Equals(other *FileMetaData) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if p.Version != other.Version { return false } - if len(p.Schema) != len(other.Schema) { return false } - for i, _tgt := range p.Schema { - _src28 := other.Schema[i] - if !_tgt.Equals(_src28) { return false } - } - if p.NumRows != other.NumRows { return false } - if len(p.RowGroups) != len(other.RowGroups) { return false } - for i, _tgt := range p.RowGroups { - _src29 := other.RowGroups[i] - if !_tgt.Equals(_src29) { return false } - } - if len(p.KeyValueMetadata) != len(other.KeyValueMetadata) { return false } - for i, _tgt := range p.KeyValueMetadata { - _src30 := other.KeyValueMetadata[i] - if !_tgt.Equals(_src30) { return false } - } - if p.CreatedBy != other.CreatedBy { - if p.CreatedBy == nil || other.CreatedBy == nil { - return false - } - if (*p.CreatedBy) != (*other.CreatedBy) { return false } - } - if len(p.ColumnOrders) != len(other.ColumnOrders) { return false } - for i, _tgt := range p.ColumnOrders { - _src31 := other.ColumnOrders[i] - if !_tgt.Equals(_src31) { return false } - } - if !p.EncryptionAlgorithm.Equals(other.EncryptionAlgorithm) { return false } - if bytes.Compare(p.FooterSigningKeyMetadata, other.FooterSigningKeyMetadata) != 0 { return false } - return true -} - -func (p *FileMetaData) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("FileMetaData(%+v)", *p) -} - -// Crypto metadata for files with encrypted footer * -// -// Attributes: -// - EncryptionAlgorithm: Encryption algorithm. This field is only used for files -// with encrypted footer. Files with plaintext footer store algorithm id -// inside footer (FileMetaData structure). 
-// - KeyMetadata: Retrieval metadata of key used for encryption of footer, -// and (possibly) columns * -type FileCryptoMetaData struct { - EncryptionAlgorithm *EncryptionAlgorithm `thrift:"encryption_algorithm,1,required" db:"encryption_algorithm" json:"encryption_algorithm"` - KeyMetadata []byte `thrift:"key_metadata,2" db:"key_metadata" json:"key_metadata,omitempty"` -} - -func NewFileCryptoMetaData() *FileCryptoMetaData { - return &FileCryptoMetaData{} -} - -var FileCryptoMetaData_EncryptionAlgorithm_DEFAULT *EncryptionAlgorithm -func (p *FileCryptoMetaData) GetEncryptionAlgorithm() *EncryptionAlgorithm { - if !p.IsSetEncryptionAlgorithm() { - return FileCryptoMetaData_EncryptionAlgorithm_DEFAULT - } -return p.EncryptionAlgorithm -} -var FileCryptoMetaData_KeyMetadata_DEFAULT []byte - -func (p *FileCryptoMetaData) GetKeyMetadata() []byte { - return p.KeyMetadata -} -func (p *FileCryptoMetaData) IsSetEncryptionAlgorithm() bool { - return p.EncryptionAlgorithm != nil -} - -func (p *FileCryptoMetaData) IsSetKeyMetadata() bool { - return p.KeyMetadata != nil -} - -func (p *FileCryptoMetaData) Read(ctx context.Context, iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetEncryptionAlgorithm bool = false; - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { break; } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(ctx, iprot); err != nil { - return err - } - issetEncryptionAlgorithm = true - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - case 2: - if fieldTypeId == thrift.STRING { - if err := p.ReadField2(ctx, iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(ctx, fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetEncryptionAlgorithm{ - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field EncryptionAlgorithm is not set")); - } - return nil -} - -func (p *FileCryptoMetaData) ReadField1(ctx context.Context, iprot thrift.TProtocol) error { - p.EncryptionAlgorithm = &EncryptionAlgorithm{} - if err := p.EncryptionAlgorithm.Read(ctx, iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.EncryptionAlgorithm), err) - } - return nil -} - -func (p *FileCryptoMetaData) ReadField2(ctx context.Context, iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(ctx); err != nil { - return thrift.PrependError("error reading field 2: ", err) -} else { - p.KeyMetadata = v -} - return nil -} - -func (p *FileCryptoMetaData) Write(ctx context.Context, oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin(ctx, "FileCryptoMetaData"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } - if p != nil { - if err := p.writeField1(ctx, oprot); err != nil { return err } - if err := p.writeField2(ctx, oprot); err != nil { return err } - } - if err := oprot.WriteFieldStop(ctx); err != nil { - return 
thrift.PrependError("write field stop error: ", err) } - if err := oprot.WriteStructEnd(ctx); err != nil { - return thrift.PrependError("write struct stop error: ", err) } - return nil -} - -func (p *FileCryptoMetaData) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin(ctx, "encryption_algorithm", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:encryption_algorithm: ", p), err) } - if err := p.EncryptionAlgorithm.Write(ctx, oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.EncryptionAlgorithm), err) - } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:encryption_algorithm: ", p), err) } - return err -} - -func (p *FileCryptoMetaData) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) { - if p.IsSetKeyMetadata() { - if err := oprot.WriteFieldBegin(ctx, "key_metadata", thrift.STRING, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:key_metadata: ", p), err) } - if err := oprot.WriteBinary(ctx, p.KeyMetadata); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.key_metadata (2) field write error: ", p), err) } - if err := oprot.WriteFieldEnd(ctx); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:key_metadata: ", p), err) } - } - return err -} - -func (p *FileCryptoMetaData) Equals(other *FileCryptoMetaData) bool { - if p == other { - return true - } else if p == nil || other == nil { - return false - } - if !p.EncryptionAlgorithm.Equals(other.EncryptionAlgorithm) { return false } - if bytes.Compare(p.KeyMetadata, other.KeyMetadata) != 0 { return false } - return true -} - -func (p *FileCryptoMetaData) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("FileCryptoMetaData(%+v)", *p) -} - diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/internal/gen-go/parquet/staticcheck.conf b/vendor/github.com/apache/arrow/go/v12/parquet/internal/gen-go/parquet/staticcheck.conf deleted file mode 100644 index d714bfd8..00000000 --- a/vendor/github.com/apache/arrow/go/v12/parquet/internal/gen-go/parquet/staticcheck.conf +++ /dev/null @@ -1,17 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -checks = ["all", "-ST1005", "-ST1000"] diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/reader_properties.go b/vendor/github.com/apache/arrow/go/v12/parquet/reader_properties.go deleted file mode 100644 index 4700e512..00000000 --- a/vendor/github.com/apache/arrow/go/v12/parquet/reader_properties.go +++ /dev/null @@ -1,88 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package parquet - -import ( - "bytes" - "fmt" - "io" - - "github.com/apache/arrow/go/v12/arrow/memory" - "github.com/apache/arrow/go/v12/internal/utils" -) - -// ReaderProperties are used to define how the file reader will handle buffering and allocating buffers -type ReaderProperties struct { - alloc memory.Allocator - // Default buffer size to utilize when reading chunks, when reading page - // headers or other metadata, this buffer may be increased if necessary - // to read in the necessary metadata. The value here is simply the default - // initial BufferSize when reading a new chunk. - BufferSize int64 - // create with NewFileDecryptionProperties if dealing with an encrypted file - FileDecryptProps *FileDecryptionProperties - // If this is set to true, then the reader will use SectionReader to - // just use the read stream when reading data. Otherwise we will buffer - // the data we're going to read into memory first and then read that buffer. - // - // If reading from higher latency IO, like S3, it might improve performance to - // set this to true in order to read the entire row group in at once rather than - // make multiple smaller data requests. For low latency IO streams or if only - // reading small portions / subsets of the parquet file, this can be set to false - // to reduce the amount of IO performed in order to avoid reading excess amounts of data. - BufferedStreamEnabled bool -} - -type BufferedReader interface { - Peek(int) ([]byte, error) - Discard(int) (int, error) - io.Reader -} - -// NewReaderProperties returns the default Reader Properties using the provided allocator. -// -// If nil is passed for the allocator, then memory.DefaultAllocator will be used. -func NewReaderProperties(alloc memory.Allocator) *ReaderProperties { - if alloc == nil { - alloc = memory.DefaultAllocator - } - return &ReaderProperties{alloc, DefaultBufSize, nil, false} -} - -// Allocator returns the allocator that the properties were initialized with -func (r *ReaderProperties) Allocator() memory.Allocator { return r.alloc } - -// GetStream returns a section of the underlying reader based on whether or not BufferedStream is enabled. -// -// If BufferedStreamEnabled is true, it creates an io.SectionReader, otherwise it will read the entire section -// into a buffer in memory and return a bytes.NewReader for that buffer. 
-func (r *ReaderProperties) GetStream(source io.ReaderAt, start, nbytes int64) (BufferedReader, error) { - if r.BufferedStreamEnabled { - return utils.NewBufferedReader(io.NewSectionReader(source, start, nbytes), int(r.BufferSize)), nil - } - - data := make([]byte, nbytes) - n, err := source.ReadAt(data, start) - if err != nil { - return nil, fmt.Errorf("parquet: tried reading from file, but got error: %w", err) - } - if n != int(nbytes) { - return nil, fmt.Errorf("parquet: tried reading %d bytes starting at position %d from file but only got %d", nbytes, start, n) - } - - return utils.NewBufferedReader(bytes.NewReader(data), int(nbytes)), nil -} diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/types.go b/vendor/github.com/apache/arrow/go/v12/parquet/types.go deleted file mode 100644 index 0e6e6aec..00000000 --- a/vendor/github.com/apache/arrow/go/v12/parquet/types.go +++ /dev/null @@ -1,391 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package parquet - -import ( - "encoding/binary" - "io" - "reflect" - "strings" - "time" - "unsafe" - - "github.com/apache/arrow/go/v12/arrow" - format "github.com/apache/arrow/go/v12/parquet/internal/gen-go/parquet" -) - -const ( - julianUnixEpoch int64 = 2440588 - nanosPerDay int64 = 3600 * 24 * 1000 * 1000 * 1000 - // Int96SizeBytes is the number of bytes that make up an Int96 - Int96SizeBytes int = 12 -) - -var ( - // Int96Traits provides information about the Int96 type - Int96Traits int96Traits - // ByteArrayTraits provides information about the ByteArray type, which is just a []byte - ByteArrayTraits byteArrayTraits - // FixedLenByteArrayTraits provides information about the FixedLenByteArray type which is just a []byte - FixedLenByteArrayTraits fixedLenByteArrayTraits - // ByteArraySizeBytes is the number of bytes returned by reflect.TypeOf(ByteArray{}).Size() - ByteArraySizeBytes int = int(reflect.TypeOf(ByteArray{}).Size()) - // FixedLenByteArraySizeBytes is the number of bytes returned by reflect.TypeOf(FixedLenByteArray{}).Size() - FixedLenByteArraySizeBytes int = int(reflect.TypeOf(FixedLenByteArray{}).Size()) -) - -// ReaderAtSeeker is a combination of the ReaderAt and Seeker interfaces -// from the io package, defining the only functionality that is required -// in order for a parquet file to be read by the file functions. We just need -// to be able to call ReadAt and Seek -type ReaderAtSeeker interface { - io.ReaderAt - io.Seeker -} - -// NewInt96 creates a new Int96 from the given 3 uint32 values.
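The constructor itself follows. As a hedged, self-contained round-trip of the Int96 layout implemented by NewInt96 and ToTime below; the values are chosen purely for illustration:

package main

import (
	"fmt"
	"time"

	"github.com/apache/arrow/go/v12/parquet"
)

func main() {
	ns := uint64((12 * time.Hour).Nanoseconds()) // nanoseconds within the day (low 8 bytes)
	jday := uint32(2440588)                      // Julian day of the Unix epoch (high 4 bytes)
	ts := parquet.NewInt96([3]uint32{uint32(ns), uint32(ns >> 32), jday})
	fmt.Println(ts.ToTime()) // 1970-01-01 12:00:00 +0000 UTC
}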
-func NewInt96(v [3]uint32) (out Int96) { - binary.LittleEndian.PutUint32(out[0:], v[0]) - binary.LittleEndian.PutUint32(out[4:], v[1]) - binary.LittleEndian.PutUint32(out[8:], v[2]) - return -} - -// Int96 is a 12 byte integer value utilized for representing timestamps as a 64 bit integer and a 32 bit -// integer. -type Int96 [12]byte - -// SetNanoSeconds sets the Nanosecond field of the Int96 timestamp to the provided value -func (i96 *Int96) SetNanoSeconds(nanos int64) { - binary.LittleEndian.PutUint64(i96[:8], uint64(nanos)) -} - -// String provides the string representation as a timestamp via converting to a time.Time -// and then calling String -func (i96 Int96) String() string { - return i96.ToTime().String() -} - -// ToTime returns a go time.Time object that represents the same time instant as the given Int96 value -func (i96 Int96) ToTime() time.Time { - nanos := binary.LittleEndian.Uint64(i96[:8]) - jdays := binary.LittleEndian.Uint32(i96[8:]) - - nanos = (uint64(jdays)-uint64(julianUnixEpoch))*uint64(nanosPerDay) + nanos - t := time.Unix(0, int64(nanos)) - return t.UTC() -} - -type int96Traits struct{} - -func (int96Traits) BytesRequired(n int) int { return Int96SizeBytes * n } - -func (int96Traits) CastFromBytes(b []byte) []Int96 { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - var res []Int96 - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len / Int96SizeBytes - s.Cap = h.Cap / Int96SizeBytes - - return res -} - -func (int96Traits) CastToBytes(b []Int96) []byte { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - var res []byte - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len * Int96SizeBytes - s.Cap = h.Cap * Int96SizeBytes - - return res -} - -// ByteArray is a type to be utilized for representing the Parquet ByteArray physical type, represented as a byte slice -type ByteArray []byte - -// Len returns the current length of the ByteArray, equivalent to len(bytearray) -func (b ByteArray) Len() int { - return len(b) -} - -// String returns a string representation of the ByteArray -func (b ByteArray) String() string { - return *(*string)(unsafe.Pointer(&b)) -} - -type byteArrayTraits struct{} - -func (byteArrayTraits) BytesRequired(n int) int { - return ByteArraySizeBytes * n -} - -func (byteArrayTraits) CastFromBytes(b []byte) []ByteArray { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - var res []ByteArray - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len / ByteArraySizeBytes - s.Cap = h.Cap / ByteArraySizeBytes - - return res -} - -// FixedLenByteArray is a go type to represent a FixedLengthByteArray as a byte slice -type FixedLenByteArray []byte - -// Len returns the current length of this FixedLengthByteArray, equivalent to len(fixedlenbytearray) -func (b FixedLenByteArray) Len() int { - return len(b) -} - -// String returns a string representation of the FixedLenByteArray -func (b FixedLenByteArray) String() string { - return *(*string)(unsafe.Pointer(&b)) -} - -type fixedLenByteArrayTraits struct{} - -func (fixedLenByteArrayTraits) BytesRequired(n int) int { - return FixedLenByteArraySizeBytes * n -} - -func (fixedLenByteArrayTraits) CastFromBytes(b []byte) []FixedLenByteArray { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - var res []FixedLenByteArray - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len / FixedLenByteArraySizeBytes - s.Cap = h.Cap / FixedLenByteArraySizeBytes - - return res -} - -// Creating 
our own enums allows avoiding the transitive dependency on the -// compiled thrift definitions in the public API, allowing us to not export -// the entire Thrift definitions, while making everything a simple cast between them. -// -// It also lets us add special values like NONE to distinguish between values -// that are set or not set. -type ( - // Type is the physical type as in parquet.thrift - Type format.Type - // Cipher is the parquet Cipher Algorithms - Cipher int - // ColumnOrder is the Column Order from the parquet.thrift - ColumnOrder *format.ColumnOrder - // Version is the parquet version type - Version int8 - // DataPageVersion is the version of the Parquet Data Pages - DataPageVersion int8 - // Encoding is the parquet Encoding type - Encoding format.Encoding - // Repetition is the underlying parquet field repetition type as in parquet.thrift - Repetition format.FieldRepetitionType - // ColumnPath is the path from the root of the schema to a given column - ColumnPath []string -) - -func (c ColumnPath) String() string { - if c == nil { - return "" - } - return strings.Join(c, ".") -} - -// Extend creates a new ColumnPath from an existing one, with the new ColumnPath having s appended to the end. -func (c ColumnPath) Extend(s string) ColumnPath { - p := make([]string, len(c), len(c)+1) - copy(p, c) - return append(p, s) -} - -// ColumnPathFromString constructs a ColumnPath from a dot separated string -func ColumnPathFromString(s string) ColumnPath { - return strings.Split(s, ".") -} - -// constants for choosing the Aes Algorithm to use for encryption/decryption -const ( - AesGcm Cipher = iota - AesCtr -) - -// Constants for the parquet Version which governs which data types are allowed -// and how they are represented. For example, uint32 data will be written differently -// depending on this value (as INT64 for V1_0, as UINT32 for other versions). -// -// However, some features - such as compression algorithms, encryption, -// or the improved v2 data page format - must be enabled separately in writer -// properties. -const ( - // Enable only pre-2.2 parquet format features when writing. - // - // This is useful for maximum compatibility with legacy readers. - // Note that logical types may still be emitted, as long as they have - // a corresponding converted type. - V1_0 Version = iota // v1.0 - // Enable parquet format 2.4 and earlier features when writing. - // - // This enables uint32 as well as logical types which don't have a - // corresponding converted type. - // - // Note: Parquet format 2.4.0 was released in October 2017 - V2_4 // v2.4 - // Enable Parquet format 2.6 and earlier features when writing. - // - // This enables the nanos time unit in addition to the V2_4 features. - // - // Note: Parquet format 2.6.0 was released in September 2018 - V2_6 // v2.6 - // Enable the latest parquet format 2.x features. - // - // This is equal to the greatest 2.x version supported by this library. - V2_LATEST = V2_6 -) - -// constants for the parquet DataPage Version to use -const ( - DataPageV1 DataPageVersion = iota - DataPageV2 -) - -func (e Encoding) String() string { - return format.Encoding(e).String() -} - -var ( - // Types contains constants for the Physical Types that are used in the Parquet Spec - // - // They can be specified when needed as such: `parquet.Types.Int32` etc.
The values - // all correspond to the values in parquet.thrift - Types = struct { - Boolean Type - Int32 Type - Int64 Type - Int96 Type - Float Type - Double Type - ByteArray Type - FixedLenByteArray Type - // this only exists as a convienence so we can denote it when necessary - // nearly all functions that take a parquet.Type will error/panic if given - // Undefined - Undefined Type - }{ - Boolean: Type(format.Type_BOOLEAN), - Int32: Type(format.Type_INT32), - Int64: Type(format.Type_INT64), - Int96: Type(format.Type_INT96), - Float: Type(format.Type_FLOAT), - Double: Type(format.Type_DOUBLE), - ByteArray: Type(format.Type_BYTE_ARRAY), - FixedLenByteArray: Type(format.Type_FIXED_LEN_BYTE_ARRAY), - Undefined: Type(format.Type_FIXED_LEN_BYTE_ARRAY + 1), - } - - // Encodings contains constants for the encoding types of the column data - // - // The values used all correspond to the values in parquet.thrift for the - // corresponding encoding type. - Encodings = struct { - Plain Encoding - PlainDict Encoding - RLE Encoding - RLEDict Encoding - BitPacked Encoding // deprecated, not implemented - DeltaByteArray Encoding - DeltaBinaryPacked Encoding - DeltaLengthByteArray Encoding - }{ - Plain: Encoding(format.Encoding_PLAIN), - PlainDict: Encoding(format.Encoding_PLAIN_DICTIONARY), - RLE: Encoding(format.Encoding_RLE), - RLEDict: Encoding(format.Encoding_RLE_DICTIONARY), - BitPacked: Encoding(format.Encoding_BIT_PACKED), - DeltaByteArray: Encoding(format.Encoding_DELTA_BYTE_ARRAY), - DeltaBinaryPacked: Encoding(format.Encoding_DELTA_BINARY_PACKED), - DeltaLengthByteArray: Encoding(format.Encoding_DELTA_LENGTH_BYTE_ARRAY), - } - - // ColumnOrders contains constants for the Column Ordering fields - ColumnOrders = struct { - Undefined ColumnOrder - TypeDefinedOrder ColumnOrder - }{ - Undefined: format.NewColumnOrder(), - TypeDefinedOrder: &format.ColumnOrder{TYPE_ORDER: format.NewTypeDefinedOrder()}, - } - - // DefaultColumnOrder is to use TypeDefinedOrder - DefaultColumnOrder = ColumnOrders.TypeDefinedOrder - - // Repetitions contains the constants for Field Repetition Types - Repetitions = struct { - Required Repetition - Optional Repetition - Repeated Repetition - Undefined Repetition // convenience value - }{ - Required: Repetition(format.FieldRepetitionType_REQUIRED), - Optional: Repetition(format.FieldRepetitionType_OPTIONAL), - Repeated: Repetition(format.FieldRepetitionType_REPEATED), - Undefined: Repetition(format.FieldRepetitionType_REPEATED + 1), - } -) - -func (t Type) String() string { - switch t { - case Types.Undefined: - return "UNDEFINED" - default: - return format.Type(t).String() - } -} - -func (r Repetition) String() string { - return strings.ToLower(format.FieldRepetitionType(r).String()) -} - -// ByteSize returns the number of bytes required to store a single value of -// the given parquet.Type in memory. 
-func (t Type) ByteSize() int { - switch t { - case Types.Boolean: - return 1 - case Types.Int32: - return arrow.Int32SizeBytes - case Types.Int64: - return arrow.Int64SizeBytes - case Types.Int96: - return Int96SizeBytes - case Types.Float: - return arrow.Float32SizeBytes - case Types.Double: - return arrow.Float64SizeBytes - case Types.ByteArray: - return ByteArraySizeBytes - case Types.FixedLenByteArray: - return FixedLenByteArraySizeBytes - } - panic("no bytesize info for type") -} diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/version_string.go b/vendor/github.com/apache/arrow/go/v12/parquet/version_string.go deleted file mode 100644 index ab01aa48..00000000 --- a/vendor/github.com/apache/arrow/go/v12/parquet/version_string.go +++ /dev/null @@ -1,25 +0,0 @@ -// Code generated by "stringer -type=Version -linecomment"; DO NOT EDIT. - -package parquet - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[V1_0-0] - _ = x[V2_4-1] - _ = x[V2_6-2] -} - -const _Version_name = "v1.0v2.4v2.6" - -var _Version_index = [...]uint8{0, 4, 8, 12} - -func (i Version) String() string { - if i < 0 || i >= Version(len(_Version_index)-1) { - return "Version(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _Version_name[_Version_index[i]:_Version_index[i+1]] -} diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/writer_properties.go b/vendor/github.com/apache/arrow/go/v12/parquet/writer_properties.go deleted file mode 100644 index 9e9ff4dd..00000000 --- a/vendor/github.com/apache/arrow/go/v12/parquet/writer_properties.go +++ /dev/null @@ -1,533 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package parquet - -import ( - "github.com/apache/arrow/go/v12/arrow/memory" - "github.com/apache/arrow/go/v12/parquet/compress" -) - -// Constants for default property values used for the default reader, writer and column props. -const ( - // Default Buffer size used for the Reader - DefaultBufSize int64 = 4096 * 4 - // Default data page size limit is 1MB; it's not guaranteed, but we will try to - // cut data pages off at this size where possible. - DefaultDataPageSize int64 = 1024 * 1024 - // Default is for dictionary encoding to be turned on, use WithDictionaryDefault - // writer property to change that. - DefaultDictionaryEnabled = true - // If the dictionary reaches this size limit, the writer will use - // the fallback encoding (usually plain) instead of continuing to build the - // dictionary index. - DefaultDictionaryPageSizeLimit = DefaultDataPageSize - // In order to attempt to facilitate data page size limits for writing, - // data is written in batches.
diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/writer_properties.go b/vendor/github.com/apache/arrow/go/v12/parquet/writer_properties.go
deleted file mode 100644
index 9e9ff4dd..00000000
--- a/vendor/github.com/apache/arrow/go/v12/parquet/writer_properties.go
+++ /dev/null
@@ -1,533 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package parquet
-
-import (
-	"github.com/apache/arrow/go/v12/arrow/memory"
-	"github.com/apache/arrow/go/v12/parquet/compress"
-)
-
-// Constants for default property values used for the default reader, writer and column props.
-const (
-	// Default buffer size used for the Reader
-	DefaultBufSize int64 = 4096 * 4
-	// Default data page size limit is 1MB. It's not guaranteed, but we will
-	// try to cut data pages off at this size where possible.
-	DefaultDataPageSize int64 = 1024 * 1024
-	// Default is for dictionary encoding to be turned on; use the
-	// WithDictionaryDefault writer property to change that.
-	DefaultDictionaryEnabled = true
-	// If the dictionary reaches this size limit, the writer will use the
-	// fallback encoding (usually plain) instead of continuing to build the
-	// dictionary index.
-	DefaultDictionaryPageSizeLimit = DefaultDataPageSize
-	// In order to attempt to facilitate data page size limits for writing,
-	// data is written in batches. Increasing the batch size may improve
-	// performance, but the larger the batch size, the easier it is to
-	// overshoot the data page limit.
-	DefaultWriteBatchSize int64 = 1024
-	// Default maximum number of rows for a single row group
-	DefaultMaxRowGroupLen int64 = 64 * 1024 * 1024
-	// Default is to have stats enabled for all columns; use writer properties
-	// to change the default, or to enable/disable stats for specific columns.
-	DefaultStatsEnabled = true
-	// If the stats are larger than 4KB, the writer will skip writing them out anyway.
-	DefaultMaxStatsSize int64 = 4096
-	DefaultCreatedBy          = "parquet-go version 12.0.0"
-	DefaultRootName           = "schema"
-)
-
-// ColumnProperties defines the encoding, codec, and so on for a given column.
-type ColumnProperties struct {
-	Encoding          Encoding
-	Codec             compress.Compression
-	DictionaryEnabled bool
-	StatsEnabled      bool
-	MaxStatsSize      int64
-	CompressionLevel  int
-}
-
-// DefaultColumnProperties returns the default properties which get utilized for writing.
-//
-// The default column properties are the following constants:
-//
-//	Encoding:          Encodings.Plain
-//	Codec:             compress.Codecs.Uncompressed
-//	DictionaryEnabled: DefaultDictionaryEnabled
-//	StatsEnabled:      DefaultStatsEnabled
-//	MaxStatsSize:      DefaultMaxStatsSize
-//	CompressionLevel:  compress.DefaultCompressionLevel
-func DefaultColumnProperties() ColumnProperties {
-	return ColumnProperties{
-		Encoding:          Encodings.Plain,
-		Codec:             compress.Codecs.Uncompressed,
-		DictionaryEnabled: DefaultDictionaryEnabled,
-		StatsEnabled:      DefaultStatsEnabled,
-		MaxStatsSize:      DefaultMaxStatsSize,
-		CompressionLevel:  compress.DefaultCompressionLevel,
-	}
-}
-
-type writerPropConfig struct {
-	wr            *WriterProperties
-	encodings     map[string]Encoding
-	codecs        map[string]compress.Compression
-	compressLevel map[string]int
-	dictEnabled   map[string]bool
-	statsEnabled  map[string]bool
-}
-
-// WriterProperty is an option used when building a writer properties instance
-type WriterProperty func(*writerPropConfig)
-
-// WithAllocator specifies the allocator the writer will use
-func WithAllocator(mem memory.Allocator) WriterProperty {
-	return func(cfg *writerPropConfig) {
-		cfg.wr.mem = mem
-	}
-}
-
-// WithDictionaryDefault sets the default value for whether to enable dictionary encoding
-func WithDictionaryDefault(dict bool) WriterProperty {
-	return func(cfg *writerPropConfig) {
-		cfg.wr.defColumnProps.DictionaryEnabled = dict
-	}
-}
-
-// WithDictionaryFor allows enabling or disabling dictionary encoding for a given column path string
-func WithDictionaryFor(path string, dict bool) WriterProperty {
-	return func(cfg *writerPropConfig) {
-		cfg.dictEnabled[path] = dict
-	}
-}
-
-// WithDictionaryPath is like WithDictionaryFor, but takes a ColumnPath type
-func WithDictionaryPath(path ColumnPath, dict bool) WriterProperty {
-	return WithDictionaryFor(path.String(), dict)
-}
-
-// WithDictionaryPageSizeLimit sets the size limit for the dictionary; once it
-// is reached, the writer falls back to plain encoding instead
-func WithDictionaryPageSizeLimit(limit int64) WriterProperty {
-	return func(cfg *writerPropConfig) {
-		cfg.wr.dictPagesize = limit
-	}
-}
-
-// WithBatchSize specifies the number of rows to use for batch writes to columns
-func WithBatchSize(batch int64) WriterProperty {
-	return func(cfg *writerPropConfig) {
-		cfg.wr.batchSize = batch
-	}
-}
-
-// WithMaxRowGroupLength specifies the maximum number of rows for a single row group in the writer.
-func WithMaxRowGroupLength(nrows int64) WriterProperty { - return func(cfg *writerPropConfig) { - cfg.wr.maxRowGroupLen = nrows - } -} - -// WithDataPageSize specifies the size to use for splitting data pages for column writing. -func WithDataPageSize(pgsize int64) WriterProperty { - return func(cfg *writerPropConfig) { - cfg.wr.pageSize = pgsize - } -} - -// WithDataPageVersion specifies whether to use Version 1 or Version 2 of the DataPage spec -func WithDataPageVersion(version DataPageVersion) WriterProperty { - return func(cfg *writerPropConfig) { - cfg.wr.dataPageVersion = version - } -} - -// WithVersion specifies which Parquet Spec version to utilize for writing. -func WithVersion(version Version) WriterProperty { - return func(cfg *writerPropConfig) { - cfg.wr.parquetVersion = version - } -} - -// WithCreatedBy specifies the "created by" string to use for the writer -func WithCreatedBy(createdby string) WriterProperty { - return func(cfg *writerPropConfig) { - cfg.wr.createdBy = createdby - } -} - -// WithRootName enables customization of the name used for the root schema node. This is required -// to maintain compatibility with other tools. -func WithRootName(name string) WriterProperty { - return func(cfg *writerPropConfig) { - cfg.wr.rootName = name - } -} - -// WithRootRepetition enables customization of the repetition used for the root schema node. -// This is required to maintain compatibility with other tools. -func WithRootRepetition(repetition Repetition) WriterProperty { - return func(cfg *writerPropConfig) { - cfg.wr.rootRepetition = repetition - } -} - -// WithEncoding defines the encoding that is used when we aren't using dictionary encoding. -// -// This is either applied if dictionary encoding is disabled, or if we fallback if the dictionary -// grew too large. -func WithEncoding(encoding Encoding) WriterProperty { - return func(cfg *writerPropConfig) { - if encoding == Encodings.PlainDict || encoding == Encodings.RLEDict { - panic("parquet: can't use dictionary encoding as fallback encoding") - } - cfg.wr.defColumnProps.Encoding = encoding - } -} - -// WithEncodingFor is for defining the encoding only for a specific column path. This encoding will be used -// if dictionary encoding is disabled for the column or if we fallback because the dictionary grew too large -func WithEncodingFor(path string, encoding Encoding) WriterProperty { - return func(cfg *writerPropConfig) { - if encoding == Encodings.PlainDict || encoding == Encodings.RLEDict { - panic("parquet: can't use dictionary encoding as fallback encoding") - } - cfg.encodings[path] = encoding - } -} - -// WithEncodingPath is the same as WithEncodingFor but takes a ColumnPath directly. -func WithEncodingPath(path ColumnPath, encoding Encoding) WriterProperty { - return WithEncodingFor(path.String(), encoding) -} - -// WithCompression specifies the default compression type to use for column writing. -func WithCompression(codec compress.Compression) WriterProperty { - return func(cfg *writerPropConfig) { - cfg.wr.defColumnProps.Codec = codec - } -} - -// WithCompressionFor specifies the compression type for the given column. -func WithCompressionFor(path string, codec compress.Compression) WriterProperty { - return func(cfg *writerPropConfig) { - cfg.codecs[path] = codec - } -} - -// WithCompressionPath is the same as WithCompressionFor but takes a ColumnPath directly. 
-func WithCompressionPath(path ColumnPath, codec compress.Compression) WriterProperty { - return WithCompressionFor(path.String(), codec) -} - -// WithMaxStatsSize sets a maximum size for the statistics before we decide not to include them. -func WithMaxStatsSize(maxStatsSize int64) WriterProperty { - return func(cfg *writerPropConfig) { - cfg.wr.defColumnProps.MaxStatsSize = maxStatsSize - } -} - -// WithCompressionLevel specifies the default compression level for the compressor in every column. -// -// The provided compression level is compressor specific. The user would have to know what the available -// levels are for the selected compressor. If the compressor does not allow for selecting different -// compression levels, then this function will have no effect. Parquet and Arrow will not validate the -// passed compression level. If no level is selected by the user or if the special compress.DefaultCompressionLevel -// value is used, then parquet will select the compression level. -func WithCompressionLevel(level int) WriterProperty { - return func(cfg *writerPropConfig) { - cfg.wr.defColumnProps.CompressionLevel = level - } -} - -// WithCompressionLevelFor is like WithCompressionLevel but only for the given column path. -func WithCompressionLevelFor(path string, level int) WriterProperty { - return func(cfg *writerPropConfig) { - cfg.compressLevel[path] = level - } -} - -// WithCompressionLevelPath is the same as WithCompressionLevelFor but takes a ColumnPath -func WithCompressionLevelPath(path ColumnPath, level int) WriterProperty { - return WithCompressionLevelFor(path.String(), level) -} - -// WithStats specifies a default for whether or not to enable column statistics. -func WithStats(enabled bool) WriterProperty { - return func(cfg *writerPropConfig) { - cfg.wr.defColumnProps.StatsEnabled = enabled - } -} - -// WithStatsFor specifies a per column value as to enable or disable statistics in the resulting file. -func WithStatsFor(path string, enabled bool) WriterProperty { - return func(cfg *writerPropConfig) { - cfg.statsEnabled[path] = enabled - } -} - -// WithStatsPath is the same as WithStatsFor but takes a ColumnPath -func WithStatsPath(path ColumnPath, enabled bool) WriterProperty { - return WithStatsFor(path.String(), enabled) -} - -// WithEncryptionProperties specifies the file level encryption handling for writing the file. -func WithEncryptionProperties(props *FileEncryptionProperties) WriterProperty { - return func(cfg *writerPropConfig) { - cfg.wr.encryptionProps = props - } -} - -// WriterProperties is the collection of properties to use for writing a parquet file. The values are -// read only once it has been constructed. 
-type WriterProperties struct {
-	mem             memory.Allocator
-	dictPagesize    int64
-	batchSize       int64
-	maxRowGroupLen  int64
-	pageSize        int64
-	parquetVersion  Version
-	createdBy       string
-	dataPageVersion DataPageVersion
-	rootName        string
-	rootRepetition  Repetition
-
-	defColumnProps  ColumnProperties
-	columnProps     map[string]*ColumnProperties
-	encryptionProps *FileEncryptionProperties
-}
-
-func defaultWriterProperties() *WriterProperties {
-	return &WriterProperties{
-		mem:             memory.DefaultAllocator,
-		dictPagesize:    DefaultDictionaryPageSizeLimit,
-		batchSize:       DefaultWriteBatchSize,
-		maxRowGroupLen:  DefaultMaxRowGroupLen,
-		pageSize:        DefaultDataPageSize,
-		parquetVersion:  V2_LATEST,
-		dataPageVersion: DataPageV1,
-		createdBy:       DefaultCreatedBy,
-		rootName:        DefaultRootName,
-		rootRepetition:  Repetitions.Repeated,
-		defColumnProps:  DefaultColumnProperties(),
-	}
-}
-
-// NewWriterProperties takes a list of options for building the properties. If multiple
-// conflicting options are used, the last one takes effect. If no WriterProperty options
-// are provided, then the default properties will be utilized for writing.
-//
-// The default properties use the following constants:
-//
-//	Allocator:          memory.DefaultAllocator
-//	DictionaryPageSize: DefaultDictionaryPageSizeLimit
-//	BatchSize:          DefaultWriteBatchSize
-//	MaxRowGroupLength:  DefaultMaxRowGroupLen
-//	PageSize:           DefaultDataPageSize
-//	ParquetVersion:     V2_LATEST
-//	DataPageVersion:    DataPageV1
-//	CreatedBy:          DefaultCreatedBy
-func NewWriterProperties(opts ...WriterProperty) *WriterProperties {
-	cfg := writerPropConfig{
-		wr:            defaultWriterProperties(),
-		encodings:     make(map[string]Encoding),
-		codecs:        make(map[string]compress.Compression),
-		compressLevel: make(map[string]int),
-		dictEnabled:   make(map[string]bool),
-		statsEnabled:  make(map[string]bool),
-	}
-	for _, o := range opts {
-		o(&cfg)
-	}
-
-	cfg.wr.columnProps = make(map[string]*ColumnProperties)
-	get := func(key string) *ColumnProperties {
-		if p, ok := cfg.wr.columnProps[key]; ok {
-			return p
-		}
-		cfg.wr.columnProps[key] = new(ColumnProperties)
-		*cfg.wr.columnProps[key] = cfg.wr.defColumnProps
-		return cfg.wr.columnProps[key]
-	}
-
-	for key, value := range cfg.encodings {
-		get(key).Encoding = value
-	}
-
-	for key, value := range cfg.codecs {
-		get(key).Codec = value
-	}
-
-	for key, value := range cfg.compressLevel {
-		get(key).CompressionLevel = value
-	}
-
-	for key, value := range cfg.dictEnabled {
-		get(key).DictionaryEnabled = value
-	}
-
-	for key, value := range cfg.statsEnabled {
-		get(key).StatsEnabled = value
-	}
-	return cfg.wr
-}
-
-// FileEncryptionProperties returns the current encryption properties that were
-// used to create the writer properties.
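Each WriterProperty above is a closure that mutates the internal writerPropConfig, and NewWriterProperties applies them in order, so the last conflicting option wins. A minimal usage sketch combining defaults, a global setting, and a per-column override (this assumes compress.Codecs.Snappy exists alongside the Uncompressed codec referenced above):

    package main

    import (
        "fmt"

        "github.com/apache/arrow/go/v14/parquet"
        "github.com/apache/arrow/go/v14/parquet/compress"
    )

    func main() {
        props := parquet.NewWriterProperties(
            parquet.WithBatchSize(2048),
            parquet.WithCompression(compress.Codecs.Snappy),
            parquet.WithDictionaryFor("col_a", false),
        )

        fmt.Println(props.WriteBatchSize())              // 2048
        fmt.Println(props.DictionaryEnabled())           // true (package default)
        fmt.Println(props.DictionaryEnabledFor("col_a")) // false (per-column override)
    }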
-func (w *WriterProperties) FileEncryptionProperties() *FileEncryptionProperties { - return w.encryptionProps -} - -func (w *WriterProperties) Allocator() memory.Allocator { return w.mem } -func (w *WriterProperties) CreatedBy() string { return w.createdBy } -func (w *WriterProperties) RootName() string { return w.rootName } -func (w *WriterProperties) RootRepetition() Repetition { return w.rootRepetition } -func (w *WriterProperties) WriteBatchSize() int64 { return w.batchSize } -func (w *WriterProperties) DataPageSize() int64 { return w.pageSize } -func (w *WriterProperties) DictionaryPageSizeLimit() int64 { return w.dictPagesize } -func (w *WriterProperties) Version() Version { return w.parquetVersion } -func (w *WriterProperties) DataPageVersion() DataPageVersion { return w.dataPageVersion } -func (w *WriterProperties) MaxRowGroupLength() int64 { return w.maxRowGroupLen } - -// Compression returns the default compression type that will be used for any columns that don't -// have a specific compression defined. -func (w *WriterProperties) Compression() compress.Compression { return w.defColumnProps.Codec } - -// CompressionFor will return the compression type that is specified for the given column path, or -// the default compression codec if there isn't one specific to this column. -func (w *WriterProperties) CompressionFor(path string) compress.Compression { - if p, ok := w.columnProps[path]; ok { - return p.Codec - } - return w.defColumnProps.Codec -} - -//CompressionPath is the same as CompressionFor but takes a ColumnPath -func (w *WriterProperties) CompressionPath(path ColumnPath) compress.Compression { - return w.CompressionFor(path.String()) -} - -// CompressionLevel returns the default compression level that will be used for any column -// that doesn't have a compression level specified for it. -func (w *WriterProperties) CompressionLevel() int { return w.defColumnProps.CompressionLevel } - -// CompressionLevelFor returns the compression level that will be utilized for the given column, -// or the default compression level if the column doesn't have a specific level specified. -func (w *WriterProperties) CompressionLevelFor(path string) int { - if p, ok := w.columnProps[path]; ok { - return p.CompressionLevel - } - return w.defColumnProps.CompressionLevel -} - -// CompressionLevelPath is the same as CompressionLevelFor but takes a ColumnPath object -func (w *WriterProperties) CompressionLevelPath(path ColumnPath) int { - return w.CompressionLevelFor(path.String()) -} - -// Encoding returns the default encoding that will be utilized for any columns which don't have a different value -// specified. -func (w *WriterProperties) Encoding() Encoding { return w.defColumnProps.Encoding } - -// EncodingFor returns the encoding that will be used for the given column path, or the default encoding if there -// isn't one specified for this column. -func (w *WriterProperties) EncodingFor(path string) Encoding { - if p, ok := w.columnProps[path]; ok { - return p.Encoding - } - return w.defColumnProps.Encoding -} - -// EncodingPath is the same as EncodingFor but takes a ColumnPath object -func (w *WriterProperties) EncodingPath(path ColumnPath) Encoding { - return w.EncodingFor(path.String()) -} - -// DictionaryIndexEncoding returns which encoding will be used for the Dictionary Index values based on the -// parquet version. 
V1 uses PlainDict and V2 uses RLEDict -func (w *WriterProperties) DictionaryIndexEncoding() Encoding { - if w.parquetVersion == V1_0 { - return Encodings.PlainDict - } - return Encodings.RLEDict -} - -// DictionaryPageEncoding returns the encoding that will be utilized for the DictionaryPage itself based on the parquet -// version. V1 uses PlainDict, v2 uses Plain -func (w *WriterProperties) DictionaryPageEncoding() Encoding { - if w.parquetVersion == V1_0 { - return Encodings.PlainDict - } - return Encodings.Plain -} - -// DictionaryEnabled returns the default value as for whether or not dictionary encoding will be utilized for columns -// that aren't separately specified. -func (w *WriterProperties) DictionaryEnabled() bool { return w.defColumnProps.DictionaryEnabled } - -// DictionaryEnabledFor returns whether or not dictionary encoding will be used for the specified column when writing -// or the default value if the column was not separately specified. -func (w *WriterProperties) DictionaryEnabledFor(path string) bool { - if p, ok := w.columnProps[path]; ok { - return p.DictionaryEnabled - } - return w.defColumnProps.DictionaryEnabled -} - -// DictionaryEnabledPath is the same as DictionaryEnabledFor but takes a ColumnPath object. -func (w *WriterProperties) DictionaryEnabledPath(path ColumnPath) bool { - return w.DictionaryEnabledFor(path.String()) -} - -// StatisticsEnabled returns the default value for whether or not stats are enabled to be written for columns -// that aren't separately specified. -func (w *WriterProperties) StatisticsEnabled() bool { return w.defColumnProps.StatsEnabled } - -// StatisticsEnabledFor returns whether stats will be written for the given column path, or the default value if -// it wasn't separately specified. -func (w *WriterProperties) StatisticsEnabledFor(path string) bool { - if p, ok := w.columnProps[path]; ok { - return p.StatsEnabled - } - return w.defColumnProps.StatsEnabled -} - -// StatisticsEnabledPath is the same as StatisticsEnabledFor but takes a ColumnPath object. -func (w *WriterProperties) StatisticsEnabledPath(path ColumnPath) bool { - return w.StatisticsEnabledFor(path.String()) -} - -// MaxStatsSize returns the default maximum size for stats -func (w *WriterProperties) MaxStatsSize() int64 { return w.defColumnProps.MaxStatsSize } - -// MaxStatsSizeFor returns the maximum stat size for the given column path -func (w *WriterProperties) MaxStatsSizeFor(path string) int64 { - if p, ok := w.columnProps[path]; ok { - return p.MaxStatsSize - } - return w.defColumnProps.MaxStatsSize -} - -// MaxStatsSizePath is the same as MaxStatsSizeFor but takes a ColumnPath -func (w *WriterProperties) MaxStatsSizePath(path ColumnPath) int64 { - return w.MaxStatsSizeFor(path.String()) -} - -// ColumnEncryptionProperties returns the specific properties for encryption that will be used for the given column path -func (w *WriterProperties) ColumnEncryptionProperties(path string) *ColumnEncryptionProperties { - if w.encryptionProps != nil { - return w.encryptionProps.ColumnEncryptionProperties(path) - } - return nil -} diff --git a/vendor/github.com/apache/arrow/go/v12/LICENSE.txt b/vendor/github.com/apache/arrow/go/v14/LICENSE.txt similarity index 99% rename from vendor/github.com/apache/arrow/go/v12/LICENSE.txt rename to vendor/github.com/apache/arrow/go/v14/LICENSE.txt index 9ea2e1f4..57310329 100644 --- a/vendor/github.com/apache/arrow/go/v12/LICENSE.txt +++ b/vendor/github.com/apache/arrow/go/v14/LICENSE.txt @@ -1681,17 +1681,6 @@ for PyArrow. 
Ibis is released under the Apache License, Version 2.0. -------------------------------------------------------------------------------- -This project includes code from the autobrew project. - -* r/tools/autobrew and dev/tasks/homebrew-formulae/autobrew/apache-arrow.rb - are based on code from the autobrew project. - -Copyright (c) 2019, Jeroen Ooms -License: MIT -Homepage: https://github.com/jeroen/autobrew - --------------------------------------------------------------------------------- - dev/tasks/homebrew-formulae/apache-arrow.rb has the following license: BSD 2-Clause License diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/.editorconfig b/vendor/github.com/apache/arrow/go/v14/arrow/.editorconfig similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/.editorconfig rename to vendor/github.com/apache/arrow/go/v14/arrow/.editorconfig diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/.gitignore b/vendor/github.com/apache/arrow/go/v14/arrow/.gitignore similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/.gitignore rename to vendor/github.com/apache/arrow/go/v14/arrow/.gitignore diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/Gopkg.lock b/vendor/github.com/apache/arrow/go/v14/arrow/Gopkg.lock similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/Gopkg.lock rename to vendor/github.com/apache/arrow/go/v14/arrow/Gopkg.lock diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/Gopkg.toml b/vendor/github.com/apache/arrow/go/v14/arrow/Gopkg.toml similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/Gopkg.toml rename to vendor/github.com/apache/arrow/go/v14/arrow/Gopkg.toml diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/Makefile b/vendor/github.com/apache/arrow/go/v14/arrow/Makefile similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/Makefile rename to vendor/github.com/apache/arrow/go/v14/arrow/Makefile diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array.go b/vendor/github.com/apache/arrow/go/v14/arrow/array.go similarity index 89% rename from vendor/github.com/apache/arrow/go/v12/arrow/array.go rename to vendor/github.com/apache/arrow/go/v14/arrow/array.go index 9aad42b8..7622e750 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/array.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/array.go @@ -17,10 +17,10 @@ package arrow import ( - "encoding/json" "fmt" - "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow/go/v14/internal/json" ) // ArrayData is the underlying memory and metadata of an Arrow array, corresponding @@ -32,21 +32,21 @@ import ( // which allows for manipulating the internal data and casting. For example, // one could cast the raw bytes from int64 to float64 like so: // -// arrdata := GetMyInt64Data().Data() -// newdata := array.NewData(arrow.PrimitiveTypes.Float64, arrdata.Len(), -// arrdata.Buffers(), nil, arrdata.NullN(), arrdata.Offset()) -// defer newdata.Release() -// float64arr := array.NewFloat64Data(newdata) -// defer float64arr.Release() +// arrdata := GetMyInt64Data().Data() +// newdata := array.NewData(arrow.PrimitiveTypes.Float64, arrdata.Len(), +// arrdata.Buffers(), nil, arrdata.NullN(), arrdata.Offset()) +// defer newdata.Release() +// float64arr := array.NewFloat64Data(newdata) +// defer float64arr.Release() // // This is also useful in an analytics setting where memory may be reused. 
For // example, if we had a group of operations all returning float64 such as: // -// Log(Sqrt(Expr(arr))) +// Log(Sqrt(Expr(arr))) // // The low-level implementations could have signatures such as: // -// func Log(values arrow.ArrayData) arrow.ArrayData +// func Log(values arrow.ArrayData) arrow.ArrayData // // Another example would be a function that consumes one or more memory buffers // in an input array and replaces them with newly-allocated data, changing the @@ -105,6 +105,8 @@ type Array interface { // IsValid returns true if value at index is not null. // NOTE: IsValid will panic if NullBitmapBytes is not empty and 0 > i ≥ Len. IsValid(i int) bool + // ValueStr returns the value at index as a string. + ValueStr(i int) string // Get single value to be marshalled with `json.Marshal` GetOneForMarshal(i int) interface{} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/array.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/array.go similarity index 92% rename from vendor/github.com/apache/arrow/go/v12/arrow/array/array.go rename to vendor/github.com/apache/arrow/go/v14/arrow/array/array.go index 25c245f3..1ee04c7a 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/array/array.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/array.go @@ -19,14 +19,19 @@ package array import ( "sync/atomic" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/arrow/internal/debug" ) const ( // UnknownNullCount specifies the NullN should be calculated from the null bitmap buffer. UnknownNullCount = -1 + + // NullValueStr represents a null value in arrow.Array.ValueStr and in Builder.AppendValueFromString. + // It should be returned from the arrow.Array.ValueStr implementations. + // Using it as the value in Builder.AppendValueFromString should be equivalent to Builder.AppendNull. 
+ NullValueStr = "(null)" ) type array struct { @@ -171,6 +176,8 @@ func init() { arrow.LARGE_LIST: func(data arrow.ArrayData) arrow.Array { return NewLargeListData(data) }, arrow.INTERVAL_MONTH_DAY_NANO: func(data arrow.ArrayData) arrow.Array { return NewMonthDayNanoIntervalData(data) }, arrow.RUN_END_ENCODED: func(data arrow.ArrayData) arrow.Array { return NewRunEndEncodedData(data) }, + arrow.LIST_VIEW: func(data arrow.ArrayData) arrow.Array { return NewListViewData(data) }, + arrow.LARGE_LIST_VIEW: func(data arrow.ArrayData) arrow.Array { return NewLargeListViewData(data) }, // invalid data types to fill out array to size 2^6 - 1 63: invalidDataType, diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/binary.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/binary.go similarity index 93% rename from vendor/github.com/apache/arrow/go/v12/arrow/array/binary.go rename to vendor/github.com/apache/arrow/go/v14/arrow/array/binary.go index 4a760d57..e9e6e66e 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/array/binary.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/binary.go @@ -18,12 +18,13 @@ package array import ( "bytes" + "encoding/base64" "fmt" "strings" "unsafe" - "github.com/apache/arrow/go/v12/arrow" - "github.com/goccy/go-json" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/internal/json" ) type BinaryLike interface { @@ -56,6 +57,14 @@ func (a *Binary) Value(i int) []byte { return a.valueBytes[a.valueOffsets[idx]:a.valueOffsets[idx+1]] } +// ValueStr returns a copy of the base64-encoded string value or NullValueStr +func (a *Binary) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return base64.StdEncoding.EncodeToString(a.Value(i)) +} + // ValueString returns the string at index i without performing additional allocations. // The string is only valid for the lifetime of the Binary array. 
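The lifetime caveat just stated comes from the zero-copy cast ValueString performs; the same conversion in isolation, as a sketch:

    package main

    import (
        "fmt"
        "unsafe"
    )

    // bytesAsString reinterprets a []byte as a string without copying.
    // The result aliases b's backing array, so it is only valid while that
    // memory stays alive and unmodified.
    func bytesAsString(b []byte) string {
        return *(*string)(unsafe.Pointer(&b))
    }

    func main() {
        buf := []byte("hello")
        fmt.Println(bytesAsString(buf)) // hello
    }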
func (a *Binary) ValueString(i int) string { @@ -103,7 +112,7 @@ func (a *Binary) String() string { } switch { case a.IsNull(i): - o.WriteString("(null)") + o.WriteString(NullValueStr) default: fmt.Fprintf(o, "%q", a.ValueString(i)) } @@ -191,6 +200,12 @@ func (a *LargeBinary) Value(i int) []byte { return a.valueBytes[a.valueOffsets[idx]:a.valueOffsets[idx+1]] } +func (a *LargeBinary) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return base64.StdEncoding.EncodeToString(a.Value(i)) +} func (a *LargeBinary) ValueString(i int) string { b := a.Value(i) return *(*string)(unsafe.Pointer(&b)) @@ -236,7 +251,7 @@ func (a *LargeBinary) String() string { } switch { case a.IsNull(i): - o.WriteString("(null)") + o.WriteString(NullValueStr) default: fmt.Fprintf(&o, "%q", a.ValueString(i)) } diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/binarybuilder.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/binarybuilder.go similarity index 92% rename from vendor/github.com/apache/arrow/go/v12/arrow/array/binarybuilder.go rename to vendor/github.com/apache/arrow/go/v14/arrow/array/binarybuilder.go index dbba35f7..3cb709b4 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/array/binarybuilder.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/binarybuilder.go @@ -24,10 +24,10 @@ import ( "reflect" "sync/atomic" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/internal/debug" - "github.com/apache/arrow/go/v12/arrow/memory" - "github.com/goccy/go-json" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow/go/v14/internal/json" ) // A BinaryBuilder is used to build a Binary array using the Append methods. @@ -125,12 +125,24 @@ func (b *BinaryBuilder) AppendNull() { b.UnsafeAppendBoolToBitmap(false) } +func (b *BinaryBuilder) AppendNulls(n int) { + for i := 0; i < n; i++ { + b.AppendNull() + } +} + func (b *BinaryBuilder) AppendEmptyValue() { b.Reserve(1) b.appendNextOffset() b.UnsafeAppendBoolToBitmap(true) } +func (b *BinaryBuilder) AppendEmptyValues(n int) { + for i := 0; i < n; i++ { + b.AppendEmptyValue() + } +} + // AppendValues will append the values in the v slice. The valid slice determines which values // in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, // all values in v are appended and considered valid. 
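The new ValueStr contract renders binary values as base64 and nulls as NullValueStr; a short sketch against the v14 paths this diff introduces:

    package main

    import (
        "fmt"

        "github.com/apache/arrow/go/v14/arrow"
        "github.com/apache/arrow/go/v14/arrow/array"
        "github.com/apache/arrow/go/v14/arrow/memory"
    )

    func main() {
        b := array.NewBinaryBuilder(memory.DefaultAllocator, arrow.BinaryTypes.Binary)
        defer b.Release()

        b.Append([]byte{0xde, 0xad})
        b.AppendNull()

        arr := b.NewBinaryArray()
        defer arr.Release()

        fmt.Println(arr.ValueStr(0)) // "3q0=" (base64 of 0xde 0xad)
        fmt.Println(arr.ValueStr(1)) // "(null)", i.e. array.NullValueStr
    }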
@@ -289,6 +301,25 @@ func (b *BinaryBuilder) appendNextOffset() { b.appendOffsetVal(numBytes) } +func (b *BinaryBuilder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + + if b.dtype.IsUtf8() { + b.Append([]byte(s)) + return nil + } + + decodedVal, err := base64.StdEncoding.DecodeString(s) + if err != nil { + return fmt.Errorf("could not decode base64 string: %w", err) + } + b.Append(decodedVal) + return nil +} + func (b *BinaryBuilder) UnmarshalOne(dec *json.Decoder) error { t, err := dec.Token() if err != nil { diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/boolean.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/boolean.go similarity index 88% rename from vendor/github.com/apache/arrow/go/v12/arrow/array/boolean.go rename to vendor/github.com/apache/arrow/go/v14/arrow/array/boolean.go index 6ab64eab..464cef48 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/array/boolean.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/boolean.go @@ -18,12 +18,13 @@ package array import ( "fmt" + "strconv" "strings" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/arrow/memory" - "github.com/goccy/go-json" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow/go/v14/internal/json" ) // A type which represents an immutable sequence of boolean values. @@ -55,6 +56,14 @@ func (a *Boolean) Value(i int) bool { return bitutil.BitIsSet(a.values, a.array.data.offset+i) } +func (a *Boolean) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } else { + return strconv.FormatBool(a.Value(i)) + } +} + func (a *Boolean) String() string { o := new(strings.Builder) o.WriteString("[") @@ -64,7 +73,7 @@ func (a *Boolean) String() string { } switch { case a.IsNull(i): - o.WriteString("(null)") + o.WriteString(NullValueStr) default: fmt.Fprintf(o, "%v", a.Value(i)) } diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/booleanbuilder.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/booleanbuilder.go similarity index 88% rename from vendor/github.com/apache/arrow/go/v12/arrow/array/booleanbuilder.go rename to vendor/github.com/apache/arrow/go/v14/arrow/array/booleanbuilder.go index b2d25b4d..10b7405a 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/array/booleanbuilder.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/booleanbuilder.go @@ -23,11 +23,11 @@ import ( "strconv" "sync/atomic" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/arrow/internal/debug" - "github.com/apache/arrow/go/v12/arrow/memory" - "github.com/goccy/go-json" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow/go/v14/internal/json" ) type BooleanBuilder struct { @@ -77,11 +77,36 @@ func (b *BooleanBuilder) AppendNull() { b.UnsafeAppendBoolToBitmap(false) } +func (b *BooleanBuilder) AppendNulls(n int) { + for i := 0; i < n; i++ { + b.AppendNull() + } +} + func (b *BooleanBuilder) AppendEmptyValue() { b.Reserve(1) b.UnsafeAppend(false) } +func (b *BooleanBuilder) AppendEmptyValues(n int) { + for i := 0; i < n; i++ { + b.AppendEmptyValue() + } +} + +func (b *BooleanBuilder) 
AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + val, err := strconv.ParseBool(s) + if err != nil { + return err + } + b.Append(val) + return nil +} + func (b *BooleanBuilder) UnsafeAppend(v bool) { bitutil.SetBit(b.nullBitmap.Bytes(), b.length) if v { @@ -229,6 +254,10 @@ func (b *BooleanBuilder) UnmarshalJSON(data []byte) error { return b.Unmarshal(dec) } +func (b *BooleanBuilder) Value(i int) bool { + return bitutil.BitIsSet(b.rawData, i) +} + var ( _ Builder = (*BooleanBuilder)(nil) ) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/bufferbuilder.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/bufferbuilder.go similarity index 96% rename from vendor/github.com/apache/arrow/go/v12/arrow/array/bufferbuilder.go rename to vendor/github.com/apache/arrow/go/v14/arrow/array/bufferbuilder.go index 50e5a264..e023b0d9 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/array/bufferbuilder.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/bufferbuilder.go @@ -19,9 +19,9 @@ package array import ( "sync/atomic" - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/arrow/internal/debug" - "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/memory" ) type bufBuilder interface { diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/bufferbuilder_byte.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/bufferbuilder_byte.go similarity index 95% rename from vendor/github.com/apache/arrow/go/v12/arrow/array/bufferbuilder_byte.go rename to vendor/github.com/apache/arrow/go/v14/arrow/array/bufferbuilder_byte.go index 7e30639a..00a0d1c2 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/array/bufferbuilder_byte.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/bufferbuilder_byte.go @@ -16,7 +16,7 @@ package array -import "github.com/apache/arrow/go/v12/arrow/memory" +import "github.com/apache/arrow/go/v14/arrow/memory" type byteBufferBuilder struct { bufferBuilder diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/bufferbuilder_numeric.gen.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/bufferbuilder_numeric.gen.go similarity index 97% rename from vendor/github.com/apache/arrow/go/v12/arrow/array/bufferbuilder_numeric.gen.go rename to vendor/github.com/apache/arrow/go/v14/arrow/array/bufferbuilder_numeric.gen.go index 1425d0b8..879bc9f5 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/array/bufferbuilder_numeric.gen.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/bufferbuilder_numeric.gen.go @@ -19,9 +19,9 @@ package array import ( - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/arrow/memory" ) type int64BufferBuilder struct { diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/bufferbuilder_numeric.gen.go.tmpl b/vendor/github.com/apache/arrow/go/v14/arrow/array/bufferbuilder_numeric.gen.go.tmpl similarity index 94% rename from vendor/github.com/apache/arrow/go/v12/arrow/array/bufferbuilder_numeric.gen.go.tmpl rename to vendor/github.com/apache/arrow/go/v14/arrow/array/bufferbuilder_numeric.gen.go.tmpl index ccda145e..e859b5bf 100644 --- 
a/vendor/github.com/apache/arrow/go/v12/arrow/array/bufferbuilder_numeric.gen.go.tmpl +++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/bufferbuilder_numeric.gen.go.tmpl @@ -17,9 +17,9 @@ package array import ( - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/arrow/memory" ) {{range .In}} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/builder.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/builder.go similarity index 88% rename from vendor/github.com/apache/arrow/go/v12/arrow/array/builder.go rename to vendor/github.com/apache/arrow/go/v14/arrow/array/builder.go index 26cc76bb..2f15ac96 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/array/builder.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/builder.go @@ -20,10 +20,10 @@ import ( "fmt" "sync/atomic" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/arrow/memory" - "github.com/goccy/go-json" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow/go/v14/internal/json" ) const ( @@ -58,9 +58,18 @@ type Builder interface { // AppendNull adds a new null value to the array being built. AppendNull() + // AppendNulls adds new n null values to the array being built. + AppendNulls(n int) + // AppendEmptyValue adds a new zero value of the appropriate type AppendEmptyValue() + // AppendEmptyValues adds new n zero values of the appropriate type + AppendEmptyValues(n int) + + // AppendValueFromString adds a new value from a string. Inverse of array.ValueStr(i int) string + AppendValueFromString(string) error + // Reserve ensures there is enough space for appending n elements // by checking the capacity and calling Resize if necessary. Reserve(n int) @@ -74,6 +83,12 @@ type Builder interface { // a new array. NewArray() arrow.Array + // IsNull returns if a previously appended value at a given index is null or not. + IsNull(i int) bool + + // SetNull sets the value at index i to null. + SetNull(i int) + UnsafeAppendBoolToBitmap(bool) init(capacity int) @@ -110,6 +125,17 @@ func (b *builder) Cap() int { return b.capacity } // NullN returns the number of null values in the array builder. 
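Because AppendValueFromString is now part of the Builder interface, loaders can be written against the interface instead of a concrete builder type. A minimal sketch:

    package main

    import (
        "fmt"

        "github.com/apache/arrow/go/v14/arrow/array"
        "github.com/apache/arrow/go/v14/arrow/memory"
    )

    // fillFromStrings appends each string to any builder, relying on the
    // NullValueStr convention for nulls.
    func fillFromStrings(b array.Builder, vals []string) error {
        for _, v := range vals {
            if err := b.AppendValueFromString(v); err != nil {
                return err
            }
        }
        return nil
    }

    func main() {
        b := array.NewBooleanBuilder(memory.DefaultAllocator)
        defer b.Release()

        if err := fillFromStrings(b, []string{"true", array.NullValueStr, "false"}); err != nil {
            panic(err)
        }
        arr := b.NewArray()
        defer arr.Release()
        fmt.Println(arr) // [true (null) false]
    }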
func (b *builder) NullN() int { return b.nulls } +func (b *builder) IsNull(i int) bool { + return b.nullBitmap.Len() != 0 && bitutil.BitIsNotSet(b.nullBitmap.Bytes(), i) +} + +func (b *builder) SetNull(i int) { + if i < 0 || i >= b.length { + panic("arrow/array: index out of range") + } + bitutil.ClearBit(b.nullBitmap.Bytes(), i) +} + func (b *builder) init(capacity int) { toAlloc := bitutil.CeilByte(capacity) / 8 b.nullBitmap = memory.NewResizableBuffer(b.mem) @@ -316,6 +342,12 @@ func NewBuilder(mem memory.Allocator, dtype arrow.DataType) Builder { case arrow.MAP: typ := dtype.(*arrow.MapType) return NewMapBuilderWithType(mem, typ) + case arrow.LIST_VIEW: + typ := dtype.(*arrow.ListViewType) + return NewListViewBuilderWithField(mem, typ.ElemField()) + case arrow.LARGE_LIST_VIEW: + typ := dtype.(*arrow.LargeListViewType) + return NewLargeListViewBuilderWithField(mem, typ.ElemField()) case arrow.EXTENSION: typ := dtype.(arrow.ExtensionType) bldr := NewExtensionBuilder(mem, typ) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/compare.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/compare.go similarity index 81% rename from vendor/github.com/apache/arrow/go/v12/arrow/array/compare.go rename to vendor/github.com/apache/arrow/go/v14/arrow/array/compare.go index 75b9993b..e70716be 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/array/compare.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/compare.go @@ -20,8 +20,9 @@ import ( "fmt" "math" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/float16" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/float16" + "github.com/apache/arrow/go/v14/internal/bitutils" ) // RecordEqual reports whether the two provided records are equal. @@ -36,7 +37,7 @@ func RecordEqual(left, right arrow.Record) bool { for i := range left.Columns() { lc := left.Column(i) rc := right.Column(i) - if !ArrayEqual(lc, rc) { + if !Equal(lc, rc) { return false } } @@ -195,15 +196,6 @@ func TableApproxEqual(left, right arrow.Table, opts ...EqualOption) bool { return true } -// ArrayEqual reports whether the two provided arrays are equal. -// -// Deprecated: This currently just delegates to calling Equal. This will be -// removed in v9 so please update any calling code to just call array.Equal -// directly instead. -func ArrayEqual(left, right arrow.Array) bool { - return Equal(left, right) -} - // Equal reports whether the two provided arrays are equal. func Equal(left, right arrow.Array) bool { switch { @@ -300,6 +292,12 @@ func Equal(left, right arrow.Array) bool { case *LargeList: r := right.(*LargeList) return arrayEqualLargeList(l, r) + case *ListView: + r := right.(*ListView) + return arrayEqualListView(l, r) + case *LargeListView: + r := right.(*LargeListView) + return arrayEqualLargeListView(l, r) case *FixedSizeList: r := right.(*FixedSizeList) return arrayEqualFixedSizeList(l, r) @@ -341,14 +339,6 @@ func Equal(left, right arrow.Array) bool { } } -// ArraySliceEqual reports whether slices left[lbeg:lend] and right[rbeg:rend] are equal. -// -// Deprecated: Renamed to just array.SliceEqual, this currently will just delegate to the renamed -// function and will be removed in v9. Please update any calling code. -func ArraySliceEqual(left arrow.Array, lbeg, lend int64, right arrow.Array, rbeg, rend int64) bool { - return SliceEqual(left, lbeg, lend, right, rbeg, rend) -} - // SliceEqual reports whether slices left[lbeg:lend] and right[rbeg:rend] are equal. 
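With the deprecated ArrayEqual wrappers removed, callers use Equal, SliceEqual, and ApproxEqual directly; a sketch using the absolute-tolerance option defined in this file:

    package main

    import (
        "fmt"

        "github.com/apache/arrow/go/v14/arrow/array"
        "github.com/apache/arrow/go/v14/arrow/memory"
    )

    func mk(vals []float64) *array.Float64 {
        b := array.NewFloat64Builder(memory.DefaultAllocator)
        defer b.Release()
        b.AppendValues(vals, nil)
        return b.NewFloat64Array()
    }

    func main() {
        a := mk([]float64{1.0, 2.0})
        defer a.Release()
        b := mk([]float64{1.0, 2.0 + 1e-7})
        defer b.Release()

        fmt.Println(array.Equal(a, b)) // false: exact comparison
        fmt.Println(array.ApproxEqual(a, b, array.WithAbsTolerance(1e-5))) // true
    }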
func SliceEqual(left arrow.Array, lbeg, lend int64, right arrow.Array, rbeg, rend int64) bool { l := NewSlice(left, lbeg, lend) @@ -359,14 +349,6 @@ func SliceEqual(left arrow.Array, lbeg, lend int64, right arrow.Array, rbeg, ren return Equal(l, r) } -// ArraySliceApproxEqual reports whether slices left[lbeg:lend] and right[rbeg:rend] are approximately equal. -// -// Deprecated: renamed to just SliceApproxEqual and will be removed in v9. Please update -// calling code to just call array.SliceApproxEqual. -func ArraySliceApproxEqual(left arrow.Array, lbeg, lend int64, right arrow.Array, rbeg, rend int64, opts ...EqualOption) bool { - return SliceApproxEqual(left, lbeg, lend, right, rbeg, rend, opts...) -} - // SliceApproxEqual reports whether slices left[lbeg:lend] and right[rbeg:rend] are approximately equal. func SliceApproxEqual(left arrow.Array, lbeg, lend int64, right arrow.Array, rbeg, rend int64, opts ...EqualOption) bool { opt := newEqualOption(opts...) @@ -385,8 +367,9 @@ func sliceApproxEqual(left arrow.Array, lbeg, lend int64, right arrow.Array, rbe const defaultAbsoluteTolerance = 1e-5 type equalOption struct { - atol float64 // absolute tolerance - nansEq bool // whether NaNs are considered equal. + atol float64 // absolute tolerance + nansEq bool // whether NaNs are considered equal. + unorderedMapKeys bool // whether maps are allowed to have different entries order } func (eq equalOption) f16(f1, f2 float16.Num) bool { @@ -450,17 +433,15 @@ func WithAbsTolerance(atol float64) EqualOption { } } -// ArrayApproxEqual reports whether the two provided arrays are approximately equal. -// For non-floating point arrays, it is equivalent to ArrayEqual. -// -// Deprecated: renamed to just ApproxEqual, this alias will be removed in v9. Please update -// calling code to just call array.ApproxEqual -func ArrayApproxEqual(left, right arrow.Array, opts ...EqualOption) bool { - return ApproxEqual(left, right, opts...) +// WithUnorderedMapKeys configures the comparison functions so that Map with different entries order are considered equal. +func WithUnorderedMapKeys(v bool) EqualOption { + return func(o *equalOption) { + o.unorderedMapKeys = v + } } // ApproxEqual reports whether the two provided arrays are approximately equal. -// For non-floating point arrays, it is equivalent to ArrayEqual. +// For non-floating point arrays, it is equivalent to Equal. func ApproxEqual(left, right arrow.Array, opts ...EqualOption) bool { opt := newEqualOption(opts...) 
return arrayApproxEqual(left, right, opt) @@ -561,6 +542,12 @@ func arrayApproxEqual(left, right arrow.Array, opt equalOption) bool { case *LargeList: r := right.(*LargeList) return arrayApproxEqualLargeList(l, r, opt) + case *ListView: + r := right.(*ListView) + return arrayApproxEqualListView(l, r, opt) + case *LargeListView: + r := right.(*LargeListView) + return arrayApproxEqualLargeListView(l, r, opt) case *FixedSizeList: r := right.(*FixedSizeList) return arrayApproxEqualFixedSizeList(l, r, opt) @@ -581,6 +568,9 @@ func arrayApproxEqual(left, right arrow.Array, opt equalOption) bool { return arrayEqualDuration(l, r) case *Map: r := right.(*Map) + if opt.unorderedMapKeys { + return arrayApproxEqualMap(l, r, opt) + } return arrayApproxEqualList(l.List, r.List, opt) case *Dictionary: r := right.(*Dictionary) @@ -704,6 +694,44 @@ func arrayApproxEqualLargeList(left, right *LargeList, opt equalOption) bool { return true } +func arrayApproxEqualListView(left, right *ListView, opt equalOption) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + o := func() bool { + l := left.newListValue(i) + defer l.Release() + r := right.newListValue(i) + defer r.Release() + return arrayApproxEqual(l, r, opt) + }() + if !o { + return false + } + } + return true +} + +func arrayApproxEqualLargeListView(left, right *LargeListView, opt equalOption) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + o := func() bool { + l := left.newListValue(i) + defer l.Release() + r := right.newListValue(i) + defer r.Release() + return arrayApproxEqual(l, r, opt) + }() + if !o { + return false + } + } + return true +} + func arrayApproxEqualFixedSizeList(left, right *FixedSizeList, opt equalOption) bool { for i := 0; i < left.Len(); i++ { if left.IsNull(i) { @@ -724,11 +752,91 @@ func arrayApproxEqualFixedSizeList(left, right *FixedSizeList, opt equalOption) } func arrayApproxEqualStruct(left, right *Struct, opt equalOption) bool { - for i, lf := range left.fields { - rf := right.fields[i] - if !arrayApproxEqual(lf, rf, opt) { + return bitutils.VisitSetBitRuns( + left.NullBitmapBytes(), + int64(left.Offset()), int64(left.Len()), + approxEqualStructRun(left, right, opt), + ) == nil +} + +func approxEqualStructRun(left, right *Struct, opt equalOption) bitutils.VisitFn { + return func(pos int64, length int64) error { + for i := range left.fields { + if !sliceApproxEqual(left.fields[i], pos, pos+length, right.fields[i], pos, pos+length, opt) { + return arrow.ErrInvalid + } + } + return nil + } +} + +// arrayApproxEqualMap doesn't care about the order of keys (in Go map traversal order is undefined) +func arrayApproxEqualMap(left, right *Map, opt equalOption) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if !arrayApproxEqualSingleMapEntry(left.newListValue(i).(*Struct), right.newListValue(i).(*Struct), opt) { return false } } return true } + +// arrayApproxEqualSingleMapEntry is a helper function that checks if a single entry pair is approx equal. +// Basically, it doesn't care about key order. 
+// structs passed will be released +func arrayApproxEqualSingleMapEntry(left, right *Struct, opt equalOption) bool { + defer left.Release() + defer right.Release() + + // we don't compare the validity bitmap, but we want other checks from baseArrayEqual + switch { + case left.Len() != right.Len(): + return false + case left.NullN() != right.NullN(): + return false + case !arrow.TypeEqual(left.DataType(), right.DataType()): // We do not check for metadata as in the C++ implementation. + return false + case left.NullN() == left.Len(): + return true + } + + used := make(map[int]bool, right.Len()) + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + + found := false + lBeg, lEnd := int64(i), int64(i+1) + for j := 0; j < right.Len(); j++ { + if used[j] { + continue + } + if right.IsNull(j) { + used[j] = true + continue + } + + rBeg, rEnd := int64(j), int64(j+1) + + // check keys (field 0) + if !sliceApproxEqual(left.Field(0), lBeg, lEnd, right.Field(0), rBeg, rEnd, opt) { + continue + } + + // only now check the values + if sliceApproxEqual(left.Field(1), lBeg, lEnd, right.Field(1), rBeg, rEnd, opt) { + found = true + used[j] = true + break + } + } + if !found { + return false + } + } + + return len(used) == right.Len() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/concat.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/concat.go similarity index 79% rename from vendor/github.com/apache/arrow/go/v12/arrow/array/concat.go rename to vendor/github.com/apache/arrow/go/v14/arrow/array/concat.go index c8e12318..9d815023 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/array/concat.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/concat.go @@ -21,14 +21,15 @@ import ( "fmt" "math" "math/bits" - - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/arrow/encoded" - "github.com/apache/arrow/go/v12/arrow/internal/debug" - "github.com/apache/arrow/go/v12/arrow/memory" - "github.com/apache/arrow/go/v12/internal/bitutils" - "github.com/apache/arrow/go/v12/internal/utils" + "unsafe" + + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/arrow/encoded" + "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow/go/v14/internal/bitutils" + "github.com/apache/arrow/go/v14/internal/utils" ) // Concatenate creates a new arrow.Array which is the concatenation of the @@ -41,17 +42,6 @@ func Concatenate(arrs []arrow.Array, mem memory.Allocator) (result arrow.Array, return nil, errors.New("array/concat: must pass at least one array") } - defer func() { - if pErr := recover(); pErr != nil { - switch e := pErr.(type) { - case error: - err = fmt.Errorf("arrow/concat: %w", e) - default: - err = fmt.Errorf("arrow/concat: %v", pErr) - } - } - }() - // gather Data of inputs data := make([]arrow.ArrayData, len(arrs)) for i, ar := range arrs { @@ -366,10 +356,181 @@ func concatOffsets(buffers []*memory.Buffer, byteWidth int, mem memory.Allocator } } +func sumArraySizes(data []arrow.ArrayData) int { + outSize := 0 + for _, arr := range data { + outSize += arr.Len() + } + return outSize +} + +func getListViewBufferValues[T int32 | int64](data arrow.ArrayData, i int) []T { + bytes := data.Buffers()[i].Bytes() + base := (*T)(unsafe.Pointer(&bytes[0])) + ret := unsafe.Slice(base, data.Offset()+data.Len()) + return ret[data.Offset():] +} + +func 
putListViewOffsets32(in arrow.ArrayData, displacement int32, out *memory.Buffer, outOff int) { + debug.Assert(in.DataType().ID() == arrow.LIST_VIEW, "putListViewOffsets32: expected LIST_VIEW data") + inOff, inLen := in.Offset(), in.Len() + if inLen == 0 { + return + } + bitmap := in.Buffers()[0] + srcOffsets := getListViewBufferValues[int32](in, 1) + srcSizes := getListViewBufferValues[int32](in, 2) + isValidAndNonEmpty := func(i int) bool { + return (bitmap == nil || bitutil.BitIsSet(bitmap.Bytes(), inOff+i)) && srcSizes[i] > 0 + } + + dstOffsets := arrow.Int32Traits.CastFromBytes(out.Bytes()) + for i, offset := range srcOffsets { + if isValidAndNonEmpty(i) { + // This is guaranteed by RangeOfValuesUsed returning the smallest offset + // of valid and non-empty list-views. + debug.Assert(offset+displacement >= 0, "putListViewOffsets32: offset underflow while concatenating arrays") + dstOffsets[outOff+i] = offset + displacement + } else { + dstOffsets[outOff+i] = 0 + } + } +} + +func putListViewOffsets64(in arrow.ArrayData, displacement int64, out *memory.Buffer, outOff int) { + debug.Assert(in.DataType().ID() == arrow.LARGE_LIST_VIEW, "putListViewOffsets64: expected LARGE_LIST_VIEW data") + inOff, inLen := in.Offset(), in.Len() + if inLen == 0 { + return + } + bitmap := in.Buffers()[0] + srcOffsets := getListViewBufferValues[int64](in, 1) + srcSizes := getListViewBufferValues[int64](in, 2) + isValidAndNonEmpty := func(i int) bool { + return (bitmap == nil || bitutil.BitIsSet(bitmap.Bytes(), inOff+i)) && srcSizes[i] > 0 + } + + dstOffsets := arrow.Int64Traits.CastFromBytes(out.Bytes()) + for i, offset := range srcOffsets { + if isValidAndNonEmpty(i) { + // This is guaranteed by RangeOfValuesUsed returning the smallest offset + // of valid and non-empty list-views. + debug.Assert(offset+displacement >= 0, "putListViewOffsets64: offset underflow while concatenating arrays") + dstOffsets[outOff+i] = offset + displacement + } else { + dstOffsets[outOff+i] = 0 + } + } +} + +// Concatenate buffers holding list-view offsets into a single buffer of offsets +// +// valueRanges contains the relevant ranges of values in the child array actually +// referenced to by the views. Most commonly, these ranges will start from 0, +// but when that is not the case, we need to adjust the displacement of offsets. +// The concatenated child array does not contain values from the beginning +// if they are not referenced to by any view. 
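The displacement adjustment described above can be seen in isolation; a self-contained sketch of the same arithmetic, with plain slices standing in for the view buffers:

    package main

    import "fmt"

    // displaceOffsets mirrors putListViewOffsets32: every valid, non-empty
    // view has its offset shifted by the displacement, while invalid or
    // empty views collapse to offset 0.
    func displaceOffsets(offsets, sizes []int32, valid []bool, displacement int32) []int32 {
        out := make([]int32, len(offsets))
        for i, off := range offsets {
            if valid[i] && sizes[i] > 0 {
                out[i] = off + displacement
            }
        }
        return out
    }

    func main() {
        // Suppose the second input's views reference child values starting
        // at offset 2, and 5 child values were already emitted by the first
        // input: displacement = 5 - 2 = 3.
        fmt.Println(displaceOffsets(
            []int32{2, 4, 0}, []int32{2, 1, 0}, []bool{true, true, false}, 3,
        )) // [5 7 0]
    }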
+func concatListViewOffsets(data []arrow.ArrayData, byteWidth int, valueRanges []rng, mem memory.Allocator) (*memory.Buffer, error) { + outSize := sumArraySizes(data) + if byteWidth == 4 && outSize > math.MaxInt32 { + return nil, fmt.Errorf("%w: offset overflow while concatenating arrays", arrow.ErrInvalid) + } + out := memory.NewResizableBuffer(mem) + out.Resize(byteWidth * outSize) + + numChildValues, elementsLength := 0, 0 + for i, arr := range data { + displacement := numChildValues - valueRanges[i].offset + if byteWidth == 4 { + putListViewOffsets32(arr, int32(displacement), out, elementsLength) + } else { + putListViewOffsets64(arr, int64(displacement), out, elementsLength) + } + elementsLength += arr.Len() + numChildValues += valueRanges[i].len + } + debug.Assert(elementsLength == outSize, "implementation error") + + return out, nil +} + +func zeroNullListViewSizes[T int32 | int64](data arrow.ArrayData) { + if data.Len() == 0 || data.Buffers()[0] == nil { + return + } + validity := data.Buffers()[0].Bytes() + sizes := getListViewBufferValues[T](data, 2) + + for i := 0; i < data.Len(); i++ { + if !bitutil.BitIsSet(validity, data.Offset()+i) { + sizes[i] = 0 + } + } +} + +func concatListView(data []arrow.ArrayData, offsetType arrow.FixedWidthDataType, out *Data, mem memory.Allocator) (err error) { + // Calculate the ranges of values that each list-view array uses + valueRanges := make([]rng, len(data)) + for i, input := range data { + offset, len := rangeOfValuesUsed(input) + valueRanges[i].offset = offset + valueRanges[i].len = len + } + + // Gather the children ranges of each input array + childData := gatherChildrenRanges(data, 0, valueRanges) + for _, c := range childData { + defer c.Release() + } + + // Concatenate the values + values, err := concat(childData, mem) + if err != nil { + return err + } + + // Concatenate the offsets + offsetBuffer, err := concatListViewOffsets(data, offsetType.Bytes(), valueRanges, mem) + if err != nil { + return err + } + + // Concatenate the sizes + sizeBuffers := gatherBuffersFixedWidthType(data, 2, offsetType) + sizeBuffer := concatBuffers(sizeBuffers, mem) + + out.childData = []arrow.ArrayData{values} + out.buffers[1] = offsetBuffer + out.buffers[2] = sizeBuffer + + // To make sure the sizes don't reference values that are not in the new + // concatenated values array, we zero the sizes of null list-view values. + if offsetType.ID() == arrow.INT32 { + zeroNullListViewSizes[int32](out) + } else { + zeroNullListViewSizes[int64](out) + } + + return nil +} + // concat is the implementation for actually performing the concatenation of the arrow.ArrayData // objects that we can call internally for nested types. 
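For callers, all of this machinery is driven by the public Concatenate entry point shown earlier in this file; a short usage sketch before the internal implementation below:

    package main

    import (
        "fmt"

        "github.com/apache/arrow/go/v14/arrow"
        "github.com/apache/arrow/go/v14/arrow/array"
        "github.com/apache/arrow/go/v14/arrow/memory"
    )

    func mkInts(vals []int64) arrow.Array {
        b := array.NewInt64Builder(memory.DefaultAllocator)
        defer b.Release()
        b.AppendValues(vals, nil)
        return b.NewArray()
    }

    func main() {
        a := mkInts([]int64{1, 2})
        defer a.Release()
        b := mkInts([]int64{3, 4})
        defer b.Release()

        out, err := array.Concatenate([]arrow.Array{a, b}, memory.DefaultAllocator)
        if err != nil {
            panic(err)
        }
        defer out.Release()
        fmt.Println(out) // [1 2 3 4]
    }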
-func concat(data []arrow.ArrayData, mem memory.Allocator) (arrow.ArrayData, error) { +func concat(data []arrow.ArrayData, mem memory.Allocator) (arr arrow.ArrayData, err error) { out := &Data{refCount: 1, dtype: data[0].DataType(), nulls: 0} + defer func() { + if pErr := recover(); pErr != nil { + switch e := pErr.(type) { + case error: + err = fmt.Errorf("arrow/concat: %w", e) + default: + err = fmt.Errorf("arrow/concat: %v", pErr) + } + } + if err != nil { + out.Release() + } + }() for _, d := range data { out.length += d.Len() if out.nulls == UnknownNullCount || d.NullN() == UnknownNullCount { @@ -445,8 +606,8 @@ func concat(data []arrow.ArrayData, mem memory.Allocator) (arrow.ArrayData, erro if err != nil { return nil, err } - out.buffers[2] = concatBuffers(gatherBufferRanges(data, 2, valueRanges), mem) out.buffers[1] = offsetBuffer + out.buffers[2] = concatBuffers(gatherBufferRanges(data, 2, valueRanges), mem) case *arrow.ListType: offsetWidth := dt.Layout().Buffers[1].ByteWidth offsetBuffer, valueRanges, err := concatOffsets(gatherFixedBuffers(data, 1, offsetWidth), offsetWidth, mem) @@ -481,6 +642,18 @@ func concat(data []arrow.ArrayData, mem memory.Allocator) (arrow.ArrayData, erro if err != nil { return nil, err } + case *arrow.ListViewType: + offsetType := arrow.PrimitiveTypes.Int32.(arrow.FixedWidthDataType) + err := concatListView(data, offsetType, out, mem) + if err != nil { + return nil, err + } + case *arrow.LargeListViewType: + offsetType := arrow.PrimitiveTypes.Int64.(arrow.FixedWidthDataType) + err := concatListView(data, offsetType, out, mem) + if err != nil { + return nil, err + } case *arrow.FixedSizeListType: childData := gatherChildrenMultiplier(data, 0, int(dt.Len())) for _, c := range childData { diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/data.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/data.go similarity index 97% rename from vendor/github.com/apache/arrow/go/v12/arrow/array/data.go rename to vendor/github.com/apache/arrow/go/v14/arrow/array/data.go index 55a84d8c..49df06fb 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/array/data.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/data.go @@ -22,9 +22,9 @@ import ( "sync/atomic" "unsafe" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/internal/debug" - "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/memory" ) // Data represents the memory and metadata of an Arrow array. 
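The reworked concat above converts panics into its named error return and releases the partially built Data on any failure path; the same idiom in isolation, as a sketch:

    package main

    import (
        "errors"
        "fmt"
    )

    // build turns a panic into the named error return, then runs cleanup on
    // any error, whether it arrived by panic or by normal return.
    func build() (result string, err error) {
        cleanup := func() { fmt.Println("releasing partial state") }
        defer func() {
            if p := recover(); p != nil {
                switch e := p.(type) {
                case error:
                    err = fmt.Errorf("build: %w", e)
                default:
                    err = fmt.Errorf("build: %v", p)
                }
            }
            if err != nil {
                cleanup()
            }
        }()

        panic(errors.New("boom"))
    }

    func main() {
        _, err := build()
        fmt.Println(err) // build: boom
    }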
diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/decimal128.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/decimal128.go similarity index 89% rename from vendor/github.com/apache/arrow/go/v12/arrow/array/decimal128.go rename to vendor/github.com/apache/arrow/go/v14/arrow/array/decimal128.go index fb6423e2..33175316 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/array/decimal128.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/decimal128.go @@ -25,12 +25,12 @@ import ( "strings" "sync/atomic" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/arrow/decimal128" - "github.com/apache/arrow/go/v12/arrow/internal/debug" - "github.com/apache/arrow/go/v12/arrow/memory" - "github.com/goccy/go-json" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/arrow/decimal128" + "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow/go/v14/internal/json" ) // A type which represents an immutable sequence of 128-bit decimal values. @@ -49,6 +49,13 @@ func NewDecimal128Data(data arrow.ArrayData) *Decimal128 { func (a *Decimal128) Value(i int) decimal128.Num { return a.values[i] } +func (a *Decimal128) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return a.GetOneForMarshal(i).(string) +} + func (a *Decimal128) Values() []decimal128.Num { return a.values } func (a *Decimal128) String() string { @@ -60,7 +67,7 @@ func (a *Decimal128) String() string { } switch { case a.IsNull(i): - o.WriteString("(null)") + o.WriteString(NullValueStr) default: fmt.Fprintf(o, "%v", a.Value(i)) } @@ -163,10 +170,22 @@ func (b *Decimal128Builder) AppendNull() { b.UnsafeAppendBoolToBitmap(false) } +func (b *Decimal128Builder) AppendNulls(n int) { + for i := 0; i < n; i++ { + b.AppendNull() + } +} + func (b *Decimal128Builder) AppendEmptyValue() { b.Append(decimal128.Num{}) } +func (b *Decimal128Builder) AppendEmptyValues(n int) { + for i := 0; i < n; i++ { + b.AppendEmptyValue() + } +} + func (b *Decimal128Builder) UnsafeAppendBoolToBitmap(isValid bool) { if isValid { bitutil.SetBit(b.nullBitmap.Bytes(), b.length) @@ -260,6 +279,20 @@ func (b *Decimal128Builder) newData() (data *Data) { return } +func (b *Decimal128Builder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + val, err := decimal128.FromString(s, b.dtype.Precision, b.dtype.Scale) + if err != nil { + b.AppendNull() + return err + } + b.Append(val) + return nil +} + func (b *Decimal128Builder) UnmarshalOne(dec *json.Decoder) error { t, err := dec.Token() if err != nil { diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/decimal256.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/decimal256.go similarity index 89% rename from vendor/github.com/apache/arrow/go/v12/arrow/array/decimal256.go rename to vendor/github.com/apache/arrow/go/v14/arrow/array/decimal256.go index 0f007c7c..d63544f7 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/array/decimal256.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/decimal256.go @@ -25,12 +25,12 @@ import ( "strings" "sync/atomic" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/arrow/decimal256" - "github.com/apache/arrow/go/v12/arrow/internal/debug" - "github.com/apache/arrow/go/v12/arrow/memory" - 
"github.com/goccy/go-json" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/arrow/decimal256" + "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow/go/v14/internal/json" ) // Decimal256 is a type that represents an immutable sequence of 256-bit decimal values. @@ -49,6 +49,13 @@ func NewDecimal256Data(data arrow.ArrayData) *Decimal256 { func (a *Decimal256) Value(i int) decimal256.Num { return a.values[i] } +func (a *Decimal256) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return a.GetOneForMarshal(i).(string) +} + func (a *Decimal256) Values() []decimal256.Num { return a.values } func (a *Decimal256) String() string { @@ -60,7 +67,7 @@ func (a *Decimal256) String() string { } switch { case a.IsNull(i): - o.WriteString("(null)") + o.WriteString(NullValueStr) default: fmt.Fprintf(o, "%v", a.Value(i)) } @@ -160,10 +167,22 @@ func (b *Decimal256Builder) AppendNull() { b.UnsafeAppendBoolToBitmap(false) } +func (b *Decimal256Builder) AppendNulls(n int) { + for i := 0; i < n; i++ { + b.AppendNull() + } +} + func (b *Decimal256Builder) AppendEmptyValue() { b.Append(decimal256.Num{}) } +func (b *Decimal256Builder) AppendEmptyValues(n int) { + for i := 0; i < n; i++ { + b.AppendEmptyValue() + } +} + func (b *Decimal256Builder) Type() arrow.DataType { return b.dtype } func (b *Decimal256Builder) UnsafeAppendBoolToBitmap(isValid bool) { @@ -259,6 +278,20 @@ func (b *Decimal256Builder) newData() (data *Data) { return } +func (b *Decimal256Builder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + val, err := decimal256.FromString(s, b.dtype.Precision, b.dtype.Scale) + if err != nil { + b.AppendNull() + return err + } + b.Append(val) + return nil +} + func (b *Decimal256Builder) UnmarshalOne(dec *json.Decoder) error { t, err := dec.Token() if err != nil { diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/dictionary.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/dictionary.go similarity index 87% rename from vendor/github.com/apache/arrow/go/v12/arrow/array/dictionary.go rename to vendor/github.com/apache/arrow/go/v14/arrow/array/dictionary.go index 70f77e24..d0a1c4dc 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/array/dictionary.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/dictionary.go @@ -25,15 +25,16 @@ import ( "sync/atomic" "unsafe" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/arrow/decimal128" - "github.com/apache/arrow/go/v12/arrow/float16" - "github.com/apache/arrow/go/v12/arrow/internal/debug" - "github.com/apache/arrow/go/v12/arrow/memory" - "github.com/apache/arrow/go/v12/internal/hashing" - "github.com/apache/arrow/go/v12/internal/utils" - "github.com/goccy/go-json" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/arrow/decimal128" + "github.com/apache/arrow/go/v14/arrow/decimal256" + "github.com/apache/arrow/go/v14/arrow/float16" + "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow/go/v14/internal/hashing" + "github.com/apache/arrow/go/v14/internal/json" + "github.com/apache/arrow/go/v14/internal/utils" ) // Dictionary represents the type for dictionary-encoded data with a data @@ -45,12 
+46,12 @@ import ( // // For example, the array: // -// ["foo", "bar", "foo", "bar", "foo", "bar"] +// ["foo", "bar", "foo", "bar", "foo", "bar"] // // with dictionary ["bar", "foo"], would have the representation of: // -// indices: [1, 0, 1, 0, 1, 0] -// dictionary: ["bar", "foo"] +// indices: [1, 0, 1, 0, 1, 0] +// dictionary: ["bar", "foo"] // // The indices in principle may be any integer type. type Dictionary struct { @@ -250,7 +251,14 @@ func (d *Dictionary) CanCompareIndices(other *Dictionary) bool { } minlen := int64(min(d.data.dictionary.length, other.data.dictionary.length)) - return ArraySliceEqual(d.Dictionary(), 0, minlen, other.Dictionary(), 0, minlen) + return SliceEqual(d.Dictionary(), 0, minlen, other.Dictionary(), 0, minlen) +} + +func (d *Dictionary) ValueStr(i int) string { + if d.IsNull(i) { + return NullValueStr + } + return d.Dictionary().ValueStr(d.GetValueIndex(i)) } func (d *Dictionary) String() string { @@ -298,7 +306,7 @@ func (d *Dictionary) MarshalJSON() ([]byte, error) { } func arrayEqualDict(l, r *Dictionary) bool { - return ArrayEqual(l.Dictionary(), r.Dictionary()) && ArrayEqual(l.indices, r.indices) + return Equal(l.Dictionary(), r.Dictionary()) && Equal(l.indices, r.indices) } func arrayApproxEqualDict(l, r *Dictionary, opt equalOption) bool { @@ -306,13 +314,13 @@ func arrayApproxEqualDict(l, r *Dictionary, opt equalOption) bool { } // helper for building the properly typed indices of the dictionary builder -type indexBuilder struct { +type IndexBuilder struct { Builder Append func(int) } -func createIndexBuilder(mem memory.Allocator, dt arrow.FixedWidthDataType) (ret indexBuilder, err error) { - ret = indexBuilder{Builder: NewBuilder(mem, dt)} +func createIndexBuilder(mem memory.Allocator, dt arrow.FixedWidthDataType) (ret IndexBuilder, err error) { + ret = IndexBuilder{Builder: NewBuilder(mem, dt)} switch dt.ID() { case arrow.INT8: ret.Append = func(idx int) { @@ -412,7 +420,7 @@ type dictionaryBuilder struct { dt *arrow.DictionaryType deltaOffset int memoTable hashing.MemoTable - idxBuilder indexBuilder + idxBuilder IndexBuilder } // NewDictionaryBuilderWithDict initializes a dictionary builder and inserts the values from `init` as the first @@ -687,11 +695,23 @@ func (b *dictionaryBuilder) AppendNull() { b.idxBuilder.AppendNull() } +func (b *dictionaryBuilder) AppendNulls(n int) { + for i := 0; i < n; i++ { + b.AppendNull() + } +} + func (b *dictionaryBuilder) AppendEmptyValue() { b.length += 1 b.idxBuilder.AppendEmptyValue() } +func (b *dictionaryBuilder) AppendEmptyValues(n int) { + for i := 0; i < n; i++ { + b.AppendEmptyValue() + } +} + func (b *dictionaryBuilder) Reserve(n int) { b.idxBuilder.Reserve(n) } @@ -709,7 +729,8 @@ func (b *dictionaryBuilder) ResetFull() { func (b *dictionaryBuilder) Cap() int { return b.idxBuilder.Cap() } -// UnmarshalJSON is not yet implemented for dictionary builders and will always error. 
+func (b *dictionaryBuilder) IsNull(i int) bool { return b.idxBuilder.IsNull(i) } + func (b *dictionaryBuilder) UnmarshalJSON(data []byte) error { dec := json.NewDecoder(bytes.NewReader(data)) t, err := dec.Token() @@ -737,8 +758,30 @@ func (b *dictionaryBuilder) Unmarshal(dec *json.Decoder) error { return b.AppendArray(arr) } +func (b *dictionaryBuilder) AppendValueFromString(s string) error { + bldr := NewBuilder(b.mem, b.dt.ValueType) + defer bldr.Release() + + if err := bldr.AppendValueFromString(s); err != nil { + return err + } + + arr := bldr.NewArray() + defer arr.Release() + return b.AppendArray(arr) +} + func (b *dictionaryBuilder) UnmarshalOne(dec *json.Decoder) error { - return errors.New("unmarshal json to dictionary not yet implemented") + bldr := NewBuilder(b.mem, b.dt.ValueType) + defer bldr.Release() + + if err := bldr.UnmarshalOne(dec); err != nil { + return err + } + + arr := bldr.NewArray() + defer arr.Release() + return b.AppendArray(arr) } func (b *dictionaryBuilder) NewArray() arrow.Array { @@ -771,11 +814,11 @@ func (b *dictionaryBuilder) newWithDictOffset(offset int) (indices, dict *Data, defer idxarr.Release() indices = idxarr.Data().(*Data) - indices.Retain() b.deltaOffset = b.memoTable.Size() dict, err = GetDictArrayData(b.mem, b.dt.ValueType, b.memoTable, offset) b.reset() + indices.Retain() return } @@ -799,6 +842,11 @@ func (b *dictionaryBuilder) insertDictValue(val interface{}) error { return err } +func (b *dictionaryBuilder) insertDictBytes(val []byte) error { + _, _, err := b.memoTable.GetOrInsertBytes(val) + return err +} + func (b *dictionaryBuilder) appendValue(val interface{}) error { idx, _, err := b.memoTable.GetOrInsert(val) b.idxBuilder.Append(idx) @@ -806,6 +854,13 @@ func (b *dictionaryBuilder) appendValue(val interface{}) error { return err } +func (b *dictionaryBuilder) appendBytes(val []byte) error { + idx, _, err := b.memoTable.GetOrInsertBytes(val) + b.idxBuilder.Append(idx) + b.length += 1 + return err +} + func getvalFn(arr arrow.Array) func(i int) interface{} { switch typedarr := arr.(type) { case *Int8: @@ -855,6 +910,11 @@ func getvalFn(arr arrow.Array) func(i int) interface{} { val := typedarr.Value(i) return (*(*[arrow.Decimal128SizeBytes]byte)(unsafe.Pointer(&val)))[:] } + case *Decimal256: + return func(i int) interface{} { + val := typedarr.Value(i) + return (*(*[arrow.Decimal256SizeBytes]byte)(unsafe.Pointer(&val)))[:] + } case *DayTimeInterval: return func(i int) interface{} { val := typedarr.Value(i) @@ -886,6 +946,10 @@ func (b *dictionaryBuilder) AppendArray(arr arrow.Array) error { return nil } +func (b *dictionaryBuilder) IndexBuilder() IndexBuilder { + return b.idxBuilder +} + func (b *dictionaryBuilder) AppendIndices(indices []int, valid []bool) { b.length += len(indices) switch idxbldr := b.idxBuilder.Builder.(type) { @@ -1237,16 +1301,18 @@ func (b *BinaryDictionaryBuilder) Append(v []byte) error { b.AppendNull() return nil } - return b.appendValue(v) + + return b.appendBytes(v) } -func (b *BinaryDictionaryBuilder) AppendString(v string) error { return b.appendValue(v) } + +func (b *BinaryDictionaryBuilder) AppendString(v string) error { return b.appendBytes([]byte(v)) } func (b *BinaryDictionaryBuilder) InsertDictValues(arr *Binary) (err error) { if !arrow.TypeEqual(arr.DataType(), b.dt.ValueType) { return fmt.Errorf("dictionary insert type mismatch: cannot insert values of type %T to dictionary type %T", arr.DataType(), b.dt.ValueType) } for i := 0; i < arr.Len(); i++ { - if err = b.insertDictValue(arr.Value(i)); err 
!= nil { + if err = b.insertDictBytes(arr.Value(i)); err != nil { break } } @@ -1265,6 +1331,41 @@ func (b *BinaryDictionaryBuilder) InsertStringDictValues(arr *String) (err error return } +func (b *BinaryDictionaryBuilder) GetValueIndex(i int) int { + switch b := b.idxBuilder.Builder.(type) { + case *Uint8Builder: + return int(b.Value(i)) + case *Int8Builder: + return int(b.Value(i)) + case *Uint16Builder: + return int(b.Value(i)) + case *Int16Builder: + return int(b.Value(i)) + case *Uint32Builder: + return int(b.Value(i)) + case *Int32Builder: + return int(b.Value(i)) + case *Uint64Builder: + return int(b.Value(i)) + case *Int64Builder: + return int(b.Value(i)) + default: + return -1 + } +} + +func (b *BinaryDictionaryBuilder) Value(i int) []byte { + switch mt := b.memoTable.(type) { + case *hashing.BinaryMemoTable: + return mt.Value(i) + } + return nil +} + +func (b *BinaryDictionaryBuilder) ValueStr(i int) string { + return string(b.Value(i)) +} + type FixedSizeBinaryDictionaryBuilder struct { dictionaryBuilder byteWidth int @@ -1310,7 +1411,7 @@ type Decimal256DictionaryBuilder struct { dictionaryBuilder } -func (b *Decimal256DictionaryBuilder) Append(v decimal128.Num) error { +func (b *Decimal256DictionaryBuilder) Append(v decimal256.Num) error { return b.appendValue((*(*[arrow.Decimal256SizeBytes]byte)(unsafe.Pointer(&v)))[:]) } func (b *Decimal256DictionaryBuilder) InsertDictValues(arr *Decimal256) (err error) { @@ -1577,6 +1678,135 @@ func (u *unifier) GetResultWithIndexType(indexType arrow.DataType) (arrow.Array, return MakeFromData(dictData), nil } +type binaryUnifier struct { + mem memory.Allocator + memoTable *hashing.BinaryMemoTable +} + +// NewBinaryDictionaryUnifier constructs and returns a new dictionary unifier for dictionaries +// of binary values, using the provided allocator for allocating the unified dictionary +// and the memotable used for building it. 
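// A short sketch of what the BinaryDictionaryBuilder additions above
// enable: strings are appended through the new appendBytes path (avoiding
// the interface{} boxing of appendValue), and indices and values can be
// read back from the builder before any array is materialized. The
// dictionary type below is assumed for illustration.
//
//	dt := &arrow.DictionaryType{IndexType: arrow.PrimitiveTypes.Int32, ValueType: arrow.BinaryTypes.String}
//	bldr := array.NewDictionaryBuilder(memory.DefaultAllocator, dt).(*array.BinaryDictionaryBuilder)
//	defer bldr.Release()
//
//	_ = bldr.AppendString("foo")
//	_ = bldr.AppendString("bar")
//	_ = bldr.AppendString("foo")
//
//	idx := bldr.GetValueIndex(2)    // 0: both "foo" rows share one entry
//	fmt.Println(bldr.ValueStr(idx)) // foo, straight from the memo table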
+func NewBinaryDictionaryUnifier(alloc memory.Allocator) DictionaryUnifier { + return &binaryUnifier{ + mem: alloc, + memoTable: hashing.NewBinaryMemoTable(0, 0, NewBinaryBuilder(alloc, arrow.BinaryTypes.Binary)), + } +} + +func (u *binaryUnifier) Release() { + u.memoTable.Release() +} + +func (u *binaryUnifier) Unify(dict arrow.Array) (err error) { + if !arrow.TypeEqual(arrow.BinaryTypes.Binary, dict.DataType()) { + return fmt.Errorf("dictionary type different from unifier: %s, expected: %s", dict.DataType(), arrow.BinaryTypes.Binary) + } + + typedDict := dict.(*Binary) + for i := 0; i < dict.Len(); i++ { + if dict.IsNull(i) { + u.memoTable.GetOrInsertNull() + continue + } + + if _, _, err = u.memoTable.GetOrInsertBytes(typedDict.Value(i)); err != nil { + return err + } + } + return +} + +func (u *binaryUnifier) UnifyAndTranspose(dict arrow.Array) (transposed *memory.Buffer, err error) { + if !arrow.TypeEqual(arrow.BinaryTypes.Binary, dict.DataType()) { + return nil, fmt.Errorf("dictionary type different from unifier: %s, expected: %s", dict.DataType(), arrow.BinaryTypes.Binary) + } + + transposed = memory.NewResizableBuffer(u.mem) + transposed.Resize(arrow.Int32Traits.BytesRequired(dict.Len())) + + newIdxes := arrow.Int32Traits.CastFromBytes(transposed.Bytes()) + typedDict := dict.(*Binary) + for i := 0; i < dict.Len(); i++ { + if dict.IsNull(i) { + idx, _ := u.memoTable.GetOrInsertNull() + newIdxes[i] = int32(idx) + continue + } + + idx, _, err := u.memoTable.GetOrInsertBytes(typedDict.Value(i)) + if err != nil { + transposed.Release() + return nil, err + } + newIdxes[i] = int32(idx) + } + return +} + +func (u *binaryUnifier) GetResult() (outType arrow.DataType, outDict arrow.Array, err error) { + dictLen := u.memoTable.Size() + var indexType arrow.DataType + switch { + case dictLen <= math.MaxInt8: + indexType = arrow.PrimitiveTypes.Int8 + case dictLen <= math.MaxInt16: + indexType = arrow.PrimitiveTypes.Int16 + case dictLen <= math.MaxInt32: + indexType = arrow.PrimitiveTypes.Int32 + default: + indexType = arrow.PrimitiveTypes.Int64 + } + outType = &arrow.DictionaryType{IndexType: indexType, ValueType: arrow.BinaryTypes.Binary} + + dictData, err := GetDictArrayData(u.mem, arrow.BinaryTypes.Binary, u.memoTable, 0) + if err != nil { + return nil, nil, err + } + + u.memoTable.Reset() + + defer dictData.Release() + outDict = MakeFromData(dictData) + return +} + +func (u *binaryUnifier) GetResultWithIndexType(indexType arrow.DataType) (arrow.Array, error) { + dictLen := u.memoTable.Size() + var toobig bool + switch indexType.ID() { + case arrow.UINT8: + toobig = dictLen > math.MaxUint8 + case arrow.INT8: + toobig = dictLen > math.MaxInt8 + case arrow.UINT16: + toobig = dictLen > math.MaxUint16 + case arrow.INT16: + toobig = dictLen > math.MaxInt16 + case arrow.UINT32: + toobig = uint(dictLen) > math.MaxUint32 + case arrow.INT32: + toobig = dictLen > math.MaxInt32 + case arrow.UINT64: + toobig = uint64(dictLen) > uint64(math.MaxUint64) + case arrow.INT64: + default: + return nil, fmt.Errorf("arrow/array: invalid dictionary index type: %s, must be integral", indexType) + } + if toobig { + return nil, errors.New("arrow/array: cannot combine dictionaries. 
unified dictionary requires a larger index type") + } + + dictData, err := GetDictArrayData(u.mem, arrow.BinaryTypes.Binary, u.memoTable, 0) + if err != nil { + return nil, err + } + + u.memoTable.Reset() + + defer dictData.Release() + return MakeFromData(dictData), nil +} + func unifyRecursive(mem memory.Allocator, typ arrow.DataType, chunks []*Data) (changed bool, err error) { debug.Assert(len(chunks) != 0, "must provide non-zero length chunk slice") var extType arrow.DataType diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/diff.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/diff.go similarity index 99% rename from vendor/github.com/apache/arrow/go/v12/arrow/array/diff.go rename to vendor/github.com/apache/arrow/go/v14/arrow/array/diff.go index 32030173..026a27b9 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/array/diff.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/diff.go @@ -20,7 +20,7 @@ import ( "fmt" "strings" - "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v14/arrow" ) // Edit represents one entry in the edit script to compare two arrays. diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/doc.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/doc.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/array/doc.go rename to vendor/github.com/apache/arrow/go/v14/arrow/array/doc.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/encoded.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/encoded.go similarity index 80% rename from vendor/github.com/apache/arrow/go/v12/arrow/array/encoded.go rename to vendor/github.com/apache/arrow/go/v14/arrow/array/encoded.go index 2668c908..bf4a942c 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/array/encoded.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/encoded.go @@ -23,12 +23,12 @@ import ( "reflect" "sync/atomic" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/encoded" - "github.com/apache/arrow/go/v12/arrow/internal/debug" - "github.com/apache/arrow/go/v12/arrow/memory" - "github.com/apache/arrow/go/v12/internal/utils" - "github.com/goccy/go-json" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/encoded" + "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow/go/v14/internal/json" + "github.com/apache/arrow/go/v14/internal/utils" ) // RunEndEncoded represents an array containing two children: @@ -74,22 +74,24 @@ func (r *RunEndEncoded) Release() { // run, only over the range of run values inside the logical offset/length // range of the parent array. // -// Example +// # Example // // For this array: -// RunEndEncoded: { Offset: 150, Length: 1500 } -// RunEnds: [ 1, 2, 4, 6, 10, 1000, 1750, 2000 ] -// Values: [ "a", "b", "c", "d", "e", "f", "g", "h" ] +// +// RunEndEncoded: { Offset: 150, Length: 1500 } +// RunEnds: [ 1, 2, 4, 6, 10, 1000, 1750, 2000 ] +// Values: [ "a", "b", "c", "d", "e", "f", "g", "h" ] // // LogicalValuesArray will return the following array: -// [ "f", "g" ] +// +// [ "f", "g" ] // // This is because the offset of 150 tells it to skip the values until // "f" which corresponds with the logical offset (the run from 10 - 1000), // and stops after "g" because the length + offset goes to 1650 which is // within the run from 1000 - 1750, corresponding to the "g" value. // -// Note +// # Note // // The return from this needs to be Released. 
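// A hedged sketch tying this file's changes together: the builder's new
// AppendValueFromString (further below) collapses equal consecutive
// strings into runs, and the GetPhysicalIndex/ValueStr pair added below
// reads single values back without materializing LogicalValuesArray.
//
//	reb := array.NewRunEndEncodedBuilder(memory.DefaultAllocator, arrow.PrimitiveTypes.Int32, arrow.BinaryTypes.String)
//	defer reb.Release()
//	for _, s := range []string{"a", "a", "b"} {
//		if err := reb.AppendValueFromString(s); err != nil {
//			log.Fatal(err)
//		}
//	}
//	ree := reb.NewArray().(*array.RunEndEncoded)
//	defer ree.Release()
//	fmt.Println(ree.GetPhysicalLength()) // 2 runs: "a" x2, "b" x1
//	fmt.Println(ree.ValueStr(1))         // a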
func (r *RunEndEncoded) LogicalValuesArray() arrow.Array { @@ -109,15 +111,17 @@ func (r *RunEndEncoded) LogicalValuesArray() arrow.Array { // that are adjusted so the new array can have an offset of 0. As a result // this method can be expensive to call for an array with a non-zero offset. // -// Example +// # Example // // For this array: -// RunEndEncoded: { Offset: 150, Length: 1500 } -// RunEnds: [ 1, 2, 4, 6, 10, 1000, 1750, 2000 ] -// Values: [ "a", "b", "c", "d", "e", "f", "g", "h" ] +// +// RunEndEncoded: { Offset: 150, Length: 1500 } +// RunEnds: [ 1, 2, 4, 6, 10, 1000, 1750, 2000 ] +// Values: [ "a", "b", "c", "d", "e", "f", "g", "h" ] // // LogicalRunEndsArray will return the following array: -// [ 850, 1500 ] +// +// [ 850, 1500 ] // // This is because the offset of 150 tells us to skip all run-ends less // than 150 (by finding the physical offset), and we adjust the run-ends @@ -125,7 +129,7 @@ func (r *RunEndEncoded) LogicalValuesArray() arrow.Array { // so we know we don't want to go past the 1750 run end. Thus the last // run-end is determined by doing: min(1750 - 150, 1500) = 1500. // -// Note +// # Note // // The return from this needs to be Released func (r *RunEndEncoded) LogicalRunEndsArray(mem memory.Allocator) arrow.Array { @@ -192,6 +196,19 @@ func (r *RunEndEncoded) GetPhysicalLength() int { return encoded.GetPhysicalLength(r.data) } +// GetPhysicalIndex can be used to get the run-encoded value instead of costly LogicalValuesArray +// in the following way: +// +// r.Values().(valuetype).Value(r.GetPhysicalIndex(i)) +func (r *RunEndEncoded) GetPhysicalIndex(i int) int { + return encoded.FindPhysicalIndex(r.data, i+r.data.offset) +} + +// ValueStr will return the str representation of the value at the logical offset i. +func (r *RunEndEncoded) ValueStr(i int) string { + return r.values.ValueStr(r.GetPhysicalIndex(i)) +} + func (r *RunEndEncoded) String() string { var buf bytes.Buffer buf.WriteByte('[') @@ -204,9 +221,7 @@ func (r *RunEndEncoded) String() string { if byts, ok := value.(json.RawMessage); ok { value = string(byts) } - fmt.Fprintf(&buf, "{%d -> %v}", - r.ends.GetOneForMarshal(i), - value) + fmt.Fprintf(&buf, "{%d -> %v}", r.ends.GetOneForMarshal(i), value) } buf.WriteByte(']') @@ -214,8 +229,7 @@ func (r *RunEndEncoded) String() string { } func (r *RunEndEncoded) GetOneForMarshal(i int) interface{} { - physIndex := encoded.FindPhysicalIndex(r.data, i+r.data.offset) - return r.values.GetOneForMarshal(physIndex) + return r.values.GetOneForMarshal(r.GetPhysicalIndex(i)) } func (r *RunEndEncoded) MarshalJSON() ([]byte, error) { @@ -270,7 +284,10 @@ type RunEndEncodedBuilder struct { values Builder maxRunEnd uint64 + // currently, mixing AppendValueFromString & UnmarshalOne is unsupported lastUnmarshalled interface{} + unmarshalled bool // tracks if Unmarshal was called (in case lastUnmarshalled is nil) + lastStr *string } func NewRunEndEncodedBuilder(mem memory.Allocator, runEnds, encoded arrow.DataType) *RunEndEncodedBuilder { @@ -321,6 +338,8 @@ func (b *RunEndEncodedBuilder) addLength(n uint64) { func (b *RunEndEncodedBuilder) finishRun() { b.lastUnmarshalled = nil + b.lastStr = nil + b.unmarshalled = false if b.length == 0 { return } @@ -336,25 +355,35 @@ func (b *RunEndEncodedBuilder) finishRun() { } func (b *RunEndEncodedBuilder) ValueBuilder() Builder { return b.values } + func (b *RunEndEncodedBuilder) Append(n uint64) { b.finishRun() b.addLength(n) } + func (b *RunEndEncodedBuilder) AppendRuns(runs []uint64) { for _, r := range runs { b.finishRun() 
b.addLength(r) } } + func (b *RunEndEncodedBuilder) ContinueRun(n uint64) { b.addLength(n) } + func (b *RunEndEncodedBuilder) AppendNull() { b.finishRun() b.values.AppendNull() b.addLength(1) } +func (b *RunEndEncodedBuilder) AppendNulls(n int) { + for i := 0; i < n; i++ { + b.AppendNull() + } +} + func (b *RunEndEncodedBuilder) NullN() int { return UnknownNullCount } @@ -363,6 +392,10 @@ func (b *RunEndEncodedBuilder) AppendEmptyValue() { b.AppendNull() } +func (b *RunEndEncodedBuilder) AppendEmptyValues(n int) { + b.AppendNulls(n) +} + func (b *RunEndEncodedBuilder) Reserve(n int) { b.values.Reserve(n) b.runEnds.Reserve(n) @@ -391,13 +424,42 @@ func (b *RunEndEncodedBuilder) newData() (data *Data) { defer runEnds.Release() data = NewData( - b.dt, b.length, []*memory.Buffer{nil}, + b.dt, b.length, []*memory.Buffer{}, []arrow.ArrayData{runEnds.Data(), values.Data()}, 0, 0) b.reset() return } +// AppendValueFromString can't be used in conjunction with UnmarshalOne +func (b *RunEndEncodedBuilder) AppendValueFromString(s string) error { + // we don't support mixing AppendValueFromString & UnmarshalOne + if b.unmarshalled { + return fmt.Errorf("%w: mixing AppendValueFromString & UnmarshalOne not yet implemented", arrow.ErrNotImplemented) + } + + if s == NullValueStr { + b.AppendNull() + return nil + } + + if b.lastStr != nil && s == *b.lastStr { + b.ContinueRun(1) + return nil + } + + b.Append(1) + lastStr := s + b.lastStr = &lastStr + return b.ValueBuilder().AppendValueFromString(s) +} + +// UnmarshalOne can't be used in conjunction with AppendValueFromString func (b *RunEndEncodedBuilder) UnmarshalOne(dec *json.Decoder) error { + // we don't support mixing AppendValueFromString & UnmarshalOne + if b.lastStr != nil { + return fmt.Errorf("%w: mixing AppendValueFromString & UnmarshalOne not yet implemented", arrow.ErrNotImplemented) + } + var value interface{} if err := dec.Decode(&value); err != nil { return err @@ -422,9 +484,11 @@ func (b *RunEndEncodedBuilder) UnmarshalOne(dec *json.Decoder) error { b.Append(1) b.lastUnmarshalled = value + b.unmarshalled = true return b.ValueBuilder().UnmarshalOne(json.NewDecoder(bytes.NewReader(data))) } +// Unmarshal can't be used in conjunction with AppendValueFromString (as it calls UnmarshalOne) func (b *RunEndEncodedBuilder) Unmarshal(dec *json.Decoder) error { b.finishRun() for dec.More() { @@ -435,6 +499,7 @@ func (b *RunEndEncodedBuilder) Unmarshal(dec *json.Decoder) error { return nil } +// UnmarshalJSON can't be used in conjunction with AppendValueFromString (as it calls UnmarshalOne) func (b *RunEndEncodedBuilder) UnmarshalJSON(data []byte) error { dec := json.NewDecoder(bytes.NewReader(data)) t, err := dec.Token() diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/extension.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/extension.go similarity index 82% rename from vendor/github.com/apache/arrow/go/v12/arrow/array/extension.go rename to vendor/github.com/apache/arrow/go/v14/arrow/array/extension.go index 3ad4ec57..03e8c173 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/array/extension.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/extension.go @@ -20,9 +20,9 @@ import ( "fmt" "reflect" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/memory" - "github.com/goccy/go-json" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow/go/v14/internal/json" ) // ExtensionArray is the interface that needs to be implemented 
to handle @@ -38,8 +38,6 @@ type ExtensionArray interface { ExtensionType() arrow.ExtensionType // Storage returns the underlying storage array for this array. Storage() arrow.Array - // ValueString returns a string represenation of the value at the given index for the extension array. - ValueString(i int) string // by having a non-exported function in the interface, it means that // consumers must embed ExtensionArrayBase in their structs in order // to fulfill this interface. @@ -76,28 +74,12 @@ func NewExtensionArrayWithStorage(dt arrow.ExtensionType, storage arrow.Array) a panic(fmt.Errorf("arrow/array: storage type %s for extension type %s, does not match expected type %s", storage.DataType(), dt.ExtensionName(), dt.StorageType())) } - base := ExtensionArrayBase{} - base.refCount = 1 - base.storage = storage - storage.Retain() - storageData := storage.Data().(*Data) // create a new data instance with the ExtensionType as the datatype but referencing the // same underlying buffers to share them with the storage array. - baseData := NewData(dt, storageData.length, storageData.buffers, storageData.childData, storageData.nulls, storageData.offset) - defer baseData.Release() - base.array.setData(baseData) - - // use the ExtensionType's ArrayType to construct the correctly typed object - // to use as the ExtensionArray interface. reflect.New returns a pointer to - // the newly created object. - arr := reflect.New(base.ExtensionType().ArrayType()) - // set the embedded ExtensionArrayBase to the value we created above. We know - // that this field will exist because the interface requires embedding ExtensionArrayBase - // so we don't have to separately check, this will panic if called on an ArrayType - // that doesn't embed ExtensionArrayBase which is what we want. - arr.Elem().FieldByName("ExtensionArrayBase").Set(reflect.ValueOf(base)) - return arr.Interface().(ExtensionArray) + data := NewData(dt, storageData.length, storageData.buffers, storageData.childData, storageData.nulls, storageData.offset) + defer data.Release() + return NewExtensionData(data) } // NewExtensionData expects a data with a datatype of arrow.ExtensionType and @@ -122,10 +104,9 @@ func NewExtensionData(data arrow.ArrayData) ExtensionArray { // ExtensionArrayBase is the base struct for user-defined Extension Array types // and must be embedded in any user-defined extension arrays like so: // -// type UserDefinedArray struct { -// array.ExtensionArrayBase -// } -// +// type UserDefinedArray struct { +// array.ExtensionArrayBase +// } type ExtensionArrayBase struct { array storage arrow.Array @@ -185,10 +166,10 @@ func (e *ExtensionArrayBase) setData(data *Data) { e.storage = MakeFromData(storageData) } -// ValueString returns the value at index i as a string. +// ValueStr returns the value at index i as a string. // This needs to be implemented by the extension array type. -func (e *ExtensionArrayBase) ValueString(i int) string { - panic("arrow/array: ValueString wasn't implemented by this extension array type") +func (e *ExtensionArrayBase) ValueStr(i int) string { + panic("arrow/array: ValueStr wasn't implemented by this extension array type") } // no-op function that exists simply to force embedding this in any extension array types. @@ -214,18 +195,18 @@ type ExtensionBuilder struct { // the appropriate extension array type and set the storage correctly, resetting the builder for // reuse. 
// -// Example +// # Example // // Simple example assuming an extension type of a UUID defined as a FixedSizeBinary(16) was registered // using the type name "uuid": // -// uuidType := arrow.GetExtensionType("uuid") -// bldr := array.NewExtensionBuilder(memory.DefaultAllocator, uuidType) -// defer bldr.Release() -// uuidBldr := bldr.StorageBuilder().(*array.FixedSizeBinaryBuilder) -// /* build up the fixed size binary array as usual via Append/AppendValues */ -// uuidArr := bldr.NewExtensionArray() -// defer uuidArr.Release() +// uuidType := arrow.GetExtensionType("uuid") +// bldr := array.NewExtensionBuilder(memory.DefaultAllocator, uuidType) +// defer bldr.Release() +// uuidBldr := bldr.StorageBuilder().(*array.FixedSizeBinaryBuilder) +// /* build up the fixed size binary array as usual via Append/AppendValues */ +// uuidArr := bldr.NewExtensionArray() +// defer uuidArr.Release() // // Because the storage builder is embedded in the Extension builder it also means // that any of the functions available on the Builder interface can be called on diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/extension_builder.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/extension_builder.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/array/extension_builder.go rename to vendor/github.com/apache/arrow/go/v14/arrow/array/extension_builder.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/fixed_size_list.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/fixed_size_list.go similarity index 85% rename from vendor/github.com/apache/arrow/go/v12/arrow/array/fixed_size_list.go rename to vendor/github.com/apache/arrow/go/v14/arrow/array/fixed_size_list.go index c79573da..62c32138 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/array/fixed_size_list.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/fixed_size_list.go @@ -22,11 +22,11 @@ import ( "strings" "sync/atomic" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/arrow/internal/debug" - "github.com/apache/arrow/go/v12/arrow/memory" - "github.com/goccy/go-json" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow/go/v14/internal/json" ) // FixedSizeList represents an immutable sequence of N array values. @@ -36,6 +36,8 @@ type FixedSizeList struct { values arrow.Array } +var _ ListLike = (*FixedSizeList)(nil) + // NewFixedSizeListData returns a new List array value, from data. 
func NewFixedSizeListData(data arrow.ArrayData) *FixedSizeList { a := &FixedSizeList{} @@ -46,6 +48,12 @@ func NewFixedSizeListData(data arrow.ArrayData) *FixedSizeList { func (a *FixedSizeList) ListValues() arrow.Array { return a.values } +func (a *FixedSizeList) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return string(a.GetOneForMarshal(i).(json.RawMessage)) +} func (a *FixedSizeList) String() string { o := new(strings.Builder) o.WriteString("[") @@ -54,7 +62,7 @@ func (a *FixedSizeList) String() string { o.WriteString(" ") } if !a.IsValid(i) { - o.WriteString("(null)") + o.WriteString(NullValueStr) continue } sub := a.newListValue(i) @@ -66,12 +74,8 @@ func (a *FixedSizeList) String() string { } func (a *FixedSizeList) newListValue(i int) arrow.Array { - n := int64(a.n) - off := int64(a.array.data.offset) - beg := (off + int64(i)) * n - end := (off + int64(i+1)) * n - sli := NewSlice(a.values, beg, end) - return sli + beg, end := a.ValueOffsets(i) + return NewSlice(a.values, beg, end) } func (a *FixedSizeList) setData(data *Data) { @@ -102,6 +106,13 @@ func arrayEqualFixedSizeList(left, right *FixedSizeList) bool { // Len returns the number of elements in the array. func (a *FixedSizeList) Len() int { return a.array.Len() } +func (a *FixedSizeList) ValueOffsets(i int) (start, end int64) { + n := int64(a.n) + off := int64(a.array.data.offset) + start, end = (off+int64(i))*n, (off+int64(i+1))*n + return +} + func (a *FixedSizeList) Retain() { a.array.Retain() a.values.Retain() @@ -193,9 +204,21 @@ func (b *FixedSizeListBuilder) Append(v bool) { b.unsafeAppendBoolToBitmap(v) } +// AppendNull will append null values to the underlying values by itself func (b *FixedSizeListBuilder) AppendNull() { b.Reserve(1) b.unsafeAppendBoolToBitmap(false) + // require to append this due to value indexes + for i := int32(0); i < b.n; i++ { + b.values.AppendNull() + } +} + +// AppendNulls will append n null values to the underlying values by itself +func (b *FixedSizeListBuilder) AppendNulls(n int) { + for i := 0; i < n; i++ { + b.AppendNull() + } } func (b *FixedSizeListBuilder) AppendEmptyValue() { @@ -205,6 +228,12 @@ func (b *FixedSizeListBuilder) AppendEmptyValue() { } } +func (b *FixedSizeListBuilder) AppendEmptyValues(n int) { + for i := 0; i < n; i++ { + b.AppendEmptyValue() + } +} + func (b *FixedSizeListBuilder) AppendValues(valid []bool) { b.Reserve(len(valid)) b.builder.unsafeAppendBoolsToBitmap(valid, len(valid)) @@ -278,6 +307,15 @@ func (b *FixedSizeListBuilder) newData() (data *Data) { return } +func (b *FixedSizeListBuilder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + dec := json.NewDecoder(strings.NewReader(s)) + return b.UnmarshalOne(dec) +} + func (b *FixedSizeListBuilder) UnmarshalOne(dec *json.Decoder) error { t, err := dec.Token() if err != nil { @@ -295,9 +333,6 @@ func (b *FixedSizeListBuilder) UnmarshalOne(dec *json.Decoder) error { return err case nil: b.AppendNull() - for i := int32(0); i < b.n; i++ { - b.values.AppendNull() - } default: return &json.UnmarshalTypeError{ Value: fmt.Sprint(t), diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/fixedsize_binary.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/fixedsize_binary.go similarity index 90% rename from vendor/github.com/apache/arrow/go/v12/arrow/array/fixedsize_binary.go rename to vendor/github.com/apache/arrow/go/v14/arrow/array/fixedsize_binary.go index 3c014c97..5466156d 100644 --- 
a/vendor/github.com/apache/arrow/go/v12/arrow/array/fixedsize_binary.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/fixedsize_binary.go @@ -18,11 +18,12 @@ package array import ( "bytes" + "encoding/base64" "fmt" "strings" - "github.com/apache/arrow/go/v12/arrow" - "github.com/goccy/go-json" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/internal/json" ) // A type which represents an immutable sequence of fixed-length binary strings. @@ -51,6 +52,12 @@ func (a *FixedSizeBinary) Value(i int) []byte { ) return a.valueBytes[beg:end] } +func (a *FixedSizeBinary) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return base64.StdEncoding.EncodeToString(a.Value(i)) +} func (a *FixedSizeBinary) String() string { o := new(strings.Builder) @@ -61,7 +68,7 @@ func (a *FixedSizeBinary) String() string { } switch { case a.IsNull(i): - o.WriteString("(null)") + o.WriteString(NullValueStr) default: fmt.Fprintf(o, "%q", a.Value(i)) } diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/fixedsize_binarybuilder.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/fixedsize_binarybuilder.go similarity index 89% rename from vendor/github.com/apache/arrow/go/v12/arrow/array/fixedsize_binarybuilder.go rename to vendor/github.com/apache/arrow/go/v14/arrow/array/fixedsize_binarybuilder.go index 260d8b68..ba4b474a 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/array/fixedsize_binarybuilder.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/fixedsize_binarybuilder.go @@ -23,10 +23,10 @@ import ( "reflect" "sync/atomic" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/internal/debug" - "github.com/apache/arrow/go/v12/arrow/memory" - "github.com/goccy/go-json" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow/go/v14/internal/json" ) // A FixedSizeBinaryBuilder is used to build a FixedSizeBinary array using the Append methods. 
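// The hunks below move the JSON path from base64.RawStdEncoding to
// base64.StdEncoding, matching the padded form used by the new ValueStr
// and AppendValueFromString, so a value now survives a round trip through
// its own string representation. An illustrative sketch with an assumed
// 4-byte width:
//
//	fsb := array.NewFixedSizeBinaryBuilder(memory.DefaultAllocator, &arrow.FixedSizeBinaryType{ByteWidth: 4})
//	defer fsb.Release()
//	fsb.Append([]byte{0xde, 0xad, 0xbe, 0xef})
//
//	arr := fsb.NewFixedSizeBinaryArray()
//	defer arr.Release()
//	s := arr.ValueStr(0) // "3q2+7w==", StdEncoding keeps the padding
//
//	if err := fsb.AppendValueFromString(s); err != nil { // same alphabet on decode
//		log.Fatal(err)
//	}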
@@ -83,12 +83,24 @@ func (b *FixedSizeBinaryBuilder) AppendNull() { b.UnsafeAppendBoolToBitmap(false) } +func (b *FixedSizeBinaryBuilder) AppendNulls(n int) { + for i := 0; i < n; i++ { + b.AppendNull() + } +} + func (b *FixedSizeBinaryBuilder) AppendEmptyValue() { b.Reserve(1) b.values.Advance(b.dtype.ByteWidth) b.UnsafeAppendBoolToBitmap(true) } +func (b *FixedSizeBinaryBuilder) AppendEmptyValues(n int) { + for i := 0; i < n; i++ { + b.AppendEmptyValue() + } +} + func (b *FixedSizeBinaryBuilder) UnsafeAppend(v []byte) { b.values.unsafeAppend(v) b.UnsafeAppendBoolToBitmap(true) @@ -166,6 +178,21 @@ func (b *FixedSizeBinaryBuilder) newData() (data *Data) { return } +func (b *FixedSizeBinaryBuilder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + + data, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b.AppendNull() + return err + } + b.Append(data) + return nil +} + func (b *FixedSizeBinaryBuilder) UnmarshalOne(dec *json.Decoder) error { t, err := dec.Token() if err != nil { @@ -175,7 +202,7 @@ func (b *FixedSizeBinaryBuilder) UnmarshalOne(dec *json.Decoder) error { var val []byte switch v := t.(type) { case string: - data, err := base64.RawStdEncoding.DecodeString(v) + data, err := base64.StdEncoding.DecodeString(v) if err != nil { return err } diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/float16.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/float16.go similarity index 89% rename from vendor/github.com/apache/arrow/go/v12/arrow/array/float16.go rename to vendor/github.com/apache/arrow/go/v14/arrow/array/float16.go index b4ed3150..de499e26 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/array/float16.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/float16.go @@ -20,9 +20,9 @@ import ( "fmt" "strings" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/float16" - "github.com/goccy/go-json" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/float16" + "github.com/apache/arrow/go/v14/internal/json" ) // A type which represents an immutable sequence of Float16 values. 
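// The ValueStr/AppendValueFromString contract added throughout this patch
// (decimals, intervals, dictionaries) appears here for half-floats:
// strings parse via strconv.ParseFloat and render via float16.Num's
// String method. A small sketch:
//
//	f16 := array.NewFloat16Builder(memory.DefaultAllocator)
//	defer f16.Release()
//	if err := f16.AppendValueFromString("1.5"); err != nil {
//		log.Fatal(err)
//	}
//	f16.AppendNull()
//
//	arr := f16.NewFloat16Array()
//	defer arr.Release()
//	fmt.Println(arr.ValueStr(0), arr.ValueStr(1)) // 1.5 (null)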
@@ -39,6 +39,12 @@ func NewFloat16Data(data arrow.ArrayData) *Float16 { } func (a *Float16) Value(i int) float16.Num { return a.values[i] } +func (a *Float16) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return a.Value(i).String() +} func (a *Float16) Values() []float16.Num { return a.values } @@ -51,7 +57,7 @@ func (a *Float16) String() string { } switch { case a.IsNull(i): - o.WriteString("(null)") + o.WriteString(NullValueStr) default: fmt.Fprintf(o, "%v", a.values[i].Float32()) } diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/float16_builder.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/float16_builder.go similarity index 89% rename from vendor/github.com/apache/arrow/go/v12/arrow/array/float16_builder.go rename to vendor/github.com/apache/arrow/go/v14/arrow/array/float16_builder.go index 06ae2c4d..f96ab603 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/array/float16_builder.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/float16_builder.go @@ -23,12 +23,12 @@ import ( "strconv" "sync/atomic" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/arrow/float16" - "github.com/apache/arrow/go/v12/arrow/internal/debug" - "github.com/apache/arrow/go/v12/arrow/memory" - "github.com/goccy/go-json" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/arrow/float16" + "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow/go/v14/internal/json" ) type Float16Builder struct { @@ -78,11 +78,23 @@ func (b *Float16Builder) AppendNull() { b.UnsafeAppendBoolToBitmap(false) } +func (b *Float16Builder) AppendNulls(n int) { + for i := 0; i < n; i++ { + b.AppendNull() + } +} + func (b *Float16Builder) AppendEmptyValue() { b.Reserve(1) b.UnsafeAppend(float16.Num{}) } +func (b *Float16Builder) AppendEmptyValues(n int) { + for i := 0; i < n; i++ { + b.AppendEmptyValue() + } +} + func (b *Float16Builder) UnsafeAppendBoolToBitmap(isValid bool) { if isValid { bitutil.SetBit(b.nullBitmap.Bytes(), b.length) @@ -176,6 +188,20 @@ func (b *Float16Builder) newData() (data *Data) { return } +func (b *Float16Builder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + v, err := strconv.ParseFloat(s, 32) + if err != nil { + b.AppendNull() + return err + } + b.Append(float16.New(float32(v))) + return nil +} + func (b *Float16Builder) UnmarshalOne(dec *json.Decoder) error { t, err := dec.Token() if err != nil { diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/interval.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/interval.go similarity index 89% rename from vendor/github.com/apache/arrow/go/v12/arrow/array/interval.go rename to vendor/github.com/apache/arrow/go/v14/arrow/array/interval.go index d5844ea3..ff059c92 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/array/interval.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/interval.go @@ -19,14 +19,15 @@ package array import ( "bytes" "fmt" + "strconv" "strings" "sync/atomic" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/arrow/internal/debug" - "github.com/apache/arrow/go/v12/arrow/memory" - "github.com/goccy/go-json" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/bitutil" + 
"github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow/go/v14/internal/json" ) func NewIntervalData(data arrow.ArrayData) arrow.Array { @@ -55,7 +56,13 @@ func NewMonthIntervalData(data arrow.ArrayData) *MonthInterval { return a } -func (a *MonthInterval) Value(i int) arrow.MonthInterval { return a.values[i] } +func (a *MonthInterval) Value(i int) arrow.MonthInterval { return a.values[i] } +func (a *MonthInterval) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return fmt.Sprintf("%v", a.Value(i)) +} func (a *MonthInterval) MonthIntervalValues() []arrow.MonthInterval { return a.values } func (a *MonthInterval) String() string { @@ -67,7 +74,7 @@ func (a *MonthInterval) String() string { } switch { case a.IsNull(i): - o.WriteString("(null)") + o.WriteString(NullValueStr) default: fmt.Fprintf(o, "%v", v) } @@ -166,10 +173,22 @@ func (b *MonthIntervalBuilder) AppendNull() { b.UnsafeAppendBoolToBitmap(false) } +func (b *MonthIntervalBuilder) AppendNulls(n int) { + for i := 0; i < n; i++ { + b.AppendNull() + } +} + func (b *MonthIntervalBuilder) AppendEmptyValue() { b.Append(arrow.MonthInterval(0)) } +func (b *MonthIntervalBuilder) AppendEmptyValues(n int) { + for i := 0; i < n; i++ { + b.AppendEmptyValue() + } +} + func (b *MonthIntervalBuilder) UnsafeAppend(v arrow.MonthInterval) { bitutil.SetBit(b.nullBitmap.Bytes(), b.length) b.rawData[b.length] = v @@ -267,6 +286,20 @@ func (b *MonthIntervalBuilder) newData() (data *Data) { return } +func (b *MonthIntervalBuilder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + v, err := strconv.ParseInt(s, 10, 32) + if err != nil { + b.AppendNull() + return err + } + b.Append(arrow.MonthInterval(v)) + return nil +} + func (b *MonthIntervalBuilder) UnmarshalOne(dec *json.Decoder) error { var v *arrow.MonthInterval if err := dec.Decode(&v); err != nil { @@ -320,7 +353,18 @@ func NewDayTimeIntervalData(data arrow.ArrayData) *DayTimeInterval { return a } -func (a *DayTimeInterval) Value(i int) arrow.DayTimeInterval { return a.values[i] } +func (a *DayTimeInterval) Value(i int) arrow.DayTimeInterval { return a.values[i] } +func (a *DayTimeInterval) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + data, err := json.Marshal(a.GetOneForMarshal(i)) + if err != nil { + panic(err) + } + return string(data) +} + func (a *DayTimeInterval) DayTimeIntervalValues() []arrow.DayTimeInterval { return a.values } func (a *DayTimeInterval) String() string { @@ -332,7 +376,7 @@ func (a *DayTimeInterval) String() string { } switch { case a.IsNull(i): - o.WriteString("(null)") + o.WriteString(NullValueStr) default: fmt.Fprintf(o, "%v", v) } @@ -429,10 +473,22 @@ func (b *DayTimeIntervalBuilder) AppendNull() { b.UnsafeAppendBoolToBitmap(false) } +func (b *DayTimeIntervalBuilder) AppendNulls(n int) { + for i := 0; i < n; i++ { + b.AppendNull() + } +} + func (b *DayTimeIntervalBuilder) AppendEmptyValue() { b.Append(arrow.DayTimeInterval{}) } +func (b *DayTimeIntervalBuilder) AppendEmptyValues(n int) { + for i := 0; i < n; i++ { + b.AppendEmptyValue() + } +} + func (b *DayTimeIntervalBuilder) UnsafeAppend(v arrow.DayTimeInterval) { bitutil.SetBit(b.nullBitmap.Bytes(), b.length) b.rawData[b.length] = v @@ -530,6 +586,20 @@ func (b *DayTimeIntervalBuilder) newData() (data *Data) { return } +func (b *DayTimeIntervalBuilder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return 
nil + } + var v arrow.DayTimeInterval + if err := json.Unmarshal([]byte(s), &v); err != nil { + b.AppendNull() + return err + } + b.Append(v) + return nil +} + func (b *DayTimeIntervalBuilder) UnmarshalOne(dec *json.Decoder) error { var v *arrow.DayTimeInterval if err := dec.Decode(&v); err != nil { @@ -583,6 +653,17 @@ func NewMonthDayNanoIntervalData(data arrow.ArrayData) *MonthDayNanoInterval { } func (a *MonthDayNanoInterval) Value(i int) arrow.MonthDayNanoInterval { return a.values[i] } +func (a *MonthDayNanoInterval) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + data, err := json.Marshal(a.GetOneForMarshal(i)) + if err != nil { + panic(err) + } + return string(data) +} + func (a *MonthDayNanoInterval) MonthDayNanoIntervalValues() []arrow.MonthDayNanoInterval { return a.values } @@ -596,7 +677,7 @@ func (a *MonthDayNanoInterval) String() string { } switch { case a.IsNull(i): - o.WriteString("(null)") + o.WriteString(NullValueStr) default: fmt.Fprintf(o, "%v", v) } @@ -695,10 +776,22 @@ func (b *MonthDayNanoIntervalBuilder) AppendNull() { b.UnsafeAppendBoolToBitmap(false) } +func (b *MonthDayNanoIntervalBuilder) AppendNulls(n int) { + for i := 0; i < n; i++ { + b.AppendNull() + } +} + func (b *MonthDayNanoIntervalBuilder) AppendEmptyValue() { b.Append(arrow.MonthDayNanoInterval{}) } +func (b *MonthDayNanoIntervalBuilder) AppendEmptyValues(n int) { + for i := 0; i < n; i++ { + b.AppendEmptyValue() + } +} + func (b *MonthDayNanoIntervalBuilder) UnsafeAppend(v arrow.MonthDayNanoInterval) { bitutil.SetBit(b.nullBitmap.Bytes(), b.length) b.rawData[b.length] = v @@ -796,6 +889,19 @@ func (b *MonthDayNanoIntervalBuilder) newData() (data *Data) { return } +func (b *MonthDayNanoIntervalBuilder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + var v arrow.MonthDayNanoInterval + if err := json.Unmarshal([]byte(s), &v); err != nil { + return err + } + b.Append(v) + return nil +} + func (b *MonthDayNanoIntervalBuilder) UnmarshalOne(dec *json.Decoder) error { var v *arrow.MonthDayNanoInterval if err := dec.Decode(&v); err != nil { diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/json_reader.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/json_reader.go similarity index 96% rename from vendor/github.com/apache/arrow/go/v12/arrow/array/json_reader.go rename to vendor/github.com/apache/arrow/go/v14/arrow/array/json_reader.go index 100fdd91..e09717c4 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/array/json_reader.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/json_reader.go @@ -22,10 +22,10 @@ import ( "io" "sync/atomic" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/internal/debug" - "github.com/apache/arrow/go/v12/arrow/memory" - "github.com/goccy/go-json" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow/go/v14/internal/json" ) type Option func(config) diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/list.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/list.go new file mode 100644 index 00000000..d8d8b8c7 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/list.go @@ -0,0 +1,1688 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package array + +import ( + "bytes" + "fmt" + "math" + "strings" + "sync/atomic" + + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow/go/v14/internal/json" +) + +type ListLike interface { + arrow.Array + ListValues() arrow.Array + ValueOffsets(i int) (start, end int64) +} + +type VarLenListLike interface { + ListLike +} + +// List represents an immutable sequence of array values. +type List struct { + array + values arrow.Array + offsets []int32 +} + +var _ ListLike = (*List)(nil) + +// NewListData returns a new List array value, from data. +func NewListData(data arrow.ArrayData) *List { + a := &List{} + a.refCount = 1 + a.setData(data.(*Data)) + return a +} + +func (a *List) ListValues() arrow.Array { return a.values } + +func (a *List) ValueStr(i int) string { + if !a.IsValid(i) { + return NullValueStr + } + return string(a.GetOneForMarshal(i).(json.RawMessage)) +} + +func (a *List) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i := 0; i < a.Len(); i++ { + if i > 0 { + o.WriteString(" ") + } + if a.IsNull(i) { + o.WriteString(NullValueStr) + continue + } + sub := a.newListValue(i) + fmt.Fprintf(o, "%v", sub) + sub.Release() + } + o.WriteString("]") + return o.String() +} + +func (a *List) newListValue(i int) arrow.Array { + beg, end := a.ValueOffsets(i) + return NewSlice(a.values, beg, end) +} + +func (a *List) setData(data *Data) { + debug.Assert(len(data.buffers) >= 2, "list data should have 2 buffers") + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.offsets = arrow.Int32Traits.CastFromBytes(vals.Bytes()) + } + a.values = MakeFromData(data.childData[0]) +} + +func (a *List) GetOneForMarshal(i int) interface{} { + if a.IsNull(i) { + return nil + } + + slice := a.newListValue(i) + defer slice.Release() + v, err := json.Marshal(slice) + if err != nil { + panic(err) + } + return json.RawMessage(v) +} + +func (a *List) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + enc := json.NewEncoder(&buf) + + buf.WriteByte('[') + for i := 0; i < a.Len(); i++ { + if i != 0 { + buf.WriteByte(',') + } + if err := enc.Encode(a.GetOneForMarshal(i)); err != nil { + return nil, err + } + } + buf.WriteByte(']') + return buf.Bytes(), nil +} + +func arrayEqualList(left, right *List) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + o := func() bool { + l := left.newListValue(i) + defer l.Release() + r := right.newListValue(i) + defer r.Release() + return Equal(l, r) + }() + if !o { + return false + } + } + return true +} + +// Len returns the number of elements in the array. 
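// Since List, LargeList, and (per this patch) FixedSizeList all satisfy
// ListLike, offset-based traversal can be written once against the
// interface. A sketch of a hypothetical helper:
//
//	func printRanges(arr array.ListLike) {
//		for i := 0; i < arr.Len(); i++ {
//			start, end := arr.ValueOffsets(i)
//			fmt.Printf("row %d -> values[%d:%d]\n", i, start, end)
//		}
//	}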
+func (a *List) Len() int { return a.array.Len() } + +func (a *List) Offsets() []int32 { return a.offsets } + +func (a *List) Retain() { + a.array.Retain() + a.values.Retain() +} + +func (a *List) Release() { + a.array.Release() + a.values.Release() +} + +func (a *List) ValueOffsets(i int) (start, end int64) { + debug.Assert(i >= 0 && i < a.array.data.length, "index out of range") + j := i + a.array.data.offset + start, end = int64(a.offsets[j]), int64(a.offsets[j+1]) + return +} + +// LargeList represents an immutable sequence of array values. +type LargeList struct { + array + values arrow.Array + offsets []int64 +} + +var _ ListLike = (*LargeList)(nil) + +// NewLargeListData returns a new LargeList array value, from data. +func NewLargeListData(data arrow.ArrayData) *LargeList { + a := new(LargeList) + a.refCount = 1 + a.setData(data.(*Data)) + return a +} + +func (a *LargeList) ListValues() arrow.Array { return a.values } + +func (a *LargeList) ValueStr(i int) string { + if !a.IsValid(i) { + return NullValueStr + } + return string(a.GetOneForMarshal(i).(json.RawMessage)) +} + +func (a *LargeList) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i := 0; i < a.Len(); i++ { + if i > 0 { + o.WriteString(" ") + } + if a.IsNull(i) { + o.WriteString(NullValueStr) + continue + } + sub := a.newListValue(i) + fmt.Fprintf(o, "%v", sub) + sub.Release() + } + o.WriteString("]") + return o.String() +} + +func (a *LargeList) newListValue(i int) arrow.Array { + beg, end := a.ValueOffsets(i) + return NewSlice(a.values, beg, end) +} + +func (a *LargeList) setData(data *Data) { + debug.Assert(len(data.buffers) >= 2, "list data should have 2 buffers") + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.offsets = arrow.Int64Traits.CastFromBytes(vals.Bytes()) + } + a.values = MakeFromData(data.childData[0]) +} + +func (a *LargeList) GetOneForMarshal(i int) interface{} { + if a.IsNull(i) { + return nil + } + + slice := a.newListValue(i) + defer slice.Release() + v, err := json.Marshal(slice) + if err != nil { + panic(err) + } + return json.RawMessage(v) +} + +func (a *LargeList) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + enc := json.NewEncoder(&buf) + + buf.WriteByte('[') + for i := 0; i < a.Len(); i++ { + if i != 0 { + buf.WriteByte(',') + } + if err := enc.Encode(a.GetOneForMarshal(i)); err != nil { + return nil, err + } + } + buf.WriteByte(']') + return buf.Bytes(), nil +} + +func arrayEqualLargeList(left, right *LargeList) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + o := func() bool { + l := left.newListValue(i) + defer l.Release() + r := right.newListValue(i) + defer r.Release() + return Equal(l, r) + }() + if !o { + return false + } + } + return true +} + +// Len returns the number of elements in the array. +func (a *LargeList) Len() int { return a.array.Len() } + +func (a *LargeList) Offsets() []int64 { return a.offsets } + +func (a *LargeList) ValueOffsets(i int) (start, end int64) { + debug.Assert(i >= 0 && i < a.array.data.length, "index out of range") + j := i + a.array.data.offset + start, end = a.offsets[j], a.offsets[j+1] + return +} + +func (a *LargeList) Retain() { + a.array.Retain() + a.values.Retain() +} + +func (a *LargeList) Release() { + a.array.Release() + a.values.Release() +} + +type baseListBuilder struct { + builder + + values Builder // value builder for the list's elements. 
+ offsets Builder + + // actual list type + dt arrow.DataType + appendOffsetVal func(int) +} + +type ListLikeBuilder interface { + Builder + ValueBuilder() Builder + Append(bool) +} + +type VarLenListLikeBuilder interface { + ListLikeBuilder + AppendWithSize(bool, int) +} + +type ListBuilder struct { + baseListBuilder +} + +type LargeListBuilder struct { + baseListBuilder +} + +// NewListBuilder returns a builder, using the provided memory allocator. +// The created list builder will create a list whose elements will be of type etype. +func NewListBuilder(mem memory.Allocator, etype arrow.DataType) *ListBuilder { + offsetBldr := NewInt32Builder(mem) + return &ListBuilder{ + baseListBuilder{ + builder: builder{refCount: 1, mem: mem}, + values: NewBuilder(mem, etype), + offsets: offsetBldr, + dt: arrow.ListOf(etype), + appendOffsetVal: func(o int) { offsetBldr.Append(int32(o)) }, + }, + } +} + +// NewListBuilderWithField takes a field to use for the child rather than just +// a datatype to allow for more customization. +func NewListBuilderWithField(mem memory.Allocator, field arrow.Field) *ListBuilder { + offsetBldr := NewInt32Builder(mem) + return &ListBuilder{ + baseListBuilder{ + builder: builder{refCount: 1, mem: mem}, + values: NewBuilder(mem, field.Type), + offsets: offsetBldr, + dt: arrow.ListOfField(field), + appendOffsetVal: func(o int) { offsetBldr.Append(int32(o)) }, + }, + } +} + +func (b *baseListBuilder) Type() arrow.DataType { + switch dt := b.dt.(type) { + case *arrow.ListType: + f := dt.ElemField() + f.Type = b.values.Type() + return arrow.ListOfField(f) + case *arrow.LargeListType: + f := dt.ElemField() + f.Type = b.values.Type() + return arrow.LargeListOfField(f) + } + return nil +} + +// NewLargeListBuilder returns a builder, using the provided memory allocator. +// The created list builder will create a list whose elements will be of type etype. +func NewLargeListBuilder(mem memory.Allocator, etype arrow.DataType) *LargeListBuilder { + offsetBldr := NewInt64Builder(mem) + return &LargeListBuilder{ + baseListBuilder{ + builder: builder{refCount: 1, mem: mem}, + values: NewBuilder(mem, etype), + offsets: offsetBldr, + dt: arrow.LargeListOf(etype), + appendOffsetVal: func(o int) { offsetBldr.Append(int64(o)) }, + }, + } +} + +// NewLargeListBuilderWithField takes a field rather than just an element type +// to allow for more customization of the final type of the LargeList Array +func NewLargeListBuilderWithField(mem memory.Allocator, field arrow.Field) *LargeListBuilder { + offsetBldr := NewInt64Builder(mem) + return &LargeListBuilder{ + baseListBuilder{ + builder: builder{refCount: 1, mem: mem}, + values: NewBuilder(mem, field.Type), + offsets: offsetBldr, + dt: arrow.LargeListOfField(field), + appendOffsetVal: func(o int) { offsetBldr.Append(int64(o)) }, + }, + } +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. 
+func (b *baseListBuilder) Release() {
+	debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+
+	if atomic.AddInt64(&b.refCount, -1) == 0 {
+		if b.nullBitmap != nil {
+			b.nullBitmap.Release()
+			b.nullBitmap = nil
+		}
+		b.values.Release()
+		b.offsets.Release()
+	}
+
+}
+
+func (b *baseListBuilder) appendNextOffset() {
+	b.appendOffsetVal(b.values.Len())
+}
+
+func (b *baseListBuilder) Append(v bool) {
+	b.Reserve(1)
+	b.unsafeAppendBoolToBitmap(v)
+	b.appendNextOffset()
+}
+
+func (b *baseListBuilder) AppendWithSize(v bool, _ int) {
+	b.Append(v)
+}
+
+func (b *baseListBuilder) AppendNull() {
+	b.Reserve(1)
+	b.unsafeAppendBoolToBitmap(false)
+	b.appendNextOffset()
+}
+
+func (b *baseListBuilder) AppendNulls(n int) {
+	for i := 0; i < n; i++ {
+		b.AppendNull()
+	}
+}
+
+func (b *baseListBuilder) AppendEmptyValue() {
+	b.Append(true)
+}
+
+func (b *baseListBuilder) AppendEmptyValues(n int) {
+	for i := 0; i < n; i++ {
+		b.AppendEmptyValue()
+	}
+}
+
+func (b *ListBuilder) AppendValues(offsets []int32, valid []bool) {
+	b.Reserve(len(valid))
+	b.offsets.(*Int32Builder).AppendValues(offsets, nil)
+	b.builder.unsafeAppendBoolsToBitmap(valid, len(valid))
+}
+
+func (b *LargeListBuilder) AppendValues(offsets []int64, valid []bool) {
+	b.Reserve(len(valid))
+	b.offsets.(*Int64Builder).AppendValues(offsets, nil)
+	b.builder.unsafeAppendBoolsToBitmap(valid, len(valid))
+}
+
+func (b *baseListBuilder) unsafeAppendBoolToBitmap(isValid bool) {
+	if isValid {
+		bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
+	} else {
+		b.nulls++
+	}
+	b.length++
+}
+
+func (b *baseListBuilder) init(capacity int) {
+	b.builder.init(capacity)
+	b.offsets.init(capacity + 1)
+}
+
+// Reserve ensures there is enough space for appending n elements
+// by checking the capacity and calling Resize if necessary.
+func (b *baseListBuilder) Reserve(n int) {
+	b.builder.reserve(n, b.resizeHelper)
+	b.offsets.Reserve(n)
+}
+
+// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(),
+// additional memory will be allocated. If n is smaller, the allocated memory may be reduced.
+func (b *baseListBuilder) Resize(n int) {
+	b.resizeHelper(n)
+	b.offsets.Resize(n)
+}
+
+func (b *baseListBuilder) resizeHelper(n int) {
+	if n < minBuilderCapacity {
+		n = minBuilderCapacity
+	}
+
+	if b.capacity == 0 {
+		b.init(n)
+	} else {
+		b.builder.resize(n, b.builder.init)
+	}
+}
+
+func (b *baseListBuilder) ValueBuilder() Builder {
+	return b.values
+}
+
+// NewArray creates a List array from the memory buffers used by the builder and resets the ListBuilder
+// so it can be used to build a new array.
+func (b *ListBuilder) NewArray() arrow.Array {
+	return b.NewListArray()
+}
+
+// NewArray creates a LargeList array from the memory buffers used by the builder and resets the LargeListBuilder
+// so it can be used to build a new array.
+func (b *LargeListBuilder) NewArray() arrow.Array {
+	return b.NewLargeListArray()
+}
+
+// NewListArray creates a List array from the memory buffers used by the builder and resets the ListBuilder
+// so it can be used to build a new array.
+func (b *ListBuilder) NewListArray() (a *List) {
+	data := b.newData()
+	a = NewListData(data)
+	data.Release()
+	return
+}
+
+// NewLargeListArray creates a LargeList array from the memory buffers used by the builder and resets the LargeListBuilder
+// so it can be used to build a new array.
+func (b *LargeListBuilder) NewLargeListArray() (a *LargeList) { + data := b.newData() + a = NewLargeListData(data) + data.Release() + return +} + +func (b *baseListBuilder) newData() (data *Data) { + if b.offsets.Len() != b.length+1 { + b.appendNextOffset() + } + values := b.values.NewArray() + defer values.Release() + + var offsets *memory.Buffer + if b.offsets != nil { + arr := b.offsets.NewArray() + defer arr.Release() + offsets = arr.Data().Buffers()[1] + } + + data = NewData( + b.Type(), b.length, + []*memory.Buffer{ + b.nullBitmap, + offsets, + }, + []arrow.ArrayData{values.Data()}, + b.nulls, + 0, + ) + b.reset() + + return +} + +func (b *baseListBuilder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + + return b.UnmarshalOne(json.NewDecoder(strings.NewReader(s))) +} + +func (b *baseListBuilder) UnmarshalOne(dec *json.Decoder) error { + t, err := dec.Token() + if err != nil { + return err + } + + switch t { + case json.Delim('['): + b.Append(true) + if err := b.values.Unmarshal(dec); err != nil { + return err + } + // consume ']' + _, err := dec.Token() + return err + case nil: + b.AppendNull() + default: + return &json.UnmarshalTypeError{ + Value: fmt.Sprint(t), + Struct: b.dt.String(), + } + } + + return nil +} + +func (b *baseListBuilder) Unmarshal(dec *json.Decoder) error { + for dec.More() { + if err := b.UnmarshalOne(dec); err != nil { + return err + } + } + return nil +} + +func (b *baseListBuilder) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return err + } + + if delim, ok := t.(json.Delim); !ok || delim != '[' { + return fmt.Errorf("list builder must unpack from json array, found %s", delim) + } + + return b.Unmarshal(dec) +} + +// ListView represents an immutable sequence of array values defined by an +// offset into a child array and a length. 
+type ListView struct { + array + values arrow.Array + offsets []int32 + sizes []int32 +} + +var _ VarLenListLike = (*ListView)(nil) + +func NewListViewData(data arrow.ArrayData) *ListView { + a := &ListView{} + a.refCount = 1 + a.setData(data.(*Data)) + return a +} + +func (a *ListView) ListValues() arrow.Array { return a.values } + +func (a *ListView) ValueStr(i int) string { + if !a.IsValid(i) { + return NullValueStr + } + return string(a.GetOneForMarshal(i).(json.RawMessage)) +} + +func (a *ListView) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i := 0; i < a.Len(); i++ { + if i > 0 { + o.WriteString(" ") + } + if a.IsNull(i) { + o.WriteString(NullValueStr) + continue + } + sub := a.newListValue(i) + fmt.Fprintf(o, "%v", sub) + sub.Release() + } + o.WriteString("]") + return o.String() +} + +func (a *ListView) newListValue(i int) arrow.Array { + beg, end := a.ValueOffsets(i) + return NewSlice(a.values, beg, end) +} + +func (a *ListView) setData(data *Data) { + debug.Assert(len(data.buffers) >= 3, "list-view data should have 3 buffers") + a.array.setData(data) + offsets := data.buffers[1] + if offsets != nil { + a.offsets = arrow.Int32Traits.CastFromBytes(offsets.Bytes()) + } + sizes := data.buffers[2] + if sizes != nil { + a.sizes = arrow.Int32Traits.CastFromBytes(sizes.Bytes()) + } + a.values = MakeFromData(data.childData[0]) +} + +func (a *ListView) GetOneForMarshal(i int) interface{} { + if a.IsNull(i) { + return nil + } + + slice := a.newListValue(i) + defer slice.Release() + v, err := json.Marshal(slice) + if err != nil { + panic(err) + } + return json.RawMessage(v) +} + +func (a *ListView) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + enc := json.NewEncoder(&buf) + + buf.WriteByte('[') + for i := 0; i < a.Len(); i++ { + if i != 0 { + buf.WriteByte(',') + } + if err := enc.Encode(a.GetOneForMarshal(i)); err != nil { + return nil, err + } + } + buf.WriteByte(']') + return buf.Bytes(), nil +} + +func arrayEqualListView(left, right *ListView) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + o := func() bool { + l := left.newListValue(i) + defer l.Release() + r := right.newListValue(i) + defer r.Release() + return Equal(l, r) + }() + if !o { + return false + } + } + return true +} + +// Len returns the number of elements in the array. +func (a *ListView) Len() int { return a.array.Len() } + +func (a *ListView) Offsets() []int32 { return a.offsets } + +func (a *ListView) Sizes() []int32 { return a.sizes } + +func (a *ListView) Retain() { + a.array.Retain() + a.values.Retain() +} + +func (a *ListView) Release() { + a.array.Release() + a.values.Release() +} + +func (a *ListView) ValueOffsets(i int) (start, end int64) { + debug.Assert(i >= 0 && i < a.array.data.length, "index out of range") + j := i + a.array.data.offset + size := int64(a.sizes[j]) + // If size is 0, skip accessing offsets. + if size == 0 { + start, end = 0, 0 + return + } + start = int64(a.offsets[j]) + end = start + size + return +} + +// LargeListView represents an immutable sequence of array values defined by an +// offset into a child array and a length. +type LargeListView struct { + array + values arrow.Array + offsets []int64 + sizes []int64 +} + +var _ VarLenListLike = (*LargeListView)(nil) + +// NewLargeListViewData returns a new LargeListView array value, from data. 
+func NewLargeListViewData(data arrow.ArrayData) *LargeListView {
+	a := new(LargeListView)
+	a.refCount = 1
+	a.setData(data.(*Data))
+	return a
+}
+
+func (a *LargeListView) ListValues() arrow.Array { return a.values }
+
+func (a *LargeListView) ValueStr(i int) string {
+	if !a.IsValid(i) {
+		return NullValueStr
+	}
+	return string(a.GetOneForMarshal(i).(json.RawMessage))
+}
+
+func (a *LargeListView) String() string {
+	o := new(strings.Builder)
+	o.WriteString("[")
+	for i := 0; i < a.Len(); i++ {
+		if i > 0 {
+			o.WriteString(" ")
+		}
+		if a.IsNull(i) {
+			o.WriteString(NullValueStr)
+			continue
+		}
+		sub := a.newListValue(i)
+		fmt.Fprintf(o, "%v", sub)
+		sub.Release()
+	}
+	o.WriteString("]")
+	return o.String()
+}
+
+func (a *LargeListView) newListValue(i int) arrow.Array {
+	beg, end := a.ValueOffsets(i)
+	return NewSlice(a.values, beg, end)
+}
+
+func (a *LargeListView) setData(data *Data) {
+	debug.Assert(len(data.buffers) >= 3, "list-view data should have 3 buffers")
+	a.array.setData(data)
+	offsets := data.buffers[1]
+	if offsets != nil {
+		a.offsets = arrow.Int64Traits.CastFromBytes(offsets.Bytes())
+	}
+	sizes := data.buffers[2]
+	if sizes != nil {
+		a.sizes = arrow.Int64Traits.CastFromBytes(sizes.Bytes())
+	}
+	a.values = MakeFromData(data.childData[0])
+}
+
+func (a *LargeListView) GetOneForMarshal(i int) interface{} {
+	if a.IsNull(i) {
+		return nil
+	}
+
+	slice := a.newListValue(i)
+	defer slice.Release()
+	v, err := json.Marshal(slice)
+	if err != nil {
+		panic(err)
+	}
+	return json.RawMessage(v)
+}
+
+func (a *LargeListView) MarshalJSON() ([]byte, error) {
+	var buf bytes.Buffer
+	enc := json.NewEncoder(&buf)
+
+	buf.WriteByte('[')
+	for i := 0; i < a.Len(); i++ {
+		if i != 0 {
+			buf.WriteByte(',')
+		}
+		if err := enc.Encode(a.GetOneForMarshal(i)); err != nil {
+			return nil, err
+		}
+	}
+	buf.WriteByte(']')
+	return buf.Bytes(), nil
+}
+
+func arrayEqualLargeListView(left, right *LargeListView) bool {
+	for i := 0; i < left.Len(); i++ {
+		if left.IsNull(i) {
+			continue
+		}
+		o := func() bool {
+			l := left.newListValue(i)
+			defer l.Release()
+			r := right.newListValue(i)
+			defer r.Release()
+			return Equal(l, r)
+		}()
+		if !o {
+			return false
+		}
+	}
+	return true
+}
+
+// Len returns the number of elements in the array.
+func (a *LargeListView) Len() int { return a.array.Len() }
+
+func (a *LargeListView) Offsets() []int64 { return a.offsets }
+
+func (a *LargeListView) Sizes() []int64 { return a.sizes }
+
+func (a *LargeListView) ValueOffsets(i int) (start, end int64) {
+	debug.Assert(i >= 0 && i < a.array.data.length, "index out of range")
+	j := i + a.array.data.offset
+	size := a.sizes[j]
+	// If size is 0, skip accessing offsets.
+	if size == 0 {
+		return 0, 0
+	}
+	start = a.offsets[j]
+	end = start + size
+	return
+}
+
+func (a *LargeListView) Retain() {
+	a.array.Retain()
+	a.values.Retain()
+}
+
+func (a *LargeListView) Release() {
+	a.array.Release()
+	a.values.Release()
+}
+
+// Accessors for offsets and sizes to make ListView and LargeListView validation generic.
+type offsetsAndSizes interface { + offsetAt(slot int64) int64 + sizeAt(slot int64) int64 +} + +var _ offsetsAndSizes = (*ListView)(nil) +var _ offsetsAndSizes = (*LargeListView)(nil) + +func (a *ListView) offsetAt(slot int64) int64 { return int64(a.offsets[int64(a.data.offset)+slot]) } + +func (a *ListView) sizeAt(slot int64) int64 { return int64(a.sizes[int64(a.data.offset)+slot]) } + +func (a *LargeListView) offsetAt(slot int64) int64 { return a.offsets[int64(a.data.offset)+slot] } + +func (a *LargeListView) sizeAt(slot int64) int64 { return a.sizes[int64(a.data.offset)+slot] } + +func outOfBoundsListViewOffset(l offsetsAndSizes, slot int64, offsetLimit int64) error { + offset := l.offsetAt(slot) + return fmt.Errorf("%w: Offset invariant failure: offset for slot %d out of bounds. Expected %d to be at least 0 and less than %d", arrow.ErrInvalid, slot, offset, offsetLimit) +} + +func outOfBoundsListViewSize(l offsetsAndSizes, slot int64, offsetLimit int64) error { + size := l.sizeAt(slot) + if size < 0 { + return fmt.Errorf("%w: Offset invariant failure: size for slot %d out of bounds: %d < 0", arrow.ErrInvalid, slot, size) + } + offset := l.offsetAt(slot) + return fmt.Errorf("%w: Offset invariant failure: size for slot %d out of bounds: %d + %d > %d", arrow.ErrInvalid, slot, offset, size, offsetLimit) +} + +// Pre-condition: Basic validation has already been performed +func (a *array) fullyValidateOffsetsAndSizes(l offsetsAndSizes, offsetLimit int64) error { + for slot := int64(0); slot < int64(a.Len()); slot += 1 { + size := l.sizeAt(slot) + if size > 0 { + offset := l.offsetAt(slot) + if offset < 0 || offset > offsetLimit { + return outOfBoundsListViewOffset(l, slot, offsetLimit) + } + if size > offsetLimit-int64(offset) { + return outOfBoundsListViewSize(l, slot, offsetLimit) + } + } else if size < 0 { + return outOfBoundsListViewSize(l, slot, offsetLimit) + } + } + + return nil +} + +func (a *array) validateOffsetsAndMaybeSizes(l offsetsAndSizes, offsetByteWidth int, isListView bool, offsetLimit int64, fullValidation bool) error { + nonEmpty := a.Len() > 0 + if a.data.buffers[1] == nil { + // For length 0, an empty offsets buffer is accepted (ARROW-544). 
+ if nonEmpty { + return fmt.Errorf("non-empty array but offsets are null") + } + return nil + } + if isListView && a.data.buffers[2] == nil { + if nonEmpty { + return fmt.Errorf("non-empty array but sizes are null") + } + return nil + } + + var requiredOffsets int + if nonEmpty { + requiredOffsets = a.Len() + a.Offset() + if !isListView { + requiredOffsets += 1 + } + } else { + requiredOffsets = 0 + } + offsetsByteSize := a.data.buffers[1].Len() + if offsetsByteSize/offsetByteWidth < requiredOffsets { + return fmt.Errorf("offsets buffer size (bytes): %d isn't large enough for length: %d and offset: %d", + offsetsByteSize, a.Len(), a.Offset()) + } + if isListView { + requiredSizes := a.Len() + a.Offset() + sizesBytesSize := a.data.buffers[2].Len() + if sizesBytesSize/offsetByteWidth < requiredSizes { + return fmt.Errorf("sizes buffer size (bytes): %d isn't large enough for length: %d and offset: %d", + sizesBytesSize, a.Len(), a.Offset()) + } + } + + if fullValidation && requiredOffsets > 0 { + if isListView { + return a.fullyValidateOffsetsAndSizes(l, offsetLimit) + } + // TODO: implement validation of List and LargeList + // return fullyValidateOffsets(offset_limit) + return nil + } + return nil +} + +func (a *ListView) validate(fullValidation bool) error { + values := a.array.data.childData[0] + offsetLimit := values.Len() + return a.array.validateOffsetsAndMaybeSizes(a, 4, true, int64(offsetLimit), fullValidation) +} + +func (a *ListView) Validate() error { + return a.validate(false) +} + +func (a *ListView) ValidateFull() error { + return a.validate(true) +} + +func (a *LargeListView) validate(fullValidation bool) error { + values := a.array.data.childData[0] + offsetLimit := values.Len() + return a.array.validateOffsetsAndMaybeSizes(a, 8, true, int64(offsetLimit), fullValidation) +} + +func (a *LargeListView) Validate() error { + return a.validate(false) +} + +func (a *LargeListView) ValidateFull() error { + return a.validate(true) +} + +type baseListViewBuilder struct { + builder + + values Builder // value builder for the list-view's elements. + offsets Builder + sizes Builder + + // actual list-view type + dt arrow.DataType + appendOffsetVal func(int) + appendSizeVal func(int) +} + +type ListViewBuilder struct { + baseListViewBuilder +} + +type LargeListViewBuilder struct { + baseListViewBuilder +} + +// NewListViewBuilder returns a builder, using the provided memory allocator. +// The created list-view builder will create a list whose elements will be +// of type etype. +func NewListViewBuilder(mem memory.Allocator, etype arrow.DataType) *ListViewBuilder { + offsetBldr := NewInt32Builder(mem) + sizeBldr := NewInt32Builder(mem) + return &ListViewBuilder{ + baseListViewBuilder{ + builder: builder{refCount: 1, mem: mem}, + values: NewBuilder(mem, etype), + offsets: offsetBldr, + sizes: sizeBldr, + dt: arrow.ListViewOf(etype), + appendOffsetVal: func(o int) { offsetBldr.Append(int32(o)) }, + appendSizeVal: func(s int) { sizeBldr.Append(int32(s)) }, + }, + } +} + +// NewListViewBuilderWithField takes a field to use for the child rather than just +// a datatype to allow for more customization. 
+func NewListViewBuilderWithField(mem memory.Allocator, field arrow.Field) *ListViewBuilder { + offsetBldr := NewInt32Builder(mem) + sizeBldr := NewInt32Builder(mem) + return &ListViewBuilder{ + baseListViewBuilder{ + builder: builder{refCount: 1, mem: mem}, + values: NewBuilder(mem, field.Type), + offsets: offsetBldr, + sizes: sizeBldr, + dt: arrow.ListViewOfField(field), + appendOffsetVal: func(o int) { offsetBldr.Append(int32(o)) }, + appendSizeVal: func(s int) { sizeBldr.Append(int32(s)) }, + }, + } +} + +func (b *baseListViewBuilder) Type() arrow.DataType { + switch dt := b.dt.(type) { + case *arrow.ListViewType: + f := dt.ElemField() + f.Type = b.values.Type() + return arrow.ListViewOfField(f) + case *arrow.LargeListViewType: + f := dt.ElemField() + f.Type = b.values.Type() + return arrow.LargeListViewOfField(f) + } + return nil +} + +// NewLargeListViewBuilder returns a builder, using the provided memory allocator. +// The created list-view builder will create a list whose elements will be of type etype. +func NewLargeListViewBuilder(mem memory.Allocator, etype arrow.DataType) *LargeListViewBuilder { + offsetBldr := NewInt64Builder(mem) + sizeBldr := NewInt64Builder(mem) + return &LargeListViewBuilder{ + baseListViewBuilder{ + builder: builder{refCount: 1, mem: mem}, + values: NewBuilder(mem, etype), + offsets: offsetBldr, + sizes: sizeBldr, + dt: arrow.LargeListViewOf(etype), + appendOffsetVal: func(o int) { offsetBldr.Append(int64(o)) }, + appendSizeVal: func(s int) { sizeBldr.Append(int64(s)) }, + }, + } +} + +// NewLargeListViewBuilderWithField takes a field rather than just an element type +// to allow for more customization of the final type of the LargeListView Array +func NewLargeListViewBuilderWithField(mem memory.Allocator, field arrow.Field) *LargeListViewBuilder { + offsetBldr := NewInt64Builder(mem) + sizeBldr := NewInt64Builder(mem) + return &LargeListViewBuilder{ + baseListViewBuilder{ + builder: builder{refCount: 1, mem: mem}, + values: NewBuilder(mem, field.Type), + offsets: offsetBldr, + sizes: sizeBldr, + dt: arrow.LargeListViewOfField(field), + appendOffsetVal: func(o int) { offsetBldr.Append(int64(o)) }, + appendSizeVal: func(o int) { sizeBldr.Append(int64(o)) }, + }, + } +} + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. 
+func (b *baseListViewBuilder) Release() {
+	debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases")
+
+	if atomic.AddInt64(&b.refCount, -1) == 0 {
+		if b.nullBitmap != nil {
+			b.nullBitmap.Release()
+			b.nullBitmap = nil
+		}
+		b.values.Release()
+		b.offsets.Release()
+		b.sizes.Release()
+	}
+}
+
+func (b *baseListViewBuilder) AppendDimensions(offset int, listSize int) {
+	b.Reserve(1)
+	b.unsafeAppendBoolToBitmap(true)
+	b.appendOffsetVal(offset)
+	b.appendSizeVal(listSize)
+}
+
+func (b *baseListViewBuilder) Append(v bool) {
+	debug.Assert(false, "baseListViewBuilder.Append should never be called -- use AppendWithSize instead")
+}
+
+func (b *baseListViewBuilder) AppendWithSize(v bool, listSize int) {
+	debug.Assert(v || listSize == 0, "invalid list-view should have size 0")
+	b.Reserve(1)
+	b.unsafeAppendBoolToBitmap(v)
+	b.appendOffsetVal(b.values.Len())
+	b.appendSizeVal(listSize)
+}
+
+func (b *baseListViewBuilder) AppendNull() {
+	b.AppendWithSize(false, 0)
+}
+
+func (b *baseListViewBuilder) AppendNulls(n int) {
+	for i := 0; i < n; i++ {
+		b.AppendNull()
+	}
+}
+
+func (b *baseListViewBuilder) AppendEmptyValue() {
+	b.AppendWithSize(true, 0)
+}
+
+func (b *baseListViewBuilder) AppendEmptyValues(n int) {
+	for i := 0; i < n; i++ {
+		b.AppendEmptyValue()
+	}
+}
+
+func (b *ListViewBuilder) AppendValuesWithSizes(offsets []int32, sizes []int32, valid []bool) {
+	b.Reserve(len(valid))
+	b.offsets.(*Int32Builder).AppendValues(offsets, nil)
+	b.sizes.(*Int32Builder).AppendValues(sizes, nil)
+	b.builder.unsafeAppendBoolsToBitmap(valid, len(valid))
+}
+
+func (b *LargeListViewBuilder) AppendValuesWithSizes(offsets []int64, sizes []int64, valid []bool) {
+	b.Reserve(len(valid))
+	b.offsets.(*Int64Builder).AppendValues(offsets, nil)
+	b.sizes.(*Int64Builder).AppendValues(sizes, nil)
+	b.builder.unsafeAppendBoolsToBitmap(valid, len(valid))
+}
+
+func (b *baseListViewBuilder) unsafeAppendBoolToBitmap(isValid bool) {
+	if isValid {
+		bitutil.SetBit(b.nullBitmap.Bytes(), b.length)
+	} else {
+		b.nulls++
+	}
+	b.length++
+}
+
+func (b *baseListViewBuilder) init(capacity int) {
+	b.builder.init(capacity)
+	b.offsets.init(capacity)
+	b.sizes.init(capacity)
+}
+
+// Reserve ensures there is enough space for appending n elements
+// by checking the capacity and calling Resize if necessary.
+func (b *baseListViewBuilder) Reserve(n int) {
+	b.builder.reserve(n, b.resizeHelper)
+	b.offsets.Reserve(n)
+	b.sizes.Reserve(n)
+}
+
+// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(),
+// additional memory will be allocated. If n is smaller, the allocated memory may be reduced.
+func (b *baseListViewBuilder) Resize(n int) {
+	b.resizeHelper(n)
+	b.offsets.Resize(n)
+	b.sizes.Resize(n)
+}
+
+func (b *baseListViewBuilder) resizeHelper(n int) {
+	if n < minBuilderCapacity {
+		n = minBuilderCapacity
+	}
+
+	if b.capacity == 0 {
+		b.init(n)
+	} else {
+		b.builder.resize(n, b.builder.init)
+	}
+}
+
+func (b *baseListViewBuilder) ValueBuilder() Builder {
+	return b.values
+}
+
+// NewArray creates a ListView array from the memory buffers used by the builder and
+// resets the ListViewBuilder so it can be used to build a new array.
+func (b *ListViewBuilder) NewArray() arrow.Array {
+	return b.NewListViewArray()
+}
+
+// NewArray creates a LargeListView array from the memory buffers used by the builder
+// and resets the LargeListViewBuilder so it can be used to build a new array.
+func (b *LargeListViewBuilder) NewArray() arrow.Array { + return b.NewLargeListViewArray() +} + +// NewListViewArray creates a ListView array from the memory buffers used by the builder +// and resets the ListViewBuilder so it can be used to build a new array. +func (b *ListViewBuilder) NewListViewArray() (a *ListView) { + data := b.newData() + a = NewListViewData(data) + data.Release() + return +} + +// NewLargeListViewArray creates a ListView array from the memory buffers used by the +// builder and resets the LargeListViewBuilder so it can be used to build a new array. +func (b *LargeListViewBuilder) NewLargeListViewArray() (a *LargeListView) { + data := b.newData() + a = NewLargeListViewData(data) + data.Release() + return +} + +func (b *baseListViewBuilder) newData() (data *Data) { + values := b.values.NewArray() + defer values.Release() + + var offsets *memory.Buffer + if b.offsets != nil { + arr := b.offsets.NewArray() + defer arr.Release() + offsets = arr.Data().Buffers()[1] + } + + var sizes *memory.Buffer + if b.sizes != nil { + arr := b.sizes.NewArray() + defer arr.Release() + sizes = arr.Data().Buffers()[1] + } + + data = NewData( + b.Type(), b.length, + []*memory.Buffer{ + b.nullBitmap, + offsets, + sizes, + }, + []arrow.ArrayData{values.Data()}, + b.nulls, + 0, + ) + b.reset() + + return +} + +func (b *baseListViewBuilder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + + return b.UnmarshalOne(json.NewDecoder(strings.NewReader(s))) +} + +func (b *baseListViewBuilder) UnmarshalOne(dec *json.Decoder) error { + t, err := dec.Token() + if err != nil { + return err + } + + switch t { + case json.Delim('['): + offset := b.values.Len() + // 0 is a placeholder size as we don't know the actual size yet + b.AppendWithSize(true, 0) + if err := b.values.Unmarshal(dec); err != nil { + return err + } + // consume ']' + _, err := dec.Token() + // replace the last size with the actual size + switch b.sizes.(type) { + case *Int32Builder: + b.sizes.(*Int32Builder).rawData[b.sizes.Len()-1] = int32(b.values.Len() - offset) + case *Int64Builder: + b.sizes.(*Int64Builder).rawData[b.sizes.Len()-1] = int64(b.values.Len() - offset) + } + return err + case nil: + b.AppendNull() + default: + return &json.UnmarshalTypeError{ + Value: fmt.Sprint(t), + Struct: b.dt.String(), + } + } + + return nil +} + +func (b *baseListViewBuilder) Unmarshal(dec *json.Decoder) error { + for dec.More() { + if err := b.UnmarshalOne(dec); err != nil { + return err + } + } + return nil +} + +func (b *baseListViewBuilder) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return err + } + + if delim, ok := t.(json.Delim); !ok || delim != '[' { + return fmt.Errorf("list-view builder must unpack from json array, found %s", delim) + } + + return b.Unmarshal(dec) +} + +// Pre-conditions: +// +// input.DataType() is ListViewType +// input.Len() > 0 && input.NullN() != input.Len() +func minListViewOffset32(input arrow.ArrayData) int32 { + var bitmap []byte + if input.Buffers()[0] != nil { + bitmap = input.Buffers()[0].Bytes() + } + offsets := arrow.Int32Traits.CastFromBytes(input.Buffers()[1].Bytes())[input.Offset():] + sizes := arrow.Int32Traits.CastFromBytes(input.Buffers()[2].Bytes())[input.Offset():] + + isNull := func(i int) bool { + return bitmap != nil && bitutil.BitIsNotSet(bitmap, input.Offset()+i) + } + + // It's very likely that the first non-null non-empty list-view starts at + // offset 0 of 
the child array. + i := 0 + for i < input.Len() && (isNull(i) || sizes[i] == 0) { + i += 1 + } + if i >= input.Len() { + return 0 + } + minOffset := offsets[i] + if minOffset == 0 { + // early exit: offset 0 found already + return 0 + } + + // Slow path: scan the buffers entirely. + i += 1 + for ; i < input.Len(); i += 1 { + if isNull(i) { + continue + } + offset := offsets[i] + if offset < minOffset && sizes[i] > 0 { + minOffset = offset + } + } + return minOffset +} + +// Find the maximum offset+size in a LIST_VIEW array. +// +// Pre-conditions: +// +// input.DataType() is ListViewType +// input.Len() > 0 && input.NullN() != input.Len() +func maxListViewOffset32(input arrow.ArrayData) int { + inputOffset := input.Offset() + var bitmap []byte + if input.Buffers()[0] != nil { + bitmap = input.Buffers()[0].Bytes() + } + offsets := arrow.Int32Traits.CastFromBytes(input.Buffers()[1].Bytes())[inputOffset:] + sizes := arrow.Int32Traits.CastFromBytes(input.Buffers()[2].Bytes())[inputOffset:] + + isNull := func(i int) bool { + return bitmap != nil && bitutil.BitIsNotSet(bitmap, inputOffset+i) + } + + i := input.Len() - 1 // safe because input.Len() > 0 + for i != 0 && (isNull(i) || sizes[i] == 0) { + i -= 1 + } + offset := offsets[i] + size := sizes[i] + if i == 0 { + if isNull(i) || sizes[i] == 0 { + return 0 + } else { + return int(offset + size) + } + } + + values := input.Children()[0] + maxEnd := int(offsets[i] + sizes[i]) + if maxEnd == values.Len() { + // Early-exit: maximum possible view-end found already. + return maxEnd + } + + // Slow path: scan the buffers entirely. + for ; i >= 0; i -= 1 { + offset := offsets[i] + size := sizes[i] + if size > 0 && !isNull(i) { + if int(offset+size) > maxEnd { + maxEnd = int(offset + size) + if maxEnd == values.Len() { + return maxEnd + } + } + } + } + return maxEnd +} + +// Pre-conditions: +// +// input.DataType() is LargeListViewType +// input.Len() > 0 && input.NullN() != input.Len() +func minLargeListViewOffset64(input arrow.ArrayData) int64 { + var bitmap []byte + if input.Buffers()[0] != nil { + bitmap = input.Buffers()[0].Bytes() + } + offsets := arrow.Int64Traits.CastFromBytes(input.Buffers()[1].Bytes())[input.Offset():] + sizes := arrow.Int64Traits.CastFromBytes(input.Buffers()[2].Bytes())[input.Offset():] + + isNull := func(i int) bool { + return bitmap != nil && bitutil.BitIsNotSet(bitmap, input.Offset()+i) + } + + // It's very likely that the first non-null non-empty list-view starts at + // offset 0 of the child array. + i := 0 + for i < input.Len() && (isNull(i) || sizes[i] == 0) { + i += 1 + } + if i >= input.Len() { + return 0 + } + minOffset := offsets[i] + if minOffset == 0 { + // early exit: offset 0 found already + return 0 + } + + // Slow path: scan the buffers entirely. + i += 1 + for ; i < input.Len(); i += 1 { + if isNull(i) { + continue + } + offset := offsets[i] + if offset < minOffset && sizes[i] > 0 { + minOffset = offset + } + } + return minOffset +} + +// Find the maximum offset+size in a LARGE_LIST_VIEW array. 
+// +// Pre-conditions: +// +// input.DataType() is LargeListViewType +// input.Len() > 0 && input.NullN() != input.Len() +func maxLargeListViewOffset64(input arrow.ArrayData) int64 { + inputOffset := input.Offset() + var bitmap []byte + if input.Buffers()[0] != nil { + bitmap = input.Buffers()[0].Bytes() + } + offsets := arrow.Int64Traits.CastFromBytes(input.Buffers()[1].Bytes())[inputOffset:] + sizes := arrow.Int64Traits.CastFromBytes(input.Buffers()[2].Bytes())[inputOffset:] + + isNull := func(i int) bool { + return bitmap != nil && bitutil.BitIsNotSet(bitmap, inputOffset+i) + } + + // It's very likely that the first non-null non-empty list-view starts at + // offset zero, so we check that first and potentially early-return a 0. + i := input.Len() - 1 // safe because input.Len() > 0 + for i != 0 && (isNull(i) || sizes[i] == 0) { + i -= 1 + } + offset := offsets[i] + size := sizes[i] + if i == 0 { + if isNull(i) || sizes[i] == 0 { + return 0 + } else { + return offset + size + } + } + + if offset > math.MaxInt64-size { + // Early-exit: 64-bit overflow detected. This is not possible on a + // valid list-view, but we return the maximum possible value to + // avoid undefined behavior. + return math.MaxInt64 + } + values := input.Children()[0] + maxEnd := offsets[i] + sizes[i] + if maxEnd == int64(values.Len()) { + // Early-exit: maximum possible view-end found already. + return maxEnd + } + + // Slow path: scan the buffers entirely. + for ; i >= 0; i -= 1 { + offset := offsets[i] + size := sizes[i] + if size > 0 && !isNull(i) { + if offset+size > maxEnd { + if offset > math.MaxInt64-size { + // 64-bit overflow detected. This is not possible on a valid list-view, + // but we saturate maxEnd to the maximum possible value to avoid + // undefined behavior. + return math.MaxInt64 + } + maxEnd = offset + size + if maxEnd == int64(values.Len()) { + return maxEnd + } + } + } + } + return maxEnd +} + +func rangeOfValuesUsed(input arrow.ArrayData) (int, int) { + if input.Len() == 0 || input.NullN() == input.Len() { + return 0, 0 + } + var minOffset, maxEnd int + switch input.DataType().(type) { + case *arrow.ListViewType: + minOffset = int(minListViewOffset32(input)) + maxEnd = maxListViewOffset32(input) + case *arrow.LargeListViewType: + minOffset = int(minLargeListViewOffset64(input)) + maxEnd = int(maxLargeListViewOffset64(input)) + case *arrow.ListType: + offsets := arrow.Int32Traits.CastFromBytes(input.Buffers()[1].Bytes())[input.Offset():] + minOffset = int(offsets[0]) + maxEnd = int(offsets[len(offsets)-1]) + case *arrow.LargeListType: + offsets := arrow.Int64Traits.CastFromBytes(input.Buffers()[1].Bytes())[input.Offset():] + minOffset = int(offsets[0]) + maxEnd = int(offsets[len(offsets)-1]) + case *arrow.MapType: + offsets := arrow.Int32Traits.CastFromBytes(input.Buffers()[1].Bytes())[input.Offset():] + minOffset = int(offsets[0]) + maxEnd = int(offsets[len(offsets)-1]) + } + return minOffset, maxEnd - minOffset +} + +// Returns the smallest contiguous range of values of the child array that are +// referenced by all the list values in the input array. 
+func RangeOfValuesUsed(input VarLenListLike) (int, int) {
+	return rangeOfValuesUsed(input.Data())
+}
+
+var (
+	_ arrow.Array = (*List)(nil)
+	_ arrow.Array = (*LargeList)(nil)
+	_ arrow.Array = (*ListView)(nil)
+	_ arrow.Array = (*LargeListView)(nil)
+
+	_ Builder = (*ListBuilder)(nil)
+	_ Builder = (*LargeListBuilder)(nil)
+	_ Builder = (*ListViewBuilder)(nil)
+	_ Builder = (*LargeListViewBuilder)(nil)
+
+	_ VarLenListLike = (*List)(nil)
+	_ VarLenListLike = (*LargeList)(nil)
+	_ VarLenListLike = (*Map)(nil)
+	_ VarLenListLike = (*ListView)(nil)
+	_ VarLenListLike = (*LargeListView)(nil)
+	_ ListLike       = (*FixedSizeList)(nil)
+
+	_ VarLenListLikeBuilder = (*ListBuilder)(nil)
+	_ VarLenListLikeBuilder = (*LargeListBuilder)(nil)
+	_ VarLenListLikeBuilder = (*ListViewBuilder)(nil)
+	_ VarLenListLikeBuilder = (*LargeListViewBuilder)(nil)
+	_ VarLenListLikeBuilder = (*MapBuilder)(nil)
+	_ ListLikeBuilder       = (*FixedSizeListBuilder)(nil)
+)
diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/map.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/map.go
similarity index 91%
rename from vendor/github.com/apache/arrow/go/v12/arrow/array/map.go
rename to vendor/github.com/apache/arrow/go/v14/arrow/array/map.go
index b2b71742..9945a90c 100644
--- a/vendor/github.com/apache/arrow/go/v12/arrow/array/map.go
+++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/map.go
@@ -20,9 +20,9 @@ import (
 	"bytes"
 	"fmt"
 
-	"github.com/apache/arrow/go/v12/arrow"
-	"github.com/apache/arrow/go/v12/arrow/memory"
-	"github.com/goccy/go-json"
+	"github.com/apache/arrow/go/v14/arrow"
+	"github.com/apache/arrow/go/v14/arrow/memory"
+	"github.com/apache/arrow/go/v14/internal/json"
 )
 
 // Map represents an immutable sequence of Key/Value structs. It is a
@@ -32,6 +32,8 @@ type Map struct {
 	keys, items arrow.Array
 }
 
+var _ ListLike = (*Map)(nil)
+
 // NewMapData returns a new Map array value, from data
 func NewMapData(data arrow.ArrayData) *Map {
 	a := &Map{List: &List{}}
@@ -126,7 +128,7 @@ type MapBuilder struct {
 // building using keys in sorted order for each value. The KeysSorted value will just be
 // used when creating the DataType for the map.
// -// Example +// # Example // // Simple example provided of converting a []map[string]int32 to an array.Map // by using a MapBuilder: @@ -148,7 +150,7 @@ type MapBuilder struct { func NewMapBuilder(mem memory.Allocator, keytype, itemtype arrow.DataType, keysSorted bool) *MapBuilder { etype := arrow.MapOf(keytype, itemtype) etype.KeysSorted = keysSorted - listBldr := NewListBuilder(mem, etype.ValueType()) + listBldr := NewListBuilder(mem, etype.Elem()) keyBldr := listBldr.ValueBuilder().(*StructBuilder).FieldBuilder(0) keyBldr.Retain() itemBldr := listBldr.ValueBuilder().(*StructBuilder).FieldBuilder(1) @@ -165,7 +167,7 @@ func NewMapBuilder(mem memory.Allocator, keytype, itemtype arrow.DataType, keysS } func NewMapBuilderWithType(mem memory.Allocator, dt *arrow.MapType) *MapBuilder { - listBldr := NewListBuilder(mem, dt.ValueType()) + listBldr := NewListBuilder(mem, dt.Elem()) keyBldr := listBldr.ValueBuilder().(*StructBuilder).FieldBuilder(0) keyBldr.Retain() itemBldr := listBldr.ValueBuilder().(*StructBuilder).FieldBuilder(1) @@ -176,7 +178,7 @@ func NewMapBuilderWithType(mem memory.Allocator, dt *arrow.MapType) *MapBuilder itemBuilder: itemBldr, etype: dt, keytype: dt.KeyType(), - itemtype: dt.ValueType(), + itemtype: dt.ItemType(), keysSorted: dt.KeysSorted, } } @@ -208,6 +210,11 @@ func (b *MapBuilder) Cap() int { return b.listBuilder.Cap() } // NullN returns the number of null values in the array builder. func (b *MapBuilder) NullN() int { return b.listBuilder.NullN() } +// IsNull returns if a previously appended value at a given index is null or not. +func (b *MapBuilder) IsNull(i int) bool { + return b.listBuilder.IsNull(i) +} + // Append adds a new Map element to the array, calling Append(false) is // equivalent to calling AppendNull. func (b *MapBuilder) Append(v bool) { @@ -215,15 +222,36 @@ func (b *MapBuilder) Append(v bool) { b.listBuilder.Append(v) } +func (b *MapBuilder) AppendWithSize(v bool, _ int) { + b.Append(v) +} + // AppendNull adds a null map entry to the array. func (b *MapBuilder) AppendNull() { b.Append(false) } +// AppendNulls adds null map entry to the array. 
+func (b *MapBuilder) AppendNulls(n int) { + for i := 0; i < n; i++ { + b.AppendNull() + } +} + +func (b *MapBuilder) SetNull(i int) { + b.listBuilder.SetNull(i) +} + func (b *MapBuilder) AppendEmptyValue() { b.Append(true) } +func (b *MapBuilder) AppendEmptyValues(n int) { + for i := 0; i < n; i++ { + b.AppendEmptyValue() + } +} + // Reserve enough space for n maps func (b *MapBuilder) Reserve(n int) { b.listBuilder.Reserve(n) } @@ -300,6 +328,10 @@ func (b *MapBuilder) ValueBuilder() Builder { return b.listBuilder.ValueBuilder() } +func (b *MapBuilder) AppendValueFromString(s string) error { + return b.listBuilder.AppendValueFromString(s) +} + func (b *MapBuilder) UnmarshalOne(dec *json.Decoder) error { return b.listBuilder.UnmarshalOne(dec) } diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/null.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/null.go similarity index 86% rename from vendor/github.com/apache/arrow/go/v12/arrow/array/null.go rename to vendor/github.com/apache/arrow/go/v14/arrow/array/null.go index f48d8387..150a1030 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/array/null.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/null.go @@ -23,10 +23,10 @@ import ( "strings" "sync/atomic" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/internal/debug" - "github.com/apache/arrow/go/v12/arrow/memory" - "github.com/goccy/go-json" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow/go/v14/internal/json" ) // Null represents an immutable, degenerate array with no physical storage. @@ -58,6 +58,10 @@ func NewNullData(data arrow.ArrayData) *Null { return a } +func (a *Null) ValueStr(int) string { return NullValueStr } + +func (a *Null) Value(int) interface{} { return nil } + func (a *Null) String() string { o := new(strings.Builder) o.WriteString("[") @@ -65,7 +69,7 @@ func (a *Null) String() string { if i > 0 { o.WriteString(" ") } - o.WriteString("(null)") + o.WriteString(NullValueStr) } o.WriteString("]") return o.String() @@ -114,8 +118,24 @@ func (b *NullBuilder) AppendNull() { b.builder.nulls++ } +func (b *NullBuilder) AppendNulls(n int) { + for i := 0; i < n; i++ { + b.AppendNull() + } +} + +func (b *NullBuilder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + return fmt.Errorf("cannot convert %q to null", s) +} + func (b *NullBuilder) AppendEmptyValue() { b.AppendNull() } +func (b *NullBuilder) AppendEmptyValues(n int) { b.AppendNulls(n) } + func (*NullBuilder) Reserve(size int) {} func (*NullBuilder) Resize(size int) {} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/numeric.gen.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/numeric.gen.go similarity index 90% rename from vendor/github.com/apache/arrow/go/v12/arrow/array/numeric.gen.go rename to vendor/github.com/apache/arrow/go/v14/arrow/array/numeric.gen.go index 08dc7750..a3e11015 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/array/numeric.gen.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/numeric.gen.go @@ -20,10 +20,11 @@ package array import ( "fmt" + "strconv" "strings" - "github.com/apache/arrow/go/v12/arrow" - "github.com/goccy/go-json" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/internal/json" ) // A type which represents an immutable sequence of int64 values. 
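The common thread in the array hunks above and below is the new v14 string-conversion surface: the old "(null)" literals give way to the exported NullValueStr sentinel, and each array type gains a ValueStr accessor paired with a builder-side AppendValueFromString, so every type has a string round trip. A minimal sketch of that round trip, not part of the patch, assuming only the vendored v14 import paths and the Int64 methods shown in these hunks:

package main

import (
	"fmt"

	"github.com/apache/arrow/go/v14/arrow/array"
	"github.com/apache/arrow/go/v14/arrow/memory"
)

func main() {
	b := array.NewInt64Builder(memory.DefaultAllocator)
	defer b.Release()

	// AppendValueFromString is the inverse of ValueStr; passing the
	// NullValueStr sentinel appends a null slot instead of a value.
	_ = b.AppendValueFromString("42")
	_ = b.AppendValueFromString(array.NullValueStr)

	arr := b.NewInt64Array()
	defer arr.Release()

	fmt.Println(arr.ValueStr(0)) // "42"
	fmt.Println(arr.ValueStr(1)) // the NullValueStr sentinel
}
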
@@ -61,7 +62,7 @@ func (a *Int64) String() string { } switch { case a.IsNull(i): - o.WriteString("(null)") + o.WriteString(NullValueStr) default: fmt.Fprintf(o, "%v", v) } @@ -81,6 +82,13 @@ func (a *Int64) setData(data *Data) { } } +func (a *Int64) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return strconv.FormatInt(int64(a.Value(i)), 10) +} + func (a *Int64) GetOneForMarshal(i int) interface{} { if a.IsNull(i) { return nil @@ -93,7 +101,7 @@ func (a *Int64) MarshalJSON() ([]byte, error) { vals := make([]interface{}, a.Len()) for i := 0; i < a.Len(); i++ { if a.IsValid(i) { - vals[i] = float64(a.values[i]) // prevent uint8 from being seen as binary data + vals[i] = a.values[i] } else { vals[i] = nil } @@ -149,7 +157,7 @@ func (a *Uint64) String() string { } switch { case a.IsNull(i): - o.WriteString("(null)") + o.WriteString(NullValueStr) default: fmt.Fprintf(o, "%v", v) } @@ -169,6 +177,13 @@ func (a *Uint64) setData(data *Data) { } } +func (a *Uint64) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return strconv.FormatUint(uint64(a.Value(i)), 10) +} + func (a *Uint64) GetOneForMarshal(i int) interface{} { if a.IsNull(i) { return nil @@ -181,7 +196,7 @@ func (a *Uint64) MarshalJSON() ([]byte, error) { vals := make([]interface{}, a.Len()) for i := 0; i < a.Len(); i++ { if a.IsValid(i) { - vals[i] = float64(a.values[i]) // prevent uint8 from being seen as binary data + vals[i] = a.values[i] } else { vals[i] = nil } @@ -237,7 +252,7 @@ func (a *Float64) String() string { } switch { case a.IsNull(i): - o.WriteString("(null)") + o.WriteString(NullValueStr) default: fmt.Fprintf(o, "%v", v) } @@ -257,6 +272,13 @@ func (a *Float64) setData(data *Data) { } } +func (a *Float64) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return strconv.FormatFloat(float64(a.Value(i)), 'g', -1, 64) +} + func (a *Float64) GetOneForMarshal(i int) interface{} { if a.IsNull(i) { return nil @@ -269,7 +291,7 @@ func (a *Float64) MarshalJSON() ([]byte, error) { vals := make([]interface{}, a.Len()) for i := 0; i < a.Len(); i++ { if a.IsValid(i) { - vals[i] = float64(a.values[i]) // prevent uint8 from being seen as binary data + vals[i] = a.values[i] } else { vals[i] = nil } @@ -325,7 +347,7 @@ func (a *Int32) String() string { } switch { case a.IsNull(i): - o.WriteString("(null)") + o.WriteString(NullValueStr) default: fmt.Fprintf(o, "%v", v) } @@ -345,6 +367,13 @@ func (a *Int32) setData(data *Data) { } } +func (a *Int32) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return strconv.FormatInt(int64(a.Value(i)), 10) +} + func (a *Int32) GetOneForMarshal(i int) interface{} { if a.IsNull(i) { return nil @@ -357,7 +386,7 @@ func (a *Int32) MarshalJSON() ([]byte, error) { vals := make([]interface{}, a.Len()) for i := 0; i < a.Len(); i++ { if a.IsValid(i) { - vals[i] = float64(a.values[i]) // prevent uint8 from being seen as binary data + vals[i] = a.values[i] } else { vals[i] = nil } @@ -413,7 +442,7 @@ func (a *Uint32) String() string { } switch { case a.IsNull(i): - o.WriteString("(null)") + o.WriteString(NullValueStr) default: fmt.Fprintf(o, "%v", v) } @@ -433,6 +462,13 @@ func (a *Uint32) setData(data *Data) { } } +func (a *Uint32) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return strconv.FormatUint(uint64(a.Value(i)), 10) +} + func (a *Uint32) GetOneForMarshal(i int) interface{} { if a.IsNull(i) { return nil @@ -445,7 +481,7 @@ func (a *Uint32) MarshalJSON() ([]byte, error) { vals := 
make([]interface{}, a.Len()) for i := 0; i < a.Len(); i++ { if a.IsValid(i) { - vals[i] = float64(a.values[i]) // prevent uint8 from being seen as binary data + vals[i] = a.values[i] } else { vals[i] = nil } @@ -501,7 +537,7 @@ func (a *Float32) String() string { } switch { case a.IsNull(i): - o.WriteString("(null)") + o.WriteString(NullValueStr) default: fmt.Fprintf(o, "%v", v) } @@ -521,6 +557,13 @@ func (a *Float32) setData(data *Data) { } } +func (a *Float32) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return strconv.FormatFloat(float64(a.Value(i)), 'g', -1, 32) +} + func (a *Float32) GetOneForMarshal(i int) interface{} { if a.IsNull(i) { return nil @@ -533,7 +576,7 @@ func (a *Float32) MarshalJSON() ([]byte, error) { vals := make([]interface{}, a.Len()) for i := 0; i < a.Len(); i++ { if a.IsValid(i) { - vals[i] = float64(a.values[i]) // prevent uint8 from being seen as binary data + vals[i] = a.values[i] } else { vals[i] = nil } @@ -589,7 +632,7 @@ func (a *Int16) String() string { } switch { case a.IsNull(i): - o.WriteString("(null)") + o.WriteString(NullValueStr) default: fmt.Fprintf(o, "%v", v) } @@ -609,6 +652,13 @@ func (a *Int16) setData(data *Data) { } } +func (a *Int16) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return strconv.FormatInt(int64(a.Value(i)), 10) +} + func (a *Int16) GetOneForMarshal(i int) interface{} { if a.IsNull(i) { return nil @@ -621,7 +671,7 @@ func (a *Int16) MarshalJSON() ([]byte, error) { vals := make([]interface{}, a.Len()) for i := 0; i < a.Len(); i++ { if a.IsValid(i) { - vals[i] = float64(a.values[i]) // prevent uint8 from being seen as binary data + vals[i] = a.values[i] } else { vals[i] = nil } @@ -677,7 +727,7 @@ func (a *Uint16) String() string { } switch { case a.IsNull(i): - o.WriteString("(null)") + o.WriteString(NullValueStr) default: fmt.Fprintf(o, "%v", v) } @@ -697,6 +747,13 @@ func (a *Uint16) setData(data *Data) { } } +func (a *Uint16) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return strconv.FormatUint(uint64(a.Value(i)), 10) +} + func (a *Uint16) GetOneForMarshal(i int) interface{} { if a.IsNull(i) { return nil @@ -709,7 +766,7 @@ func (a *Uint16) MarshalJSON() ([]byte, error) { vals := make([]interface{}, a.Len()) for i := 0; i < a.Len(); i++ { if a.IsValid(i) { - vals[i] = float64(a.values[i]) // prevent uint8 from being seen as binary data + vals[i] = a.values[i] } else { vals[i] = nil } @@ -765,7 +822,7 @@ func (a *Int8) String() string { } switch { case a.IsNull(i): - o.WriteString("(null)") + o.WriteString(NullValueStr) default: fmt.Fprintf(o, "%v", v) } @@ -785,6 +842,13 @@ func (a *Int8) setData(data *Data) { } } +func (a *Int8) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return strconv.FormatInt(int64(a.Value(i)), 10) +} + func (a *Int8) GetOneForMarshal(i int) interface{} { if a.IsNull(i) { return nil @@ -853,7 +917,7 @@ func (a *Uint8) String() string { } switch { case a.IsNull(i): - o.WriteString("(null)") + o.WriteString(NullValueStr) default: fmt.Fprintf(o, "%v", v) } @@ -873,6 +937,13 @@ func (a *Uint8) setData(data *Data) { } } +func (a *Uint8) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return strconv.FormatUint(uint64(a.Value(i)), 10) +} + func (a *Uint8) GetOneForMarshal(i int) interface{} { if a.IsNull(i) { return nil @@ -906,89 +977,6 @@ func arrayEqualUint8(left, right *Uint8) bool { return true } -// A type which represents an immutable sequence of arrow.Timestamp values. 
-type Timestamp struct { - array - values []arrow.Timestamp -} - -// NewTimestampData creates a new Timestamp. -func NewTimestampData(data arrow.ArrayData) *Timestamp { - a := &Timestamp{} - a.refCount = 1 - a.setData(data.(*Data)) - return a -} - -// Reset resets the array for re-use. -func (a *Timestamp) Reset(data *Data) { - a.setData(data) -} - -// Value returns the value at the specified index. -func (a *Timestamp) Value(i int) arrow.Timestamp { return a.values[i] } - -// Values returns the values. -func (a *Timestamp) TimestampValues() []arrow.Timestamp { return a.values } - -// String returns a string representation of the array. -func (a *Timestamp) String() string { - o := new(strings.Builder) - o.WriteString("[") - for i, v := range a.values { - if i > 0 { - fmt.Fprintf(o, " ") - } - switch { - case a.IsNull(i): - o.WriteString("(null)") - default: - fmt.Fprintf(o, "%v", v) - } - } - o.WriteString("]") - return o.String() -} - -func (a *Timestamp) setData(data *Data) { - a.array.setData(data) - vals := data.buffers[1] - if vals != nil { - a.values = arrow.TimestampTraits.CastFromBytes(vals.Bytes()) - beg := a.array.data.offset - end := beg + a.array.data.length - a.values = a.values[beg:end] - } -} - -func (a *Timestamp) GetOneForMarshal(i int) interface{} { - if a.IsNull(i) { - return nil - } - return a.values[i].ToTime(a.DataType().(*arrow.TimestampType).Unit).Format("2006-01-02 15:04:05.999999999") -} - -func (a *Timestamp) MarshalJSON() ([]byte, error) { - vals := make([]interface{}, a.Len()) - for i := range a.values { - vals[i] = a.GetOneForMarshal(i) - } - - return json.Marshal(vals) -} - -func arrayEqualTimestamp(left, right *Timestamp) bool { - for i := 0; i < left.Len(); i++ { - if left.IsNull(i) { - continue - } - if left.Value(i) != right.Value(i) { - return false - } - } - return true -} - // A type which represents an immutable sequence of arrow.Time32 values. 
type Time32 struct { array @@ -1024,7 +1012,7 @@ func (a *Time32) String() string { } switch { case a.IsNull(i): - o.WriteString("(null)") + o.WriteString(NullValueStr) default: fmt.Fprintf(o, "%v", v) } @@ -1044,6 +1032,13 @@ func (a *Time32) setData(data *Data) { } } +func (a *Time32) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return a.values[i].FormattedString(a.DataType().(*arrow.Time32Type).Unit) +} + func (a *Time32) GetOneForMarshal(i int) interface{} { if a.IsNull(i) { return nil @@ -1107,7 +1102,7 @@ func (a *Time64) String() string { } switch { case a.IsNull(i): - o.WriteString("(null)") + o.WriteString(NullValueStr) default: fmt.Fprintf(o, "%v", v) } @@ -1127,6 +1122,13 @@ func (a *Time64) setData(data *Data) { } } +func (a *Time64) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return a.values[i].FormattedString(a.DataType().(*arrow.Time64Type).Unit) +} + func (a *Time64) GetOneForMarshal(i int) interface{} { if a.IsNull(i) { return nil @@ -1190,7 +1192,7 @@ func (a *Date32) String() string { } switch { case a.IsNull(i): - o.WriteString("(null)") + o.WriteString(NullValueStr) default: fmt.Fprintf(o, "%v", v) } @@ -1210,6 +1212,13 @@ func (a *Date32) setData(data *Data) { } } +func (a *Date32) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return a.values[i].FormattedString() +} + func (a *Date32) GetOneForMarshal(i int) interface{} { if a.IsNull(i) { return nil @@ -1273,7 +1282,7 @@ func (a *Date64) String() string { } switch { case a.IsNull(i): - o.WriteString("(null)") + o.WriteString(NullValueStr) default: fmt.Fprintf(o, "%v", v) } @@ -1293,6 +1302,13 @@ func (a *Date64) setData(data *Data) { } } +func (a *Date64) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return a.values[i].FormattedString() +} + func (a *Date64) GetOneForMarshal(i int) interface{} { if a.IsNull(i) { return nil @@ -1356,7 +1372,7 @@ func (a *Duration) String() string { } switch { case a.IsNull(i): - o.WriteString("(null)") + o.WriteString(NullValueStr) default: fmt.Fprintf(o, "%v", v) } @@ -1376,6 +1392,14 @@ func (a *Duration) setData(data *Data) { } } +func (a *Duration) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + // return value and suffix as a string such as "12345ms" + return fmt.Sprintf("%d%s", a.values[i], a.DataType().(*arrow.DurationType).Unit) +} + func (a *Duration) GetOneForMarshal(i int) interface{} { if a.IsNull(i) { return nil diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/numeric.gen.go.tmpl b/vendor/github.com/apache/arrow/go/v14/arrow/array/numeric.gen.go.tmpl similarity index 70% rename from vendor/github.com/apache/arrow/go/v12/arrow/array/numeric.gen.go.tmpl rename to vendor/github.com/apache/arrow/go/v14/arrow/array/numeric.gen.go.tmpl index 3fd7ae6e..34d17fbf 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/array/numeric.gen.go.tmpl +++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/numeric.gen.go.tmpl @@ -21,8 +21,8 @@ import ( "strings" "time" - "github.com/apache/arrow/go/v12/arrow" - "github.com/goccy/go-json" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/internal/json" ) {{range .In}} @@ -62,7 +62,7 @@ func (a *{{.Name}}) String() string { } switch { case a.IsNull(i): - o.WriteString("(null)") + o.WriteString(NullValueStr) default: fmt.Fprintf(o, "%v", v) } @@ -82,6 +82,30 @@ func (a *{{.Name}}) setData(data *Data) { } } +func (a *{{.Name}}) ValueStr(i int) string { + if a.IsNull(i) { + 
return NullValueStr + } +{{if or (eq .Name "Date32") (eq .Name "Date64") -}} + return a.values[i].FormattedString() +{{else if or (eq .Name "Time32") (eq .Name "Time64") -}} + return a.values[i].FormattedString(a.DataType().(*{{.QualifiedType}}Type).Unit) +{{else if (eq .Name "Duration") -}} + // return value and suffix as a string such as "12345ms" + return fmt.Sprintf("%d%s", a.values[i], a.DataType().(*{{.QualifiedType}}Type).Unit) +{{else if or (eq .Name "Int8") (eq .Name "Int16") (eq .Name "Int32") (eq .Name "Int64") -}} + return strconv.FormatInt(int64(a.Value(i)), 10) +{{else if or (eq .Name "Uint8") (eq .Name "Uint16") (eq .Name "Uint32") (eq .Name "Uint64") -}} + return strconv.FormatUint(uint64(a.Value(i)), 10) +{{else if or (eq .Name "Float32") -}} + return strconv.FormatFloat(float64(a.Value(i)), 'g', -1, 32) +{{else if or (eq .Name "Float64") -}} + return strconv.FormatFloat(float64(a.Value(i)), 'g', -1, 64) +{{else}} + return fmt.Sprintf("%v", a.values[i]) +{{end -}} +} + func (a *{{.Name}}) GetOneForMarshal(i int) interface{} { if a.IsNull(i) { return nil @@ -90,11 +114,9 @@ func (a *{{.Name}}) GetOneForMarshal(i int) interface{} { return a.values[i].ToTime().Format("2006-01-02") {{else if or (eq .Name "Time32") (eq .Name "Time64") -}} return a.values[i].ToTime(a.DataType().(*{{.QualifiedType}}Type).Unit).Format("15:04:05.999999999") -{{else if or (eq .Name "Timestamp") -}} - return a.values[i].ToTime(a.DataType().(*{{.QualifiedType}}Type).Unit).Format("2006-01-02 15:04:05.999999999") -{{else if (eq .Name "Duration") -}} +{{else if (eq .Name "Duration") -}} // return value and suffix as a string such as "12345ms" - return fmt.Sprintf("%d%s", a.values[i], a.DataType().(*{{.QualifiedType}}Type).Unit.String()) + return fmt.Sprintf("%d%s", a.values[i], a.DataType().(*{{.QualifiedType}}Type).Unit.String()) {{else if (eq .Size "1")}} return float64(a.values[i]) // prevent uint8 from being seen as binary data {{else}} @@ -106,13 +128,13 @@ func (a *{{.Name}}) MarshalJSON() ([]byte, error) { {{if .QualifiedType -}} vals := make([]interface{}, a.Len()) for i := range a.values { - vals[i] = a.getOneForMarshal(i) + vals[i] = a.GetOneForMarshal(i) } {{else -}} vals := make([]interface{}, a.Len()) for i := 0; i < a.Len(); i++ { if a.IsValid(i) { - vals[i] = float64(a.values[i]) // prevent uint8 from being seen as binary data + {{ if (eq .Size "1") }}vals[i] = float64(a.values[i]) // prevent uint8 from being seen as binary data{{ else }}vals[i] = a.values[i]{{ end }} } else { vals[i] = nil } diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/numericbuilder.gen.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/numericbuilder.gen.go similarity index 90% rename from vendor/github.com/apache/arrow/go/v12/arrow/array/numericbuilder.gen.go rename to vendor/github.com/apache/arrow/go/v14/arrow/array/numericbuilder.gen.go index 81cf262f..7f01180f 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/array/numericbuilder.gen.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/numericbuilder.gen.go @@ -27,11 +27,11 @@ import ( "sync/atomic" "time" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/arrow/internal/debug" - "github.com/apache/arrow/go/v12/arrow/memory" - "github.com/goccy/go-json" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/memory" + 
"github.com/apache/arrow/go/v14/internal/json" ) type Int64Builder struct { @@ -75,10 +75,22 @@ func (b *Int64Builder) AppendNull() { b.UnsafeAppendBoolToBitmap(false) } +func (b *Int64Builder) AppendNulls(n int) { + for i := 0; i < n; i++ { + b.AppendNull() + } +} + func (b *Int64Builder) AppendEmptyValue() { b.Append(0) } +func (b *Int64Builder) AppendEmptyValues(n int) { + for i := 0; i < n; i++ { + b.AppendEmptyValue() + } +} + func (b *Int64Builder) UnsafeAppend(v int64) { bitutil.SetBit(b.nullBitmap.Bytes(), b.length) b.rawData[b.length] = v @@ -143,6 +155,10 @@ func (b *Int64Builder) Resize(n int) { } } +func (b *Int64Builder) Value(i int) int64 { + return b.rawData[i] +} + // NewArray creates a Int64 array from the memory buffers used by the builder and resets the Int64Builder // so it can be used to build a new array. func (b *Int64Builder) NewArray() arrow.Array { @@ -176,6 +192,20 @@ func (b *Int64Builder) newData() (data *Data) { return } +func (b *Int64Builder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + v, err := strconv.ParseInt(s, 10, 8*8) + if err != nil { + b.AppendNull() + return err + } + b.Append(int64(v)) + return nil +} + func (b *Int64Builder) UnmarshalOne(dec *json.Decoder) error { t, err := dec.Token() if err != nil { @@ -284,10 +314,22 @@ func (b *Uint64Builder) AppendNull() { b.UnsafeAppendBoolToBitmap(false) } +func (b *Uint64Builder) AppendNulls(n int) { + for i := 0; i < n; i++ { + b.AppendNull() + } +} + func (b *Uint64Builder) AppendEmptyValue() { b.Append(0) } +func (b *Uint64Builder) AppendEmptyValues(n int) { + for i := 0; i < n; i++ { + b.AppendEmptyValue() + } +} + func (b *Uint64Builder) UnsafeAppend(v uint64) { bitutil.SetBit(b.nullBitmap.Bytes(), b.length) b.rawData[b.length] = v @@ -352,6 +394,10 @@ func (b *Uint64Builder) Resize(n int) { } } +func (b *Uint64Builder) Value(i int) uint64 { + return b.rawData[i] +} + // NewArray creates a Uint64 array from the memory buffers used by the builder and resets the Uint64Builder // so it can be used to build a new array. func (b *Uint64Builder) NewArray() arrow.Array { @@ -385,6 +431,20 @@ func (b *Uint64Builder) newData() (data *Data) { return } +func (b *Uint64Builder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + v, err := strconv.ParseUint(s, 10, 8*8) + if err != nil { + b.AppendNull() + return err + } + b.Append(uint64(v)) + return nil +} + func (b *Uint64Builder) UnmarshalOne(dec *json.Decoder) error { t, err := dec.Token() if err != nil { @@ -493,10 +553,22 @@ func (b *Float64Builder) AppendNull() { b.UnsafeAppendBoolToBitmap(false) } +func (b *Float64Builder) AppendNulls(n int) { + for i := 0; i < n; i++ { + b.AppendNull() + } +} + func (b *Float64Builder) AppendEmptyValue() { b.Append(0) } +func (b *Float64Builder) AppendEmptyValues(n int) { + for i := 0; i < n; i++ { + b.AppendEmptyValue() + } +} + func (b *Float64Builder) UnsafeAppend(v float64) { bitutil.SetBit(b.nullBitmap.Bytes(), b.length) b.rawData[b.length] = v @@ -561,6 +633,10 @@ func (b *Float64Builder) Resize(n int) { } } +func (b *Float64Builder) Value(i int) float64 { + return b.rawData[i] +} + // NewArray creates a Float64 array from the memory buffers used by the builder and resets the Float64Builder // so it can be used to build a new array. 
func (b *Float64Builder) NewArray() arrow.Array { @@ -594,6 +670,20 @@ func (b *Float64Builder) newData() (data *Data) { return } +func (b *Float64Builder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + v, err := strconv.ParseFloat(s, 8*8) + if err != nil { + b.AppendNull() + return err + } + b.Append(float64(v)) + return nil +} + func (b *Float64Builder) UnmarshalOne(dec *json.Decoder) error { t, err := dec.Token() if err != nil { @@ -702,10 +792,22 @@ func (b *Int32Builder) AppendNull() { b.UnsafeAppendBoolToBitmap(false) } +func (b *Int32Builder) AppendNulls(n int) { + for i := 0; i < n; i++ { + b.AppendNull() + } +} + func (b *Int32Builder) AppendEmptyValue() { b.Append(0) } +func (b *Int32Builder) AppendEmptyValues(n int) { + for i := 0; i < n; i++ { + b.AppendEmptyValue() + } +} + func (b *Int32Builder) UnsafeAppend(v int32) { bitutil.SetBit(b.nullBitmap.Bytes(), b.length) b.rawData[b.length] = v @@ -770,6 +872,10 @@ func (b *Int32Builder) Resize(n int) { } } +func (b *Int32Builder) Value(i int) int32 { + return b.rawData[i] +} + // NewArray creates a Int32 array from the memory buffers used by the builder and resets the Int32Builder // so it can be used to build a new array. func (b *Int32Builder) NewArray() arrow.Array { @@ -803,6 +909,20 @@ func (b *Int32Builder) newData() (data *Data) { return } +func (b *Int32Builder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + v, err := strconv.ParseInt(s, 10, 4*8) + if err != nil { + b.AppendNull() + return err + } + b.Append(int32(v)) + return nil +} + func (b *Int32Builder) UnmarshalOne(dec *json.Decoder) error { t, err := dec.Token() if err != nil { @@ -911,10 +1031,22 @@ func (b *Uint32Builder) AppendNull() { b.UnsafeAppendBoolToBitmap(false) } +func (b *Uint32Builder) AppendNulls(n int) { + for i := 0; i < n; i++ { + b.AppendNull() + } +} + func (b *Uint32Builder) AppendEmptyValue() { b.Append(0) } +func (b *Uint32Builder) AppendEmptyValues(n int) { + for i := 0; i < n; i++ { + b.AppendEmptyValue() + } +} + func (b *Uint32Builder) UnsafeAppend(v uint32) { bitutil.SetBit(b.nullBitmap.Bytes(), b.length) b.rawData[b.length] = v @@ -979,6 +1111,10 @@ func (b *Uint32Builder) Resize(n int) { } } +func (b *Uint32Builder) Value(i int) uint32 { + return b.rawData[i] +} + // NewArray creates a Uint32 array from the memory buffers used by the builder and resets the Uint32Builder // so it can be used to build a new array. 
func (b *Uint32Builder) NewArray() arrow.Array { @@ -1012,6 +1148,20 @@ func (b *Uint32Builder) newData() (data *Data) { return } +func (b *Uint32Builder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + v, err := strconv.ParseUint(s, 10, 4*8) + if err != nil { + b.AppendNull() + return err + } + b.Append(uint32(v)) + return nil +} + func (b *Uint32Builder) UnmarshalOne(dec *json.Decoder) error { t, err := dec.Token() if err != nil { @@ -1120,10 +1270,22 @@ func (b *Float32Builder) AppendNull() { b.UnsafeAppendBoolToBitmap(false) } +func (b *Float32Builder) AppendNulls(n int) { + for i := 0; i < n; i++ { + b.AppendNull() + } +} + func (b *Float32Builder) AppendEmptyValue() { b.Append(0) } +func (b *Float32Builder) AppendEmptyValues(n int) { + for i := 0; i < n; i++ { + b.AppendEmptyValue() + } +} + func (b *Float32Builder) UnsafeAppend(v float32) { bitutil.SetBit(b.nullBitmap.Bytes(), b.length) b.rawData[b.length] = v @@ -1188,6 +1350,10 @@ func (b *Float32Builder) Resize(n int) { } } +func (b *Float32Builder) Value(i int) float32 { + return b.rawData[i] +} + // NewArray creates a Float32 array from the memory buffers used by the builder and resets the Float32Builder // so it can be used to build a new array. func (b *Float32Builder) NewArray() arrow.Array { @@ -1221,6 +1387,20 @@ func (b *Float32Builder) newData() (data *Data) { return } +func (b *Float32Builder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + v, err := strconv.ParseFloat(s, 4*8) + if err != nil { + b.AppendNull() + return err + } + b.Append(float32(v)) + return nil +} + func (b *Float32Builder) UnmarshalOne(dec *json.Decoder) error { t, err := dec.Token() if err != nil { @@ -1329,10 +1509,22 @@ func (b *Int16Builder) AppendNull() { b.UnsafeAppendBoolToBitmap(false) } +func (b *Int16Builder) AppendNulls(n int) { + for i := 0; i < n; i++ { + b.AppendNull() + } +} + func (b *Int16Builder) AppendEmptyValue() { b.Append(0) } +func (b *Int16Builder) AppendEmptyValues(n int) { + for i := 0; i < n; i++ { + b.AppendEmptyValue() + } +} + func (b *Int16Builder) UnsafeAppend(v int16) { bitutil.SetBit(b.nullBitmap.Bytes(), b.length) b.rawData[b.length] = v @@ -1397,6 +1589,10 @@ func (b *Int16Builder) Resize(n int) { } } +func (b *Int16Builder) Value(i int) int16 { + return b.rawData[i] +} + // NewArray creates a Int16 array from the memory buffers used by the builder and resets the Int16Builder // so it can be used to build a new array. 
func (b *Int16Builder) NewArray() arrow.Array { @@ -1430,6 +1626,20 @@ func (b *Int16Builder) newData() (data *Data) { return } +func (b *Int16Builder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + v, err := strconv.ParseInt(s, 10, 2*8) + if err != nil { + b.AppendNull() + return err + } + b.Append(int16(v)) + return nil +} + func (b *Int16Builder) UnmarshalOne(dec *json.Decoder) error { t, err := dec.Token() if err != nil { @@ -1538,10 +1748,22 @@ func (b *Uint16Builder) AppendNull() { b.UnsafeAppendBoolToBitmap(false) } +func (b *Uint16Builder) AppendNulls(n int) { + for i := 0; i < n; i++ { + b.AppendNull() + } +} + func (b *Uint16Builder) AppendEmptyValue() { b.Append(0) } +func (b *Uint16Builder) AppendEmptyValues(n int) { + for i := 0; i < n; i++ { + b.AppendEmptyValue() + } +} + func (b *Uint16Builder) UnsafeAppend(v uint16) { bitutil.SetBit(b.nullBitmap.Bytes(), b.length) b.rawData[b.length] = v @@ -1606,6 +1828,10 @@ func (b *Uint16Builder) Resize(n int) { } } +func (b *Uint16Builder) Value(i int) uint16 { + return b.rawData[i] +} + // NewArray creates a Uint16 array from the memory buffers used by the builder and resets the Uint16Builder // so it can be used to build a new array. func (b *Uint16Builder) NewArray() arrow.Array { @@ -1639,6 +1865,20 @@ func (b *Uint16Builder) newData() (data *Data) { return } +func (b *Uint16Builder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + v, err := strconv.ParseUint(s, 10, 2*8) + if err != nil { + b.AppendNull() + return err + } + b.Append(uint16(v)) + return nil +} + func (b *Uint16Builder) UnmarshalOne(dec *json.Decoder) error { t, err := dec.Token() if err != nil { @@ -1747,10 +1987,22 @@ func (b *Int8Builder) AppendNull() { b.UnsafeAppendBoolToBitmap(false) } +func (b *Int8Builder) AppendNulls(n int) { + for i := 0; i < n; i++ { + b.AppendNull() + } +} + func (b *Int8Builder) AppendEmptyValue() { b.Append(0) } +func (b *Int8Builder) AppendEmptyValues(n int) { + for i := 0; i < n; i++ { + b.AppendEmptyValue() + } +} + func (b *Int8Builder) UnsafeAppend(v int8) { bitutil.SetBit(b.nullBitmap.Bytes(), b.length) b.rawData[b.length] = v @@ -1815,6 +2067,10 @@ func (b *Int8Builder) Resize(n int) { } } +func (b *Int8Builder) Value(i int) int8 { + return b.rawData[i] +} + // NewArray creates a Int8 array from the memory buffers used by the builder and resets the Int8Builder // so it can be used to build a new array. 
func (b *Int8Builder) NewArray() arrow.Array { @@ -1848,6 +2104,20 @@ func (b *Int8Builder) newData() (data *Data) { return } +func (b *Int8Builder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + v, err := strconv.ParseInt(s, 10, 1*8) + if err != nil { + b.AppendNull() + return err + } + b.Append(int8(v)) + return nil +} + func (b *Int8Builder) UnmarshalOne(dec *json.Decoder) error { t, err := dec.Token() if err != nil { @@ -1956,10 +2226,22 @@ func (b *Uint8Builder) AppendNull() { b.UnsafeAppendBoolToBitmap(false) } +func (b *Uint8Builder) AppendNulls(n int) { + for i := 0; i < n; i++ { + b.AppendNull() + } +} + func (b *Uint8Builder) AppendEmptyValue() { b.Append(0) } +func (b *Uint8Builder) AppendEmptyValues(n int) { + for i := 0; i < n; i++ { + b.AppendEmptyValue() + } +} + func (b *Uint8Builder) UnsafeAppend(v uint8) { bitutil.SetBit(b.nullBitmap.Bytes(), b.length) b.rawData[b.length] = v @@ -2024,6 +2306,10 @@ func (b *Uint8Builder) Resize(n int) { } } +func (b *Uint8Builder) Value(i int) uint8 { + return b.rawData[i] +} + // NewArray creates a Uint8 array from the memory buffers used by the builder and resets the Uint8Builder // so it can be used to build a new array. func (b *Uint8Builder) NewArray() arrow.Array { @@ -2057,6 +2343,20 @@ func (b *Uint8Builder) newData() (data *Data) { return } +func (b *Uint8Builder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + v, err := strconv.ParseUint(s, 10, 1*8) + if err != nil { + b.AppendNull() + return err + } + b.Append(uint8(v)) + return nil +} + func (b *Uint8Builder) UnmarshalOne(dec *json.Decoder) error { t, err := dec.Token() if err != nil { @@ -2124,218 +2424,6 @@ func (b *Uint8Builder) UnmarshalJSON(data []byte) error { return b.Unmarshal(dec) } -type TimestampBuilder struct { - builder - - dtype *arrow.TimestampType - data *memory.Buffer - rawData []arrow.Timestamp -} - -func NewTimestampBuilder(mem memory.Allocator, dtype *arrow.TimestampType) *TimestampBuilder { - return &TimestampBuilder{builder: builder{refCount: 1, mem: mem}, dtype: dtype} -} - -func (b *TimestampBuilder) Type() arrow.DataType { return b.dtype } - -// Release decreases the reference count by 1. -// When the reference count goes to zero, the memory is freed. -func (b *TimestampBuilder) Release() { - debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") - - if atomic.AddInt64(&b.refCount, -1) == 0 { - if b.nullBitmap != nil { - b.nullBitmap.Release() - b.nullBitmap = nil - } - if b.data != nil { - b.data.Release() - b.data = nil - b.rawData = nil - } - } -} - -func (b *TimestampBuilder) Append(v arrow.Timestamp) { - b.Reserve(1) - b.UnsafeAppend(v) -} - -func (b *TimestampBuilder) AppendNull() { - b.Reserve(1) - b.UnsafeAppendBoolToBitmap(false) -} - -func (b *TimestampBuilder) AppendEmptyValue() { - b.Append(0) -} - -func (b *TimestampBuilder) UnsafeAppend(v arrow.Timestamp) { - bitutil.SetBit(b.nullBitmap.Bytes(), b.length) - b.rawData[b.length] = v - b.length++ -} - -func (b *TimestampBuilder) UnsafeAppendBoolToBitmap(isValid bool) { - if isValid { - bitutil.SetBit(b.nullBitmap.Bytes(), b.length) - } else { - b.nulls++ - } - b.length++ -} - -// AppendValues will append the values in the v slice. The valid slice determines which values -// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, -// all values in v are appended and considered valid. 
-func (b *TimestampBuilder) AppendValues(v []arrow.Timestamp, valid []bool) { - if len(v) != len(valid) && len(valid) != 0 { - panic("len(v) != len(valid) && len(valid) != 0") - } - - if len(v) == 0 { - return - } - - b.Reserve(len(v)) - arrow.TimestampTraits.Copy(b.rawData[b.length:], v) - b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) -} - -func (b *TimestampBuilder) init(capacity int) { - b.builder.init(capacity) - - b.data = memory.NewResizableBuffer(b.mem) - bytesN := arrow.TimestampTraits.BytesRequired(capacity) - b.data.Resize(bytesN) - b.rawData = arrow.TimestampTraits.CastFromBytes(b.data.Bytes()) -} - -// Reserve ensures there is enough space for appending n elements -// by checking the capacity and calling Resize if necessary. -func (b *TimestampBuilder) Reserve(n int) { - b.builder.reserve(n, b.Resize) -} - -// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), -// additional memory will be allocated. If n is smaller, the allocated memory may reduced. -func (b *TimestampBuilder) Resize(n int) { - nBuilder := n - if n < minBuilderCapacity { - n = minBuilderCapacity - } - - if b.capacity == 0 { - b.init(n) - } else { - b.builder.resize(nBuilder, b.init) - b.data.Resize(arrow.TimestampTraits.BytesRequired(n)) - b.rawData = arrow.TimestampTraits.CastFromBytes(b.data.Bytes()) - } -} - -// NewArray creates a Timestamp array from the memory buffers used by the builder and resets the TimestampBuilder -// so it can be used to build a new array. -func (b *TimestampBuilder) NewArray() arrow.Array { - return b.NewTimestampArray() -} - -// NewTimestampArray creates a Timestamp array from the memory buffers used by the builder and resets the TimestampBuilder -// so it can be used to build a new array. -func (b *TimestampBuilder) NewTimestampArray() (a *Timestamp) { - data := b.newData() - a = NewTimestampData(data) - data.Release() - return -} - -func (b *TimestampBuilder) newData() (data *Data) { - bytesRequired := arrow.TimestampTraits.BytesRequired(b.length) - if bytesRequired > 0 && bytesRequired < b.data.Len() { - // trim buffers - b.data.Resize(bytesRequired) - } - data = NewData(b.dtype, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) - b.reset() - - if b.data != nil { - b.data.Release() - b.data = nil - b.rawData = nil - } - - return -} - -func (b *TimestampBuilder) UnmarshalOne(dec *json.Decoder) error { - t, err := dec.Token() - if err != nil { - return err - } - - switch v := t.(type) { - case nil: - b.AppendNull() - case string: - loc, _ := b.dtype.GetZone() - tm, _, err := arrow.TimestampFromStringInLocation(v, b.dtype.Unit, loc) - - if err != nil { - return &json.UnmarshalTypeError{ - Value: v, - Type: reflect.TypeOf(arrow.Timestamp(0)), - Offset: dec.InputOffset(), - } - } - - b.Append(tm) - case json.Number: - n, err := v.Int64() - if err != nil { - return &json.UnmarshalTypeError{ - Value: v.String(), - Type: reflect.TypeOf(arrow.Timestamp(0)), - Offset: dec.InputOffset(), - } - } - b.Append(arrow.Timestamp(n)) - case float64: - b.Append(arrow.Timestamp(v)) - - default: - return &json.UnmarshalTypeError{ - Value: fmt.Sprint(t), - Type: reflect.TypeOf(arrow.Timestamp(0)), - Offset: dec.InputOffset(), - } - } - - return nil -} - -func (b *TimestampBuilder) Unmarshal(dec *json.Decoder) error { - for dec.More() { - if err := b.UnmarshalOne(dec); err != nil { - return err - } - } - return nil -} - -func (b *TimestampBuilder) UnmarshalJSON(data []byte) error { - dec := json.NewDecoder(bytes.NewReader(data)) - t, err := 
dec.Token() - if err != nil { - return err - } - - if delim, ok := t.(json.Delim); !ok || delim != '[' { - return fmt.Errorf("binary builder must unpack from json array, found %s", delim) - } - - return b.Unmarshal(dec) -} - type Time32Builder struct { builder @@ -2378,10 +2466,22 @@ func (b *Time32Builder) AppendNull() { b.UnsafeAppendBoolToBitmap(false) } +func (b *Time32Builder) AppendNulls(n int) { + for i := 0; i < n; i++ { + b.AppendNull() + } +} + func (b *Time32Builder) AppendEmptyValue() { b.Append(0) } +func (b *Time32Builder) AppendEmptyValues(n int) { + for i := 0; i < n; i++ { + b.AppendEmptyValue() + } +} + func (b *Time32Builder) UnsafeAppend(v arrow.Time32) { bitutil.SetBit(b.nullBitmap.Bytes(), b.length) b.rawData[b.length] = v @@ -2446,6 +2546,10 @@ func (b *Time32Builder) Resize(n int) { } } +func (b *Time32Builder) Value(i int) arrow.Time32 { + return b.rawData[i] +} + // NewArray creates a Time32 array from the memory buffers used by the builder and resets the Time32Builder // so it can be used to build a new array. func (b *Time32Builder) NewArray() arrow.Array { @@ -2479,6 +2583,20 @@ func (b *Time32Builder) newData() (data *Data) { return } +func (b *Time32Builder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + val, err := arrow.Time32FromString(s, b.dtype.Unit) + if err != nil { + b.AppendNull() + return err + } + b.Append(val) + return nil +} + func (b *Time32Builder) UnmarshalOne(dec *json.Decoder) error { t, err := dec.Token() if err != nil { @@ -2490,7 +2608,6 @@ func (b *Time32Builder) UnmarshalOne(dec *json.Decoder) error { b.AppendNull() case string: tm, err := arrow.Time32FromString(v, b.dtype.Unit) - if err != nil { return &json.UnmarshalTypeError{ Value: v, @@ -2589,10 +2706,22 @@ func (b *Time64Builder) AppendNull() { b.UnsafeAppendBoolToBitmap(false) } +func (b *Time64Builder) AppendNulls(n int) { + for i := 0; i < n; i++ { + b.AppendNull() + } +} + func (b *Time64Builder) AppendEmptyValue() { b.Append(0) } +func (b *Time64Builder) AppendEmptyValues(n int) { + for i := 0; i < n; i++ { + b.AppendEmptyValue() + } +} + func (b *Time64Builder) UnsafeAppend(v arrow.Time64) { bitutil.SetBit(b.nullBitmap.Bytes(), b.length) b.rawData[b.length] = v @@ -2657,6 +2786,10 @@ func (b *Time64Builder) Resize(n int) { } } +func (b *Time64Builder) Value(i int) arrow.Time64 { + return b.rawData[i] +} + // NewArray creates a Time64 array from the memory buffers used by the builder and resets the Time64Builder // so it can be used to build a new array. 
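// The unit-aware string path added above parses with arrow.Time64FromString
// and appends a null when parsing fails; a minimal sketch, assuming the
// microsecond unit used by the generated round-trip tests in this patch:
//
//	b := NewTime64Builder(memory.NewGoAllocator(), &arrow.Time64Type{Unit: arrow.Microsecond})
//	defer b.Release()
//	_ = b.AppendValueFromString("15:04:05.999999") // valid time-of-day
//	_ = b.AppendValueFromString(NullValueStr)      // explicit null, no error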
func (b *Time64Builder) NewArray() arrow.Array { @@ -2690,6 +2823,20 @@ func (b *Time64Builder) newData() (data *Data) { return } +func (b *Time64Builder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + val, err := arrow.Time64FromString(s, b.dtype.Unit) + if err != nil { + b.AppendNull() + return err + } + b.Append(val) + return nil +} + func (b *Time64Builder) UnmarshalOne(dec *json.Decoder) error { t, err := dec.Token() if err != nil { @@ -2701,7 +2848,6 @@ func (b *Time64Builder) UnmarshalOne(dec *json.Decoder) error { b.AppendNull() case string: tm, err := arrow.Time64FromString(v, b.dtype.Unit) - if err != nil { return &json.UnmarshalTypeError{ Value: v, @@ -2799,10 +2945,22 @@ func (b *Date32Builder) AppendNull() { b.UnsafeAppendBoolToBitmap(false) } +func (b *Date32Builder) AppendNulls(n int) { + for i := 0; i < n; i++ { + b.AppendNull() + } +} + func (b *Date32Builder) AppendEmptyValue() { b.Append(0) } +func (b *Date32Builder) AppendEmptyValues(n int) { + for i := 0; i < n; i++ { + b.AppendEmptyValue() + } +} + func (b *Date32Builder) UnsafeAppend(v arrow.Date32) { bitutil.SetBit(b.nullBitmap.Bytes(), b.length) b.rawData[b.length] = v @@ -2867,6 +3025,10 @@ func (b *Date32Builder) Resize(n int) { } } +func (b *Date32Builder) Value(i int) arrow.Date32 { + return b.rawData[i] +} + // NewArray creates a Date32 array from the memory buffers used by the builder and resets the Date32Builder // so it can be used to build a new array. func (b *Date32Builder) NewArray() arrow.Array { @@ -2900,6 +3062,20 @@ func (b *Date32Builder) newData() (data *Data) { return } +func (b *Date32Builder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + tm, err := time.Parse("2006-01-02", s) + if err != nil { + b.AppendNull() + return err + } + b.Append(arrow.Date32FromTime(tm)) + return nil +} + func (b *Date32Builder) UnmarshalOne(dec *json.Decoder) error { t, err := dec.Token() if err != nil { @@ -3008,10 +3184,22 @@ func (b *Date64Builder) AppendNull() { b.UnsafeAppendBoolToBitmap(false) } +func (b *Date64Builder) AppendNulls(n int) { + for i := 0; i < n; i++ { + b.AppendNull() + } +} + func (b *Date64Builder) AppendEmptyValue() { b.Append(0) } +func (b *Date64Builder) AppendEmptyValues(n int) { + for i := 0; i < n; i++ { + b.AppendEmptyValue() + } +} + func (b *Date64Builder) UnsafeAppend(v arrow.Date64) { bitutil.SetBit(b.nullBitmap.Bytes(), b.length) b.rawData[b.length] = v @@ -3076,6 +3264,10 @@ func (b *Date64Builder) Resize(n int) { } } +func (b *Date64Builder) Value(i int) arrow.Date64 { + return b.rawData[i] +} + // NewArray creates a Date64 array from the memory buffers used by the builder and resets the Date64Builder // so it can be used to build a new array. 
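// Date strings carry day precision only, so the AppendValueFromString added
// above parses with the "2006-01-02" layout and stores midnight UTC; this is
// also why the generated round-trip test compares Date64 values through
// ToTime. A minimal sketch, assuming a Go allocator:
//
//	b := NewDate64Builder(memory.NewGoAllocator())
//	defer b.Release()
//	_ = b.AppendValueFromString("2024-01-30")
//	arr := b.NewDate64Array()
//	defer arr.Release()
//	_ = arr.Value(0).ToTime() // 2024-01-30 00:00:00 +0000 UTC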
func (b *Date64Builder) NewArray() arrow.Array { @@ -3109,6 +3301,20 @@ func (b *Date64Builder) newData() (data *Data) { return } +func (b *Date64Builder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + tm, err := time.Parse("2006-01-02", s) + if err != nil { + b.AppendNull() + return err + } + b.Append(arrow.Date64FromTime(tm)) + return nil +} + func (b *Date64Builder) UnmarshalOne(dec *json.Decoder) error { t, err := dec.Token() if err != nil { @@ -3218,10 +3424,22 @@ func (b *DurationBuilder) AppendNull() { b.UnsafeAppendBoolToBitmap(false) } +func (b *DurationBuilder) AppendNulls(n int) { + for i := 0; i < n; i++ { + b.AppendNull() + } +} + func (b *DurationBuilder) AppendEmptyValue() { b.Append(0) } +func (b *DurationBuilder) AppendEmptyValues(n int) { + for i := 0; i < n; i++ { + b.AppendEmptyValue() + } +} + func (b *DurationBuilder) UnsafeAppend(v arrow.Duration) { bitutil.SetBit(b.nullBitmap.Bytes(), b.length) b.rawData[b.length] = v @@ -3286,6 +3504,10 @@ func (b *DurationBuilder) Resize(n int) { } } +func (b *DurationBuilder) Value(i int) arrow.Duration { + return b.rawData[i] +} + // NewArray creates a Duration array from the memory buffers used by the builder and resets the DurationBuilder // so it can be used to build a new array. func (b *DurationBuilder) NewArray() arrow.Array { @@ -3319,6 +3541,20 @@ func (b *DurationBuilder) newData() (data *Data) { return } +func (b *DurationBuilder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + dur, err := time.ParseDuration(s) + if err != nil { + return err + } + + b.Append(arrow.Duration(dur / b.dtype.Unit.Multiplier())) + return nil +} + func (b *DurationBuilder) UnmarshalOne(dec *json.Decoder) error { t, err := dec.Token() if err != nil { @@ -3420,7 +3656,6 @@ var ( _ Builder = (*Uint16Builder)(nil) _ Builder = (*Int8Builder)(nil) _ Builder = (*Uint8Builder)(nil) - _ Builder = (*TimestampBuilder)(nil) _ Builder = (*Time32Builder)(nil) _ Builder = (*Time64Builder)(nil) _ Builder = (*Date32Builder)(nil) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/numericbuilder.gen.go.tmpl b/vendor/github.com/apache/arrow/go/v14/arrow/array/numericbuilder.gen.go.tmpl similarity index 80% rename from vendor/github.com/apache/arrow/go/v12/arrow/array/numericbuilder.gen.go.tmpl rename to vendor/github.com/apache/arrow/go/v14/arrow/array/numericbuilder.gen.go.tmpl index c8428452..cf663c03 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/array/numericbuilder.gen.go.tmpl +++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/numericbuilder.gen.go.tmpl @@ -17,11 +17,11 @@ package array import ( - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/arrow/internal/debug" - "github.com/apache/arrow/go/v12/arrow/memory" - "github.com/goccy/go-json" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow/go/v14/internal/json" ) {{range .In}} @@ -79,10 +79,22 @@ func (b *{{.Name}}Builder) AppendNull() { b.UnsafeAppendBoolToBitmap(false) } +func (b *{{.Name}}Builder) AppendNulls(n int) { + for i := 0; i < n; i++ { + b.AppendNull() + } +} + func (b *{{.Name}}Builder) AppendEmptyValue() { b.Append(0) } +func (b *{{.Name}}Builder) AppendEmptyValues(n int) { + for i := 0; i < n; i ++ { + 
b.AppendEmptyValue() + } +} + func (b *{{.Name}}Builder) UnsafeAppend(v {{or .QualifiedType .Type}}) { bitutil.SetBit(b.nullBitmap.Bytes(), b.length) b.rawData[b.length] = v @@ -147,6 +159,10 @@ func (b *{{.Name}}Builder) Resize(n int) { } } +func (b *{{.Name}}Builder) Value(i int) {{or .QualifiedType .Type}} { + return b.rawData[i] +} + // NewArray creates a {{.Name}} array from the memory buffers used by the builder and resets the {{.Name}}Builder // so it can be used to build a new array. func (b *{{.Name}}Builder) NewArray() arrow.Array { @@ -184,6 +200,71 @@ func (b *{{.Name}}Builder) newData() (data *Data) { return } +func (b *{{.Name}}Builder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + {{if or (eq .Name "Date32") -}} + tm, err := time.Parse("2006-01-02", s) + if err != nil { + b.AppendNull() + return err + } + b.Append(arrow.Date32FromTime(tm)) + {{else if or (eq .Name "Date64") -}} + tm, err := time.Parse("2006-01-02", s) + if err != nil { + b.AppendNull() + return err + } + b.Append(arrow.Date64FromTime(tm)) + {{else if or (eq .Name "Time32") -}} + val, err := arrow.Time32FromString(s, b.dtype.Unit) + if err != nil { + b.AppendNull() + return err + } + b.Append(val) + {{else if or (eq .Name "Time64") -}} + val, err := arrow.Time64FromString(s, b.dtype.Unit) + if err != nil { + b.AppendNull() + return err + } + b.Append(val) + {{else if (eq .Name "Duration") -}} + dur, err := time.ParseDuration(s) + if err != nil { + return err + } + + b.Append(arrow.Duration(dur / b.dtype.Unit.Multiplier())) + {{else if or (eq .Name "Int8") (eq .Name "Int16") (eq .Name "Int32") (eq .Name "Int64") -}} + v, err := strconv.ParseInt(s, 10, {{.Size}} * 8) + if err != nil { + b.AppendNull() + return err + } + b.Append({{.name}}(v)) + {{else if or (eq .Name "Uint8") (eq .Name "Uint16") (eq .Name "Uint32") (eq .Name "Uint64") -}} + v, err := strconv.ParseUint(s, 10, {{.Size}} * 8) + if err != nil { + b.AppendNull() + return err + } + b.Append({{.name}}(v)) + {{else if or (eq .Name "Float32") (eq .Name "Float64") -}} + v, err := strconv.ParseFloat(s, {{.Size}} * 8) + if err != nil { + b.AppendNull() + return err + } + b.Append({{.name}}(v)) + {{end -}} + return nil +} + func (b *{{.Name}}Builder) UnmarshalOne(dec *json.Decoder) error { t, err := dec.Token() if err != nil { @@ -217,14 +298,9 @@ func (b *{{.Name}}Builder) UnmarshalOne(dec *json.Decoder) error { b.Append({{.QualifiedType}}(n)) case float64: b.Append({{.QualifiedType}}(v)) -{{else if or (eq .Name "Time32") (eq .Name "Time64") (eq .Name "Timestamp") -}} +{{else if or (eq .Name "Time32") (eq .Name "Time64") -}} case string: -{{if (eq .Name "Timestamp") -}} - loc, _ := b.dtype.GetZone() - tm, _, err := arrow.TimestampFromStringInLocation(v, b.dtype.Unit, loc) -{{else -}} tm, err := {{.QualifiedType}}FromString(v, b.dtype.Unit) -{{end}} if err != nil { return &json.UnmarshalTypeError{ Value: v, @@ -275,7 +351,7 @@ func (b *{{.Name}}Builder) UnmarshalOne(dec *json.Decoder) error { break } } - + return &json.UnmarshalTypeError{ Value: v, Type: reflect.TypeOf({{.QualifiedType}}(0)), @@ -342,7 +418,7 @@ func (b *{{.Name}}Builder) UnmarshalOne(dec *json.Decoder) error { func (b *{{.Name}}Builder) Unmarshal(dec *json.Decoder) error { for dec.More() { - if err := b.unmarshalOne(dec); err != nil { + if err := b.UnmarshalOne(dec); err != nil { return err } } @@ -360,7 +436,7 @@ func (b *{{.Name}}Builder) UnmarshalJSON(data []byte) error { return fmt.Errorf("binary builder must unpack from json 
array, found %s", delim) } - return b.unmarshal(dec) + return b.Unmarshal(dec) } {{end}} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/numericbuilder.gen_test.go.tmpl b/vendor/github.com/apache/arrow/go/v14/arrow/array/numericbuilder.gen_test.go.tmpl similarity index 78% rename from vendor/github.com/apache/arrow/go/v12/arrow/array/numericbuilder.gen_test.go.tmpl rename to vendor/github.com/apache/arrow/go/v14/arrow/array/numericbuilder.gen_test.go.tmpl index 47bd4294..bc8c9933 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/array/numericbuilder.gen_test.go.tmpl +++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/numericbuilder.gen_test.go.tmpl @@ -19,13 +19,73 @@ package array_test import ( "testing" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/array" - "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/array" + "github.com/apache/arrow/go/v14/arrow/memory" "github.com/stretchr/testify/assert" ) {{range .In}} +func Test{{.Name}}StringRoundTrip(t *testing.T) { + // 1. create array + mem := memory.NewCheckedAllocator(memory.NewGoAllocator()) + defer mem.AssertSize(t, 0) + +{{if .Opt.Parametric -}} +{{ if or (eq .Name "Time64") -}} + dt := &arrow.{{.Name}}Type{Unit: arrow.Microsecond} +{{else -}} + dt := &arrow.{{.Name}}Type{Unit: arrow.Second} +{{end -}} + b := array.New{{.Name}}Builder(mem, dt) +{{else -}} + b := array.New{{.Name}}Builder(mem) +{{end -}} + defer b.Release() + + b.Append(1) + b.Append(2) + b.Append(3) + b.AppendNull() + b.Append(5) + b.Append(6) + b.AppendNull() + b.Append(8) + b.Append(9) + b.Append(10) + + arr := b.NewArray().(*array.{{.Name}}) + defer arr.Release() + + // 2. create array via AppendValueFromString +{{if .Opt.Parametric -}} + b1 := array.New{{.Name}}Builder(mem, dt) +{{else -}} + b1 := array.New{{.Name}}Builder(mem) +{{end -}} + defer b1.Release() + + for i := 0; i < arr.Len(); i++ { + assert.NoError(t, b1.AppendValueFromString(arr.ValueStr(i))) + } + + arr1 := b1.NewArray().(*array.{{.Name}}) + defer arr1.Release() + +{{ if or (eq .Name "Date64") -}} + assert.Exactly(t, arr.Len(), arr1.Len()) + for i := 0; i < arr.Len(); i++ { + assert.Exactly(t, arr.IsValid(i), arr1.IsValid(i)) + assert.Exactly(t, arr.ValueStr(i), arr1.ValueStr(i)) + if arr.IsValid(i) { + assert.Exactly(t, arr.Value(i).ToTime(), arr1.Value(i).ToTime()) + } + } +{{else -}} + assert.True(t, array.Equal(arr, arr1)) +{{end -}} +} + func TestNew{{.Name}}Builder(t *testing.T) { mem := memory.NewCheckedAllocator(memory.NewGoAllocator()) defer mem.AssertSize(t, 0) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/record.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/record.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v12/arrow/array/record.go rename to vendor/github.com/apache/arrow/go/v14/arrow/array/record.go index b00a59d1..0b0fe4c3 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/array/record.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/record.go @@ -22,10 +22,10 @@ import ( "strings" "sync/atomic" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/internal/debug" - "github.com/apache/arrow/go/v12/arrow/memory" - "github.com/goccy/go-json" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow/go/v14/internal/json" ) // RecordReader reads a 
stream of records. diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/string.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/string.go similarity index 92% rename from vendor/github.com/apache/arrow/go/v12/arrow/array/string.go rename to vendor/github.com/apache/arrow/go/v14/arrow/array/string.go index dd215b40..86e27c97 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/array/string.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/string.go @@ -23,9 +23,9 @@ import ( "strings" "unsafe" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/memory" - "github.com/goccy/go-json" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow/go/v14/internal/json" ) // String represents an immutable sequence of variable-length UTF-8 strings. @@ -54,6 +54,13 @@ func (a *String) Value(i int) string { return a.values[a.offsets[i]:a.offsets[i+1]] } +func (a *String) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return a.Value(i) +} + // ValueOffset returns the offset of the value at index i. func (a *String) ValueOffset(i int) int { if i < 0 || i > a.array.data.length { @@ -66,22 +73,27 @@ func (a *String) ValueOffset64(i int) int64 { return int64(a.ValueOffset(i)) } +func (a *String) ValueLen(i int) int { + if i < 0 || i >= a.array.data.length { + panic("arrow/array: index out of range") + } + beg := a.array.data.offset + i + return int(a.offsets[beg+1] - a.offsets[beg]) +} + func (a *String) ValueOffsets() []int32 { beg := a.array.data.offset end := beg + a.array.data.length + 1 return a.offsets[beg:end] } -func (a *String) ValueBytes() (ret []byte) { +func (a *String) ValueBytes() []byte { beg := a.array.data.offset end := beg + a.array.data.length - data := a.values[a.offsets[beg]:a.offsets[end]] - - s := (*reflect.SliceHeader)(unsafe.Pointer(&ret)) - s.Data = (*reflect.StringHeader)(unsafe.Pointer(&data)).Data - s.Len = len(data) - s.Cap = len(data) - return + if a.array.data.buffers[2] != nil { + return a.array.data.buffers[2].Bytes()[a.offsets[beg]:a.offsets[end]] + } + return nil } func (a *String) String() string { @@ -93,7 +105,7 @@ func (a *String) String() string { } switch { case a.IsNull(i): - o.WriteString("(null)") + o.WriteString(NullValueStr) default: fmt.Fprintf(o, "%q", a.Value(i)) } @@ -189,6 +201,13 @@ func (a *LargeString) Value(i int) string { return a.values[a.offsets[i]:a.offsets[i+1]] } +func (a *LargeString) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + return a.Value(i) +} + // ValueOffset returns the offset of the value at index i. 
func (a *LargeString) ValueOffset(i int) int64 { if i < 0 || i > a.array.data.length { @@ -207,16 +226,13 @@ func (a *LargeString) ValueOffsets() []int64 { return a.offsets[beg:end] } -func (a *LargeString) ValueBytes() (ret []byte) { +func (a *LargeString) ValueBytes() []byte { beg := a.array.data.offset end := beg + a.array.data.length - data := a.values[a.offsets[beg]:a.offsets[end]] - - s := (*reflect.SliceHeader)(unsafe.Pointer(&ret)) - s.Data = (*reflect.StringHeader)(unsafe.Pointer(&data)).Data - s.Len = len(data) - s.Cap = len(data) - return + if a.array.data.buffers[2] != nil { + return a.array.data.buffers[2].Bytes()[a.offsets[beg]:a.offsets[end]] + } + return nil } func (a *LargeString) String() string { @@ -228,7 +244,7 @@ func (a *LargeString) String() string { } switch { case a.IsNull(i): - o.WriteString("(null)") + o.WriteString(NullValueStr) default: fmt.Fprintf(o, "%q", a.Value(i)) } @@ -307,7 +323,9 @@ func NewStringBuilder(mem memory.Allocator) *StringBuilder { return b } -func (b *StringBuilder) Type() arrow.DataType { return arrow.BinaryTypes.String } +func (b *StringBuilder) Type() arrow.DataType { + return arrow.BinaryTypes.String +} // Append appends a string to the builder. func (b *StringBuilder) Append(v string) { diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/struct.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/struct.go similarity index 85% rename from vendor/github.com/apache/arrow/go/v12/arrow/array/struct.go rename to vendor/github.com/apache/arrow/go/v14/arrow/array/struct.go index b67722ee..248a25bf 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/array/struct.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/struct.go @@ -23,11 +23,11 @@ import ( "strings" "sync/atomic" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/arrow/internal/debug" - "github.com/apache/arrow/go/v12/arrow/memory" - "github.com/goccy/go-json" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow/go/v14/internal/json" ) // Struct represents an ordered sequence of relative types. @@ -81,6 +81,19 @@ func NewStructData(data arrow.ArrayData) *Struct { func (a *Struct) NumField() int { return len(a.fields) } func (a *Struct) Field(i int) arrow.Array { return a.fields[i] } +// ValueStr returns the string representation (as json) of the value at index i. +func (a *Struct) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + + data, err := json.Marshal(a.GetOneForMarshal(i)) + if err != nil { + panic(err) + } + return string(data) +} + func (a *Struct) String() string { o := new(strings.Builder) o.WriteString("{") @@ -108,14 +121,15 @@ func (a *Struct) String() string { // newStructFieldWithParentValidityMask returns the Interface at fieldIndex // with a nullBitmapBytes adjusted according to the parent struct nullBitmapBytes. // From the docs: -// "When reading the struct array the parent validity bitmap takes priority." +// +// "When reading the struct array the parent validity bitmap takes priority."
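+// A minimal illustration of the masking below, assuming three rows:
+//
+//	parent (struct) validity: 1 0 1   (struct is null at row 1)
+//	child field validity:     1 1 1
+//	masked child validity:    1 0 1   (row 1 cleared via bitutil.ClearBit)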
func (a *Struct) newStructFieldWithParentValidityMask(fieldIndex int) arrow.Array { field := a.Field(fieldIndex) nullBitmapBytes := field.NullBitmapBytes() maskedNullBitmapBytes := make([]byte, len(nullBitmapBytes)) copy(maskedNullBitmapBytes, nullBitmapBytes) for i := 0; i < field.Len(); i++ { - if !a.IsValid(i) { + if a.IsNull(i) { bitutil.ClearBit(maskedNullBitmapBytes, i) } } @@ -245,7 +259,15 @@ func (b *StructBuilder) Release() { } func (b *StructBuilder) Append(v bool) { - b.Reserve(1) + // Intentionally not calling `Reserve` as it will recursively call + // `Reserve` on the child builders, which profiling has shown to be + // very expensive due to iterating over children, dynamic dispatch and all + // other code that gets executed even if previously `Reserve` was called to + // preallocate. Not calling `Reserve` has no downsides as when appending to + // the underlying children they already ensure they have enough space + // reserved. The only thing we must do is ensure we have enough space in + // the validity bitmap of the struct builder itself. + b.builder.reserve(1, b.resizeHelper) b.unsafeAppendBoolToBitmap(v) if !v { for _, f := range b.fields { @@ -261,6 +283,12 @@ func (b *StructBuilder) AppendValues(valids []bool) { func (b *StructBuilder) AppendNull() { b.Append(false) } +func (b *StructBuilder) AppendNulls(n int) { + for i := 0; i < n; i++ { + b.AppendNull() + } +} + func (b *StructBuilder) AppendEmptyValue() { b.Append(true) for _, f := range b.fields { @@ -268,6 +296,12 @@ func (b *StructBuilder) AppendEmptyValue() { } } +func (b *StructBuilder) AppendEmptyValues(n int) { + for i := 0; i < n; i++ { + b.AppendEmptyValue() + } +} + func (b *StructBuilder) unsafeAppendBoolToBitmap(isValid bool) { if isValid { bitutil.SetBit(b.nullBitmap.Bytes(), b.length) @@ -351,6 +385,19 @@ func (b *StructBuilder) newData() (data *Data) { return } +func (b *StructBuilder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + + if !strings.HasPrefix(s, "{") && !strings.HasSuffix(s, "}") { + return fmt.Errorf("%w: invalid string for struct, should be of the form: {*}", arrow.ErrInvalid) + } + dec := json.NewDecoder(strings.NewReader(s)) + return b.UnmarshalOne(dec) +} + func (b *StructBuilder) UnmarshalOne(dec *json.Decoder) error { t, err := dec.Token() if err != nil { diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/table.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/table.go similarity index 87% rename from vendor/github.com/apache/arrow/go/v12/arrow/array/table.go rename to vendor/github.com/apache/arrow/go/v14/arrow/array/table.go index 88362c74..6456992e 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/array/table.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/table.go @@ -20,10 +20,11 @@ import ( "errors" "fmt" "math" + "strings" "sync/atomic" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/internal/debug" ) // NewColumnSlice returns a new zero-copy slice of the column with the indicated @@ -132,9 +133,9 @@ func NewTable(schema *arrow.Schema, cols []arrow.Column, rows int64) *simpleTabl // of slices of arrow.Array. // // Like other NewTable functions this can panic if: -// - len(schema.Fields) != len(data) -// - the total length of each column's array slice (ie: number of rows -// in the column) aren't the same for all columns.
+// - len(schema.Fields) != len(data) +// - the total length of each column's array slice (ie: number of rows +// in the column) aren't the same for all columns. func NewTableFromSlice(schema *arrow.Schema, data [][]arrow.Array) *simpleTable { if len(data) != len(schema.Fields()) { panic("array/table: mismatch in number of columns and data for creating a table") @@ -197,7 +198,27 @@ func NewTableFromRecords(schema *arrow.Schema, recs []arrow.Record) *simpleTable return NewTable(schema, cols, -1) } -func (tbl *simpleTable) Schema() *arrow.Schema { return tbl.schema } +func (tbl *simpleTable) Schema() *arrow.Schema { return tbl.schema } + +func (tbl *simpleTable) AddColumn(i int, field arrow.Field, column arrow.Column) (arrow.Table, error) { + if int64(column.Len()) != tbl.rows { + return nil, fmt.Errorf("arrow/array: column length mismatch: %d != %d", column.Len(), tbl.rows) + } + if field.Type != column.DataType() { + return nil, fmt.Errorf("arrow/array: column type mismatch: %v != %v", field.Type, column.DataType()) + } + newSchema, err := tbl.schema.AddField(i, field) + if err != nil { + return nil, err + } + cols := make([]arrow.Column, len(tbl.cols)+1) + copy(cols[:i], tbl.cols[:i]) + cols[i] = column + copy(cols[i+1:], tbl.cols[i:]) + newTable := NewTable(newSchema, cols, tbl.rows) + return newTable, nil +} + func (tbl *simpleTable) NumRows() int64 { return tbl.rows } func (tbl *simpleTable) NumCols() int64 { return int64(len(tbl.cols)) } func (tbl *simpleTable) Column(i int) *arrow.Column { return &tbl.cols[i] } @@ -237,6 +258,25 @@ func (tbl *simpleTable) Release() { } } +func (tbl *simpleTable) String() string { + o := new(strings.Builder) + o.WriteString(tbl.Schema().String()) + o.WriteString("\n") + + for i := 0; i < int(tbl.NumCols()); i++ { + col := tbl.Column(i) + o.WriteString(col.Field().Name + ": [") + for j, chunk := range col.Data().Chunks() { + if j != 0 { + o.WriteString(", ") + } + o.WriteString(chunk.String()) + } + o.WriteString("]\n") + } + return o.String() +} + // TableReader is a Record iterator over a (possibly chunked) Table type TableReader struct { refCount int64 diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/array/timestamp.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/timestamp.go new file mode 100644 index 00000000..2928b1fc --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/timestamp.go @@ -0,0 +1,381 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
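+
+// This new file gathers the Timestamp array and TimestampBuilder that v12
+// generated together with the other numeric types (see the removals in
+// numericbuilder.gen.go above). A minimal sketch of the zone-aware string
+// form introduced by ValueStr below, assuming a second-resolution UTC type:
+//
+//	ts, _ := arrow.TimestampFromString("2024-01-30T12:39:10Z", arrow.Second)
+//	_ = ts.ToTime(arrow.Second).Format("2006-01-02 15:04:05.999999999Z0700")
+//	// "2024-01-30 12:39:10Z"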
+ +package array + +import ( + "bytes" + "fmt" + "reflect" + "strings" + "sync/atomic" + "time" + + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow/go/v14/internal/json" +) + +// Timestamp represents an immutable sequence of arrow.Timestamp values. +type Timestamp struct { + array + values []arrow.Timestamp +} + +// NewTimestampData creates a new Timestamp from Data. +func NewTimestampData(data arrow.ArrayData) *Timestamp { + a := &Timestamp{} + a.refCount = 1 + a.setData(data.(*Data)) + return a +} + +// Reset resets the array for re-use. +func (a *Timestamp) Reset(data *Data) { + a.setData(data) +} + +// Value returns the value at the specified index. +func (a *Timestamp) Value(i int) arrow.Timestamp { return a.values[i] } + +// TimestampValues returns the values. +func (a *Timestamp) TimestampValues() []arrow.Timestamp { return a.values } + +// String returns a string representation of the array. +func (a *Timestamp) String() string { + o := new(strings.Builder) + o.WriteString("[") + for i, v := range a.values { + if i > 0 { + fmt.Fprintf(o, " ") + } + switch { + case a.IsNull(i): + o.WriteString(NullValueStr) + default: + fmt.Fprintf(o, "%v", v) + } + } + o.WriteString("]") + return o.String() +} + +func (a *Timestamp) setData(data *Data) { + a.array.setData(data) + vals := data.buffers[1] + if vals != nil { + a.values = arrow.TimestampTraits.CastFromBytes(vals.Bytes()) + beg := a.array.data.offset + end := beg + a.array.data.length + a.values = a.values[beg:end] + } +} + +func (a *Timestamp) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + + dt := a.DataType().(*arrow.TimestampType) + z, _ := dt.GetZone() + return a.values[i].ToTime(dt.Unit).In(z).Format("2006-01-02 15:04:05.999999999Z0700") +} + +func (a *Timestamp) GetOneForMarshal(i int) interface{} { + if a.IsNull(i) { + return nil + } + return a.values[i].ToTime(a.DataType().(*arrow.TimestampType).Unit).Format("2006-01-02 15:04:05.999999999") +} + +func (a *Timestamp) MarshalJSON() ([]byte, error) { + vals := make([]interface{}, a.Len()) + for i := range a.values { + vals[i] = a.GetOneForMarshal(i) + } + + return json.Marshal(vals) +} + +func arrayEqualTimestamp(left, right *Timestamp) bool { + for i := 0; i < left.Len(); i++ { + if left.IsNull(i) { + continue + } + if left.Value(i) != right.Value(i) { + return false + } + } + return true +} + +type TimestampBuilder struct { + builder + + dtype *arrow.TimestampType + data *memory.Buffer + rawData []arrow.Timestamp +} + +func NewTimestampBuilder(mem memory.Allocator, dtype *arrow.TimestampType) *TimestampBuilder { + return &TimestampBuilder{builder: builder{refCount: 1, mem: mem}, dtype: dtype} +} + +func (b *TimestampBuilder) Type() arrow.DataType { return b.dtype } + +// Release decreases the reference count by 1. +// When the reference count goes to zero, the memory is freed. 
+func (b *TimestampBuilder) Release() { + debug.Assert(atomic.LoadInt64(&b.refCount) > 0, "too many releases") + + if atomic.AddInt64(&b.refCount, -1) == 0 { + if b.nullBitmap != nil { + b.nullBitmap.Release() + b.nullBitmap = nil + } + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + } +} + +func (b *TimestampBuilder) AppendTime(t time.Time) { + ts, err := arrow.TimestampFromTime(t, b.dtype.Unit) + if err != nil { + panic(err) + } + b.Append(ts) +} + +func (b *TimestampBuilder) Append(v arrow.Timestamp) { + b.Reserve(1) + b.UnsafeAppend(v) +} + +func (b *TimestampBuilder) AppendNull() { + b.Reserve(1) + b.UnsafeAppendBoolToBitmap(false) +} + +func (b *TimestampBuilder) AppendNulls(n int) { + for i := 0; i < n; i++ { + b.AppendNull() + } +} + +func (b *TimestampBuilder) AppendEmptyValue() { + b.Append(0) +} + +func (b *TimestampBuilder) AppendEmptyValues(n int) { + for i := 0; i < n; i++ { + b.AppendEmptyValue() + } +} + +func (b *TimestampBuilder) UnsafeAppend(v arrow.Timestamp) { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + b.rawData[b.length] = v + b.length++ +} + +func (b *TimestampBuilder) UnsafeAppendBoolToBitmap(isValid bool) { + if isValid { + bitutil.SetBit(b.nullBitmap.Bytes(), b.length) + } else { + b.nulls++ + } + b.length++ +} + +// AppendValues will append the values in the v slice. The valid slice determines which values +// in v are valid (not null). The valid slice must either be empty or be equal in length to v. If empty, +// all values in v are appended and considered valid. +func (b *TimestampBuilder) AppendValues(v []arrow.Timestamp, valid []bool) { + if len(v) != len(valid) && len(valid) != 0 { + panic("len(v) != len(valid) && len(valid) != 0") + } + + if len(v) == 0 { + return + } + + b.Reserve(len(v)) + arrow.TimestampTraits.Copy(b.rawData[b.length:], v) + b.builder.unsafeAppendBoolsToBitmap(valid, len(v)) +} + +func (b *TimestampBuilder) init(capacity int) { + b.builder.init(capacity) + + b.data = memory.NewResizableBuffer(b.mem) + bytesN := arrow.TimestampTraits.BytesRequired(capacity) + b.data.Resize(bytesN) + b.rawData = arrow.TimestampTraits.CastFromBytes(b.data.Bytes()) +} + +// Reserve ensures there is enough space for appending n elements +// by checking the capacity and calling Resize if necessary. +func (b *TimestampBuilder) Reserve(n int) { + b.builder.reserve(n, b.Resize) +} + +// Resize adjusts the space allocated by b to n elements. If n is greater than b.Cap(), +// additional memory will be allocated. If n is smaller, the allocated memory may be reduced. +func (b *TimestampBuilder) Resize(n int) { + nBuilder := n + if n < minBuilderCapacity { + n = minBuilderCapacity + } + + if b.capacity == 0 { + b.init(n) + } else { + b.builder.resize(nBuilder, b.init) + b.data.Resize(arrow.TimestampTraits.BytesRequired(n)) + b.rawData = arrow.TimestampTraits.CastFromBytes(b.data.Bytes()) + } +} + +// NewArray creates a Timestamp array from the memory buffers used by the builder and resets the TimestampBuilder +// so it can be used to build a new array. +func (b *TimestampBuilder) NewArray() arrow.Array { + return b.NewTimestampArray() +} + +// NewTimestampArray creates a Timestamp array from the memory buffers used by the builder and resets the TimestampBuilder +// so it can be used to build a new array.
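+// A minimal append-path sketch for the builder above, assuming a
+// millisecond, UTC-zoned type; AppendValueFromString parses in the type's
+// zone via GetZone, and AppendTime converts with arrow.TimestampFromTime:
+//
+//	b := NewTimestampBuilder(memory.NewGoAllocator(), &arrow.TimestampType{Unit: arrow.Millisecond, TimeZone: "UTC"})
+//	defer b.Release()
+//	b.AppendTime(time.Unix(0, 0))
+//	_ = b.AppendValueFromString("1970-01-01 00:00:01")
+//	arr := b.NewTimestampArray()
+//	defer arr.Release()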
+func (b *TimestampBuilder) NewTimestampArray() (a *Timestamp) { + data := b.newData() + a = NewTimestampData(data) + data.Release() + return +} + +func (b *TimestampBuilder) newData() (data *Data) { + bytesRequired := arrow.TimestampTraits.BytesRequired(b.length) + if bytesRequired > 0 && bytesRequired < b.data.Len() { + // trim buffers + b.data.Resize(bytesRequired) + } + data = NewData(b.dtype, b.length, []*memory.Buffer{b.nullBitmap, b.data}, nil, b.nulls, 0) + b.reset() + + if b.data != nil { + b.data.Release() + b.data = nil + b.rawData = nil + } + + return +} + +func (b *TimestampBuilder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + + loc, err := b.dtype.GetZone() + if err != nil { + return err + } + + v, _, err := arrow.TimestampFromStringInLocation(s, b.dtype.Unit, loc) + if err != nil { + b.AppendNull() + return err + } + b.Append(v) + return nil +} + +func (b *TimestampBuilder) UnmarshalOne(dec *json.Decoder) error { + t, err := dec.Token() + if err != nil { + return err + } + + switch v := t.(type) { + case nil: + b.AppendNull() + case string: + loc, _ := b.dtype.GetZone() + tm, _, err := arrow.TimestampFromStringInLocation(v, b.dtype.Unit, loc) + if err != nil { + return &json.UnmarshalTypeError{ + Value: v, + Type: reflect.TypeOf(arrow.Timestamp(0)), + Offset: dec.InputOffset(), + } + } + + b.Append(tm) + case json.Number: + n, err := v.Int64() + if err != nil { + return &json.UnmarshalTypeError{ + Value: v.String(), + Type: reflect.TypeOf(arrow.Timestamp(0)), + Offset: dec.InputOffset(), + } + } + b.Append(arrow.Timestamp(n)) + case float64: + b.Append(arrow.Timestamp(v)) + + default: + return &json.UnmarshalTypeError{ + Value: fmt.Sprint(t), + Type: reflect.TypeOf(arrow.Timestamp(0)), + Offset: dec.InputOffset(), + } + } + + return nil +} + +func (b *TimestampBuilder) Unmarshal(dec *json.Decoder) error { + for dec.More() { + if err := b.UnmarshalOne(dec); err != nil { + return err + } + } + return nil +} + +func (b *TimestampBuilder) UnmarshalJSON(data []byte) error { + dec := json.NewDecoder(bytes.NewReader(data)) + t, err := dec.Token() + if err != nil { + return err + } + + if delim, ok := t.(json.Delim); !ok || delim != '[' { + return fmt.Errorf("binary builder must unpack from json array, found %s", delim) + } + + return b.Unmarshal(dec) +} + +var ( + _ arrow.Array = (*Timestamp)(nil) + _ Builder = (*TimestampBuilder)(nil) +) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/union.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/union.go similarity index 95% rename from vendor/github.com/apache/arrow/go/v12/arrow/array/union.go rename to vendor/github.com/apache/arrow/go/v14/arrow/array/union.go index 19088118..869355ac 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/array/union.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/union.go @@ -25,12 +25,12 @@ import ( "strings" "sync/atomic" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/arrow/internal/debug" - "github.com/apache/arrow/go/v12/arrow/memory" - "github.com/apache/arrow/go/v12/internal/bitutils" - "github.com/goccy/go-json" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow/go/v14/internal/bitutils" + "github.com/apache/arrow/go/v14/internal/json" ) // Union is a convenience 
interface to encompass both Sparse and Dense @@ -343,6 +343,24 @@ func (a *SparseUnion) MarshalJSON() ([]byte, error) { return buf.Bytes(), nil } +func (a *SparseUnion) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + + val := a.GetOneForMarshal(i) + if val == nil { + // child is nil + return NullValueStr + } + + data, err := json.Marshal(val) + if err != nil { + panic(err) + } + return string(data) +} + func (a *SparseUnion) String() string { var b strings.Builder b.WriteByte('[') @@ -576,12 +594,12 @@ func (a *DenseUnion) GetOneForMarshal(i int) interface{} { childID := a.ChildID(i) data := a.Field(childID) - offsets := a.RawValueOffsets() - if data.IsNull(int(offsets[i])) { + offset := int(a.RawValueOffsets()[i]) + if data.IsNull(offset) { return nil } - return []interface{}{typeID, data.GetOneForMarshal(int(offsets[i]))} + return []interface{}{typeID, data.GetOneForMarshal(offset)} } func (a *DenseUnion) MarshalJSON() ([]byte, error) { @@ -601,6 +619,24 @@ func (a *DenseUnion) MarshalJSON() ([]byte, error) { return buf.Bytes(), nil } +func (a *DenseUnion) ValueStr(i int) string { + if a.IsNull(i) { + return NullValueStr + } + + val := a.GetOneForMarshal(i) + if val == nil { + // child is nil + return NullValueStr + } + + data, err := json.Marshal(val) + if err != nil { + panic(err) + } + return string(data) +} + func (a *DenseUnion) String() string { var b strings.Builder b.WriteByte('[') @@ -667,10 +703,6 @@ func arrayDenseUnionApproxEqual(l, r *DenseUnion, opt equalOption) bool { // either Dense or Sparse mode. type UnionBuilder interface { Builder - // AppendNulls appends n nulls to the array - AppendNulls(n int) - // AppendEmptyValues appends n empty zero values to the array - AppendEmptyValues(n int) // AppendChild allows constructing the union type on the fly by making a new // array builder available to the union builder. The type code (index) // of the new child is returned, which should be passed to the Append method @@ -713,8 +745,8 @@ func newUnionBuilder(mem memory.Allocator, children []Builder, typ arrow.UnionTy mode: typ.Mode(), codes: typ.TypeCodes(), children: children, - typeIDtoChildID: make([]int, typ.MaxTypeCode()+1), - typeIDtoBuilder: make([]Builder, typ.MaxTypeCode()+1), + typeIDtoChildID: make([]int, int(typ.MaxTypeCode())+1), // convert to int as int8(127) +1 panics + typeIDtoBuilder: make([]Builder, int(typ.MaxTypeCode())+1), // convert to int as int8(127) +1 panics childFields: make([]arrow.Field, len(children)), typesBuilder: newInt8BufferBuilder(mem), } @@ -987,6 +1019,15 @@ func (b *SparseUnionBuilder) Unmarshal(dec *json.Decoder) error { return nil } +func (b *SparseUnionBuilder) AppendValueFromString(s string) error { + if s == NullValueStr { + b.AppendNull() + return nil + } + dec := json.NewDecoder(strings.NewReader(s)) + return b.UnmarshalOne(dec) +} + func (b *SparseUnionBuilder) UnmarshalOne(dec *json.Decoder) error { t, err := dec.Token() if err != nil { @@ -1088,10 +1129,15 @@ func NewEmptyDenseUnionBuilder(mem memory.Allocator) *DenseUnionBuilder { // children and type codes. 
Builders will be constructed for each child // using the fields in typ func NewDenseUnionBuilder(mem memory.Allocator, typ *arrow.DenseUnionType) *DenseUnionBuilder { - children := make([]Builder, len(typ.Fields())) - for i, f := range typ.Fields() { - children[i] = NewBuilder(mem, f.Type) - defer children[i].Release() + children := make([]Builder, 0, len(typ.Fields())) + defer func() { + for _, child := range children { + child.Release() + } + }() + + for _, f := range typ.Fields() { + children = append(children, NewBuilder(mem, f.Type)) } return NewDenseUnionBuilderWithBuilders(mem, typ, children) } @@ -1232,6 +1278,15 @@ func (b *DenseUnionBuilder) Unmarshal(dec *json.Decoder) error { return nil } +func (d *DenseUnionBuilder) AppendValueFromString(s string) error { + if s == NullValueStr { + d.AppendNull() + return nil + } + dec := json.NewDecoder(strings.NewReader(s)) + return d.UnmarshalOne(dec) +} + func (b *DenseUnionBuilder) UnmarshalOne(dec *json.Decoder) error { t, err := dec.Token() if err != nil { diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/array/util.go b/vendor/github.com/apache/arrow/go/v14/arrow/array/util.go similarity index 91% rename from vendor/github.com/apache/arrow/go/v12/arrow/array/util.go rename to vendor/github.com/apache/arrow/go/v14/arrow/array/util.go index 897bae7a..54d15a80 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/array/util.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/array/util.go @@ -22,11 +22,11 @@ import ( "io" "strings" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/arrow/memory" - "github.com/apache/arrow/go/v12/internal/hashing" - "github.com/goccy/go-json" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow/go/v14/internal/hashing" + "github.com/apache/arrow/go/v14/internal/json" ) func min(a, b int) int { @@ -82,15 +82,16 @@ func WithUseNumber() FromJSONOption { // using the json.Marshal function // // The JSON provided must be formatted in one of two ways: -// Default: the top level of the json must be a list which matches the type specified exactly -// Example: `[1, 2, 3, 4, 5]` for any integer type or `[[...], null, [], .....]` for a List type -// Struct arrays are represented a list of objects: `[{"foo": 1, "bar": "moo"}, {"foo": 5, "bar": "baz"}]` // -// Using WithMultipleDocs: -// If the JSON provided is multiple newline separated json documents, then use this option -// and each json document will be treated as a single row of the array. This is most useful for record batches -// and interacting with other processes that use json. For example: -// `{"col1": 1, "col2": "row1", "col3": ...}\n{"col1": 2, "col2": "row2", "col3": ...}\n.....` +// +// Default: the top level of the json must be a list which matches the type specified exactly +// Example: `[1, 2, 3, 4, 5]` for any integer type or `[[...], null, [], .....]` for a List type +// Struct arrays are represented as a list of objects: `[{"foo": 1, "bar": "moo"}, {"foo": 5, "bar": "baz"}]` +// +// Using WithMultipleDocs: +// If the JSON provided is multiple newline separated json documents, then use this option +// and each json document will be treated as a single row of the array. This is most useful for record batches +// and interacting with other processes that use json. 
For example: +// `{"col1": 1, "col2": "row1", "col3": ...}\n{"col1": 2, "col2": "row2", "col3": ...}\n.....` // // Duration values get formatted upon marshalling as a string consisting of their numeric // value followed by the unit suffix such as "10s" for a value of 10 and unit of Seconds. @@ -100,23 +101,25 @@ func WithUseNumber() FromJSONOption { // to the same values which are output. // // Interval types are marshalled / unmarshalled as follows: -// MonthInterval is marshalled as an object with the format: -// { "months": #} -// DayTimeInterval is marshalled using Go's regular marshalling of structs: -// { "days": #, "milliseconds": # } -// MonthDayNanoInterval values are marshalled the same as DayTime using Go's struct marshalling: -// { "months": #, "days": #, "nanoseconds": # } +// +// MonthInterval is marshalled as an object with the format: +// { "months": #} +// DayTimeInterval is marshalled using Go's regular marshalling of structs: +// { "days": #, "milliseconds": # } +// MonthDayNanoInterval values are marshalled the same as DayTime using Go's struct marshalling: +// { "months": #, "days": #, "nanoseconds": # } // // Times use a format of HH:MM or HH:MM:SS[.zzz] where the fractions of a second cannot // exceed the precision allowed by the time unit, otherwise unmarshalling will error. // -// Dates use YYYY-MM-DD format +// # Dates use YYYY-MM-DD format // // Timestamps use RFC3339Nano format except without a timezone, all of the following are valid: -// YYYY-MM-DD -// YYYY-MM-DD[T]HH -// YYYY-MM-DD[T]HH:MM -// YYYY-MM-DD[T]HH:MM:SS[.zzzzzzzzzz] +// +// YYYY-MM-DD +// YYYY-MM-DD[T]HH +// YYYY-MM-DD[T]HH:MM +// YYYY-MM-DD[T]HH:MM:SS[.zzzzzzzzzz] // // The fractions of a second cannot exceed the precision allowed by the timeunit of the datatype. // diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/arrio/arrio.go b/vendor/github.com/apache/arrow/go/v14/arrow/arrio/arrio.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v12/arrow/arrio/arrio.go rename to vendor/github.com/apache/arrow/go/v14/arrow/arrio/arrio.go index 45d11546..466a93a6 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/arrio/arrio.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/arrio/arrio.go @@ -22,7 +22,7 @@ import ( "errors" "io" - "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v14/arrow" ) // Reader is the interface that wraps the Read method. 
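The FromJSON doc comment reflowed above is dense, so here is a minimal usage sketch against the v14 import paths introduced by this patch. It assumes the `array.FromJSON(mem, dt, r, opts...)` signature described in util.go; the concrete values are purely illustrative and not part of the vendored change.

package main

import (
	"fmt"
	"strings"

	"github.com/apache/arrow/go/v14/arrow"
	"github.com/apache/arrow/go/v14/arrow/array"
	"github.com/apache/arrow/go/v14/arrow/memory"
)

func main() {
	// Default mode: the top level of the JSON must be a list matching the type.
	arr, _, err := array.FromJSON(memory.DefaultAllocator,
		arrow.PrimitiveTypes.Int64, strings.NewReader(`[1, 2, 3, 4, 5]`))
	if err != nil {
		panic(err)
	}
	defer arr.Release()
	fmt.Println(arr) // prints [1 2 3 4 5]
}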
diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/Makefile b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/Makefile similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/bitutil/Makefile rename to vendor/github.com/apache/arrow/go/v14/arrow/bitutil/Makefile diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops.go b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops.go rename to vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_amd64.go b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_amd64.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_amd64.go rename to vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_amd64.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_arm64.go b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_arm64.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_arm64.go rename to vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_arm64.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_avx2_amd64.go b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_avx2_amd64.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_avx2_amd64.go rename to vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_avx2_amd64.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_avx2_amd64.s b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_avx2_amd64.s similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_avx2_amd64.s rename to vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_avx2_amd64.s diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_noasm.go b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_noasm.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_noasm.go rename to vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_noasm.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_ppc64le.go b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_ppc64le.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_ppc64le.go rename to vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_ppc64le.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_s390x.go b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_s390x.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_s390x.go rename to vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_s390x.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_sse4_amd64.go b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_sse4_amd64.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_sse4_amd64.go rename to vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_sse4_amd64.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_sse4_amd64.s 
b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_sse4_amd64.s similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmap_ops_sse4_amd64.s rename to vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmap_ops_sse4_amd64.s diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmaps.go b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmaps.go similarity index 99% rename from vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmaps.go rename to vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmaps.go index d38ba5d4..2e9c0601 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitmaps.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitmaps.go @@ -22,9 +22,9 @@ import ( "math/bits" "unsafe" - "github.com/apache/arrow/go/v12/arrow/endian" - "github.com/apache/arrow/go/v12/arrow/internal/debug" - "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/apache/arrow/go/v14/arrow/endian" + "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/memory" ) // BitmapReader is a simple bitmap reader for a byte slice. diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitutil.go b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitutil.go similarity index 96% rename from vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitutil.go rename to vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitutil.go index 8c9e97cd..a4a1519b 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/bitutil.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/bitutil.go @@ -22,7 +22,7 @@ import ( "reflect" "unsafe" - "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/apache/arrow/go/v14/arrow/memory" ) var ( @@ -150,15 +150,12 @@ const ( ) func bytesToUint64(b []byte) []uint64 { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - var res []uint64 - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len / uint64SizeBytes - s.Cap = h.Cap / uint64SizeBytes + if cap(b) < uint64SizeBytes { + return nil + } - return res + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + return unsafe.Slice((*uint64)(unsafe.Pointer(h.Data)), cap(b)/uint64SizeBytes)[:len(b)/uint64SizeBytes] } var ( diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/endian_default.go b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/endian_default.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/bitutil/endian_default.go rename to vendor/github.com/apache/arrow/go/v14/arrow/bitutil/endian_default.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/bitutil/endian_s390x.go b/vendor/github.com/apache/arrow/go/v14/arrow/bitutil/endian_s390x.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/bitutil/endian_s390x.go rename to vendor/github.com/apache/arrow/go/v14/arrow/bitutil/endian_s390x.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compare.go b/vendor/github.com/apache/arrow/go/v14/arrow/compare.go similarity index 80% rename from vendor/github.com/apache/arrow/go/v12/arrow/compare.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compare.go index 04f9b339..58569b33 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compare.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compare.go @@ -29,7 +29,7 @@ type typeEqualsConfig struct { type TypeEqualOption func(*typeEqualsConfig) // CheckMetadata is an option for TypeEqual that 
allows checking for metadata -// equality besides type equality. It only makes sense for STRUCT type. +// equality besides type equality. It only makes sense for types with metadata. func CheckMetadata() TypeEqualOption { return func(cfg *typeEqualsConfig) { cfg.metadata = true @@ -58,18 +58,40 @@ func TypeEqual(left, right DataType, opts ...TypeEqualOption) bool { if !TypeEqual(l.Elem(), right.(*ListType).Elem(), opts...) { return false } - if cfg.metadata { - return l.elem.Metadata.Equal(right.(*ListType).elem.Metadata) + if cfg.metadata && !l.elem.Metadata.Equal(right.(*ListType).elem.Metadata) { + return false } return l.elem.Nullable == right.(*ListType).elem.Nullable case *FixedSizeListType: if !TypeEqual(l.Elem(), right.(*FixedSizeListType).Elem(), opts...) { return false } - if cfg.metadata { - return l.elem.Metadata.Equal(right.(*FixedSizeListType).elem.Metadata) + if cfg.metadata && !l.elem.Metadata.Equal(right.(*FixedSizeListType).elem.Metadata) { + return false } return l.n == right.(*FixedSizeListType).n && l.elem.Nullable == right.(*FixedSizeListType).elem.Nullable + case *MapType: + if !TypeEqual(l.KeyType(), right.(*MapType).KeyType(), opts...) { + return false + } + if !TypeEqual(l.ItemType(), right.(*MapType).ItemType(), opts...) { + return false + } + if l.KeyField().Nullable != right.(*MapType).KeyField().Nullable { + return false + } + if l.ItemField().Nullable != right.(*MapType).ItemField().Nullable { + return false + } + if cfg.metadata { + if !l.KeyField().Metadata.Equal(right.(*MapType).KeyField().Metadata) { + return false + } + if !l.ItemField().Metadata.Equal(right.(*MapType).ItemField().Metadata) { + return false + } + } + return true case *StructType: r := right.(*StructType) switch { diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/arithmetic.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/arithmetic.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/arithmetic.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/arithmetic.go index 45d8d773..2fb95f06 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/arithmetic.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/arithmetic.go @@ -22,12 +22,12 @@ import ( "context" "fmt" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/compute/internal/exec" - "github.com/apache/arrow/go/v12/arrow/compute/internal/kernels" - "github.com/apache/arrow/go/v12/arrow/decimal128" - "github.com/apache/arrow/go/v12/arrow/decimal256" - "github.com/apache/arrow/go/v12/arrow/scalar" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/compute/exec" + "github.com/apache/arrow/go/v14/arrow/compute/internal/kernels" + "github.com/apache/arrow/go/v14/arrow/decimal128" + "github.com/apache/arrow/go/v14/arrow/decimal256" + "github.com/apache/arrow/go/v14/arrow/scalar" ) type ( @@ -627,6 +627,8 @@ func RegisterScalarArithmetic(reg FunctionRegistry) { }{ {"sub_unchecked", kernels.OpSub, decPromoteAdd, subUncheckedDoc}, {"sub", kernels.OpSubChecked, decPromoteAdd, subDoc}, + {"subtract_unchecked", kernels.OpSub, decPromoteAdd, subUncheckedDoc}, + {"subtract", kernels.OpSubChecked, decPromoteAdd, subDoc}, } for _, o := range ops { @@ -909,7 +911,7 @@ func RegisterScalarArithmetic(reg FunctionRegistry) { reg.AddFunction(fn, false) } - fn = &arithmeticFunction{*NewScalarFunction("bit_wise_not", Unary(), EmptyFuncDoc), decPromoteNone} + fn = 
&arithmeticFunction{*NewScalarFunction("bit_wise_not", Unary(), bitWiseNotDoc), decPromoteNone} for _, k := range kernels.GetBitwiseUnaryKernels() { if err := fn.AddKernel(k); err != nil { panic(err) @@ -1087,10 +1089,9 @@ func Negate(ctx context.Context, opts ArithmeticOptions, input Datum) (Datum, er // Sign returns -1, 0, or 1 depending on the sign of each element in the // input. For x in the input: // -// if x > 0: 1 -// if x < 0: -1 -// if x == 0: 0 -// +// if x > 0: 1 +// if x < 0: -1 +// if x == 0: 0 func Sign(ctx context.Context, input Datum) (Datum, error) { return CallFunction(ctx, "sign", nil, input) } diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/cast.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/cast.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/cast.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/cast.go index 67504349..8b720a2b 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/cast.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/cast.go @@ -23,11 +23,11 @@ import ( "fmt" "sync" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/array" - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/arrow/compute/internal/exec" - "github.com/apache/arrow/go/v12/arrow/compute/internal/kernels" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/array" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/arrow/compute/exec" + "github.com/apache/arrow/go/v14/arrow/compute/internal/kernels" ) var ( diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/datum.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/datum.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/datum.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/datum.go index dbb60547..1d3c1b4d 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/datum.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/datum.go @@ -21,9 +21,9 @@ package compute import ( "fmt" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/array" - "github.com/apache/arrow/go/v12/arrow/scalar" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/array" + "github.com/apache/arrow/go/v14/arrow/scalar" ) //go:generate go run golang.org/x/tools/cmd/stringer -type=DatumKind -linecomment diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/datumkind_string.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/datumkind_string.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/datumkind_string.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/datumkind_string.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/doc.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/doc.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/doc.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/doc.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/exec.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/exec.go similarity index 95% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/exec.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/exec.go index 89fa1641..84e3310c 100644 --- 
a/vendor/github.com/apache/arrow/go/v12/arrow/compute/exec.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/exec.go @@ -22,9 +22,9 @@ import ( "context" "fmt" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/compute/internal/exec" - "github.com/apache/arrow/go/v12/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/compute/exec" + "github.com/apache/arrow/go/v14/arrow/internal/debug" ) func haveChunkedArray(values []Datum) bool { @@ -77,20 +77,20 @@ func execInternal(ctx context.Context, fn Function, opts FunctionOptions, passed var ( k exec.Kernel - executor kernelExecutor + executor KernelExecutor ) switch fn.Kind() { case FuncScalar: executor = scalarExecPool.Get().(*scalarExecutor) defer func() { - executor.clear() + executor.Clear() scalarExecPool.Put(executor.(*scalarExecutor)) }() case FuncVector: executor = vectorExecPool.Get().(*vectorExecutor) defer func() { - executor.clear() + executor.Clear() vectorExecPool.Put(executor.(*vectorExecutor)) }() default: diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/exec/hash_util.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/exec/hash_util.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/exec/hash_util.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/exec/hash_util.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/exec/kernel.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/exec/kernel.go similarity index 99% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/exec/kernel.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/exec/kernel.go index 92d40910..327426da 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/exec/kernel.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/exec/kernel.go @@ -24,10 +24,10 @@ import ( "hash/maphash" "strings" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/arrow/internal/debug" - "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/memory" "golang.org/x/exp/slices" ) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/exec/span.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/exec/span.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/exec/span.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/exec/span.go index 4221d6c0..b6d240fa 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/exec/span.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/exec/span.go @@ -23,11 +23,11 @@ import ( "sync/atomic" "unsafe" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/array" - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/arrow/memory" - "github.com/apache/arrow/go/v12/arrow/scalar" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/array" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow/go/v14/arrow/scalar" ) // BufferSpan is a lightweight Buffer holder for ArraySpans that does not diff 
--git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/exec/utils.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/exec/utils.go similarity index 95% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/exec/utils.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/exec/utils.go index 0e4cdf65..6d83b75d 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/exec/utils.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/exec/utils.go @@ -25,13 +25,13 @@ import ( "sync/atomic" "unsafe" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/array" - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/arrow/decimal128" - "github.com/apache/arrow/go/v12/arrow/decimal256" - "github.com/apache/arrow/go/v12/arrow/float16" - "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/array" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/arrow/decimal128" + "github.com/apache/arrow/go/v14/arrow/decimal256" + "github.com/apache/arrow/go/v14/arrow/float16" + "github.com/apache/arrow/go/v14/arrow/memory" "golang.org/x/exp/constraints" "golang.org/x/exp/slices" ) @@ -239,21 +239,21 @@ func RechunkArraysConsistently(groups [][]arrow.Array) [][]arrow.Array { } rechunked := make([][]arrow.Array, len(groups)) - offsets := make([]int, len(groups)) + offsets := make([]int64, len(groups)) // scan all array vectors at once, rechunking along the way var start int64 for start < int64(totalLen) { // first compute max possible length for next chunk - chunkLength := math.MaxInt64 + var chunkLength int64 = math.MaxInt64 for i, g := range groups { offset := offsets[i] // skip any done arrays including 0-length - for offset == g[0].Len() { + for offset == int64(g[0].Len()) { g = g[1:] offset = 0 } arr := g[0] - chunkLength = Min(chunkLength, arr.Len()-offset) + chunkLength = Min(chunkLength, int64(arr.Len())-offset) offsets[i] = offset groups[i] = g @@ -263,7 +263,7 @@ func RechunkArraysConsistently(groups [][]arrow.Array) [][]arrow.Array { for i, g := range groups { offset := offsets[i] arr := g[0] - if offset == 0 && arr.Len() == chunkLength { + if offset == 0 && int64(arr.Len()) == chunkLength { // slice spans entire array arr.Retain() rechunked[i] = append(rechunked[i], arr) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/executor.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/executor.go similarity index 97% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/executor.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/executor.go index 96cbdf40..6da7ed12 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/executor.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/executor.go @@ -25,14 +25,14 @@ import ( "runtime" "sync" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/array" - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/arrow/compute/internal/exec" - "github.com/apache/arrow/go/v12/arrow/internal" - "github.com/apache/arrow/go/v12/arrow/internal/debug" - "github.com/apache/arrow/go/v12/arrow/memory" - "github.com/apache/arrow/go/v12/arrow/scalar" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/array" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/arrow/compute/exec" 
+ "github.com/apache/arrow/go/v14/arrow/internal" + "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow/go/v14/arrow/scalar" ) // ExecCtx holds simple contextual information for execution @@ -88,11 +88,11 @@ var ( // then be modified to set into a context. // // The default exec context uses the following values: -// - ChunkSize = DefaultMaxChunkSize (MaxInt64) -// - PreallocContiguous = true -// - Registry = GetFunctionRegistry() -// - ExecChannelSize = 10 -// - NumParallel = runtime.NumCPU() +// - ChunkSize = DefaultMaxChunkSize (MaxInt64) +// - PreallocContiguous = true +// - Registry = GetFunctionRegistry() +// - ExecChannelSize = 10 +// - NumParallel = runtime.NumCPU() func DefaultExecCtx() ExecCtx { return defaultExecCtx } func init() { @@ -131,7 +131,7 @@ type ExecBatch struct { Values []Datum // Guarantee is a predicate Expression guaranteed to evaluate to true for // all rows in this batch. - Guarantee Expression + // Guarantee Expression // Len is the semantic length of this ExecBatch. When the values are // all scalars, the length should be set to 1 for non-aggregate kernels. // Otherwise the length is taken from the array values. Aggregate kernels @@ -384,9 +384,9 @@ func inferBatchLength(values []Datum) (length int64, allSame bool) { return } -// kernelExecutor is the interface for all executors to initialize and +// KernelExecutor is the interface for all executors to initialize and // call kernel execution functions on batches. -type kernelExecutor interface { +type KernelExecutor interface { // Init must be called *after* the kernel's init method and any // KernelState must be set into the KernelCtx *before* calling // this Init method. This is to faciliate the case where @@ -407,8 +407,8 @@ type kernelExecutor interface { // CheckResultType checks the actual result type against the resolved // output type. If the types don't match an error is returned CheckResultType(out Datum) error - - clear() + // Clear resets the state in the executor so that it can be reused. + Clear() } // the base implementation for executing non-aggregate kernels. 
@@ -422,7 +422,7 @@ type nonAggExecImpl struct { preallocValidity bool } -func (e *nonAggExecImpl) clear() { +func (e *nonAggExecImpl) Clear() { e.ctx, e.kernel, e.outType = nil, nil, nil if e.dataPrealloc != nil { e.dataPrealloc = e.dataPrealloc[:0] @@ -479,6 +479,8 @@ func (e *nonAggExecImpl) CheckResultType(out Datum) error { type spanIterator func() (exec.ExecSpan, int64, bool) +func NewScalarExecutor() KernelExecutor { return &scalarExecutor{} } + type scalarExecutor struct { nonAggExecImpl @@ -1005,6 +1007,9 @@ func (v *vectorExecutor) WrapResults(ctx context.Context, out <-chan Datum, hasC case <-ctx.Done(): return nil case output = <-out: + if output == nil { + return nil + } // if the inputs contained at least one chunked array // then we want to return chunked output if hasChunked { diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/expression.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/expression.go similarity index 96% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/expression.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/expression.go index c29c6221..9f20c970 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/expression.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/expression.go @@ -28,23 +28,26 @@ import ( "strconv" "strings" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/array" - "github.com/apache/arrow/go/v12/arrow/compute/internal/exec" - "github.com/apache/arrow/go/v12/arrow/compute/internal/kernels" - "github.com/apache/arrow/go/v12/arrow/internal/debug" - "github.com/apache/arrow/go/v12/arrow/ipc" - "github.com/apache/arrow/go/v12/arrow/memory" - "github.com/apache/arrow/go/v12/arrow/scalar" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/array" + "github.com/apache/arrow/go/v14/arrow/compute/exec" + "github.com/apache/arrow/go/v14/arrow/compute/internal/kernels" + "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/ipc" + "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow/go/v14/arrow/scalar" ) var hashSeed = maphash.MakeSeed() // Expression is an interface for mapping one datum to another. An expression // is one of: +// // A literal Datum -// A reference to a single (potentially nested) field of an input Datum +// A reference to a single (potentially nested) field of an input Datum // A call to a compute function, with arguments specified by other Expressions +// +// Deprecated: use substrait-go expressions instead. type Expression interface { fmt.Stringer // IsBound returns true if this expression has been bound to a particular @@ -95,6 +98,8 @@ func printDatum(datum Datum) string { // Literal is an expression denoting a literal Datum which could be any value // as a scalar, an array, or so on. +// +// Deprecated: use substrait-go expressions Literal instead. type Literal struct { Literal Datum } @@ -144,6 +149,8 @@ func (l *Literal) Release() { // Parameter represents a field reference and needs to be bound in order to determine // its type and shape. +// +// Deprecated: use substrait-go field references instead. type Parameter struct { ref *FieldRef @@ -265,6 +272,8 @@ func optionsToString(fn FunctionOptions) string { // Call is a function call with specific arguments which are themselves other // expressions. A call can also have options that are specific to the function // in question. It must be bound to determine the shape and type. 
+// +// Deprecated: use substrait-go expression functions instead. type Call struct { funcName string args []Expression diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/fieldref.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/fieldref.go similarity index 99% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/fieldref.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/fieldref.go index be70b37a..ee6f3994 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/fieldref.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/fieldref.go @@ -27,8 +27,8 @@ import ( "unicode" "unsafe" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/array" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/array" ) var ( diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/funckind_string.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/funckind_string.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/funckind_string.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/funckind_string.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/functions.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/functions.go similarity index 99% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/functions.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/functions.go index d13bdaa6..a1905f91 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/functions.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/functions.go @@ -23,8 +23,8 @@ import ( "fmt" "strings" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/compute/internal/exec" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/compute/exec" ) type Function interface { diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/Makefile b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/Makefile similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/Makefile rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/Makefile diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/base_arithmetic.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/base_arithmetic.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/base_arithmetic.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/base_arithmetic.go index cdaadb47..67e80af7 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/base_arithmetic.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/base_arithmetic.go @@ -24,11 +24,11 @@ import ( "math/bits" "github.com/JohnCGriffin/overflow" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/compute/internal/exec" - "github.com/apache/arrow/go/v12/arrow/decimal128" - "github.com/apache/arrow/go/v12/arrow/decimal256" - "github.com/apache/arrow/go/v12/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/compute/exec" + "github.com/apache/arrow/go/v14/arrow/decimal128" + "github.com/apache/arrow/go/v14/arrow/decimal256" + "github.com/apache/arrow/go/v14/arrow/internal/debug" 
"golang.org/x/exp/constraints" ) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/base_arithmetic_amd64.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/base_arithmetic_amd64.go similarity index 97% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/base_arithmetic_amd64.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/base_arithmetic_amd64.go index 22a5dedd..0e78e6c9 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/base_arithmetic_amd64.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/base_arithmetic_amd64.go @@ -21,8 +21,8 @@ package kernels import ( "unsafe" - "github.com/apache/arrow/go/v12/arrow/compute/internal/exec" - "github.com/apache/arrow/go/v12/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/compute/exec" + "github.com/apache/arrow/go/v14/arrow/internal/debug" "golang.org/x/exp/constraints" "golang.org/x/sys/cpu" ) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/base_arithmetic_avx2_amd64.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/base_arithmetic_avx2_amd64.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/base_arithmetic_avx2_amd64.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/base_arithmetic_avx2_amd64.go index 79bddf02..29cce783 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/base_arithmetic_avx2_amd64.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/base_arithmetic_avx2_amd64.go @@ -21,7 +21,7 @@ package kernels import ( "unsafe" - "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v14/arrow" ) //go:noescape diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/base_arithmetic_avx2_amd64.s b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/base_arithmetic_avx2_amd64.s similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/base_arithmetic_avx2_amd64.s rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/base_arithmetic_avx2_amd64.s diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/base_arithmetic_sse4_amd64.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/base_arithmetic_sse4_amd64.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/base_arithmetic_sse4_amd64.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/base_arithmetic_sse4_amd64.go index bcc9af02..e9b03551 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/base_arithmetic_sse4_amd64.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/base_arithmetic_sse4_amd64.go @@ -21,7 +21,7 @@ package kernels import ( "unsafe" - "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v14/arrow" ) //go:noescape diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/base_arithmetic_sse4_amd64.s b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/base_arithmetic_sse4_amd64.s similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/base_arithmetic_sse4_amd64.s rename to 
vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/base_arithmetic_sse4_amd64.s diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/basic_arithmetic_noasm.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/basic_arithmetic_noasm.go similarity index 95% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/basic_arithmetic_noasm.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/basic_arithmetic_noasm.go index c665e14c..4f160a14 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/basic_arithmetic_noasm.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/basic_arithmetic_noasm.go @@ -19,7 +19,7 @@ package kernels import ( - "github.com/apache/arrow/go/v12/arrow/compute/internal/exec" + "github.com/apache/arrow/go/v14/arrow/compute/exec" "golang.org/x/exp/constraints" ) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/boolean_cast.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/boolean_cast.go similarity index 95% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/boolean_cast.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/boolean_cast.go index cf7f577a..18d04c84 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/boolean_cast.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/boolean_cast.go @@ -22,9 +22,9 @@ import ( "strconv" "unsafe" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/arrow/compute/internal/exec" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/arrow/compute/exec" ) func isNonZero[T exec.FixedWidthTypes](ctx *exec.KernelCtx, in []T, out []byte) error { diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/cast.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast.go similarity index 96% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/cast.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast.go index 4a2b167b..5a71206b 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/cast.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast.go @@ -19,9 +19,9 @@ package kernels import ( - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/array" - "github.com/apache/arrow/go/v12/arrow/compute/internal/exec" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/array" + "github.com/apache/arrow/go/v14/arrow/compute/exec" ) type CastOptions struct { @@ -46,6 +46,7 @@ type CastState = CastOptions // This can be used for casting a type to itself, or for casts between // equivalent representations such as Int32 and Date32. 
func ZeroCopyCastExec(_ *exec.KernelCtx, batch *exec.ExecSpan, out *exec.ExecResult) error { + out.Release() dt := out.Type *out = batch.Values[0].Array out.Type = dt diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/cast_numeric.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_numeric.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/cast_numeric.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_numeric.go index 9a28b5ab..4e5c5c1d 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/cast_numeric.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_numeric.go @@ -21,7 +21,7 @@ package kernels import ( "unsafe" - "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v14/arrow" ) var castNumericUnsafe func(itype, otype arrow.Type, in, out []byte, len int) = castNumericGo diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/cast_numeric_amd64.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_numeric_amd64.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/cast_numeric_amd64.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_numeric_amd64.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/cast_numeric_avx2_amd64.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_numeric_avx2_amd64.go similarity index 96% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/cast_numeric_avx2_amd64.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_numeric_avx2_amd64.go index 2dfa4233..6b28441e 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/cast_numeric_avx2_amd64.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_numeric_avx2_amd64.go @@ -21,7 +21,7 @@ package kernels import ( "unsafe" - "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v14/arrow" ) //go:noescape diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/cast_numeric_avx2_amd64.s b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_numeric_avx2_amd64.s similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/cast_numeric_avx2_amd64.s rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_numeric_avx2_amd64.s diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/cast_numeric_neon_arm64.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_numeric_neon_arm64.go similarity index 96% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/cast_numeric_neon_arm64.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_numeric_neon_arm64.go index f91576f3..d53a4486 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/cast_numeric_neon_arm64.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_numeric_neon_arm64.go @@ -21,7 +21,7 @@ package kernels import ( "unsafe" - "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v14/arrow" "golang.org/x/sys/cpu" ) diff --git 
a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/cast_numeric_neon_arm64.s b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_numeric_neon_arm64.s similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/cast_numeric_neon_arm64.s rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_numeric_neon_arm64.s diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/cast_numeric_sse4_amd64.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_numeric_sse4_amd64.go similarity index 96% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/cast_numeric_sse4_amd64.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_numeric_sse4_amd64.go index 55269651..1cbea033 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/cast_numeric_sse4_amd64.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_numeric_sse4_amd64.go @@ -21,7 +21,7 @@ package kernels import ( "unsafe" - "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v14/arrow" ) //go:noescape diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/cast_numeric_sse4_amd64.s b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_numeric_sse4_amd64.s similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/cast_numeric_sse4_amd64.s rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_numeric_sse4_amd64.s diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/cast_temporal.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_temporal.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/cast_temporal.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_temporal.go index 81c03ad5..82fce1e3 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/cast_temporal.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/cast_temporal.go @@ -24,10 +24,10 @@ import ( "time" "unsafe" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/arrow/compute/internal/exec" - "github.com/apache/arrow/go/v12/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/arrow/compute/exec" + "github.com/apache/arrow/go/v14/arrow/internal/debug" ) const millisecondsInDay = 86400000 diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/compareoperator_string.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/compareoperator_string.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/compareoperator_string.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/compareoperator_string.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/constant_factor.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/constant_factor.go similarity index 100% rename from 
vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/constant_factor.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/constant_factor.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/constant_factor_amd64.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/constant_factor_amd64.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/constant_factor_amd64.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/constant_factor_amd64.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/constant_factor_avx2_amd64.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/constant_factor_avx2_amd64.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/constant_factor_avx2_amd64.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/constant_factor_avx2_amd64.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/constant_factor_avx2_amd64.s b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/constant_factor_avx2_amd64.s similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/constant_factor_avx2_amd64.s rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/constant_factor_avx2_amd64.s diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/constant_factor_sse4_amd64.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/constant_factor_sse4_amd64.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/constant_factor_sse4_amd64.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/constant_factor_sse4_amd64.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/constant_factor_sse4_amd64.s b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/constant_factor_sse4_amd64.s similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/constant_factor_sse4_amd64.s rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/constant_factor_sse4_amd64.s diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/doc.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/doc.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/doc.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/doc.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/helpers.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/helpers.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/helpers.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/helpers.go index ca1ee61c..ed25071c 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/helpers.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/helpers.go @@ -22,13 +22,13 @@ import ( "fmt" "unsafe" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/bitutil" - 
"github.com/apache/arrow/go/v12/arrow/compute/internal/exec" - "github.com/apache/arrow/go/v12/arrow/internal/debug" - "github.com/apache/arrow/go/v12/arrow/memory" - "github.com/apache/arrow/go/v12/arrow/scalar" - "github.com/apache/arrow/go/v12/internal/bitutils" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/arrow/compute/exec" + "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow/go/v14/arrow/scalar" + "github.com/apache/arrow/go/v14/internal/bitutils" "golang.org/x/exp/constraints" ) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/numeric_cast.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/numeric_cast.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/numeric_cast.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/numeric_cast.go index 5a55cc62..8e535075 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/numeric_cast.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/numeric_cast.go @@ -23,13 +23,13 @@ import ( "strconv" "unsafe" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/arrow/compute/internal/exec" - "github.com/apache/arrow/go/v12/arrow/decimal128" - "github.com/apache/arrow/go/v12/arrow/decimal256" - "github.com/apache/arrow/go/v12/arrow/internal/debug" - "github.com/apache/arrow/go/v12/internal/bitutils" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/arrow/compute/exec" + "github.com/apache/arrow/go/v14/arrow/decimal128" + "github.com/apache/arrow/go/v14/arrow/decimal256" + "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow/go/v14/internal/bitutils" "golang.org/x/exp/constraints" ) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/rounding.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/rounding.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/rounding.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/rounding.go index 2bcfc567..2f58a9fa 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/rounding.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/rounding.go @@ -22,11 +22,11 @@ import ( "fmt" "math" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/compute/internal/exec" - "github.com/apache/arrow/go/v12/arrow/decimal128" - "github.com/apache/arrow/go/v12/arrow/decimal256" - "github.com/apache/arrow/go/v12/arrow/scalar" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/compute/exec" + "github.com/apache/arrow/go/v14/arrow/decimal128" + "github.com/apache/arrow/go/v14/arrow/decimal256" + "github.com/apache/arrow/go/v14/arrow/scalar" "golang.org/x/exp/constraints" ) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/roundmode_string.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/roundmode_string.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/roundmode_string.go rename 
to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/roundmode_string.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/scalar_arithmetic.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_arithmetic.go similarity index 97% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/scalar_arithmetic.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_arithmetic.go index 06744f92..9cb32ae6 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/scalar_arithmetic.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_arithmetic.go @@ -22,13 +22,13 @@ import ( "fmt" "time" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/arrow/compute/internal/exec" - "github.com/apache/arrow/go/v12/arrow/decimal128" - "github.com/apache/arrow/go/v12/arrow/decimal256" - "github.com/apache/arrow/go/v12/arrow/internal/debug" - "github.com/apache/arrow/go/v12/arrow/scalar" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/arrow/compute/exec" + "github.com/apache/arrow/go/v14/arrow/decimal128" + "github.com/apache/arrow/go/v14/arrow/decimal256" + "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/scalar" ) // scalar kernel that ignores (assumed all-null inputs) and returns null diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/scalar_boolean.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_boolean.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/scalar_boolean.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_boolean.go index db6636a1..812f4ad1 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/scalar_boolean.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_boolean.go @@ -19,9 +19,9 @@ package kernels import ( - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/arrow/compute/internal/exec" - "github.com/apache/arrow/go/v12/arrow/scalar" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/arrow/compute/exec" + "github.com/apache/arrow/go/v14/arrow/scalar" ) type computeWordFN func(leftTrue, leftFalse, rightTrue, rightFalse uint64) (outValid, outData uint64) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/scalar_comparison_amd64.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_comparison_amd64.go similarity index 96% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/scalar_comparison_amd64.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_comparison_amd64.go index bf246825..585d1bff 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/scalar_comparison_amd64.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_comparison_amd64.go @@ -21,8 +21,8 @@ package kernels import ( "unsafe" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/compute/internal/exec" + "github.com/apache/arrow/go/v14/arrow" + 
"github.com/apache/arrow/go/v14/arrow/compute/exec" "golang.org/x/sys/cpu" ) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/scalar_comparison_avx2_amd64.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_comparison_avx2_amd64.go similarity index 99% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/scalar_comparison_avx2_amd64.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_comparison_avx2_amd64.go index c9a951b1..86817905 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/scalar_comparison_avx2_amd64.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_comparison_avx2_amd64.go @@ -21,7 +21,7 @@ package kernels import ( "unsafe" - "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v14/arrow" ) //go:noescape diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/scalar_comparison_avx2_amd64.s b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_comparison_avx2_amd64.s similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/scalar_comparison_avx2_amd64.s rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_comparison_avx2_amd64.s diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/scalar_comparison_noasm.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_comparison_noasm.go similarity index 93% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/scalar_comparison_noasm.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_comparison_noasm.go index 09308227..56abad42 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/scalar_comparison_noasm.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_comparison_noasm.go @@ -18,7 +18,7 @@ package kernels -import "github.com/apache/arrow/go/v12/arrow/compute/internal/exec" +import "github.com/apache/arrow/go/v14/arrow/compute/exec" func genCompareKernel[T exec.NumericTypes](op CompareOperator) *CompareData { return genGoCompareKernel(getCmpOp[T](op)) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/scalar_comparison_sse4_amd64.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_comparison_sse4_amd64.go similarity index 99% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/scalar_comparison_sse4_amd64.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_comparison_sse4_amd64.go index c9af1063..7cf96a41 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/scalar_comparison_sse4_amd64.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_comparison_sse4_amd64.go @@ -21,7 +21,7 @@ package kernels import ( "unsafe" - "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v14/arrow" ) //go:noescape diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/scalar_comparison_sse4_amd64.s b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_comparison_sse4_amd64.s similarity index 100% rename from 
vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/scalar_comparison_sse4_amd64.s rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_comparison_sse4_amd64.s diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/scalar_comparisons.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_comparisons.go similarity index 97% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/scalar_comparisons.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_comparisons.go index ccf9d948..8a957eaf 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/scalar_comparisons.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/scalar_comparisons.go @@ -23,14 +23,14 @@ import ( "fmt" "unsafe" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/arrow/compute/internal/exec" - "github.com/apache/arrow/go/v12/arrow/decimal128" - "github.com/apache/arrow/go/v12/arrow/decimal256" - "github.com/apache/arrow/go/v12/arrow/internal/debug" - "github.com/apache/arrow/go/v12/arrow/scalar" - "github.com/apache/arrow/go/v12/internal/bitutils" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/arrow/compute/exec" + "github.com/apache/arrow/go/v14/arrow/decimal128" + "github.com/apache/arrow/go/v14/arrow/decimal256" + "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/scalar" + "github.com/apache/arrow/go/v14/internal/bitutils" ) type binaryKernel func(left, right, out []byte, offset int) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/string_casts.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/string_casts.go similarity index 97% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/string_casts.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/string_casts.go index f632a726..30705146 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/string_casts.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/string_casts.go @@ -23,12 +23,12 @@ import ( "strconv" "unicode/utf8" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/array" - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/arrow/compute/internal/exec" - "github.com/apache/arrow/go/v12/arrow/float16" - "github.com/apache/arrow/go/v12/internal/bitutils" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/array" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/arrow/compute/exec" + "github.com/apache/arrow/go/v14/arrow/float16" + "github.com/apache/arrow/go/v14/internal/bitutils" ) func validateUtf8Fsb(input *exec.ArraySpan) error { diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/types.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/types.go similarity index 94% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/types.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/types.go index d1fdfeb6..2788fb70 100644 --- 
a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/types.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/types.go @@ -21,10 +21,10 @@ package kernels import ( "fmt" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/compute/internal/exec" - "github.com/apache/arrow/go/v12/arrow/internal/debug" - "github.com/apache/arrow/go/v12/arrow/scalar" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/compute/exec" + "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/scalar" ) var ( diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/vector_hash.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/vector_hash.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/vector_hash.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/vector_hash.go index a0e452c3..e0ede826 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/vector_hash.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/vector_hash.go @@ -21,13 +21,13 @@ package kernels import ( "fmt" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/array" - "github.com/apache/arrow/go/v12/arrow/compute/internal/exec" - "github.com/apache/arrow/go/v12/arrow/internal/debug" - "github.com/apache/arrow/go/v12/arrow/memory" - "github.com/apache/arrow/go/v12/internal/bitutils" - "github.com/apache/arrow/go/v12/internal/hashing" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/array" + "github.com/apache/arrow/go/v14/arrow/compute/exec" + "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow/go/v14/internal/bitutils" + "github.com/apache/arrow/go/v14/internal/hashing" ) type HashState interface { diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/vector_run_end_encode.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/vector_run_end_encode.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/vector_run_end_encode.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/vector_run_end_encode.go index 1d5419f9..e073ff1f 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/vector_run_end_encode.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/vector_run_end_encode.go @@ -24,14 +24,14 @@ import ( "sort" "unsafe" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/arrow/compute/internal/exec" - "github.com/apache/arrow/go/v12/arrow/decimal128" - "github.com/apache/arrow/go/v12/arrow/decimal256" - "github.com/apache/arrow/go/v12/arrow/float16" - "github.com/apache/arrow/go/v12/arrow/internal/debug" - "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/arrow/compute/exec" + "github.com/apache/arrow/go/v14/arrow/decimal128" + "github.com/apache/arrow/go/v14/arrow/decimal256" + "github.com/apache/arrow/go/v14/arrow/float16" + "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/memory" ) 
type RunEndEncodeState struct { diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/vector_selection.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/vector_selection.go similarity index 99% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/vector_selection.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/vector_selection.go index c10f6e6a..c7a902bd 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/internal/kernels/vector_selection.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/internal/kernels/vector_selection.go @@ -22,13 +22,13 @@ import ( "fmt" "math" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/array" - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/arrow/compute/internal/exec" - "github.com/apache/arrow/go/v12/arrow/internal/debug" - "github.com/apache/arrow/go/v12/arrow/memory" - "github.com/apache/arrow/go/v12/internal/bitutils" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/array" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/arrow/compute/exec" + "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/memory" + "github.com/apache/arrow/go/v14/internal/bitutils" ) type NullSelectionBehavior int8 diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/registry.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/registry.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/registry.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/registry.go index 3fbb12d6..4f1c435f 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/registry.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/registry.go @@ -21,7 +21,7 @@ package compute import ( "sync" - "github.com/apache/arrow/go/v12/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/internal/debug" "golang.org/x/exp/maps" "golang.org/x/exp/slices" ) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/scalar_bool.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/scalar_bool.go similarity index 96% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/scalar_bool.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/scalar_bool.go index 6a00757e..49c74568 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/scalar_bool.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/scalar_bool.go @@ -21,9 +21,9 @@ package compute import ( "fmt" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/compute/internal/exec" - "github.com/apache/arrow/go/v12/arrow/compute/internal/kernels" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/compute/exec" + "github.com/apache/arrow/go/v14/arrow/compute/internal/kernels" ) var ( diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/scalar_compare.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/scalar_compare.go similarity index 95% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/scalar_compare.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/scalar_compare.go index 53b573ab..476f3771 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/scalar_compare.go +++ 
b/vendor/github.com/apache/arrow/go/v14/arrow/compute/scalar_compare.go @@ -21,9 +21,9 @@ package compute import ( "context" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/compute/internal/exec" - "github.com/apache/arrow/go/v12/arrow/compute/internal/kernels" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/compute/exec" + "github.com/apache/arrow/go/v14/arrow/compute/internal/kernels" ) type compareFunction struct { diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/selection.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/selection.go similarity index 99% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/selection.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/selection.go index 7a68a83b..ed6d8041 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/selection.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/selection.go @@ -22,10 +22,10 @@ import ( "context" "fmt" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/array" - "github.com/apache/arrow/go/v12/arrow/compute/internal/exec" - "github.com/apache/arrow/go/v12/arrow/compute/internal/kernels" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/array" + "github.com/apache/arrow/go/v14/arrow/compute/exec" + "github.com/apache/arrow/go/v14/arrow/compute/internal/kernels" "golang.org/x/sync/errgroup" ) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/utils.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/utils.go similarity index 96% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/utils.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/utils.go index e0c6be34..cc4d6edc 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/utils.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/utils.go @@ -24,12 +24,12 @@ import ( "math" "time" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/arrow/compute/internal/exec" - "github.com/apache/arrow/go/v12/arrow/compute/internal/kernels" - "github.com/apache/arrow/go/v12/arrow/internal/debug" - "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/arrow/compute/exec" + "github.com/apache/arrow/go/v14/arrow/compute/internal/kernels" + "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/memory" "golang.org/x/xerrors" ) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/vector_hash.go b/vendor/github.com/apache/arrow/go/v14/arrow/compute/vector_hash.go similarity index 94% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/vector_hash.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/vector_hash.go index 42ec9425..5f9aec55 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/vector_hash.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/vector_hash.go @@ -21,8 +21,8 @@ package compute import ( "context" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/compute/internal/kernels" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/compute/internal/kernels" ) var ( diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/compute/vector_run_ends.go 
b/vendor/github.com/apache/arrow/go/v14/arrow/compute/vector_run_ends.go similarity index 96% rename from vendor/github.com/apache/arrow/go/v12/arrow/compute/vector_run_ends.go rename to vendor/github.com/apache/arrow/go/v14/arrow/compute/vector_run_ends.go index 0a34c9e9..48f3dcba 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/compute/vector_run_ends.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/compute/vector_run_ends.go @@ -21,8 +21,8 @@ package compute import ( "context" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/compute/internal/kernels" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/compute/internal/kernels" ) var ( diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/datatype.go b/vendor/github.com/apache/arrow/go/v14/arrow/datatype.go similarity index 94% rename from vendor/github.com/apache/arrow/go/v12/arrow/datatype.go rename to vendor/github.com/apache/arrow/go/v14/arrow/datatype.go index 8f9ad2e2..f0fb24ec 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/datatype.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/datatype.go @@ -21,7 +21,7 @@ import ( "hash/maphash" "strings" - "github.com/apache/arrow/go/v12/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/internal/debug" ) // Type is a logical type. They can be expressed as @@ -152,6 +152,19 @@ const ( RUN_END_ENCODED + // String (UTF8) view type with 4-byte prefix and inline + // small string optimizations + STRING_VIEW + + // Bytes view with 4-byte prefix and inline small byte arrays optimization + BINARY_VIEW + + // LIST_VIEW is a list of some logical data type represented with offsets and sizes + LIST_VIEW + + // like LIST but with 64-bit offsets + LARGE_LIST_VIEW + // Alias to ensure we do not break any consumers DECIMAL = DECIMAL128 ) @@ -384,7 +397,7 @@ func IsListLike(t Type) bool { // IsNested returns true for List, LargeList, FixedSizeList, Map, Struct, and Unions func IsNested(t Type) bool { switch t { - case LIST, LARGE_LIST, FIXED_SIZE_LIST, MAP, STRUCT, SPARSE_UNION, DENSE_UNION: + case LIST, LARGE_LIST, FIXED_SIZE_LIST, MAP, LIST_VIEW, LARGE_LIST_VIEW, STRUCT, SPARSE_UNION, DENSE_UNION: return true } return false diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/datatype_binary.go b/vendor/github.com/apache/arrow/go/v14/arrow/datatype_binary.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/datatype_binary.go rename to vendor/github.com/apache/arrow/go/v14/arrow/datatype_binary.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/datatype_encoded.go b/vendor/github.com/apache/arrow/go/v14/arrow/datatype_encoded.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/datatype_encoded.go rename to vendor/github.com/apache/arrow/go/v14/arrow/datatype_encoded.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/datatype_extension.go b/vendor/github.com/apache/arrow/go/v14/arrow/datatype_extension.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/datatype_extension.go rename to vendor/github.com/apache/arrow/go/v14/arrow/datatype_extension.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/datatype_fixedwidth.go b/vendor/github.com/apache/arrow/go/v14/arrow/datatype_fixedwidth.go similarity index 95% rename from vendor/github.com/apache/arrow/go/v12/arrow/datatype_fixedwidth.go rename to vendor/github.com/apache/arrow/go/v14/arrow/datatype_fixedwidth.go index 2a2bbc77..fc0b3aea 100644 
--- a/vendor/github.com/apache/arrow/go/v12/arrow/datatype_fixedwidth.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/datatype_fixedwidth.go @@ -17,11 +17,12 @@ package arrow import ( - "encoding/json" "fmt" "strconv" "time" + "github.com/apache/arrow/go/v14/internal/json" + "golang.org/x/xerrors" ) @@ -106,7 +107,7 @@ func (d Date64) FormattedString() string { } // TimestampFromStringInLocation is like TimestampFromString, but treats the time instant -// as if it were in the passed timezone before converting to UTC for internal representation. +// as if it were in the provided timezone before converting to UTC for internal representation. func TimestampFromStringInLocation(val string, unit TimeUnit, loc *time.Location) (Timestamp, bool, error) { if len(val) < 10 { return 0, false, fmt.Errorf("%w: invalid timestamp string", ErrInvalid) @@ -167,17 +168,8 @@ func TimestampFromStringInLocation(val string, unit TimeUnit, loc *time.Location out = out.In(loc).UTC() } - switch unit { - case Second: - return Timestamp(out.Unix()), zoneFmt != "", nil - case Millisecond: - return Timestamp(out.Unix()*1e3 + int64(out.Nanosecond())/1e6), zoneFmt != "", nil - case Microsecond: - return Timestamp(out.Unix()*1e6 + int64(out.Nanosecond())/1e3), zoneFmt != "", nil - case Nanosecond: - return Timestamp(out.UnixNano()), zoneFmt != "", nil - } - return 0, zoneFmt != "", fmt.Errorf("%w: unexpected timestamp unit: %s", ErrInvalid, unit) + ts, err := TimestampFromTime(out, unit) + return ts, zoneFmt != "", err } // TimestampFromString parses a string and returns a timestamp for the given unit @@ -187,10 +179,10 @@ func TimestampFromStringInLocation(val string, unit TimeUnit, loc *time.Location // or a space, and [.zzzzzzzzz] can be either left out or up to 9 digits of // fractions of a second. // -// YYYY-MM-DD -// YYYY-MM-DD[T]HH -// YYYY-MM-DD[T]HH:MM -// YYYY-MM-DD[T]HH:MM:SS[.zzzzzzzz] +// YYYY-MM-DD +// YYYY-MM-DD[T]HH +// YYYY-MM-DD[T]HH:MM +// YYYY-MM-DD[T]HH:MM:SS[.zzzzzzzz] // // You can also optionally have an ending Z to indicate UTC or indicate a specific // timezone using ±HH, ±HHMM or ±HH:MM at the end of the string. 
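
A minimal sketch (editor's illustration, not part of the upstream diff) of how the string formats documented above combine with the TimestampFromTime helper introduced in the next hunk; it assumes the v14 module path this patch migrates to, and the timestamp literal is arbitrary:

    package main

    import (
        "fmt"

        "github.com/apache/arrow/go/v14/arrow"
    )

    func main() {
        // Trailing Z marks the instant as UTC, per the format list above.
        ts, err := arrow.TimestampFromString("2024-01-30T12:39:10Z", arrow.Millisecond)
        if err != nil {
            panic(err)
        }

        // ToTime now dispatches on the unit (time.UnixMilli, time.UnixMicro)
        // instead of multiplying raw nanoseconds, and TimestampFromTime is the
        // helper the unit switch below is factored into; the round trip
        // preserves the instant.
        back, _ := arrow.TimestampFromTime(ts.ToTime(arrow.Millisecond), arrow.Millisecond)
        fmt.Println(back == ts) // true
    }
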
@@ -200,10 +192,32 @@ func TimestampFromString(val string, unit TimeUnit) (Timestamp, error) { } func (t Timestamp) ToTime(unit TimeUnit) time.Time { - if unit == Second { + switch unit { + case Second: return time.Unix(int64(t), 0).UTC() + case Millisecond: + return time.UnixMilli(int64(t)).UTC() + case Microsecond: + return time.UnixMicro(int64(t)).UTC() + default: + return time.Unix(0, int64(t)).UTC() + } +} + +// TimestampFromTime allows converting time.Time to Timestamp +func TimestampFromTime(val time.Time, unit TimeUnit) (Timestamp, error) { + switch unit { + case Second: + return Timestamp(val.Unix()), nil + case Millisecond: + return Timestamp(val.Unix()*1e3 + int64(val.Nanosecond())/1e6), nil + case Microsecond: + return Timestamp(val.Unix()*1e6 + int64(val.Nanosecond())/1e3), nil + case Nanosecond: + return Timestamp(val.UnixNano()), nil + default: + return 0, fmt.Errorf("%w: unexpected timestamp unit: %s", ErrInvalid, unit) } - return time.Unix(0, int64(t)*int64(unit.Multiplier())).UTC() } // Time32FromString parses a string to return a Time32 value in the given unit, @@ -319,6 +333,8 @@ const ( var TimeUnitValues = []TimeUnit{Second, Millisecond, Microsecond, Nanosecond} +// Multiplier returns a time.Duration value to multiply by in order to +// convert the value into nanoseconds func (u TimeUnit) Multiplier() time.Duration { return [...]time.Duration{time.Second, time.Millisecond, time.Microsecond, time.Nanosecond}[uint(u)&3] } @@ -331,7 +347,7 @@ type TemporalWithUnit interface { } // TimestampType is encoded as a 64-bit signed integer since the UNIX epoch (2017-01-01T00:00:00Z). -// The zero-value is a nanosecond and time zone neutral. Time zone neutral can be +// The zero-value is a second and time zone neutral. Time zone neutral can be // considered UTC without having "UTC" as a time zone. type TimestampType struct { Unit TimeUnit @@ -358,9 +374,9 @@ func (t *TimestampType) Fingerprint() string { // BitWidth returns the number of bits required to store a single element of this data type in memory. 
func (*TimestampType) BitWidth() int { return 64 } -func (TimestampType) Bytes() int { return Int64SizeBytes } +func (*TimestampType) Bytes() int { return Int64SizeBytes } -func (TimestampType) Layout() DataTypeLayout { +func (*TimestampType) Layout() DataTypeLayout { return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), SpecFixedWidth(TimestampSizeBytes)}} } @@ -428,15 +444,9 @@ func (t *TimestampType) GetToTimeFunc() (func(Timestamp) time.Time, error) { case Second: return func(v Timestamp) time.Time { return time.Unix(int64(v), 0).In(tz) }, nil case Millisecond: - factor := int64(time.Second / time.Millisecond) - return func(v Timestamp) time.Time { - return time.Unix(int64(v)/factor, (int64(v)%factor)*int64(time.Millisecond)).In(tz) - }, nil + return func(v Timestamp) time.Time { return time.UnixMilli(int64(v)).In(tz) }, nil case Microsecond: - factor := int64(time.Second / time.Microsecond) - return func(v Timestamp) time.Time { - return time.Unix(int64(v)/factor, (int64(v)%factor)*int64(time.Microsecond)).In(tz) - }, nil + return func(v Timestamp) time.Time { return time.UnixMicro(int64(v)).In(tz) }, nil case Nanosecond: return func(v Timestamp) time.Time { return time.Unix(0, int64(v)).In(tz) }, nil } diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/datatype_nested.go b/vendor/github.com/apache/arrow/go/v14/arrow/datatype_nested.go similarity index 69% rename from vendor/github.com/apache/arrow/go/v12/arrow/datatype_nested.go rename to vendor/github.com/apache/arrow/go/v14/arrow/datatype_nested.go index a6e280cd..4ae48803 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/datatype_nested.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/datatype_nested.go @@ -22,13 +22,28 @@ import ( "strconv" "strings" - "github.com/apache/arrow/go/v12/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/internal/debug" ) -type NestedType interface { - DataType - Fields() []Field -} +type ( + NestedType interface { + DataType + + // Fields method provides a copy of NestedType fields + // (so it can be safely mutated and will not result in updating the NestedType). + Fields() []Field + } + + ListLikeType interface { + DataType + Elem() DataType + ElemField() Field + } + + VarLenListLikeType interface { + ListLikeType + } +) // ListType describes a nested type in which each array slot contains // a variable-size sequence of values, all having the same relative type. 
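
The interface block added above gives every variable-length container a common Elem/ElemField surface via ListLikeType (and VarLenListLikeType). A short sketch of what that enables downstream, not part of the upstream diff; it assumes the v14 import path, the existing arrow constructors (ListOf, LargeListOf, MapOf) and type registries (PrimitiveTypes, BinaryTypes), and the interface assertions added at the bottom of this file:

    package main

    import (
        "fmt"

        "github.com/apache/arrow/go/v14/arrow"
    )

    func main() {
        // List, large list, the new list-view, and map types all satisfy
        // ListLikeType, so generic code can inspect element types uniformly.
        containers := []arrow.ListLikeType{
            arrow.ListOf(arrow.PrimitiveTypes.Int32),
            arrow.LargeListOf(arrow.BinaryTypes.String),
            arrow.ListViewOf(arrow.PrimitiveTypes.Int64),
            arrow.MapOf(arrow.BinaryTypes.String, arrow.PrimitiveTypes.Float64),
        }
        for _, c := range containers {
            fmt.Printf("%s -> elem=%s nullable=%t\n", c, c.Elem(), c.ElemField().Nullable)
        }
    }
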
@@ -94,11 +109,11 @@ func (t *ListType) ElemField() Field { func (t *ListType) Fields() []Field { return []Field{t.ElemField()} } -func (ListType) Layout() DataTypeLayout { +func (*ListType) Layout() DataTypeLayout { return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), SpecFixedWidth(Int32SizeBytes)}} } -func (ListType) OffsetTypeTraits() OffsetTraits { return Int32Traits } +func (*ListType) OffsetTypeTraits() OffsetTraits { return Int32Traits } type LargeListType struct { ListType @@ -118,11 +133,11 @@ func (t *LargeListType) Fingerprint() string { return "" } -func (LargeListType) Layout() DataTypeLayout { +func (*LargeListType) Layout() DataTypeLayout { return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), SpecFixedWidth(Int64SizeBytes)}} } -func (LargeListType) OffsetTypeTraits() OffsetTraits { return Int64Traits } +func (*LargeListType) OffsetTypeTraits() OffsetTraits { return Int64Traits } func LargeListOfField(f Field) *LargeListType { if f.Type == nil { @@ -131,10 +146,10 @@ func LargeListOfField(f Field) *LargeListType { return &LargeListType{ListType{elem: f}} } -// ListOf returns the list type with element type t. -// For example, if t represents int32, ListOf(t) represents []int32. +// LargeListOf returns the list type with element type t. +// For example, if t represents int32, LargeListOf(t) represents []int32. // -// ListOf panics if t is nil or invalid. NullableElem defaults to true +// LargeListOf panics if t is nil or invalid. NullableElem defaults to true func LargeListOf(t DataType) *LargeListType { if t == nil { panic("arrow: nil DataType") @@ -142,7 +157,7 @@ func LargeListOf(t DataType) *LargeListType { return &LargeListType{ListType{elem: Field{Name: "item", Type: t, Nullable: true}}} } -// ListOfNonNullable is like ListOf but NullableElem defaults to false, indicating +// LargeListOfNonNullable is like ListOf but NullableElem defaults to false, indicating // that the child type should be marked as non-nullable. func LargeListOfNonNullable(t DataType) *LargeListType { if t == nil { @@ -227,21 +242,156 @@ func (t *FixedSizeListType) Fingerprint() string { func (t *FixedSizeListType) Fields() []Field { return []Field{t.ElemField()} } -func (FixedSizeListType) Layout() DataTypeLayout { +func (*FixedSizeListType) Layout() DataTypeLayout { return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap()}} } +type ListViewType struct { + elem Field +} + +func ListViewOfField(f Field) *ListViewType { + if f.Type == nil { + panic("arrow: nil DataType") + } + return &ListViewType{elem: f} +} + +// ListViewOf returns the list-view type with element type t. +// For example, if t represents int32, ListViewOf(t) represents []int32. +// +// ListViewOf panics if t is nil or invalid. NullableElem defaults to true +func ListViewOf(t DataType) *ListViewType { + if t == nil { + panic("arrow: nil DataType") + } + return &ListViewType{elem: Field{Name: "item", Type: t, Nullable: true}} +} + +// ListViewOfNonNullable is like ListViewOf but NullableElem defaults to false, indicating +// that the child type should be marked as non-nullable. 
+func ListViewOfNonNullable(t DataType) *ListViewType { + if t == nil { + panic("arrow: nil DataType") + } + return &ListViewType{elem: Field{Name: "item", Type: t, Nullable: false}} +} + +func (*ListViewType) ID() Type { return LIST_VIEW } +func (*ListViewType) Name() string { return "list_view" } + +func (t *ListViewType) String() string { + if t.elem.Nullable { + return fmt.Sprintf("list_view<%s: %s, nullable>", t.elem.Name, t.elem.Type) + } + return fmt.Sprintf("list_view<%s: %s>", t.elem.Name, t.elem.Type) +} + +func (t *ListViewType) Fingerprint() string { + child := t.elem.Type.Fingerprint() + if len(child) > 0 { + return typeFingerprint(t) + "{" + child + "}" + } + return "" +} + +func (t *ListViewType) SetElemMetadata(md Metadata) { t.elem.Metadata = md } + +func (t *ListViewType) SetElemNullable(n bool) { t.elem.Nullable = n } + +// Elem returns the ListViewType's element type. +func (t *ListViewType) Elem() DataType { return t.elem.Type } + +func (t *ListViewType) ElemField() Field { + return t.elem +} + +func (t *ListViewType) Fields() []Field { return []Field{t.ElemField()} } + +func (*ListViewType) Layout() DataTypeLayout { + return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), SpecFixedWidth(Int32SizeBytes), SpecFixedWidth(Int32SizeBytes)}} +} + +func (*ListViewType) OffsetTypeTraits() OffsetTraits { return Int32Traits } + +type LargeListViewType struct { + elem Field +} + +func LargeListViewOfField(f Field) *LargeListViewType { + if f.Type == nil { + panic("arrow: nil DataType") + } + return &LargeListViewType{elem: f} +} + +// LargeListViewOf returns the list-view type with element type t. +// For example, if t represents int32, LargeListViewOf(t) represents []int32. +// +// LargeListViewOf panics if t is nil or invalid. NullableElem defaults to true +func LargeListViewOf(t DataType) *LargeListViewType { + if t == nil { + panic("arrow: nil DataType") + } + return &LargeListViewType{elem: Field{Name: "item", Type: t, Nullable: true}} +} + +// LargeListViewOfNonNullable is like LargeListViewOf but NullableElem defaults +// to false, indicating that the child type should be marked as non-nullable. +func LargeListViewOfNonNullable(t DataType) *LargeListViewType { + if t == nil { + panic("arrow: nil DataType") + } + return &LargeListViewType{elem: Field{Name: "item", Type: t, Nullable: false}} +} + +func (*LargeListViewType) ID() Type { return LARGE_LIST_VIEW } +func (*LargeListViewType) Name() string { return "large_list_view" } + +func (t *LargeListViewType) String() string { + if t.elem.Nullable { + return fmt.Sprintf("large_list_view<%s: %s, nullable>", t.elem.Name, t.elem.Type) + } + return fmt.Sprintf("large_list_view<%s: %s>", t.elem.Name, t.elem.Type) +} + +func (t *LargeListViewType) Fingerprint() string { + child := t.elem.Type.Fingerprint() + if len(child) > 0 { + return typeFingerprint(t) + "{" + child + "}" + } + return "" +} + +func (t *LargeListViewType) SetElemMetadata(md Metadata) { t.elem.Metadata = md } + +func (t *LargeListViewType) SetElemNullable(n bool) { t.elem.Nullable = n } + +// Elem returns the LargeListViewType's element type. 
+func (t *LargeListViewType) Elem() DataType { return t.elem.Type } + +func (t *LargeListViewType) ElemField() Field { + return t.elem +} + +func (t *LargeListViewType) Fields() []Field { return []Field{t.ElemField()} } + +func (*LargeListViewType) Layout() DataTypeLayout { + return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap(), SpecFixedWidth(Int64SizeBytes), SpecFixedWidth(Int64SizeBytes)}} +} + +func (*LargeListViewType) OffsetTypeTraits() OffsetTraits { return Int64Traits } + // StructType describes a nested type parameterized by an ordered sequence // of relative types, called its fields. type StructType struct { fields []Field - index map[string]int + index map[string][]int meta Metadata } // StructOf returns the struct type with fields fs. // -// StructOf panics if there are duplicated fields. // StructOf panics if there is a field with an invalid DataType. func StructOf(fs ...Field) *StructType { n := len(fs) @@ -251,7 +401,7 @@ func StructOf(fs ...Field) *StructType { t := &StructType{ fields: make([]Field, n), - index: make(map[string]int, n), + index: make(map[string][]int, n), } for i, f := range fs { if f.Type == nil { @@ -263,10 +413,11 @@ func StructOf(fs ...Field) *StructType { Nullable: f.Nullable, Metadata: f.Metadata.clone(), } - if _, dup := t.index[f.Name]; dup { - panic(fmt.Errorf("arrow: duplicate field with name %q", f.Name)) + if indices, exists := t.index[f.Name]; exists { + t.index[f.Name] = append(indices, i) + } else { + t.index[f.Name] = []int{i} } - t.index[f.Name] = i } return t @@ -288,20 +439,56 @@ func (t *StructType) String() string { return o.String() } -func (t *StructType) Fields() []Field { return t.fields } +// Fields method provides a copy of StructType fields +// (so it can be safely mutated and will not result in updating the StructType). +func (t *StructType) Fields() []Field { + fields := make([]Field, len(t.fields)) + copy(fields, t.fields) + return fields +} + func (t *StructType) Field(i int) Field { return t.fields[i] } +// FieldByName gets the field with the given name. +// +// If there are multiple fields with the given name, FieldByName +// returns the first such field. func (t *StructType) FieldByName(name string) (Field, bool) { i, ok := t.index[name] if !ok { return Field{}, false } - return t.fields[i], true + return t.fields[i[0]], true } +// FieldIdx gets the index of the field with the given name. +// +// If there are multiple fields with the given name, FieldIdx returns +// the index of the first such field. func (t *StructType) FieldIdx(name string) (int, bool) { i, ok := t.index[name] - return i, ok + if ok { + return i[0], true + } + return -1, false +} + +// FieldsByName returns all fields with the given name. +func (t *StructType) FieldsByName(n string) ([]Field, bool) { + indices, ok := t.index[n] + if !ok { + return nil, ok + } + fields := make([]Field, 0, len(indices)) + for _, v := range indices { + fields = append(fields, t.fields[v]) + } + return fields, ok +} + +// FieldIndices returns indices of all fields with the given name, or nil.
+func (t *StructType) FieldIndices(name string) []int { + return t.index[name] } func (t *StructType) Fingerprint() string { @@ -320,7 +507,7 @@ func (t *StructType) Fingerprint() string { return b.String() } -func (StructType) Layout() DataTypeLayout { +func (*StructType) Layout() DataTypeLayout { return DataTypeLayout{Buffers: []BufferSpec{SpecBitmap()}} } @@ -374,17 +561,22 @@ func (t *MapType) String() string { return o.String() } -func (t *MapType) KeyField() Field { return t.value.Elem().(*StructType).Field(0) } -func (t *MapType) KeyType() DataType { return t.KeyField().Type } -func (t *MapType) ItemField() Field { return t.value.Elem().(*StructType).Field(1) } -func (t *MapType) ItemType() DataType { return t.ItemField().Type } -func (t *MapType) ValueType() *StructType { return t.value.Elem().(*StructType) } -func (t *MapType) ValueField() Field { - return Field{ - Name: "entries", - Type: t.ValueType(), - } -} +func (t *MapType) KeyField() Field { return t.value.Elem().(*StructType).Field(0) } +func (t *MapType) KeyType() DataType { return t.KeyField().Type } +func (t *MapType) ItemField() Field { return t.value.Elem().(*StructType).Field(1) } +func (t *MapType) ItemType() DataType { return t.ItemField().Type } + +// Deprecated: use MapType.Elem().(*StructType) instead +func (t *MapType) ValueType() *StructType { return t.Elem().(*StructType) } + +// Deprecated: use MapType.ElemField() instead +func (t *MapType) ValueField() Field { return t.ElemField() } + +// Elem returns the MapType's element type (if treating MapType as ListLikeType) +func (t *MapType) Elem() DataType { return t.value.Elem() } + +// ElemField returns the MapType's element field (if treating MapType as ListLikeType) +func (t *MapType) ElemField() Field { return Field{Name: "entries", Type: t.Elem()} } func (t *MapType) SetItemNullable(nullable bool) { t.value.Elem().(*StructType).fields[1].Nullable = nullable @@ -404,13 +596,13 @@ func (t *MapType) Fingerprint() string { return fingerprint + "{" + keyFingerprint + itemFingerprint + "}" } -func (t *MapType) Fields() []Field { return []Field{t.ValueField()} } +func (t *MapType) Fields() []Field { return []Field{t.ElemField()} } func (t *MapType) Layout() DataTypeLayout { return t.value.Layout() } -func (MapType) OffsetTypeTraits() OffsetTraits { return Int32Traits } +func (*MapType) OffsetTypeTraits() OffsetTraits { return Int32Traits } type ( // UnionTypeCode is an alias to int8 which is the type of the ids @@ -490,9 +682,16 @@ func (t *unionType) init(fields []Field, typeCodes []UnionTypeCode) { } } -func (t unionType) Fields() []Field { return t.children } -func (t unionType) TypeCodes() []UnionTypeCode { return t.typeCodes } -func (t unionType) ChildIDs() []int { return t.childIDs[:] } +// Fields method provides a copy of union type fields +// (so it can be safely mutated and will not result in updating the union type). 
+func (t *unionType) Fields() []Field { + fields := make([]Field, len(t.children)) + copy(fields, t.children) + return fields +} + +func (t *unionType) TypeCodes() []UnionTypeCode { return t.typeCodes } +func (t *unionType) ChildIDs() []int { return t.childIDs[:] } func (t *unionType) validate(fields []Field, typeCodes []UnionTypeCode, _ UnionMode) error { if len(fields) != len(typeCodes) { @@ -750,7 +949,29 @@ func (f Field) String() string { var ( _ DataType = (*ListType)(nil) + _ DataType = (*LargeListType)(nil) _ DataType = (*FixedSizeListType)(nil) _ DataType = (*StructType)(nil) _ DataType = (*MapType)(nil) + _ DataType = (*DenseUnionType)(nil) + _ DataType = (*SparseUnionType)(nil) + + _ NestedType = (*ListType)(nil) + _ NestedType = (*LargeListType)(nil) + _ NestedType = (*FixedSizeListType)(nil) + _ NestedType = (*MapType)(nil) + _ NestedType = (*DenseUnionType)(nil) + _ NestedType = (*SparseUnionType)(nil) + + _ ListLikeType = (*ListType)(nil) + _ ListLikeType = (*LargeListType)(nil) + _ ListLikeType = (*FixedSizeListType)(nil) + _ ListLikeType = (*MapType)(nil) + + _ VarLenListLikeType = (*ListType)(nil) + _ VarLenListLikeType = (*LargeListType)(nil) + _ VarLenListLikeType = (*ListViewType)(nil) + _ VarLenListLikeType = (*LargeListViewType)(nil) + _ VarLenListLikeType = (*FixedSizeListType)(nil) + _ VarLenListLikeType = (*MapType)(nil) ) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/datatype_null.go b/vendor/github.com/apache/arrow/go/v14/arrow/datatype_null.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/datatype_null.go rename to vendor/github.com/apache/arrow/go/v14/arrow/datatype_null.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/datatype_numeric.gen.go b/vendor/github.com/apache/arrow/go/v14/arrow/datatype_numeric.gen.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/datatype_numeric.gen.go rename to vendor/github.com/apache/arrow/go/v14/arrow/datatype_numeric.gen.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/datatype_numeric.gen.go.tmpl b/vendor/github.com/apache/arrow/go/v14/arrow/datatype_numeric.gen.go.tmpl similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/datatype_numeric.gen.go.tmpl rename to vendor/github.com/apache/arrow/go/v14/arrow/datatype_numeric.gen.go.tmpl diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/datatype_numeric.gen.go.tmpldata b/vendor/github.com/apache/arrow/go/v14/arrow/datatype_numeric.gen.go.tmpldata similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/datatype_numeric.gen.go.tmpldata rename to vendor/github.com/apache/arrow/go/v14/arrow/datatype_numeric.gen.go.tmpldata diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/decimal128/decimal128.go b/vendor/github.com/apache/arrow/go/v14/arrow/decimal128/decimal128.go similarity index 91% rename from vendor/github.com/apache/arrow/go/v12/arrow/decimal128/decimal128.go rename to vendor/github.com/apache/arrow/go/v14/arrow/decimal128/decimal128.go index 7b8c0f94..898d7b42 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/decimal128/decimal128.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/decimal128/decimal128.go @@ -23,7 +23,12 @@ import ( "math/big" "math/bits" - "github.com/apache/arrow/go/v12/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/internal/debug" +) + +const ( + MaxPrecision = 38 + MaxScale = 38 ) var ( @@ -224,6 +229,8 @@ func FromFloat64(v float64, prec, scale int32) (Num, error) { return 
fromPositiveFloat64(v, prec, scale) } +var pt5 = big.NewFloat(0.5) + func FromString(v string, prec, scale int32) (n Num, err error) { // time for some math! // Our input precision means "number of digits of precision" but the @@ -259,8 +266,19 @@ func FromString(v string, prec, scale int32) (n Num, err error) { return } + // Since we're going to truncate this to get an integer, we need to round + // the value instead because of edge cases so that we match how other implementations + // (e.g. C++) handles Decimal values. So if we're negative we'll subtract 0.5 and if + // we're positive we'll add 0.5. + out.Mul(out, big.NewFloat(math.Pow10(int(scale)))).SetPrec(precInBits) + if out.Signbit() { + out.Sub(out, pt5) + } else { + out.Add(out, pt5) + } + var tmp big.Int - val, _ := out.Mul(out, big.NewFloat(math.Pow10(int(scale)))).SetPrec(precInBits).Int(&tmp) + val, _ := out.Int(&tmp) if val.BitLen() > 127 { return Num{}, errors.New("bitlen too large for decimal128") } @@ -332,10 +350,12 @@ func (n Num) BigInt() *big.Int { return toBigIntPositive(n) } +// Greater returns true if the value represented by n is > other func (n Num) Greater(other Num) bool { return other.Less(n) } +// GreaterEqual returns true if the value represented by n is >= other func (n Num) GreaterEqual(other Num) bool { return !n.Less(other) } @@ -345,6 +365,48 @@ func (n Num) Less(other Num) bool { return n.hi < other.hi || (n.hi == other.hi && n.lo < other.lo) } +// LessEqual returns true if the value represented by n is <= other +func (n Num) LessEqual(other Num) bool { + return !n.Greater(other) +} + +// Max returns the largest Decimal128 that was passed in the arguments +func Max(first Num, rest ...Num) Num { + answer := first + for _, number := range rest { + if number.Greater(answer) { + answer = number + } + } + return answer +} + +// Min returns the smallest Decimal128 that was passed in the arguments +func Min(first Num, rest ...Num) Num { + answer := first + for _, number := range rest { + if number.Less(answer) { + answer = number + } + } + return answer +} + +// Cmp compares the numbers represented by n and other and returns: +// +// +1 if n > other +// 0 if n == other +// -1 if n < other +func (n Num) Cmp(other Num) int { + switch { + case n.Greater(other): + return 1 + case n.Less(other): + return -1 + } + return 0 +} + // IncreaseScaleBy returns a new decimal128.Num with the value scaled up by // the desired amount. Must be 0 <= increase <= 38. Any data loss from scaling // is ignored. 
If you wish to prevent data loss, use Rescale which will diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/decimal256/decimal256.go b/vendor/github.com/apache/arrow/go/v14/arrow/decimal256/decimal256.go similarity index 95% rename from vendor/github.com/apache/arrow/go/v12/arrow/decimal256/decimal256.go rename to vendor/github.com/apache/arrow/go/v14/arrow/decimal256/decimal256.go index 3a1e57b0..4bfcd4e0 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/decimal256/decimal256.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/decimal256/decimal256.go @@ -23,8 +23,8 @@ import ( "math/big" "math/bits" - "github.com/apache/arrow/go/v12/arrow/decimal128" - "github.com/apache/arrow/go/v12/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/decimal128" + "github.com/apache/arrow/go/v14/arrow/internal/debug" ) const ( @@ -365,14 +365,17 @@ func (n Num) BigInt() *big.Int { return toBigIntPositive(n) } +// Greater returns true if the value represented by n is > other func (n Num) Greater(other Num) bool { return other.Less(n) } +// GreaterEqual returns true if the value represented by n is >= other func (n Num) GreaterEqual(other Num) bool { return !n.Less(other) } +// Less returns true if the value represented by n is < other func (n Num) Less(other Num) bool { switch { case n.arr[3] != other.arr[3]: @@ -385,6 +388,48 @@ func (n Num) Less(other Num) bool { return n.arr[0] < other.arr[0] } +// LessEqual returns true if the value represented by n is <= other +func (n Num) LessEqual(other Num) bool { + return !n.Greater(other) +} + +// Max returns the largest Decimal256 that was passed in the arguments +func Max(first Num, rest ...Num) Num { + answer := first + for _, number := range rest { + if number.Greater(answer) { + answer = number + } + } + return answer +} + +// Min returns the smallest Decimal256 that was passed in the arguments +func Min(first Num, rest ...Num) Num { + answer := first + for _, number := range rest { + if number.Less(answer) { + answer = number + } + } + return answer +} + +// Cmp compares the numbers represented by n and other and returns: +// +// +1 if n > other +// 0 if n == other +// -1 if n < other +func (n Num) Cmp(other Num) int { + switch { + case n.Greater(other): + return 1 + case n.Less(other): + return -1 + } + return 0 +} + func (n Num) IncreaseScaleBy(increase int32) Num { debug.Assert(increase >= 0, "invalid amount to increase scale by") debug.Assert(increase <= 76, "invalid amount to increase scale by") diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/doc.go b/vendor/github.com/apache/arrow/go/v14/arrow/doc.go similarity index 91% rename from vendor/github.com/apache/arrow/go/v12/arrow/doc.go rename to vendor/github.com/apache/arrow/go/v14/arrow/doc.go index a0a320c2..e923d05d 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/doc.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/doc.go @@ -22,20 +22,21 @@ language-independent columnar memory format for flat and hierarchical data, orga operations on modern hardware. It also provides computational libraries and zero-copy streaming messaging and inter-process communication. -Basics +# Basics The fundamental data structure in Arrow is an Array, which holds a sequence of values of the same type. An array consists of memory holding the data and an additional validity bitmap that indicates if the corresponding entry in the array is valid (not null). If the array has no null entries, it is possible to omit this bitmap. 
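
Decimal128 and Decimal256 gain a full comparison surface in the hunks above (Cmp, LessEqual, and variadic Max/Min alongside the documented Greater/GreaterEqual/Less), and FromString now rounds rather than truncates the scaled value to match the C++ implementation. A brief sketch under the v14 import path, not part of the upstream diff; the values are arbitrary:

    package main

    import (
        "fmt"

        "github.com/apache/arrow/go/v14/arrow/decimal128"
    )

    func main() {
        // Precision 38, scale 2. Per the rounding change above, "1.255"
        // at scale 2 would round to 1.26 instead of truncating to 1.25.
        a, _ := decimal128.FromString("1.25", 38, 2)
        b, _ := decimal128.FromString("-3.50", 38, 2)

        fmt.Println(a.Cmp(b))                  // 1, since a > b
        fmt.Println(decimal128.Max(a, b) == a) // true
        fmt.Println(decimal128.Min(a, b) == b) // true
    }
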
-Requirements +# Requirements -Despite the go.mod stating go1.18, everything except for the compute package -is able to be built with go1.17 (and most is also compatible with go1.16). +Despite the go.mod stating go1.20, everything is able to be built with go1.19 or higher. + +To build with tinygo include the noasm build tag. */ package arrow -const PkgVersion = "12.0.0" +const PkgVersion = "14.0.2" //go:generate go run _tools/tmpl/main.go -i -data=numeric.tmpldata type_traits_numeric.gen.go.tmpl type_traits_numeric.gen_test.go.tmpl array/numeric.gen.go.tmpl array/numericbuilder.gen.go.tmpl array/bufferbuilder_numeric.gen.go.tmpl //go:generate go run _tools/tmpl/main.go -i -data=datatype_numeric.gen.go.tmpldata datatype_numeric.gen.go.tmpl tensor/numeric.gen.go.tmpl tensor/numeric.gen_test.go.tmpl diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/encoded/ree_utils.go b/vendor/github.com/apache/arrow/go/v14/arrow/encoded/ree_utils.go similarity index 99% rename from vendor/github.com/apache/arrow/go/v12/arrow/encoded/ree_utils.go rename to vendor/github.com/apache/arrow/go/v14/arrow/encoded/ree_utils.go index 1d8a6a75..1f71e7b5 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/encoded/ree_utils.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/encoded/ree_utils.go @@ -20,7 +20,7 @@ import ( "math" "sort" - "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v14/arrow" ) // FindPhysicalIndex performs a binary search on the run-ends to return diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/endian/big.go b/vendor/github.com/apache/arrow/go/v14/arrow/endian/big.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/endian/big.go rename to vendor/github.com/apache/arrow/go/v14/arrow/endian/big.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/endian/endian.go b/vendor/github.com/apache/arrow/go/v14/arrow/endian/endian.go similarity index 91% rename from vendor/github.com/apache/arrow/go/v12/arrow/endian/endian.go rename to vendor/github.com/apache/arrow/go/v14/arrow/endian/endian.go index 4ae6ded7..3ecda7b3 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/endian/endian.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/endian/endian.go @@ -17,8 +17,8 @@ package endian import ( - "github.com/apache/arrow/go/v12/arrow/internal/debug" - "github.com/apache/arrow/go/v12/arrow/internal/flatbuf" + "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/internal/flatbuf" ) type Endianness flatbuf.Endianness diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/endian/little.go b/vendor/github.com/apache/arrow/go/v14/arrow/endian/little.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/endian/little.go rename to vendor/github.com/apache/arrow/go/v14/arrow/endian/little.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/errors.go b/vendor/github.com/apache/arrow/go/v14/arrow/errors.go similarity index 95% rename from vendor/github.com/apache/arrow/go/v12/arrow/errors.go rename to vendor/github.com/apache/arrow/go/v14/arrow/errors.go index b4a11b95..72e6fd8b 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/errors.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/errors.go @@ -24,4 +24,5 @@ var ( ErrType = errors.New("type error") ErrKey = errors.New("key error") ErrIndex = errors.New("index error") + ErrNotFound = errors.New("not found") ) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/float16/float16.go 
b/vendor/github.com/apache/arrow/go/v14/arrow/float16/float16.go similarity index 51% rename from vendor/github.com/apache/arrow/go/v12/arrow/float16/float16.go rename to vendor/github.com/apache/arrow/go/v14/arrow/float16/float16.go index c46a3a1a..4e03d13d 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/float16/float16.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/float16/float16.go @@ -66,5 +66,100 @@ func (f Num) Float32() float32 { return math.Float32frombits((sn << 31) | (res << 23) | (fc << 13)) } +func (n Num) Negate() Num { + return Num{bits: n.bits ^ 0x8000} +} + +func (n Num) Add(rhs Num) Num { + return New(n.Float32() + rhs.Float32()) +} + +func (n Num) Sub(rhs Num) Num { + return New(n.Float32() - rhs.Float32()) +} + +func (n Num) Mul(rhs Num) Num { + return New(n.Float32() * rhs.Float32()) +} + +func (n Num) Div(rhs Num) Num { + return New(n.Float32() / rhs.Float32()) +} + +// Greater returns true if the value represented by n is > other +func (n Num) Greater(other Num) bool { + return n.Float32() > other.Float32() +} + +// GreaterEqual returns true if the value represented by n is >= other +func (n Num) GreaterEqual(other Num) bool { + return n.Float32() >= other.Float32() +} + +// Less returns true if the value represented by n is < other +func (n Num) Less(other Num) bool { + return n.Float32() < other.Float32() +} + +// LessEqual returns true if the value represented by n is <= other +func (n Num) LessEqual(other Num) bool { + return n.Float32() <= other.Float32() +} + +// Max returns the largest Num that was passed in the arguments +func Max(first Num, rest ...Num) Num { + answer := first + for _, number := range rest { + if number.Greater(answer) { + answer = number + } + } + return answer +} + +// Min returns the smallest Num that was passed in the arguments +func Min(first Num, rest ...Num) Num { + answer := first + for _, number := range rest { + if number.Less(answer) { + answer = number + } + } + return answer +} + +// Cmp compares the numbers represented by n and other and returns: +// +// +1 if n > other +// 0 if n == other +// -1 if n < other +func (n Num) Cmp(other Num) int { + switch { + case n.Greater(other): + return 1 + case n.Less(other): + return -1 + } + return 0 +} + +func (n Num) Abs() Num { + switch n.Sign() { + case -1: + return n.Negate() + } + return n +} + +func (n Num) Sign() int { + f := n.Float32() + if f > 0 { + return 1 + } else if f == 0 { + return 0 + } + return -1 +} + func (f Num) Uint16() uint16 { return f.bits } func (f Num) String() string { return strconv.FormatFloat(float64(f.Float32()), 'g', -1, 32) } diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/debug/assert_off.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/assert_off.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/debug/assert_off.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/assert_off.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/debug/assert_on.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/assert_on.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/debug/assert_on.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/assert_on.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/debug/doc.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/doc.go similarity index 100% rename from
vendor/github.com/apache/arrow/go/v12/arrow/internal/debug/doc.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/doc.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/debug/log_off.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/log_off.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/debug/log_off.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/log_off.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/debug/log_on.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/log_on.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/debug/log_on.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/log_on.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/debug/util.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/util.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/debug/util.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/debug/util.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/dictutils/dict.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/dictutils/dict.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/dictutils/dict.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/dictutils/dict.go index fb986d74..e09a2f4a 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/internal/dictutils/dict.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/dictutils/dict.go @@ -21,9 +21,9 @@ import ( "fmt" "hash/maphash" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/array" - "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/array" + "github.com/apache/arrow/go/v14/arrow/memory" ) type Kind int8 diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Binary.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Binary.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Binary.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Binary.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/BinaryView.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/BinaryView.go new file mode 100644 index 00000000..09ca5e7d --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/BinaryView.go @@ -0,0 +1,57 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. 
+ +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// Logically the same as Binary, but the internal representation uses a view +/// struct that contains the string length and either the string's entire data +/// inline (for small strings) or an inlined prefix, an index of another buffer, +/// and an offset pointing to a slice in that buffer (for non-small strings). +/// +/// Since it uses a variable number of data buffers, each Field with this type +/// must have a corresponding entry in `variadicBufferCounts`. +type BinaryView struct { + _tab flatbuffers.Table +} + +func GetRootAsBinaryView(buf []byte, offset flatbuffers.UOffsetT) *BinaryView { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &BinaryView{} + x.Init(buf, n+offset) + return x +} + +func (rcv *BinaryView) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *BinaryView) Table() flatbuffers.Table { + return rcv._tab +} + +func BinaryViewStart(builder *flatbuffers.Builder) { + builder.StartObject(0) +} +func BinaryViewEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Block.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Block.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Block.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Block.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/BodyCompression.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/BodyCompression.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/BodyCompression.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/BodyCompression.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/BodyCompressionMethod.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/BodyCompressionMethod.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/BodyCompressionMethod.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/BodyCompressionMethod.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Bool.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Bool.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Bool.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Bool.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Buffer.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Buffer.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Buffer.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Buffer.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/CompressionType.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/CompressionType.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/CompressionType.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/CompressionType.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Date.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Date.go similarity index 100% rename from 
vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Date.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Date.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/DateUnit.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/DateUnit.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/DateUnit.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/DateUnit.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Decimal.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Decimal.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Decimal.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Decimal.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/DictionaryBatch.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/DictionaryBatch.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/DictionaryBatch.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/DictionaryBatch.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/DictionaryEncoding.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/DictionaryEncoding.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/DictionaryEncoding.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/DictionaryEncoding.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/DictionaryKind.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/DictionaryKind.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/DictionaryKind.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/DictionaryKind.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Duration.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Duration.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Duration.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Duration.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Endianness.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Endianness.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Endianness.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Endianness.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Feature.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Feature.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Feature.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Feature.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Field.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Field.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Field.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Field.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/FieldNode.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/FieldNode.go 
similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/FieldNode.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/FieldNode.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/FixedSizeBinary.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/FixedSizeBinary.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/FixedSizeBinary.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/FixedSizeBinary.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/FixedSizeList.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/FixedSizeList.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/FixedSizeList.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/FixedSizeList.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/FloatingPoint.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/FloatingPoint.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/FloatingPoint.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/FloatingPoint.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Footer.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Footer.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Footer.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Footer.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Int.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Int.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Int.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Int.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Interval.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Interval.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Interval.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Interval.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/IntervalUnit.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/IntervalUnit.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/IntervalUnit.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/IntervalUnit.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/KeyValue.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/KeyValue.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/KeyValue.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/KeyValue.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/LargeBinary.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/LargeBinary.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/LargeBinary.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/LargeBinary.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/LargeList.go 
b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/LargeList.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/LargeList.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/LargeList.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/LargeListView.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/LargeListView.go new file mode 100644 index 00000000..5b1df149 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/LargeListView.go @@ -0,0 +1,52 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// Same as ListView, but with 64-bit offsets and sizes, allowing to represent +/// extremely large data values. +type LargeListView struct { + _tab flatbuffers.Table +} + +func GetRootAsLargeListView(buf []byte, offset flatbuffers.UOffsetT) *LargeListView { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &LargeListView{} + x.Init(buf, n+offset) + return x +} + +func (rcv *LargeListView) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *LargeListView) Table() flatbuffers.Table { + return rcv._tab +} + +func LargeListViewStart(builder *flatbuffers.Builder) { + builder.StartObject(0) +} +func LargeListViewEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/LargeUtf8.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/LargeUtf8.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/LargeUtf8.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/LargeUtf8.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/List.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/List.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/List.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/List.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/ListView.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/ListView.go new file mode 100644 index 00000000..46b1e0b3 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/ListView.go @@ -0,0 +1,53 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// Represents the same logical types that List can, but contains offsets and +/// sizes allowing for writes in any order and sharing of child values among +/// list values. +type ListView struct { + _tab flatbuffers.Table +} + +func GetRootAsListView(buf []byte, offset flatbuffers.UOffsetT) *ListView { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &ListView{} + x.Init(buf, n+offset) + return x +} + +func (rcv *ListView) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *ListView) Table() flatbuffers.Table { + return rcv._tab +} + +func ListViewStart(builder *flatbuffers.Builder) { + builder.StartObject(0) +} +func ListViewEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Map.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Map.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Map.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Map.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Message.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Message.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Message.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Message.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/MessageHeader.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/MessageHeader.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/MessageHeader.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/MessageHeader.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/MetadataVersion.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/MetadataVersion.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/MetadataVersion.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/MetadataVersion.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Null.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Null.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Null.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Null.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Precision.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Precision.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Precision.go rename 
to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Precision.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/RecordBatch.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/RecordBatch.go similarity index 65% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/RecordBatch.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/RecordBatch.go index bb6aca9a..c50f4a6e 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/RecordBatch.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/RecordBatch.go @@ -128,8 +128,62 @@ func (rcv *RecordBatch) Compression(obj *BodyCompression) *BodyCompression { } /// Optional compression of the message body +/// Some types such as Utf8View are represented using a variable number of buffers. +/// For each such Field in the pre-ordered flattened logical schema, there will be +/// an entry in variadicBufferCounts to indicate the number of variadic +/// buffers which belong to that Field in the current RecordBatch. +/// +/// For example, the schema +/// col1: Struct<alpha: Int32, beta: BinaryView> +/// col2: Utf8View +/// contains two Fields with variadic buffers so variadicBufferCounts will have +/// two entries, the first counting the variadic buffers of `col1.beta` and the +/// second counting `col2`'s. +/// +/// This field may be omitted if and only if the schema contains no Fields with +/// a variable number of buffers, such as BinaryView and Utf8View. +func (rcv *RecordBatch) VariadicBufferCounts(j int) int64 { + o := flatbuffers.UOffsetT(rcv._tab.Offset(12)) + if o != 0 { + a := rcv._tab.Vector(o) + return rcv._tab.GetInt64(a + flatbuffers.UOffsetT(j*8)) + } + return 0 +} + +func (rcv *RecordBatch) VariadicBufferCountsLength() int { + o := flatbuffers.UOffsetT(rcv._tab.Offset(12)) + if o != 0 { + return rcv._tab.VectorLen(o) + } + return 0 +} + +/// Some types such as Utf8View are represented using a variable number of buffers. +/// For each such Field in the pre-ordered flattened logical schema, there will be +/// an entry in variadicBufferCounts to indicate the number of variadic +/// buffers which belong to that Field in the current RecordBatch. +/// +/// For example, the schema +/// col1: Struct<alpha: Int32, beta: BinaryView> +/// col2: Utf8View +/// contains two Fields with variadic buffers so variadicBufferCounts will have +/// two entries, the first counting the variadic buffers of `col1.beta` and the +/// second counting `col2`'s. +/// +/// This field may be omitted if and only if the schema contains no Fields with +/// a variable number of buffers, such as BinaryView and Utf8View.
+func (rcv *RecordBatch) MutateVariadicBufferCounts(j int, n int64) bool { + o := flatbuffers.UOffsetT(rcv._tab.Offset(12)) + if o != 0 { + a := rcv._tab.Vector(o) + return rcv._tab.MutateInt64(a+flatbuffers.UOffsetT(j*8), n) + } + return false +} + func RecordBatchStart(builder *flatbuffers.Builder) { - builder.StartObject(4) + builder.StartObject(5) } func RecordBatchAddLength(builder *flatbuffers.Builder, length int64) { builder.PrependInt64Slot(0, length, 0) @@ -149,6 +203,12 @@ func RecordBatchStartBuffersVector(builder *flatbuffers.Builder, numElems int) f func RecordBatchAddCompression(builder *flatbuffers.Builder, compression flatbuffers.UOffsetT) { builder.PrependUOffsetTSlot(3, flatbuffers.UOffsetT(compression), 0) } +func RecordBatchAddVariadicBufferCounts(builder *flatbuffers.Builder, variadicBufferCounts flatbuffers.UOffsetT) { + builder.PrependUOffsetTSlot(4, flatbuffers.UOffsetT(variadicBufferCounts), 0) +} +func RecordBatchStartVariadicBufferCountsVector(builder *flatbuffers.Builder, numElems int) flatbuffers.UOffsetT { + return builder.StartVector(8, numElems, 8) +} func RecordBatchEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { return builder.EndObject() } diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/RunEndEncoded.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/RunEndEncoded.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/RunEndEncoded.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/RunEndEncoded.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/RunLengthEncoded.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/RunLengthEncoded.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/RunLengthEncoded.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/RunLengthEncoded.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Schema.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Schema.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Schema.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Schema.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/SparseMatrixCompressedAxis.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseMatrixCompressedAxis.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/SparseMatrixCompressedAxis.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseMatrixCompressedAxis.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/SparseMatrixIndexCSR.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseMatrixIndexCSR.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/SparseMatrixIndexCSR.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseMatrixIndexCSR.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/SparseMatrixIndexCSX.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseMatrixIndexCSX.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/SparseMatrixIndexCSX.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseMatrixIndexCSX.go diff --git 
a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/SparseTensor.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseTensor.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/SparseTensor.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseTensor.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/SparseTensorIndex.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseTensorIndex.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/SparseTensorIndex.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseTensorIndex.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/SparseTensorIndexCOO.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseTensorIndexCOO.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/SparseTensorIndexCOO.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseTensorIndexCOO.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/SparseTensorIndexCSF.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseTensorIndexCSF.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/SparseTensorIndexCSF.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/SparseTensorIndexCSF.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Struct_.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Struct_.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Struct_.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Struct_.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Tensor.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Tensor.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Tensor.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Tensor.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/TensorDim.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/TensorDim.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/TensorDim.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/TensorDim.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Time.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Time.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Time.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Time.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/TimeUnit.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/TimeUnit.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/TimeUnit.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/TimeUnit.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Timestamp.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Timestamp.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Timestamp.go rename to 
vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Timestamp.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Type.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Type.go similarity index 89% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Type.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Type.go index 707444b8..ab2bce9c 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Type.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Type.go @@ -49,6 +49,10 @@ const ( TypeLargeUtf8 Type = 20 TypeLargeList Type = 21 TypeRunEndEncoded Type = 22 + TypeBinaryView Type = 23 + TypeUtf8View Type = 24 + TypeListView Type = 25 + TypeLargeListView Type = 26 ) var EnumNamesType = map[Type]string{ @@ -75,6 +79,10 @@ var EnumNamesType = map[Type]string{ TypeLargeUtf8: "LargeUtf8", TypeLargeList: "LargeList", TypeRunEndEncoded: "RunEndEncoded", + TypeBinaryView: "BinaryView", + TypeUtf8View: "Utf8View", + TypeListView: "ListView", + TypeLargeListView: "LargeListView", } var EnumValuesType = map[string]Type{ @@ -101,6 +109,10 @@ var EnumValuesType = map[string]Type{ "LargeUtf8": TypeLargeUtf8, "LargeList": TypeLargeList, "RunEndEncoded": TypeRunEndEncoded, + "BinaryView": TypeBinaryView, + "Utf8View": TypeUtf8View, + "ListView": TypeListView, + "LargeListView": TypeLargeListView, } func (v Type) String() string { diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Union.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Union.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Union.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Union.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/UnionMode.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/UnionMode.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/UnionMode.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/UnionMode.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Utf8.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Utf8.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/flatbuf/Utf8.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Utf8.go diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Utf8View.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Utf8View.go new file mode 100644 index 00000000..9cf82149 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/flatbuf/Utf8View.go @@ -0,0 +1,57 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by the FlatBuffers compiler. DO NOT EDIT. + +package flatbuf + +import ( + flatbuffers "github.com/google/flatbuffers/go" +) + +/// Logically the same as Utf8, but the internal representation uses a view +/// struct that contains the string length and either the string's entire data +/// inline (for small strings) or an inlined prefix, an index of another buffer, +/// and an offset pointing to a slice in that buffer (for non-small strings). +/// +/// Since it uses a variable number of data buffers, each Field with this type +/// must have a corresponding entry in `variadicBufferCounts`. +type Utf8View struct { + _tab flatbuffers.Table +} + +func GetRootAsUtf8View(buf []byte, offset flatbuffers.UOffsetT) *Utf8View { + n := flatbuffers.GetUOffsetT(buf[offset:]) + x := &Utf8View{} + x.Init(buf, n+offset) + return x +} + +func (rcv *Utf8View) Init(buf []byte, i flatbuffers.UOffsetT) { + rcv._tab.Bytes = buf + rcv._tab.Pos = i +} + +func (rcv *Utf8View) Table() flatbuffers.Table { + return rcv._tab +} + +func Utf8ViewStart(builder *flatbuffers.Builder) { + builder.StartObject(0) +} +func Utf8ViewEnd(builder *flatbuffers.Builder) flatbuffers.UOffsetT { + return builder.EndObject() +} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/internal/utils.go b/vendor/github.com/apache/arrow/go/v14/arrow/internal/utils.go similarity index 94% rename from vendor/github.com/apache/arrow/go/v12/arrow/internal/utils.go rename to vendor/github.com/apache/arrow/go/v14/arrow/internal/utils.go index c19d2d27..265f030d 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/internal/utils.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/internal/utils.go @@ -17,8 +17,8 @@ package internal import ( - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/internal/flatbuf" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/internal/flatbuf" ) const CurMetadataVersion = flatbuf.MetadataVersionV5 diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/ipc/compression.go b/vendor/github.com/apache/arrow/go/v14/arrow/ipc/compression.go similarity index 95% rename from vendor/github.com/apache/arrow/go/v12/arrow/ipc/compression.go rename to vendor/github.com/apache/arrow/go/v14/arrow/ipc/compression.go index 7ba76067..73fb9165 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/ipc/compression.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/ipc/compression.go @@ -19,9 +19,9 @@ package ipc import ( "io" - "github.com/apache/arrow/go/v12/arrow/internal/debug" - "github.com/apache/arrow/go/v12/arrow/internal/flatbuf" - "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/internal/flatbuf" + "github.com/apache/arrow/go/v14/arrow/memory" "github.com/klauspost/compress/zstd" "github.com/pierrec/lz4/v4" ) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/ipc/endian_swap.go b/vendor/github.com/apache/arrow/go/v14/arrow/ipc/endian_swap.go similarity index 97% rename from vendor/github.com/apache/arrow/go/v12/arrow/ipc/endian_swap.go rename to vendor/github.com/apache/arrow/go/v14/arrow/ipc/endian_swap.go index c20d727e..d98fec10 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/ipc/endian_swap.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/ipc/endian_swap.go @@ -20,9 +20,9 @@ import ( "errors" "math/bits" - 
"github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/array" - "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/array" + "github.com/apache/arrow/go/v14/arrow/memory" ) // swap the endianness of the array's buffers as needed in-place to save diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/ipc/file_reader.go b/vendor/github.com/apache/arrow/go/v14/arrow/ipc/file_reader.go similarity index 94% rename from vendor/github.com/apache/arrow/go/v12/arrow/ipc/file_reader.go rename to vendor/github.com/apache/arrow/go/v14/arrow/ipc/file_reader.go index f008bd49..10cb2cae 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/ipc/file_reader.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/ipc/file_reader.go @@ -23,14 +23,14 @@ import ( "fmt" "io" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/array" - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/arrow/endian" - "github.com/apache/arrow/go/v12/arrow/internal" - "github.com/apache/arrow/go/v12/arrow/internal/dictutils" - "github.com/apache/arrow/go/v12/arrow/internal/flatbuf" - "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/array" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/arrow/endian" + "github.com/apache/arrow/go/v14/arrow/internal" + "github.com/apache/arrow/go/v14/arrow/internal/dictutils" + "github.com/apache/arrow/go/v14/arrow/internal/flatbuf" + "github.com/apache/arrow/go/v14/arrow/memory" ) // FileReader is an Arrow file reader. @@ -485,6 +485,12 @@ func (ctx *arrayLoaderContext) loadArray(dt arrow.DataType) arrow.ArrayData { case *arrow.LargeListType: return ctx.loadList(dt) + case *arrow.ListViewType: + return ctx.loadListView(dt) + + case *arrow.LargeListViewType: + return ctx.loadListView(dt) + case *arrow.FixedSizeListType: return ctx.loadFixedSizeList(dt) @@ -589,18 +595,13 @@ func (ctx *arrayLoaderContext) loadMap(dt *arrow.MapType) arrow.ArrayData { buffers = append(buffers, ctx.buffer()) defer releaseBuffers(buffers) - sub := ctx.loadChild(dt.ValueType()) + sub := ctx.loadChild(dt.Elem()) defer sub.Release() return array.NewData(dt, int(field.Length()), buffers, []arrow.ArrayData{sub}, int(field.NullCount()), 0) } -type listLike interface { - arrow.DataType - Elem() arrow.DataType -} - -func (ctx *arrayLoaderContext) loadList(dt listLike) arrow.ArrayData { +func (ctx *arrayLoaderContext) loadList(dt arrow.ListLikeType) arrow.ArrayData { field, buffers := ctx.loadCommon(dt.ID(), 2) buffers = append(buffers, ctx.buffer()) defer releaseBuffers(buffers) @@ -611,6 +612,17 @@ func (ctx *arrayLoaderContext) loadList(dt listLike) arrow.ArrayData { return array.NewData(dt, int(field.Length()), buffers, []arrow.ArrayData{sub}, int(field.NullCount()), 0) } +func (ctx *arrayLoaderContext) loadListView(dt arrow.VarLenListLikeType) arrow.ArrayData { + field, buffers := ctx.loadCommon(dt.ID(), 3) + buffers = append(buffers, ctx.buffer(), ctx.buffer()) + defer releaseBuffers(buffers) + + sub := ctx.loadChild(dt.Elem()) + defer sub.Release() + + return array.NewData(dt, int(field.Length()), buffers, []arrow.ArrayData{sub}, int(field.NullCount()), 0) +} + func (ctx *arrayLoaderContext) loadFixedSizeList(dt *arrow.FixedSizeListType) arrow.ArrayData { field, buffers := ctx.loadCommon(dt.ID(), 1) defer releaseBuffers(buffers) diff --git 
a/vendor/github.com/apache/arrow/go/v12/arrow/ipc/file_writer.go b/vendor/github.com/apache/arrow/go/v14/arrow/ipc/file_writer.go similarity index 97% rename from vendor/github.com/apache/arrow/go/v12/arrow/ipc/file_writer.go rename to vendor/github.com/apache/arrow/go/v14/arrow/ipc/file_writer.go index 20c82b96..12384225 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/ipc/file_writer.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/ipc/file_writer.go @@ -21,11 +21,11 @@ import ( "fmt" "io" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/arrow/internal/dictutils" - "github.com/apache/arrow/go/v12/arrow/internal/flatbuf" - "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/arrow/internal/dictutils" + "github.com/apache/arrow/go/v14/arrow/internal/flatbuf" + "github.com/apache/arrow/go/v14/arrow/memory" ) // PayloadWriter is an interface for injecting a different payloadwriter diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/ipc/ipc.go b/vendor/github.com/apache/arrow/go/v14/arrow/ipc/ipc.go similarity index 96% rename from vendor/github.com/apache/arrow/go/v12/arrow/ipc/ipc.go rename to vendor/github.com/apache/arrow/go/v14/arrow/ipc/ipc.go index e651a993..6c04b6f5 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/ipc/ipc.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/ipc/ipc.go @@ -19,10 +19,10 @@ package ipc import ( "io" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/arrio" - "github.com/apache/arrow/go/v12/arrow/internal/flatbuf" - "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/arrio" + "github.com/apache/arrow/go/v14/arrow/internal/flatbuf" + "github.com/apache/arrow/go/v14/arrow/memory" ) const ( diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/ipc/message.go b/vendor/github.com/apache/arrow/go/v14/arrow/ipc/message.go similarity index 97% rename from vendor/github.com/apache/arrow/go/v12/arrow/ipc/message.go rename to vendor/github.com/apache/arrow/go/v14/arrow/ipc/message.go index adc231cd..c5d0ec68 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/ipc/message.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/ipc/message.go @@ -22,9 +22,9 @@ import ( "io" "sync/atomic" - "github.com/apache/arrow/go/v12/arrow/internal/debug" - "github.com/apache/arrow/go/v12/arrow/internal/flatbuf" - "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/internal/flatbuf" + "github.com/apache/arrow/go/v14/arrow/memory" ) // MetadataVersion represents the Arrow metadata version. 
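The float16 changes vendored earlier in this patch give Num basic arithmetic and comparison helpers. A minimal sketch of how those helpers compose, using only the methods added above (illustrative only, not part of the vendored code):

package main

import (
	"fmt"

	"github.com/apache/arrow/go/v14/arrow/float16"
)

func main() {
	a := float16.New(1.5)
	b := float16.New(2.25)

	fmt.Println(a.Add(b))          // 3.75, printed via the Num String() method
	fmt.Println(a.Cmp(b))          // -1, since 1.5 < 2.25
	fmt.Println(float16.Max(a, b)) // 2.25
	fmt.Println(b.Negate().Abs())  // 2.25 (sign bit flipped by Negate, then cleared by Abs)
}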
diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/ipc/metadata.go b/vendor/github.com/apache/arrow/go/v14/arrow/ipc/metadata.go similarity index 96% rename from vendor/github.com/apache/arrow/go/v12/arrow/ipc/metadata.go rename to vendor/github.com/apache/arrow/go/v14/arrow/ipc/metadata.go index 980425e5..9bab47d6 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/ipc/metadata.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/ipc/metadata.go @@ -23,11 +23,11 @@ import ( "io" "sort" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/endian" - "github.com/apache/arrow/go/v12/arrow/internal/dictutils" - "github.com/apache/arrow/go/v12/arrow/internal/flatbuf" - "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/endian" + "github.com/apache/arrow/go/v14/arrow/internal/dictutils" + "github.com/apache/arrow/go/v14/arrow/internal/flatbuf" + "github.com/apache/arrow/go/v14/arrow/memory" flatbuffers "github.com/google/flatbuffers/go" ) @@ -386,6 +386,18 @@ func (fv *fieldVisitor) visit(field arrow.Field) { flatbuf.LargeListStart(fv.b) fv.offset = flatbuf.LargeListEnd(fv.b) + case *arrow.ListViewType: + fv.dtype = flatbuf.TypeListView + fv.kids = append(fv.kids, fieldToFB(fv.b, fv.pos.Child(0), dt.ElemField(), fv.memo)) + flatbuf.ListViewStart(fv.b) + fv.offset = flatbuf.ListViewEnd(fv.b) + + case *arrow.LargeListViewType: + fv.dtype = flatbuf.TypeLargeListView + fv.kids = append(fv.kids, fieldToFB(fv.b, fv.pos.Child(0), dt.ElemField(), fv.memo)) + flatbuf.LargeListViewStart(fv.b) + fv.offset = flatbuf.LargeListViewEnd(fv.b) + case *arrow.FixedSizeListType: fv.dtype = flatbuf.TypeFixedSizeList fv.kids = append(fv.kids, fieldToFB(fv.b, fv.pos.Child(0), dt.ElemField(), fv.memo)) @@ -420,7 +432,7 @@ func (fv *fieldVisitor) visit(field arrow.Field) { case *arrow.MapType: fv.dtype = flatbuf.TypeMap - fv.kids = append(fv.kids, fieldToFB(fv.b, fv.pos.Child(0), dt.ValueField(), fv.memo)) + fv.kids = append(fv.kids, fieldToFB(fv.b, fv.pos.Child(0), dt.ElemField(), fv.memo)) flatbuf.MapStart(fv.b) flatbuf.MapAddKeysSorted(fv.b, dt.KeysSorted) fv.offset = flatbuf.MapEnd(fv.b) @@ -718,6 +730,20 @@ func concreteTypeFromFB(typ flatbuf.Type, data flatbuffers.Table, children []arr dt := arrow.LargeListOfField(children[0]) return dt, nil + case flatbuf.TypeListView: + if len(children) != 1 { + return nil, fmt.Errorf("arrow/ipc: ListView must have exactly 1 child field (got=%d)", len(children)) + } + dt := arrow.ListViewOfField(children[0]) + return dt, nil + + case flatbuf.TypeLargeListView: + if len(children) != 1 { + return nil, fmt.Errorf("arrow/ipc: LargeListView must have exactly 1 child field (got=%d)", len(children)) + } + dt := arrow.LargeListViewOfField(children[0]) + return dt, nil + case flatbuf.TypeFixedSizeList: var dt flatbuf.FixedSizeList dt.Init(data.Bytes, data.Pos) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/ipc/reader.go b/vendor/github.com/apache/arrow/go/v14/arrow/ipc/reader.go similarity index 95% rename from vendor/github.com/apache/arrow/go/v12/arrow/ipc/reader.go rename to vendor/github.com/apache/arrow/go/v14/arrow/ipc/reader.go index 9c502f6a..1f684c1f 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/ipc/reader.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/ipc/reader.go @@ -23,13 +23,13 @@ import ( "io" "sync/atomic" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/array" - 
"github.com/apache/arrow/go/v12/arrow/endian" - "github.com/apache/arrow/go/v12/arrow/internal/debug" - "github.com/apache/arrow/go/v12/arrow/internal/dictutils" - "github.com/apache/arrow/go/v12/arrow/internal/flatbuf" - "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/array" + "github.com/apache/arrow/go/v14/arrow/endian" + "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/internal/dictutils" + "github.com/apache/arrow/go/v14/arrow/internal/flatbuf" + "github.com/apache/arrow/go/v14/arrow/memory" ) // Reader reads records from an io.Reader. @@ -159,6 +159,7 @@ func (r *Reader) Release() { r.r.Release() r.r = nil } + r.memo.Clear() } } diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/ipc/writer.go b/vendor/github.com/apache/arrow/go/v14/arrow/ipc/writer.go similarity index 85% rename from vendor/github.com/apache/arrow/go/v12/arrow/ipc/writer.go rename to vendor/github.com/apache/arrow/go/v14/arrow/ipc/writer.go index 93c6d8df..a97f47ef 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/ipc/writer.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/ipc/writer.go @@ -26,14 +26,14 @@ import ( "sync" "unsafe" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/array" - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/arrow/internal" - "github.com/apache/arrow/go/v12/arrow/internal/debug" - "github.com/apache/arrow/go/v12/arrow/internal/dictutils" - "github.com/apache/arrow/go/v12/arrow/internal/flatbuf" - "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/array" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/arrow/internal" + "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/internal/dictutils" + "github.com/apache/arrow/go/v14/arrow/internal/flatbuf" + "github.com/apache/arrow/go/v14/arrow/memory" ) type swriter struct { @@ -577,10 +577,7 @@ func (w *recordEncoder) visit(p *Payload, arr arrow.Array) error { case *arrow.BinaryType, *arrow.LargeBinaryType, *arrow.StringType, *arrow.LargeStringType: arr := arr.(array.BinaryLike) - voffsets, err := w.getZeroBasedValueOffsets(arr) - if err != nil { - return fmt.Errorf("could not retrieve zero-based value offsets from %T: %w", arr, err) - } + voffsets := w.getZeroBasedValueOffsets(arr) data := arr.Data() values := data.Buffers()[2] @@ -687,10 +684,7 @@ func (w *recordEncoder) visit(p *Payload, arr arrow.Array) error { w.depth++ case *arrow.MapType, *arrow.ListType, *arrow.LargeListType: arr := arr.(array.ListLike) - voffsets, err := w.getZeroBasedValueOffsets(arr) - if err != nil { - return fmt.Errorf("could not retrieve zero-based value offsets for array %T: %w", arr, err) - } + voffsets := w.getZeroBasedValueOffsets(arr) p.body = append(p.body, voffsets) w.depth-- @@ -716,7 +710,52 @@ func (w *recordEncoder) visit(p *Payload, arr arrow.Array) error { values = array.NewSlice(values, values_offset, values_end) mustRelease = true } - err = w.visit(p, values) + err := w.visit(p, values) + + if err != nil { + return fmt.Errorf("could not visit list element for array %T: %w", arr, err) + } + w.depth++ + + case *arrow.ListViewType, *arrow.LargeListViewType: + data := arr.Data() + arr := arr.(array.VarLenListLike) + offsetTraits := arr.DataType().(arrow.OffsetsDataType).OffsetTypeTraits() + rngOff, rngLen 
:= array.RangeOfValuesUsed(arr) + voffsets := w.getValueOffsetsAtBaseValue(arr, rngOff) + p.body = append(p.body, voffsets) + + vsizes := data.Buffers()[2] + if vsizes != nil { + if data.Offset() != 0 || vsizes.Len() > offsetTraits.BytesRequired(arr.Len()) { + beg := offsetTraits.BytesRequired(data.Offset()) + end := beg + offsetTraits.BytesRequired(data.Len()) + vsizes = memory.NewBufferBytes(vsizes.Bytes()[beg:end]) + } else { + vsizes.Retain() + } + } + p.body = append(p.body, vsizes) + + w.depth-- + var ( + values = arr.ListValues() + mustRelease = false + values_offset = int64(rngOff) + values_end = int64(rngOff + rngLen) + ) + defer func() { + if mustRelease { + values.Release() + } + }() + + if arr.Len() > 0 && values_end < int64(values.Len()) { + // must also slice the values + values = array.NewSlice(values, values_offset, values_end) + mustRelease = true + } + err := w.visit(p, values) if err != nil { return fmt.Errorf("could not visit list element for array %T: %w", arr, err) @@ -764,19 +803,25 @@ func (w *recordEncoder) visit(p *Payload, arr arrow.Array) error { return nil } -func (w *recordEncoder) getZeroBasedValueOffsets(arr arrow.Array) (*memory.Buffer, error) { +func (w *recordEncoder) getZeroBasedValueOffsets(arr arrow.Array) *memory.Buffer { data := arr.Data() voffsets := data.Buffers()[1] offsetTraits := arr.DataType().(arrow.OffsetsDataType).OffsetTypeTraits() offsetBytesNeeded := offsetTraits.BytesRequired(data.Len() + 1) - if data.Offset() != 0 || offsetBytesNeeded < voffsets.Len() { - // if we have a non-zero offset, then the value offsets do not start at - // zero. we must a) create a new offsets array with shifted offsets and - // b) slice the values array accordingly - // - // or if there are more value offsets than values (the array has been sliced) - // we need to trim off the trailing offsets + if voffsets == nil || voffsets.Len() == 0 { + return nil + } + + // if we have a non-zero offset, then the value offsets do not start at + // zero. we must a) create a new offsets array with shifted offsets and + // b) slice the values array accordingly + // + // or if there are more value offsets than values (the array has been sliced) + // we need to trim off the trailing offsets + needsTruncateAndShift := data.Offset() != 0 || offsetBytesNeeded < voffsets.Len() + + if needsTruncateAndShift { shiftedOffsets := memory.NewResizableBuffer(w.mem) shiftedOffsets.Resize(offsetBytesNeeded) @@ -805,11 +850,65 @@ func (w *recordEncoder) getZeroBasedValueOffsets(arr arrow.Array) (*memory.Buffe } else { voffsets.Retain() } + + return voffsets +} + +// Truncates the offsets if needed and shifts the values if minOffset > 0. +// The offsets returned are corrected assuming the child values are truncated +// and now start at minOffset. +// +// This function only works on offset buffers of ListViews and LargeListViews. +// TODO(felipecrv): Unify this with getZeroBasedValueOffsets. 
+func (w *recordEncoder) getValueOffsetsAtBaseValue(arr arrow.Array, minOffset int) *memory.Buffer { + data := arr.Data() + voffsets := data.Buffers()[1] + offsetTraits := arr.DataType().(arrow.OffsetsDataType).OffsetTypeTraits() + offsetBytesNeeded := offsetTraits.BytesRequired(data.Len()) + if voffsets == nil || voffsets.Len() == 0 { - return nil, nil + return nil + } + + needsTruncate := data.Offset() != 0 || offsetBytesNeeded < voffsets.Len() + needsShift := minOffset > 0 + + if needsTruncate || needsShift { + shiftedOffsets := memory.NewResizableBuffer(w.mem) + shiftedOffsets.Resize(offsetBytesNeeded) + + switch arr.DataType().Layout().Buffers[1].ByteWidth { + case 8: + dest := arrow.Int64Traits.CastFromBytes(shiftedOffsets.Bytes()) + offsets := arrow.Int64Traits.CastFromBytes(voffsets.Bytes())[data.Offset() : data.Offset()+data.Len()] + + if minOffset > 0 { + for i, o := range offsets { + dest[i] = o - int64(minOffset) + } + } else { + copy(dest, offsets) + } + default: + debug.Assert(arr.DataType().Layout().Buffers[1].ByteWidth == 4, "invalid offset bytewidth") + dest := arrow.Int32Traits.CastFromBytes(shiftedOffsets.Bytes()) + offsets := arrow.Int32Traits.CastFromBytes(voffsets.Bytes())[data.Offset() : data.Offset()+data.Len()] + + if minOffset > 0 { + for i, o := range offsets { + dest[i] = o - int32(minOffset) + } + } else { + copy(dest, offsets) + } + } + + voffsets = shiftedOffsets + } else { + voffsets.Retain() } - return voffsets, nil + return voffsets } func (w *recordEncoder) rebaseDenseUnionValueOffsets(arr *array.DenseUnion, offsets, lengths []int32) *memory.Buffer { diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/Makefile b/vendor/github.com/apache/arrow/go/v14/arrow/memory/Makefile similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/memory/Makefile rename to vendor/github.com/apache/arrow/go/v14/arrow/memory/Makefile diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/allocator.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/allocator.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/memory/allocator.go rename to vendor/github.com/apache/arrow/go/v14/arrow/memory/allocator.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/buffer.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/buffer.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v12/arrow/memory/buffer.go rename to vendor/github.com/apache/arrow/go/v14/arrow/memory/buffer.go index ce445d2d..2ddb3f82 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/memory/buffer.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/memory/buffer.go @@ -19,7 +19,7 @@ package memory import ( "sync/atomic" - "github.com/apache/arrow/go/v12/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/internal/debug" ) // Buffer is a wrapper type for a buffer of bytes. 
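The getValueOffsetsAtBaseValue helper added above rebases list-view offsets so the serialized child values can be truncated to start at minOffset. A standalone sketch of that shift for the 32-bit case, using a hypothetical rebaseOffsets helper (illustrative only, under the same assumptions as the vendored code: the offsets buffer is first sliced to the array's window, then each offset is shifted down by the smallest referenced offset):

// rebaseOffsets mirrors the core loop of getValueOffsetsAtBaseValue:
// it slices the offsets down to [arrayOffset, arrayOffset+arrayLen) and
// shifts each offset so the smallest referenced child value lands at zero.
func rebaseOffsets(voffsets []int32, arrayOffset, arrayLen int, minOffset int32) []int32 {
	src := voffsets[arrayOffset : arrayOffset+arrayLen]
	dest := make([]int32, arrayLen)
	for i, o := range src {
		dest[i] = o - minOffset
	}
	return dest
}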
diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/cgo_allocator.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/cgo_allocator.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v12/arrow/memory/cgo_allocator.go rename to vendor/github.com/apache/arrow/go/v14/arrow/memory/cgo_allocator.go index 1c149c0a..85ee4452 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/memory/cgo_allocator.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/memory/cgo_allocator.go @@ -22,7 +22,7 @@ package memory import ( "runtime" - cga "github.com/apache/arrow/go/v12/arrow/memory/internal/cgoalloc" + cga "github.com/apache/arrow/go/v14/arrow/memory/internal/cgoalloc" ) // CgoArrowAllocator is an allocator which exposes the C++ memory pool class diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/cgo_allocator_defaults.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/cgo_allocator_defaults.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/memory/cgo_allocator_defaults.go rename to vendor/github.com/apache/arrow/go/v14/arrow/memory/cgo_allocator_defaults.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/cgo_allocator_logging.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/cgo_allocator_logging.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/memory/cgo_allocator_logging.go rename to vendor/github.com/apache/arrow/go/v14/arrow/memory/cgo_allocator_logging.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/checked_allocator.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/checked_allocator.go similarity index 55% rename from vendor/github.com/apache/arrow/go/v12/arrow/memory/checked_allocator.go rename to vendor/github.com/apache/arrow/go/v14/arrow/memory/checked_allocator.go index 06be9bda..78a09a57 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/memory/checked_allocator.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/memory/checked_allocator.go @@ -14,12 +14,17 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !tinygo +// +build !tinygo + package memory import ( + "fmt" "os" "runtime" "strconv" + "strings" "sync" "sync/atomic" "unsafe" @@ -46,8 +51,16 @@ func (a *CheckedAllocator) Allocate(size int) []byte { } ptr := uintptr(unsafe.Pointer(&out[0])) + pcs := make([]uintptr, maxRetainedFrames) + + // For historical reasons the meaning of the skip argument + // differs between Caller and Callers. For Callers, 0 identifies + // the frame for the caller itself. We skip 2 additional frames + // here to get to the caller right before the call to Allocate. + runtime.Callers(allocFrames+2, pcs) + callersFrames := runtime.CallersFrames(pcs) if pc, _, l, ok := runtime.Caller(allocFrames); ok { - a.allocs.Store(ptr, &dalloc{pc: pc, line: l, sz: size}) + a.allocs.Store(ptr, &dalloc{pc: pc, line: l, sz: size, callersFrames: callersFrames}) } return out } @@ -63,9 +76,18 @@ func (a *CheckedAllocator) Reallocate(size int, b []byte) []byte { newptr := uintptr(unsafe.Pointer(&out[0])) a.allocs.Delete(oldptr) + pcs := make([]uintptr, maxRetainedFrames) + + // For historical reasons the meaning of the skip argument + // differs between Caller and Callers. For Callers, 0 identifies + // the frame for the caller itself. We skip 2 additional frames + // here to get to the caller right before the call to Reallocate. 
+ runtime.Callers(reallocFrames+2, pcs) + callersFrames := runtime.CallersFrames(pcs) if pc, _, l, ok := runtime.Caller(reallocFrames); ok { - a.allocs.Store(newptr, &dalloc{pc: pc, line: l, sz: size}) + a.allocs.Store(newptr, &dalloc{pc: pc, line: l, sz: size, callersFrames: callersFrames}) } + return out } @@ -86,14 +108,16 @@ func (a *CheckedAllocator) Free(b []byte) { // of the inner workings of Buffer in order to find the caller that actually triggered // the allocation via a call to Resize/Reserve/etc. const ( - defAllocFrames = 4 - defReallocFrames = 3 + defAllocFrames = 4 + defReallocFrames = 3 + defMaxRetainedFrames = 0 ) // Use the environment variables ARROW_CHECKED_ALLOC_FRAMES and ARROW_CHECKED_REALLOC_FRAMES -// to control how many frames up it checks when storing the caller for allocations/reallocs -// when using this to find memory leaks. -var allocFrames, reallocFrames int = defAllocFrames, defReallocFrames +// to control how many frames it skips when storing the caller for allocations/reallocs +// when using this to find memory leaks. Use ARROW_CHECKED_MAX_RETAINED_FRAMES to control how +// many frames are retained for printing the stack trace of a leak. +var allocFrames, reallocFrames, maxRetainedFrames int = defAllocFrames, defReallocFrames, defMaxRetainedFrames func init() { if val, ok := os.LookupEnv("ARROW_CHECKED_ALLOC_FRAMES"); ok { @@ -107,12 +131,19 @@ func init() { reallocFrames = f } } + + if val, ok := os.LookupEnv("ARROW_CHECKED_MAX_RETAINED_FRAMES"); ok { + if f, err := strconv.Atoi(val); err == nil { + maxRetainedFrames = f + } + } } type dalloc struct { - pc uintptr - line int - sz int + pc uintptr + line int + sz int + callersFrames *runtime.Frames } type TestingT interface { @@ -124,7 +155,40 @@ func (a *CheckedAllocator) AssertSize(t TestingT, sz int) { a.allocs.Range(func(_, value interface{}) bool { info := value.(*dalloc) f := runtime.FuncForPC(info.pc) - t.Errorf("LEAK of %d bytes FROM %s line %d\n", info.sz, f.Name(), info.line) + frames := info.callersFrames + var callersMsg strings.Builder + for { + frame, more := frames.Next() + if frame.Line == 0 { + break + } + callersMsg.WriteString("\t") + // frame.Func is a useful source of information if it's present. + // It may be nil for non-Go code or fully inlined functions. 
+ if fn := frame.Func; fn != nil { + // format as func name + the offset in bytes from func entrypoint + callersMsg.WriteString(fmt.Sprintf("%s+%x", fn.Name(), frame.PC-fn.Entry())) + } else { + // fallback to outer func name + file line + callersMsg.WriteString(fmt.Sprintf("%s, line %d", frame.Function, frame.Line)) + } + + // Write a proper file name + line, so it's really easy to find the leak + callersMsg.WriteString("\n\t\t") + callersMsg.WriteString(frame.File + ":" + strconv.Itoa(frame.Line)) + callersMsg.WriteString("\n") + if !more { + break + } + } + + file, line := f.FileLine(info.pc) + t.Errorf("LEAK of %d bytes FROM\n\t%s+%x\n\t\t%s:%d\n%v", + info.sz, + f.Name(), info.pc-f.Entry(), // func name + offset in bytes between frame & entrypoint to func + file, line, // a proper file name + line, so it's really easy to find the leak + callersMsg.String(), + ) return true }) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/default_allocator.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/default_allocator.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/memory/default_allocator.go rename to vendor/github.com/apache/arrow/go/v14/arrow/memory/default_allocator.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/default_mallocator.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/default_mallocator.go similarity index 94% rename from vendor/github.com/apache/arrow/go/v12/arrow/memory/default_mallocator.go rename to vendor/github.com/apache/arrow/go/v14/arrow/memory/default_mallocator.go index 060bf487..12ad0846 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/memory/default_mallocator.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/memory/default_mallocator.go @@ -19,7 +19,7 @@ package memory import ( - "github.com/apache/arrow/go/v12/arrow/memory/mallocator" + "github.com/apache/arrow/go/v14/arrow/memory/mallocator" ) // DefaultAllocator is a default implementation of Allocator and can be used anywhere diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/doc.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/doc.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/memory/doc.go rename to vendor/github.com/apache/arrow/go/v14/arrow/memory/doc.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/go_allocator.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/go_allocator.go similarity index 97% rename from vendor/github.com/apache/arrow/go/v12/arrow/memory/go_allocator.go rename to vendor/github.com/apache/arrow/go/v14/arrow/memory/go_allocator.go index 1dea4a8d..1017eb68 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/memory/go_allocator.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/memory/go_allocator.go @@ -32,10 +32,9 @@ func (a *GoAllocator) Allocate(size int) []byte { } func (a *GoAllocator) Reallocate(size int, b []byte) []byte { - if size == len(b) { - return b + if cap(b) >= size { + return b[:size] } - newBuf := a.Allocate(size) copy(newBuf, b) return newBuf diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/internal/cgoalloc/allocator.cc b/vendor/github.com/apache/arrow/go/v14/arrow/memory/internal/cgoalloc/allocator.cc similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/memory/internal/cgoalloc/allocator.cc rename to vendor/github.com/apache/arrow/go/v14/arrow/memory/internal/cgoalloc/allocator.cc diff --git 
a/vendor/github.com/apache/arrow/go/v12/arrow/memory/internal/cgoalloc/allocator.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/internal/cgoalloc/allocator.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v12/arrow/memory/internal/cgoalloc/allocator.go rename to vendor/github.com/apache/arrow/go/v14/arrow/memory/internal/cgoalloc/allocator.go index 213e7599..48f34d86 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/memory/internal/cgoalloc/allocator.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/memory/internal/cgoalloc/allocator.go @@ -14,12 +14,13 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build ccalloc // +build ccalloc package cgoalloc // #cgo !windows pkg-config: arrow -// #cgo CXXFLAGS: -std=c++14 +// #cgo CXXFLAGS: -std=c++17 // #cgo windows LDFLAGS: -larrow // #include "allocator.h" import "C" diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/internal/cgoalloc/allocator.h b/vendor/github.com/apache/arrow/go/v14/arrow/memory/internal/cgoalloc/allocator.h similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/memory/internal/cgoalloc/allocator.h rename to vendor/github.com/apache/arrow/go/v14/arrow/memory/internal/cgoalloc/allocator.h diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/internal/cgoalloc/helpers.h b/vendor/github.com/apache/arrow/go/v14/arrow/memory/internal/cgoalloc/helpers.h similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/memory/internal/cgoalloc/helpers.h rename to vendor/github.com/apache/arrow/go/v14/arrow/memory/internal/cgoalloc/helpers.h diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/mallocator/doc.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/mallocator/doc.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/memory/mallocator/doc.go rename to vendor/github.com/apache/arrow/go/v14/arrow/memory/mallocator/doc.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/mallocator/mallocator.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/mallocator/mallocator.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/memory/mallocator/mallocator.go rename to vendor/github.com/apache/arrow/go/v14/arrow/memory/mallocator/mallocator.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/memory/memory.go rename to vendor/github.com/apache/arrow/go/v14/arrow/memory/memory.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_amd64.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_amd64.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_amd64.go rename to vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_amd64.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_arm64.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_arm64.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_arm64.go rename to vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_arm64.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_avx2_amd64.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_avx2_amd64.go similarity index 100% rename from 
vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_avx2_amd64.go rename to vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_avx2_amd64.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_avx2_amd64.s b/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_avx2_amd64.s similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_avx2_amd64.s rename to vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_avx2_amd64.s diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_js_wasm.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_js_wasm.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_js_wasm.go rename to vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_js_wasm.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_neon_arm64.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_neon_arm64.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_neon_arm64.go rename to vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_neon_arm64.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_neon_arm64.s b/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_neon_arm64.s similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_neon_arm64.s rename to vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_neon_arm64.s diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_noasm.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_noasm.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_noasm.go rename to vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_noasm.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_sse4_amd64.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_sse4_amd64.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_sse4_amd64.go rename to vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_sse4_amd64.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_sse4_amd64.s b/vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_sse4_amd64.s similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/memory/memory_sse4_amd64.s rename to vendor/github.com/apache/arrow/go/v14/arrow/memory/memory_sse4_amd64.s diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/memory/util.go b/vendor/github.com/apache/arrow/go/v14/arrow/memory/util.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/memory/util.go rename to vendor/github.com/apache/arrow/go/v14/arrow/memory/util.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/numeric.schema.json b/vendor/github.com/apache/arrow/go/v14/arrow/numeric.schema.json similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/numeric.schema.json rename to vendor/github.com/apache/arrow/go/v14/arrow/numeric.schema.json diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/numeric.tmpldata b/vendor/github.com/apache/arrow/go/v14/arrow/numeric.tmpldata similarity index 90% rename from vendor/github.com/apache/arrow/go/v12/arrow/numeric.tmpldata rename to vendor/github.com/apache/arrow/go/v14/arrow/numeric.tmpldata index 92cd48ba..3c2d63b7 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/numeric.tmpldata +++ 
b/vendor/github.com/apache/arrow/go/v14/arrow/numeric.tmpldata @@ -78,18 +78,6 @@ "Default": "0", "Size": "1" }, - { - "Name": "Timestamp", - "name": "timestamp", - "Type": "Timestamp", - "QualifiedType": "arrow.Timestamp", - "InternalType": "int64", - "Default": "0", - "Size": "8", - "Opt": { - "Parametric": true - } - }, { "Name": "Time32", "name": "time32", diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/record.go b/vendor/github.com/apache/arrow/go/v14/arrow/record.go similarity index 96% rename from vendor/github.com/apache/arrow/go/v12/arrow/record.go rename to vendor/github.com/apache/arrow/go/v14/arrow/record.go index b93f1015..d98c7732 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/record.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/record.go @@ -16,7 +16,7 @@ package arrow -import "encoding/json" +import "github.com/apache/arrow/go/v14/internal/json" // Record is a collection of equal-length arrays matching a particular Schema. // Also known as a RecordBatch in the spec and in some implementations. diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/scalar/append.go b/vendor/github.com/apache/arrow/go/v14/arrow/scalar/append.go similarity index 96% rename from vendor/github.com/apache/arrow/go/v12/arrow/scalar/append.go rename to vendor/github.com/apache/arrow/go/v14/arrow/scalar/append.go index 335f0ae6..9bcfe3e2 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/scalar/append.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/scalar/append.go @@ -21,11 +21,11 @@ package scalar import ( "fmt" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/array" - "github.com/apache/arrow/go/v12/arrow/decimal128" - "github.com/apache/arrow/go/v12/arrow/decimal256" - "github.com/apache/arrow/go/v12/arrow/float16" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/array" + "github.com/apache/arrow/go/v14/arrow/decimal128" + "github.com/apache/arrow/go/v14/arrow/decimal256" + "github.com/apache/arrow/go/v14/arrow/float16" "golang.org/x/exp/constraints" ) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/scalar/binary.go b/vendor/github.com/apache/arrow/go/v14/arrow/scalar/binary.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v12/arrow/scalar/binary.go rename to vendor/github.com/apache/arrow/go/v14/arrow/scalar/binary.go index 483ae2d7..3c041210 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/scalar/binary.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/scalar/binary.go @@ -21,8 +21,8 @@ import ( "fmt" "unicode/utf8" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/memory" ) type BinaryScalar interface { diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/scalar/compare.go b/vendor/github.com/apache/arrow/go/v14/arrow/scalar/compare.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v12/arrow/scalar/compare.go rename to vendor/github.com/apache/arrow/go/v14/arrow/scalar/compare.go index c7950e59..be7fa4d0 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/scalar/compare.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/scalar/compare.go @@ -16,7 +16,7 @@ package scalar -import "github.com/apache/arrow/go/v12/arrow" +import "github.com/apache/arrow/go/v14/arrow" // Equals returns true if two scalars are equal, which means they have the same // datatype, validity and value. 
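The checked_allocator.go hunk earlier in this patch records each allocation's call stack with runtime.Callers and replays it through runtime.CallersFrames when a leak is reported. A minimal standalone sketch of the skip semantics that code relies on (the frame counts here are illustrative; the real code derives them from the allocator's internal call depth):

package main

import (
	"fmt"
	"runtime"
)

// whoCalledMe captures up to 8 frames. Skipping 2 drops runtime.Callers
// itself (0) and this helper (1), so the first recorded frame is our caller.
func whoCalledMe() {
	pcs := make([]uintptr, 8)
	n := runtime.Callers(2, pcs)
	frames := runtime.CallersFrames(pcs[:n])
	for {
		frame, more := frames.Next()
		fmt.Printf("%s\n\t%s:%d\n", frame.Function, frame.File, frame.Line)
		if !more {
			break
		}
	}
}

func main() {
	whoCalledMe() // the first frame printed is main.main
}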
diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/scalar/nested.go b/vendor/github.com/apache/arrow/go/v14/arrow/scalar/nested.go similarity index 97% rename from vendor/github.com/apache/arrow/go/v12/arrow/scalar/nested.go rename to vendor/github.com/apache/arrow/go/v14/arrow/scalar/nested.go index 9de56810..87e84210 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/scalar/nested.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/scalar/nested.go @@ -21,10 +21,10 @@ import ( "errors" "fmt" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/array" - "github.com/apache/arrow/go/v12/arrow/internal/debug" - "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/array" + "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/memory" "golang.org/x/xerrors" ) @@ -69,20 +69,7 @@ func (l *List) Validate() (err error) { return } - var ( - valueType arrow.DataType - ) - - switch dt := l.Type.(type) { - case *arrow.ListType: - valueType = dt.Elem() - case *arrow.LargeListType: - valueType = dt.Elem() - case *arrow.FixedSizeListType: - valueType = dt.Elem() - case *arrow.MapType: - valueType = dt.ValueType() - } + valueType := l.Type.(arrow.ListLikeType).Elem() listType := l.Type if !arrow.TypeEqual(l.Value.DataType(), valueType) { diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/scalar/numeric.gen.go b/vendor/github.com/apache/arrow/go/v14/arrow/scalar/numeric.gen.go similarity index 99% rename from vendor/github.com/apache/arrow/go/v12/arrow/scalar/numeric.gen.go rename to vendor/github.com/apache/arrow/go/v14/arrow/scalar/numeric.gen.go index 4dbeaf83..90ae2a70 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/scalar/numeric.gen.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/scalar/numeric.gen.go @@ -24,9 +24,9 @@ import ( "reflect" "unsafe" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/decimal128" - "github.com/apache/arrow/go/v12/arrow/decimal256" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/decimal128" + "github.com/apache/arrow/go/v14/arrow/decimal256" ) type Int8 struct { diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/scalar/numeric.gen.go.tmpl b/vendor/github.com/apache/arrow/go/v14/arrow/scalar/numeric.gen.go.tmpl similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/scalar/numeric.gen.go.tmpl rename to vendor/github.com/apache/arrow/go/v14/arrow/scalar/numeric.gen.go.tmpl diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/scalar/numeric.gen.go.tmpldata b/vendor/github.com/apache/arrow/go/v14/arrow/scalar/numeric.gen.go.tmpldata similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/scalar/numeric.gen.go.tmpldata rename to vendor/github.com/apache/arrow/go/v14/arrow/scalar/numeric.gen.go.tmpldata diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/scalar/numeric.gen_test.go.tmpl b/vendor/github.com/apache/arrow/go/v14/arrow/scalar/numeric.gen_test.go.tmpl similarity index 95% rename from vendor/github.com/apache/arrow/go/v12/arrow/scalar/numeric.gen_test.go.tmpl rename to vendor/github.com/apache/arrow/go/v14/arrow/scalar/numeric.gen_test.go.tmpl index e837de99..c975cc9d 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/scalar/numeric.gen_test.go.tmpl +++ b/vendor/github.com/apache/arrow/go/v14/arrow/scalar/numeric.gen_test.go.tmpl @@ -19,8 +19,8 @@ package scalar_test 
import ( "testing" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/scalar" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/scalar" "github.com/stretchr/testify/assert" ) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/scalar/parse.go b/vendor/github.com/apache/arrow/go/v14/arrow/scalar/parse.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v12/arrow/scalar/parse.go rename to vendor/github.com/apache/arrow/go/v14/arrow/scalar/parse.go index 326ea07c..fcffe1ea 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/scalar/parse.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/scalar/parse.go @@ -25,12 +25,12 @@ import ( "strings" "time" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/array" - "github.com/apache/arrow/go/v12/arrow/decimal128" - "github.com/apache/arrow/go/v12/arrow/decimal256" - "github.com/apache/arrow/go/v12/arrow/float16" - "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/array" + "github.com/apache/arrow/go/v14/arrow/decimal128" + "github.com/apache/arrow/go/v14/arrow/decimal256" + "github.com/apache/arrow/go/v14/arrow/float16" + "github.com/apache/arrow/go/v14/arrow/memory" ) type TypeToScalar interface { @@ -512,7 +512,7 @@ func MakeScalarParam(val interface{}, dt arrow.DataType) (Scalar, error) { } return NewFixedSizeListScalarWithType(v, dt), nil case arrow.MAP: - if !arrow.TypeEqual(dt.(*arrow.MapType).ValueType(), v.DataType()) { + if !arrow.TypeEqual(dt.(*arrow.MapType).Elem(), v.DataType()) { return nil, fmt.Errorf("inconsistent type for map scalar type") } return NewMapScalar(v), nil diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/scalar/scalar.go b/vendor/github.com/apache/arrow/go/v14/arrow/scalar/scalar.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v12/arrow/scalar/scalar.go rename to vendor/github.com/apache/arrow/go/v14/arrow/scalar/scalar.go index 190a7c30..395771fa 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/scalar/scalar.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/scalar/scalar.go @@ -26,16 +26,16 @@ import ( "strconv" "unsafe" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/array" - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/arrow/decimal128" - "github.com/apache/arrow/go/v12/arrow/decimal256" - "github.com/apache/arrow/go/v12/arrow/encoded" - "github.com/apache/arrow/go/v12/arrow/endian" - "github.com/apache/arrow/go/v12/arrow/float16" - "github.com/apache/arrow/go/v12/arrow/internal/debug" - "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/array" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/arrow/decimal128" + "github.com/apache/arrow/go/v14/arrow/decimal256" + "github.com/apache/arrow/go/v14/arrow/encoded" + "github.com/apache/arrow/go/v14/arrow/endian" + "github.com/apache/arrow/go/v14/arrow/float16" + "github.com/apache/arrow/go/v14/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/memory" "golang.org/x/xerrors" ) @@ -386,7 +386,7 @@ func (s *Decimal256) CastTo(to arrow.DataType) (Scalar, error) { return NewStringScalar(val.Quo(val, scale).Text('g', int(dt.Precision))), nil } - return nil, fmt.Errorf("cannot cast non-nil decimal128 scalar to type %s", to) + return nil, fmt.Errorf("cannot cast 
non-nil decimal256 scalar to type %s", to) } func NewDecimal256Scalar(val decimal256.Num, typ arrow.DataType) *Decimal256 { diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/scalar/temporal.go b/vendor/github.com/apache/arrow/go/v14/arrow/scalar/temporal.go similarity index 99% rename from vendor/github.com/apache/arrow/go/v12/arrow/scalar/temporal.go rename to vendor/github.com/apache/arrow/go/v14/arrow/scalar/temporal.go index 6b6eff7d..880416f7 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/scalar/temporal.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/scalar/temporal.go @@ -22,7 +22,7 @@ import ( "time" "unsafe" - "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v14/arrow" ) func temporalToString(s TemporalScalar) string { diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/schema.go b/vendor/github.com/apache/arrow/go/v14/arrow/schema.go similarity index 88% rename from vendor/github.com/apache/arrow/go/v12/arrow/schema.go rename to vendor/github.com/apache/arrow/go/v14/arrow/schema.go index 87bfe2b4..a7fa4341 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/schema.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/schema.go @@ -21,7 +21,7 @@ import ( "sort" "strings" - "github.com/apache/arrow/go/v12/arrow/endian" + "github.com/apache/arrow/go/v14/arrow/endian" ) type Metadata struct { @@ -66,6 +66,13 @@ func MetadataFrom(kv map[string]string) Metadata { func (md Metadata) Len() int { return len(md.keys) } func (md Metadata) Keys() []string { return md.keys } func (md Metadata) Values() []string { return md.values } +func (md Metadata) ToMap() map[string]string { + m := make(map[string]string, len(md.keys)) + for i := range md.keys { + m[md.keys[i]] = md.values[i] + } + return m +} func (md Metadata) String() string { o := new(strings.Builder) @@ -187,8 +194,13 @@ func (sc *Schema) WithEndianness(e endian.Endianness) *Schema { func (sc *Schema) Endianness() endian.Endianness { return sc.endianness } func (sc *Schema) IsNativeEndian() bool { return sc.endianness == endian.NativeEndian } func (sc *Schema) Metadata() Metadata { return sc.meta } -func (sc *Schema) Fields() []Field { return sc.fields } -func (sc *Schema) Field(i int) Field { return sc.fields[i] } +func (sc *Schema) Fields() []Field { + fields := make([]Field, len(sc.fields)) + copy(fields, sc.fields) + return fields +} +func (sc *Schema) Field(i int) Field { return sc.fields[i] } +func (sc *Schema) NumFields() int { return len(sc.fields) } func (sc *Schema) FieldsByName(n string) ([]Field, bool) { indices, ok := sc.index[n] @@ -232,6 +244,19 @@ func (sc *Schema) Equal(o *Schema) bool { return true } +// AddField adds a field at the given index and return a new schema. 
+func (s *Schema) AddField(i int, field Field) (*Schema, error) { + if i < 0 || i > len(s.fields) { + return nil, fmt.Errorf("arrow: invalid field index %d", i) + } + + fields := make([]Field, len(s.fields)+1) + copy(fields[:i], s.fields[:i]) + fields[i] = field + copy(fields[i+1:], s.fields[i:]) + return NewSchema(fields, &s.meta), nil +} + func (s *Schema) String() string { o := new(strings.Builder) fmt.Fprintf(o, "schema:\n fields: %d\n", len(s.Fields())) diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/table.go b/vendor/github.com/apache/arrow/go/v14/arrow/table.go similarity index 95% rename from vendor/github.com/apache/arrow/go/v12/arrow/table.go rename to vendor/github.com/apache/arrow/go/v14/arrow/table.go index 0d20d955..5a68085f 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/table.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/table.go @@ -20,7 +20,7 @@ import ( "fmt" "sync/atomic" - "github.com/apache/arrow/go/v12/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/internal/debug" ) // Table represents a logical sequence of chunked arrays of equal length. It is @@ -33,6 +33,10 @@ type Table interface { NumCols() int64 Column(i int) *Column + // AddColumn adds a new column to the table and a corresponding field (of the same type) + // to its schema, at the specified position. Returns the new table with updated columns and schema. + AddColumn(pos int, f Field, c Column) (Table, error) + Retain() Release() } diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/tools.go b/vendor/github.com/apache/arrow/go/v14/arrow/tools.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/tools.go rename to vendor/github.com/apache/arrow/go/v14/arrow/tools.go diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/type_string.go b/vendor/github.com/apache/arrow/go/v14/arrow/type_string.go similarity index 89% rename from vendor/github.com/apache/arrow/go/v12/arrow/type_string.go rename to vendor/github.com/apache/arrow/go/v14/arrow/type_string.go index 41a40738..ee3ccb7e 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/type_string.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/type_string.go @@ -47,11 +47,15 @@ func _() { _ = x[LARGE_LIST-36] _ = x[INTERVAL_MONTH_DAY_NANO-37] _ = x[RUN_END_ENCODED-38] + _ = x[STRING_VIEW-39] + _ = x[BINARY_VIEW-40] + _ = x[LIST_VIEW-41] + _ = x[LARGE_LIST_VIEW-42] } -const _Type_name = "NULLBOOLUINT8INT8UINT16INT16UINT32INT32UINT64INT64FLOAT16FLOAT32FLOAT64STRINGBINARYFIXED_SIZE_BINARYDATE32DATE64TIMESTAMPTIME32TIME64INTERVAL_MONTHSINTERVAL_DAY_TIMEDECIMAL128DECIMAL256LISTSTRUCTSPARSE_UNIONDENSE_UNIONDICTIONARYMAPEXTENSIONFIXED_SIZE_LISTDURATIONLARGE_STRINGLARGE_BINARYLARGE_LISTINTERVAL_MONTH_DAY_NANORUN_END_ENCODED" +const _Type_name = "NULLBOOLUINT8INT8UINT16INT16UINT32INT32UINT64INT64FLOAT16FLOAT32FLOAT64STRINGBINARYFIXED_SIZE_BINARYDATE32DATE64TIMESTAMPTIME32TIME64INTERVAL_MONTHSINTERVAL_DAY_TIMEDECIMAL128DECIMAL256LISTSTRUCTSPARSE_UNIONDENSE_UNIONDICTIONARYMAPEXTENSIONFIXED_SIZE_LISTDURATIONLARGE_STRINGLARGE_BINARYLARGE_LISTINTERVAL_MONTH_DAY_NANORUN_END_ENCODEDSTRING_VIEWBINARY_VIEWLIST_VIEWLARGE_LIST_VIEW" -var _Type_index = [...]uint16{0, 4, 8, 13, 17, 23, 28, 34, 39, 45, 50, 57, 64, 71, 77, 83, 100, 106, 112, 121, 127, 133, 148, 165, 175, 185, 189, 195, 207, 218, 228, 231, 240, 255, 263, 275, 287, 297, 320, 335} +var _Type_index = [...]uint16{0, 4, 8, 13, 17, 23, 28, 34, 39, 45, 50, 57, 64, 71, 77, 83, 100, 106, 112, 121, 127, 133, 148, 165, 175, 185, 189, 195, 207, 218, 228, 
231, 240, 255, 263, 275, 287, 297, 320, 335, 346, 357, 366, 381} func (i Type) String() string { if i < 0 || i >= Type(len(_Type_index)-1) { diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/type_traits_boolean.go b/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_boolean.go similarity index 95% rename from vendor/github.com/apache/arrow/go/v12/arrow/type_traits_boolean.go rename to vendor/github.com/apache/arrow/go/v14/arrow/type_traits_boolean.go index c4e93f5e..6a46bdec 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/type_traits_boolean.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_boolean.go @@ -17,7 +17,7 @@ package arrow import ( - "github.com/apache/arrow/go/v12/arrow/bitutil" + "github.com/apache/arrow/go/v14/arrow/bitutil" ) type booleanTraits struct{} diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/type_traits_decimal128.go b/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_decimal128.go similarity index 81% rename from vendor/github.com/apache/arrow/go/v12/arrow/type_traits_decimal128.go rename to vendor/github.com/apache/arrow/go/v14/arrow/type_traits_decimal128.go index 1fc653a1..d2d3aae3 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/type_traits_decimal128.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_decimal128.go @@ -20,8 +20,8 @@ import ( "reflect" "unsafe" - "github.com/apache/arrow/go/v12/arrow/decimal128" - "github.com/apache/arrow/go/v12/arrow/endian" + "github.com/apache/arrow/go/v14/arrow/decimal128" + "github.com/apache/arrow/go/v14/arrow/endian" ) // Decimal128 traits @@ -49,26 +49,14 @@ func (decimal128Traits) PutValue(b []byte, v decimal128.Num) { func (decimal128Traits) CastFromBytes(b []byte) []decimal128.Num { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []decimal128.Num - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len / Decimal128SizeBytes - s.Cap = h.Cap / Decimal128SizeBytes - - return res + return unsafe.Slice((*decimal128.Num)(unsafe.Pointer(h.Data)), cap(b)/Decimal128SizeBytes)[:len(b)/Decimal128SizeBytes] } // CastToBytes reinterprets the slice b to a slice of bytes. func (decimal128Traits) CastToBytes(b []decimal128.Num) []byte { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []byte - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len * Decimal128SizeBytes - s.Cap = h.Cap * Decimal128SizeBytes - - return res + return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Decimal128SizeBytes)[:len(b)*Decimal128SizeBytes] } // Copy copies src to dst. 
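The traits rewrites throughout this patch replace the manual reflect.SliceHeader surgery with unsafe.Slice (available since Go 1.17), which preserves both length and capacity across the reinterpretation in a single expression. A minimal sketch of the same cast for int64, mirroring the pattern above (the helper name is illustrative):

package main

import (
	"fmt"
	"unsafe"
)

const int64Size = int(unsafe.Sizeof(int64(0)))

// castToInt64 reinterprets b as []int64; len(b) must be a multiple of 8.
func castToInt64(b []byte) []int64 {
	if len(b) == 0 {
		return nil
	}
	// Build the widest slice the capacity allows, then trim to the length.
	return unsafe.Slice((*int64)(unsafe.Pointer(&b[0])), cap(b)/int64Size)[:len(b)/int64Size]
}

func main() {
	buf := make([]byte, 16)
	vals := castToInt64(buf)
	vals[0] = 42
	fmt.Println(len(vals), cap(vals), vals[0]) // 2 2 42
}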
diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/type_traits_decimal256.go b/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_decimal256.go similarity index 78% rename from vendor/github.com/apache/arrow/go/v12/arrow/type_traits_decimal256.go rename to vendor/github.com/apache/arrow/go/v14/arrow/type_traits_decimal256.go index 0fd3256f..256ed68f 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/type_traits_decimal256.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_decimal256.go @@ -20,8 +20,8 @@ import ( "reflect" "unsafe" - "github.com/apache/arrow/go/v12/arrow/decimal256" - "github.com/apache/arrow/go/v12/arrow/endian" + "github.com/apache/arrow/go/v14/arrow/decimal256" + "github.com/apache/arrow/go/v14/arrow/endian" ) // Decimal256 traits @@ -46,25 +46,13 @@ func (decimal256Traits) PutValue(b []byte, v decimal256.Num) { func (decimal256Traits) CastFromBytes(b []byte) []decimal256.Num { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []decimal256.Num - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len / Decimal256SizeBytes - s.Cap = h.Cap / Decimal256SizeBytes - - return res + return unsafe.Slice((*decimal256.Num)(unsafe.Pointer(h.Data)), cap(b)/Decimal256SizeBytes)[:len(b)/Decimal256SizeBytes] } func (decimal256Traits) CastToBytes(b []decimal256.Num) []byte { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []byte - s := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - s.Data = h.Data - s.Len = h.Len * Decimal256SizeBytes - s.Cap = h.Cap * Decimal256SizeBytes - - return res + return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Decimal256SizeBytes)[:len(b)*Decimal256SizeBytes] } func (decimal256Traits) Copy(dst, src []decimal256.Num) { copy(dst, src) } diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/type_traits_float16.go b/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_float16.go similarity index 81% rename from vendor/github.com/apache/arrow/go/v12/arrow/type_traits_float16.go rename to vendor/github.com/apache/arrow/go/v14/arrow/type_traits_float16.go index 1adc6e8d..c40363d3 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/type_traits_float16.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_float16.go @@ -20,8 +20,8 @@ import ( "reflect" "unsafe" - "github.com/apache/arrow/go/v12/arrow/float16" - "github.com/apache/arrow/go/v12/arrow/endian" + "github.com/apache/arrow/go/v14/arrow/endian" + "github.com/apache/arrow/go/v14/arrow/float16" ) // Float16 traits @@ -48,26 +48,14 @@ func (float16Traits) PutValue(b []byte, v float16.Num) { func (float16Traits) CastFromBytes(b []byte) []float16.Num { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []float16.Num - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len / Float16SizeBytes - s.Cap = h.Cap / Float16SizeBytes - - return res + return unsafe.Slice((*float16.Num)(unsafe.Pointer(h.Data)), cap(b)/Float16SizeBytes)[:len(b)/Float16SizeBytes] } // CastToBytes reinterprets the slice b to a slice of bytes. func (float16Traits) CastToBytes(b []float16.Num) []byte { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []byte - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len * Float16SizeBytes - s.Cap = h.Cap * Float16SizeBytes - - return res + return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Float16SizeBytes)[:len(b)*Float16SizeBytes] } // Copy copies src to dst. 
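Earlier in this patch, scalar/nested.go collapses a four-case type switch over List, LargeList, FixedSizeList, and Map into a single arrow.ListLikeType assertion. A minimal sketch of that consolidation pattern with stand-in types (these are illustrative, not the Arrow API):

package main

import "fmt"

// listLike stands in for arrow.ListLikeType: anything that can report
// its element type through Elem().
type listLike interface{ Elem() string }

type listType struct{ elem string }
type mapType struct{ value string }

func (l listType) Elem() string { return l.elem }
func (m mapType) Elem() string  { return m.value } // maps report their value type

// elemOf replaces a per-type switch with one interface assertion.
func elemOf(dt any) string {
	return dt.(listLike).Elem()
}

func main() {
	fmt.Println(elemOf(listType{elem: "int64"})) // int64
	fmt.Println(elemOf(mapType{value: "utf8"}))  // utf8
}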
diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/type_traits_interval.go b/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_interval.go similarity index 79% rename from vendor/github.com/apache/arrow/go/v12/arrow/type_traits_interval.go rename to vendor/github.com/apache/arrow/go/v14/arrow/type_traits_interval.go index 6d68bd0c..35e60570 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/type_traits_interval.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_interval.go @@ -20,8 +20,8 @@ import ( "reflect" "unsafe" - "github.com/apache/arrow/go/v12/arrow/endian" - "github.com/apache/arrow/go/v12/arrow/internal/debug" + "github.com/apache/arrow/go/v14/arrow/endian" + "github.com/apache/arrow/go/v14/arrow/internal/debug" ) var ( @@ -59,26 +59,14 @@ func (monthTraits) PutValue(b []byte, v MonthInterval) { func (monthTraits) CastFromBytes(b []byte) []MonthInterval { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []MonthInterval - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len / MonthIntervalSizeBytes - s.Cap = h.Cap / MonthIntervalSizeBytes - - return res + return unsafe.Slice((*MonthInterval)(unsafe.Pointer(h.Data)), cap(b)/MonthIntervalSizeBytes)[:len(b)/MonthIntervalSizeBytes] } // CastToBytes reinterprets the slice b to a slice of bytes. func (monthTraits) CastToBytes(b []MonthInterval) []byte { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []byte - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len * MonthIntervalSizeBytes - s.Cap = h.Cap * MonthIntervalSizeBytes - - return res + return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*MonthIntervalSizeBytes)[:len(b)*MonthIntervalSizeBytes] } // Copy copies src to dst. @@ -108,26 +96,14 @@ func (daytimeTraits) PutValue(b []byte, v DayTimeInterval) { func (daytimeTraits) CastFromBytes(b []byte) []DayTimeInterval { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []DayTimeInterval - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len / DayTimeIntervalSizeBytes - s.Cap = h.Cap / DayTimeIntervalSizeBytes - - return res + return unsafe.Slice((*DayTimeInterval)(unsafe.Pointer(h.Data)), cap(b)/DayTimeIntervalSizeBytes)[:len(b)/DayTimeIntervalSizeBytes] } // CastToBytes reinterprets the slice b to a slice of bytes. func (daytimeTraits) CastToBytes(b []DayTimeInterval) []byte { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []byte - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len * DayTimeIntervalSizeBytes - s.Cap = h.Cap * DayTimeIntervalSizeBytes - - return res + return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*DayTimeIntervalSizeBytes)[:len(b)*DayTimeIntervalSizeBytes] } // Copy copies src to dst. @@ -158,26 +134,14 @@ func (monthDayNanoTraits) PutValue(b []byte, v MonthDayNanoInterval) { func (monthDayNanoTraits) CastFromBytes(b []byte) []MonthDayNanoInterval { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []MonthDayNanoInterval - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len / MonthDayNanoIntervalSizeBytes - s.Cap = h.Cap / MonthDayNanoIntervalSizeBytes - - return res + return unsafe.Slice((*MonthDayNanoInterval)(unsafe.Pointer(h.Data)), cap(b)/MonthDayNanoIntervalSizeBytes)[:len(b)/MonthDayNanoIntervalSizeBytes] } // CastToBytes reinterprets the slice b to a slice of bytes. 
func (monthDayNanoTraits) CastToBytes(b []MonthDayNanoInterval) []byte { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []byte - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len * MonthDayNanoIntervalSizeBytes - s.Cap = h.Cap * MonthDayNanoIntervalSizeBytes - - return res + return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*MonthDayNanoIntervalSizeBytes)[:len(b)*MonthDayNanoIntervalSizeBytes] } // Copy copies src to dst. diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/type_traits_numeric.gen.go b/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_numeric.gen.go similarity index 69% rename from vendor/github.com/apache/arrow/go/v12/arrow/type_traits_numeric.gen.go rename to vendor/github.com/apache/arrow/go/v14/arrow/type_traits_numeric.gen.go index 725316c7..6edd7529 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/type_traits_numeric.gen.go +++ b/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_numeric.gen.go @@ -23,26 +23,25 @@ import ( "reflect" "unsafe" - "github.com/apache/arrow/go/v12/arrow/endian" + "github.com/apache/arrow/go/v14/arrow/endian" ) var ( - Int64Traits int64Traits - Uint64Traits uint64Traits - Float64Traits float64Traits - Int32Traits int32Traits - Uint32Traits uint32Traits - Float32Traits float32Traits - Int16Traits int16Traits - Uint16Traits uint16Traits - Int8Traits int8Traits - Uint8Traits uint8Traits - TimestampTraits timestampTraits - Time32Traits time32Traits - Time64Traits time64Traits - Date32Traits date32Traits - Date64Traits date64Traits - DurationTraits durationTraits + Int64Traits int64Traits + Uint64Traits uint64Traits + Float64Traits float64Traits + Int32Traits int32Traits + Uint32Traits uint32Traits + Float32Traits float32Traits + Int16Traits int16Traits + Uint16Traits uint16Traits + Int8Traits int8Traits + Uint8Traits uint8Traits + Time32Traits time32Traits + Time64Traits time64Traits + Date32Traits date32Traits + Date64Traits date64Traits + DurationTraits durationTraits ) // Int64 traits @@ -68,26 +67,14 @@ func (int64Traits) PutValue(b []byte, v int64) { func (int64Traits) CastFromBytes(b []byte) []int64 { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []int64 - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len / Int64SizeBytes - s.Cap = h.Cap / Int64SizeBytes - - return res + return unsafe.Slice((*int64)(unsafe.Pointer(h.Data)), cap(b)/Int64SizeBytes)[:len(b)/Int64SizeBytes] } // CastToBytes reinterprets the slice b to a slice of bytes. func (int64Traits) CastToBytes(b []int64) []byte { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []byte - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len * Int64SizeBytes - s.Cap = h.Cap * Int64SizeBytes - - return res + return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Int64SizeBytes)[:len(b)*Int64SizeBytes] } // Copy copies src to dst. @@ -116,26 +103,14 @@ func (uint64Traits) PutValue(b []byte, v uint64) { func (uint64Traits) CastFromBytes(b []byte) []uint64 { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []uint64 - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len / Uint64SizeBytes - s.Cap = h.Cap / Uint64SizeBytes - - return res + return unsafe.Slice((*uint64)(unsafe.Pointer(h.Data)), cap(b)/Uint64SizeBytes)[:len(b)/Uint64SizeBytes] } // CastToBytes reinterprets the slice b to a slice of bytes. 
func (uint64Traits) CastToBytes(b []uint64) []byte { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []byte - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len * Uint64SizeBytes - s.Cap = h.Cap * Uint64SizeBytes - - return res + return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Uint64SizeBytes)[:len(b)*Uint64SizeBytes] } // Copy copies src to dst. @@ -164,26 +139,14 @@ func (float64Traits) PutValue(b []byte, v float64) { func (float64Traits) CastFromBytes(b []byte) []float64 { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []float64 - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len / Float64SizeBytes - s.Cap = h.Cap / Float64SizeBytes - - return res + return unsafe.Slice((*float64)(unsafe.Pointer(h.Data)), cap(b)/Float64SizeBytes)[:len(b)/Float64SizeBytes] } // CastToBytes reinterprets the slice b to a slice of bytes. func (float64Traits) CastToBytes(b []float64) []byte { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []byte - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len * Float64SizeBytes - s.Cap = h.Cap * Float64SizeBytes - - return res + return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Float64SizeBytes)[:len(b)*Float64SizeBytes] } // Copy copies src to dst. @@ -212,26 +175,14 @@ func (int32Traits) PutValue(b []byte, v int32) { func (int32Traits) CastFromBytes(b []byte) []int32 { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []int32 - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len / Int32SizeBytes - s.Cap = h.Cap / Int32SizeBytes - - return res + return unsafe.Slice((*int32)(unsafe.Pointer(h.Data)), cap(b)/Int32SizeBytes)[:len(b)/Int32SizeBytes] } // CastToBytes reinterprets the slice b to a slice of bytes. func (int32Traits) CastToBytes(b []int32) []byte { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []byte - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len * Int32SizeBytes - s.Cap = h.Cap * Int32SizeBytes - - return res + return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Int32SizeBytes)[:len(b)*Int32SizeBytes] } // Copy copies src to dst. @@ -260,26 +211,14 @@ func (uint32Traits) PutValue(b []byte, v uint32) { func (uint32Traits) CastFromBytes(b []byte) []uint32 { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []uint32 - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len / Uint32SizeBytes - s.Cap = h.Cap / Uint32SizeBytes - - return res + return unsafe.Slice((*uint32)(unsafe.Pointer(h.Data)), cap(b)/Uint32SizeBytes)[:len(b)/Uint32SizeBytes] } // CastToBytes reinterprets the slice b to a slice of bytes. func (uint32Traits) CastToBytes(b []uint32) []byte { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []byte - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len * Uint32SizeBytes - s.Cap = h.Cap * Uint32SizeBytes - - return res + return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Uint32SizeBytes)[:len(b)*Uint32SizeBytes] } // Copy copies src to dst. 
@@ -308,26 +247,14 @@ func (float32Traits) PutValue(b []byte, v float32) { func (float32Traits) CastFromBytes(b []byte) []float32 { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []float32 - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len / Float32SizeBytes - s.Cap = h.Cap / Float32SizeBytes - - return res + return unsafe.Slice((*float32)(unsafe.Pointer(h.Data)), cap(b)/Float32SizeBytes)[:len(b)/Float32SizeBytes] } // CastToBytes reinterprets the slice b to a slice of bytes. func (float32Traits) CastToBytes(b []float32) []byte { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []byte - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len * Float32SizeBytes - s.Cap = h.Cap * Float32SizeBytes - - return res + return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Float32SizeBytes)[:len(b)*Float32SizeBytes] } // Copy copies src to dst. @@ -356,26 +283,14 @@ func (int16Traits) PutValue(b []byte, v int16) { func (int16Traits) CastFromBytes(b []byte) []int16 { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []int16 - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len / Int16SizeBytes - s.Cap = h.Cap / Int16SizeBytes - - return res + return unsafe.Slice((*int16)(unsafe.Pointer(h.Data)), cap(b)/Int16SizeBytes)[:len(b)/Int16SizeBytes] } // CastToBytes reinterprets the slice b to a slice of bytes. func (int16Traits) CastToBytes(b []int16) []byte { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []byte - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len * Int16SizeBytes - s.Cap = h.Cap * Int16SizeBytes - - return res + return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Int16SizeBytes)[:len(b)*Int16SizeBytes] } // Copy copies src to dst. @@ -404,26 +319,14 @@ func (uint16Traits) PutValue(b []byte, v uint16) { func (uint16Traits) CastFromBytes(b []byte) []uint16 { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []uint16 - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len / Uint16SizeBytes - s.Cap = h.Cap / Uint16SizeBytes - - return res + return unsafe.Slice((*uint16)(unsafe.Pointer(h.Data)), cap(b)/Uint16SizeBytes)[:len(b)/Uint16SizeBytes] } // CastToBytes reinterprets the slice b to a slice of bytes. func (uint16Traits) CastToBytes(b []uint16) []byte { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []byte - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len * Uint16SizeBytes - s.Cap = h.Cap * Uint16SizeBytes - - return res + return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Uint16SizeBytes)[:len(b)*Uint16SizeBytes] } // Copy copies src to dst. @@ -452,26 +355,14 @@ func (int8Traits) PutValue(b []byte, v int8) { func (int8Traits) CastFromBytes(b []byte) []int8 { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []int8 - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len / Int8SizeBytes - s.Cap = h.Cap / Int8SizeBytes - - return res + return unsafe.Slice((*int8)(unsafe.Pointer(h.Data)), cap(b)/Int8SizeBytes)[:len(b)/Int8SizeBytes] } // CastToBytes reinterprets the slice b to a slice of bytes. 
func (int8Traits) CastToBytes(b []int8) []byte { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []byte - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len * Int8SizeBytes - s.Cap = h.Cap * Int8SizeBytes - - return res + return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Int8SizeBytes)[:len(b)*Int8SizeBytes] } // Copy copies src to dst. @@ -500,79 +391,19 @@ func (uint8Traits) PutValue(b []byte, v uint8) { func (uint8Traits) CastFromBytes(b []byte) []uint8 { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []uint8 - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len / Uint8SizeBytes - s.Cap = h.Cap / Uint8SizeBytes - - return res + return unsafe.Slice((*uint8)(unsafe.Pointer(h.Data)), cap(b)/Uint8SizeBytes)[:len(b)/Uint8SizeBytes] } // CastToBytes reinterprets the slice b to a slice of bytes. func (uint8Traits) CastToBytes(b []uint8) []byte { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []byte - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len * Uint8SizeBytes - s.Cap = h.Cap * Uint8SizeBytes - - return res + return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Uint8SizeBytes)[:len(b)*Uint8SizeBytes] } // Copy copies src to dst. func (uint8Traits) Copy(dst, src []uint8) { copy(dst, src) } -// Timestamp traits - -const ( - // TimestampSizeBytes specifies the number of bytes required to store a single Timestamp in memory - TimestampSizeBytes = int(unsafe.Sizeof(Timestamp(0))) -) - -type timestampTraits struct{} - -// BytesRequired returns the number of bytes required to store n elements in memory. -func (timestampTraits) BytesRequired(n int) int { return TimestampSizeBytes * n } - -// PutValue -func (timestampTraits) PutValue(b []byte, v Timestamp) { - endian.Native.PutUint64(b, uint64(v)) -} - -// CastFromBytes reinterprets the slice b to a slice of type Timestamp. -// -// NOTE: len(b) must be a multiple of TimestampSizeBytes. -func (timestampTraits) CastFromBytes(b []byte) []Timestamp { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - var res []Timestamp - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len / TimestampSizeBytes - s.Cap = h.Cap / TimestampSizeBytes - - return res -} - -// CastToBytes reinterprets the slice b to a slice of bytes. -func (timestampTraits) CastToBytes(b []Timestamp) []byte { - h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - - var res []byte - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len * TimestampSizeBytes - s.Cap = h.Cap * TimestampSizeBytes - - return res -} - -// Copy copies src to dst. -func (timestampTraits) Copy(dst, src []Timestamp) { copy(dst, src) } - // Time32 traits const ( @@ -596,26 +427,14 @@ func (time32Traits) PutValue(b []byte, v Time32) { func (time32Traits) CastFromBytes(b []byte) []Time32 { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []Time32 - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len / Time32SizeBytes - s.Cap = h.Cap / Time32SizeBytes - - return res + return unsafe.Slice((*Time32)(unsafe.Pointer(h.Data)), cap(b)/Time32SizeBytes)[:len(b)/Time32SizeBytes] } // CastToBytes reinterprets the slice b to a slice of bytes. 
func (time32Traits) CastToBytes(b []Time32) []byte { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []byte - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len * Time32SizeBytes - s.Cap = h.Cap * Time32SizeBytes - - return res + return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Time32SizeBytes)[:len(b)*Time32SizeBytes] } // Copy copies src to dst. @@ -644,26 +463,14 @@ func (time64Traits) PutValue(b []byte, v Time64) { func (time64Traits) CastFromBytes(b []byte) []Time64 { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []Time64 - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len / Time64SizeBytes - s.Cap = h.Cap / Time64SizeBytes - - return res + return unsafe.Slice((*Time64)(unsafe.Pointer(h.Data)), cap(b)/Time64SizeBytes)[:len(b)/Time64SizeBytes] } // CastToBytes reinterprets the slice b to a slice of bytes. func (time64Traits) CastToBytes(b []Time64) []byte { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []byte - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len * Time64SizeBytes - s.Cap = h.Cap * Time64SizeBytes - - return res + return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Time64SizeBytes)[:len(b)*Time64SizeBytes] } // Copy copies src to dst. @@ -692,26 +499,14 @@ func (date32Traits) PutValue(b []byte, v Date32) { func (date32Traits) CastFromBytes(b []byte) []Date32 { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []Date32 - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len / Date32SizeBytes - s.Cap = h.Cap / Date32SizeBytes - - return res + return unsafe.Slice((*Date32)(unsafe.Pointer(h.Data)), cap(b)/Date32SizeBytes)[:len(b)/Date32SizeBytes] } // CastToBytes reinterprets the slice b to a slice of bytes. func (date32Traits) CastToBytes(b []Date32) []byte { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []byte - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len * Date32SizeBytes - s.Cap = h.Cap * Date32SizeBytes - - return res + return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Date32SizeBytes)[:len(b)*Date32SizeBytes] } // Copy copies src to dst. @@ -740,26 +535,14 @@ func (date64Traits) PutValue(b []byte, v Date64) { func (date64Traits) CastFromBytes(b []byte) []Date64 { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []Date64 - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len / Date64SizeBytes - s.Cap = h.Cap / Date64SizeBytes - - return res + return unsafe.Slice((*Date64)(unsafe.Pointer(h.Data)), cap(b)/Date64SizeBytes)[:len(b)/Date64SizeBytes] } // CastToBytes reinterprets the slice b to a slice of bytes. func (date64Traits) CastToBytes(b []Date64) []byte { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []byte - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len * Date64SizeBytes - s.Cap = h.Cap * Date64SizeBytes - - return res + return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*Date64SizeBytes)[:len(b)*Date64SizeBytes] } // Copy copies src to dst. 
@@ -788,26 +571,14 @@ func (durationTraits) PutValue(b []byte, v Duration) { func (durationTraits) CastFromBytes(b []byte) []Duration { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []Duration - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len / DurationSizeBytes - s.Cap = h.Cap / DurationSizeBytes - - return res + return unsafe.Slice((*Duration)(unsafe.Pointer(h.Data)), cap(b)/DurationSizeBytes)[:len(b)/DurationSizeBytes] } // CastToBytes reinterprets the slice b to a slice of bytes. func (durationTraits) CastToBytes(b []Duration) []byte { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []byte - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len * DurationSizeBytes - s.Cap = h.Cap * DurationSizeBytes - - return res + return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*DurationSizeBytes)[:len(b)*DurationSizeBytes] } // Copy copies src to dst. diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/type_traits_numeric.gen.go.tmpl b/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_numeric.gen.go.tmpl similarity index 86% rename from vendor/github.com/apache/arrow/go/v12/arrow/type_traits_numeric.gen.go.tmpl rename to vendor/github.com/apache/arrow/go/v14/arrow/type_traits_numeric.gen.go.tmpl index 8b18a924..ffae975c 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/type_traits_numeric.gen.go.tmpl +++ b/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_numeric.gen.go.tmpl @@ -21,7 +21,7 @@ import ( "reflect" "unsafe" - "github.com/apache/arrow/go/v12/arrow/endian" + "github.com/apache/arrow/go/v14/arrow/endian" ) var ( @@ -68,26 +68,14 @@ func ({{.name}}Traits) PutValue(b []byte, v {{.Type}}) { func ({{.name}}Traits) CastFromBytes(b []byte) []{{.Type}} { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []{{.Type}} - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len/{{.Name}}SizeBytes - s.Cap = h.Cap/{{.Name}}SizeBytes - - return res + return unsafe.Slice((*{{.Type}})(unsafe.Pointer(h.Data)), cap(b)/{{.Name}}SizeBytes)[:len(b)/{{.Name}}SizeBytes] } // CastToBytes reinterprets the slice b to a slice of bytes. func ({{.name}}Traits) CastToBytes(b []{{.Type}}) []byte { h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - var res []byte - s := (*reflect.SliceHeader)(unsafe.Pointer(&res)) - s.Data = h.Data - s.Len = h.Len*{{.Name}}SizeBytes - s.Cap = h.Cap*{{.Name}}SizeBytes - - return res + return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*{{.Name}}SizeBytes)[:len(b)*{{.Name}}SizeBytes] } // Copy copies src to dst. 
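For readers skimming the hunks above: every generated CastFromBytes/CastToBytes pair collapses from manual reflect.SliceHeader surgery into a single unsafe.Slice call that rebuilds the slice over the full capacity and then reslices to the logical length. A minimal, self-contained sketch of the new pattern, with Time32 standing in for any fixed-size element type (illustrative only, not part of the vendored code):

    package main

    import (
        "fmt"
        "reflect"
        "unsafe"
    )

    type Time32 int32

    const Time32SizeBytes = int(unsafe.Sizeof(Time32(0)))

    // castFromBytes reinterprets b as []Time32 without copying: it reads the
    // backing pointer out of the slice header, builds a []Time32 spanning the
    // full capacity, then reslices to the logical length. len(b) must be a
    // multiple of Time32SizeBytes, and b must stay reachable while the
    // result is in use.
    func castFromBytes(b []byte) []Time32 {
        h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
        return unsafe.Slice((*Time32)(unsafe.Pointer(h.Data)), cap(b)/Time32SizeBytes)[:len(b)/Time32SizeBytes]
    }

    func main() {
        raw := []byte{1, 0, 0, 0, 2, 0, 0, 0}
        vals := castFromBytes(raw)
        fmt.Println(len(vals), vals[0], vals[1]) // 2 1 2 on little-endian hosts
    }

Building over cap(b) and then reslicing to len(b) preserves the spare capacity of the original buffer, which is exactly what the deleted SliceHeader code achieved by setting Len and Cap separately.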
diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/type_traits_numeric.gen_test.go.tmpl b/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_numeric.gen_test.go.tmpl similarity index 97% rename from vendor/github.com/apache/arrow/go/v12/arrow/type_traits_numeric.gen_test.go.tmpl rename to vendor/github.com/apache/arrow/go/v14/arrow/type_traits_numeric.gen_test.go.tmpl index 8bb1897f..96685f31 100644 --- a/vendor/github.com/apache/arrow/go/v12/arrow/type_traits_numeric.gen_test.go.tmpl +++ b/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_numeric.gen_test.go.tmpl @@ -20,7 +20,7 @@ import ( "reflect" "testing" - "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v14/arrow" ) {{- range .In}} diff --git a/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_timestamp.go b/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_timestamp.go new file mode 100644 index 00000000..7c393b35 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v14/arrow/type_traits_timestamp.go @@ -0,0 +1,59 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package arrow + +import ( + "reflect" + "unsafe" + + "github.com/apache/arrow/go/v14/arrow/endian" +) + +var TimestampTraits timestampTraits + +const ( + // TimestampSizeBytes specifies the number of bytes required to store a single Timestamp in memory + TimestampSizeBytes = int(unsafe.Sizeof(Timestamp(0))) +) + +type timestampTraits struct{} + +// BytesRequired returns the number of bytes required to store n elements in memory. +func (timestampTraits) BytesRequired(n int) int { return TimestampSizeBytes * n } + +func (timestampTraits) PutValue(b []byte, v Timestamp) { + endian.Native.PutUint64(b, uint64(v)) +} + +// CastFromBytes reinterprets the slice b to a slice of type Timestamp. +// +// NOTE: len(b) must be a multiple of TimestampSizeBytes. +func (timestampTraits) CastFromBytes(b []byte) []Timestamp { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + return unsafe.Slice((*Timestamp)(unsafe.Pointer(h.Data)), cap(b)/TimestampSizeBytes)[:len(b)/TimestampSizeBytes] +} + +// CastToBytes reinterprets the slice b to a slice of bytes. +func (timestampTraits) CastToBytes(b []Timestamp) []byte { + h := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + + return unsafe.Slice((*byte)(unsafe.Pointer(h.Data)), cap(b)*TimestampSizeBytes)[:len(b)*TimestampSizeBytes] +} + +// Copy copies src to dst. 
+func (timestampTraits) Copy(dst, src []Timestamp) { copy(dst, src) } diff --git a/vendor/github.com/apache/arrow/go/v12/arrow/unionmode_string.go b/vendor/github.com/apache/arrow/go/v14/arrow/unionmode_string.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/arrow/unionmode_string.go rename to vendor/github.com/apache/arrow/go/v14/arrow/unionmode_string.go diff --git a/vendor/github.com/apache/arrow/go/v12/internal/bitutils/bit_block_counter.go b/vendor/github.com/apache/arrow/go/v14/internal/bitutils/bit_block_counter.go similarity index 99% rename from vendor/github.com/apache/arrow/go/v12/internal/bitutils/bit_block_counter.go rename to vendor/github.com/apache/arrow/go/v14/internal/bitutils/bit_block_counter.go index 8f724ebb..86818bfd 100644 --- a/vendor/github.com/apache/arrow/go/v12/internal/bitutils/bit_block_counter.go +++ b/vendor/github.com/apache/arrow/go/v14/internal/bitutils/bit_block_counter.go @@ -21,8 +21,8 @@ import ( "math/bits" "unsafe" - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/internal/utils" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/internal/utils" ) func loadWord(byt []byte) uint64 { diff --git a/vendor/github.com/apache/arrow/go/v12/internal/bitutils/bit_run_reader.go b/vendor/github.com/apache/arrow/go/v14/internal/bitutils/bit_run_reader.go similarity index 97% rename from vendor/github.com/apache/arrow/go/v12/internal/bitutils/bit_run_reader.go rename to vendor/github.com/apache/arrow/go/v14/internal/bitutils/bit_run_reader.go index 5ff8d518..a1686a49 100644 --- a/vendor/github.com/apache/arrow/go/v12/internal/bitutils/bit_run_reader.go +++ b/vendor/github.com/apache/arrow/go/v14/internal/bitutils/bit_run_reader.go @@ -22,9 +22,9 @@ import ( "math/bits" "unsafe" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/internal/utils" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/internal/utils" ) // BitRun represents a run of bits with the same value of length Len diff --git a/vendor/github.com/apache/arrow/go/v12/internal/bitutils/bit_set_run_reader.go b/vendor/github.com/apache/arrow/go/v14/internal/bitutils/bit_set_run_reader.go similarity index 99% rename from vendor/github.com/apache/arrow/go/v12/internal/bitutils/bit_set_run_reader.go rename to vendor/github.com/apache/arrow/go/v14/internal/bitutils/bit_set_run_reader.go index 6369c094..a2269ffe 100644 --- a/vendor/github.com/apache/arrow/go/v12/internal/bitutils/bit_set_run_reader.go +++ b/vendor/github.com/apache/arrow/go/v14/internal/bitutils/bit_set_run_reader.go @@ -20,8 +20,8 @@ import ( "encoding/binary" "math/bits" - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/internal/utils" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/internal/utils" ) // IsMultipleOf64 returns whether v is a multiple of 64. 
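The Timestamp traits are carved out of the generated numeric traits into the new type_traits_timestamp.go above, keeping the same surface and the same unsafe.Slice cast. A hedged usage sketch, assuming the arrow/v14 module resolves on the import path (the example itself is not part of the patch):

    package main

    import (
        "fmt"

        "github.com/apache/arrow/go/v14/arrow"
    )

    func main() {
        // Room for two timestamps; write one value, then reinterpret the
        // raw buffer back as []arrow.Timestamp without copying.
        buf := make([]byte, arrow.TimestampTraits.BytesRequired(2))
        arrow.TimestampTraits.PutValue(buf, arrow.Timestamp(42))
        ts := arrow.TimestampTraits.CastFromBytes(buf)
        fmt.Println(len(ts), ts[0]) // 2 42
    }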
diff --git a/vendor/github.com/apache/arrow/go/v12/internal/bitutils/bitmap_generate.go b/vendor/github.com/apache/arrow/go/v14/internal/bitutils/bitmap_generate.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v12/internal/bitutils/bitmap_generate.go rename to vendor/github.com/apache/arrow/go/v14/internal/bitutils/bitmap_generate.go index 1428e49e..78219d81 100644 --- a/vendor/github.com/apache/arrow/go/v12/internal/bitutils/bitmap_generate.go +++ b/vendor/github.com/apache/arrow/go/v14/internal/bitutils/bitmap_generate.go @@ -16,7 +16,7 @@ package bitutils -import "github.com/apache/arrow/go/v12/arrow/bitutil" +import "github.com/apache/arrow/go/v14/arrow/bitutil" // GenerateBits writes sequential bits to a bitmap. Bits preceding the // initial start offset are preserved, bits following the bitmap may diff --git a/vendor/github.com/apache/arrow/go/v14/internal/hashing/hash_funcs.go b/vendor/github.com/apache/arrow/go/v14/internal/hashing/hash_funcs.go new file mode 100644 index 00000000..c1bdfeb6 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v14/internal/hashing/hash_funcs.go @@ -0,0 +1,90 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package hashing + +import ( + "math/bits" + "unsafe" + + "github.com/zeebo/xxh3" +) + +func hashInt(val uint64, alg uint64) uint64 { + // Two of xxhash's prime multipliers (which are chosen for their + // bit dispersion properties) + var multipliers = [2]uint64{11400714785074694791, 14029467366897019727} + // Multiplying by the prime number mixes the low bits into the high bits, + // then byte-swapping (which is a single CPU instruction) allows the + // combined high and low bits to participate in the initial hash table index. + return bits.ReverseBytes64(multipliers[alg] * val) +} + +func hashFloat32(val float32, alg uint64) uint64 { + // grab the raw byte pattern of the float + bt := *(*[4]byte)(unsafe.Pointer(&val)) + x := uint64(*(*uint32)(unsafe.Pointer(&bt[0]))) + hx := hashInt(x, alg) + hy := hashInt(x, alg^1) + return 4 ^ hx ^ hy +} + +func hashFloat64(val float64, alg uint64) uint64 { + bt := *(*[8]byte)(unsafe.Pointer(&val)) + hx := hashInt(uint64(*(*uint32)(unsafe.Pointer(&bt[4]))), alg) + hy := hashInt(uint64(*(*uint32)(unsafe.Pointer(&bt[0]))), alg^1) + return 8 ^ hx ^ hy +} + +// prime constants used for slightly increasing the hash quality further +var exprimes = [2]uint64{1609587929392839161, 9650029242287828579} + +// for smaller amounts of bytes this is faster than even calling into +// xxh3 to do the Hash, so we specialize in order to get the benefits +// of that performance.
+func Hash(b []byte, alg uint64) uint64 { + n := uint32(len(b)) + if n <= 16 { + switch { + case n > 8: + // 8 < length <= 16 + // apply same principle as above, but as two 64-bit ints + x := *(*uint64)(unsafe.Pointer(&b[n-8])) + y := *(*uint64)(unsafe.Pointer(&b[0])) + hx := hashInt(x, alg) + hy := hashInt(y, alg^1) + return uint64(n) ^ hx ^ hy + case n >= 4: + // 4 <= length <= 8 + // we can read the bytes as two overlapping 32-bit ints, apply different + // hash functions to each in parallel + // then xor the results + x := *(*uint32)(unsafe.Pointer(&b[n-4])) + y := *(*uint32)(unsafe.Pointer(&b[0])) + hx := hashInt(uint64(x), alg) + hy := hashInt(uint64(y), alg^1) + return uint64(n) ^ hx ^ hy + case n > 0: + x := uint32((n << 24) ^ (uint32(b[0]) << 16) ^ (uint32(b[n/2]) << 8) ^ uint32(b[n-1])) + return hashInt(uint64(x), alg) + case n == 0: + return 1 + } + } + + // increase differentiation enough to improve hash quality + return xxh3.Hash(b) + exprimes[alg] +} diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/internal/debug/log_on.go b/vendor/github.com/apache/arrow/go/v14/internal/hashing/hash_string.go similarity index 81% rename from vendor/github.com/apache/arrow/go/v12/parquet/internal/debug/log_on.go rename to vendor/github.com/apache/arrow/go/v14/internal/hashing/hash_string.go index 8d610609..b772c7d7 100644 --- a/vendor/github.com/apache/arrow/go/v12/parquet/internal/debug/log_on.go +++ b/vendor/github.com/apache/arrow/go/v14/internal/hashing/hash_string.go @@ -14,19 +14,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build debug +//go:build go1.20 || tinygo -package debug +package hashing -import ( - "log" - "os" -) +import "unsafe" -var ( - debug = log.New(os.Stderr, "[D] ", log.LstdFlags) -) - -func Log(msg interface{}) { - debug.Println(msg) +func hashString(val string, alg uint64) uint64 { + buf := unsafe.Slice(unsafe.StringData(val), len(val)) + return Hash(buf, alg) } diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/tools.go b/vendor/github.com/apache/arrow/go/v14/internal/hashing/hash_string_go1.19.go similarity index 57% rename from vendor/github.com/apache/arrow/go/v12/parquet/tools.go rename to vendor/github.com/apache/arrow/go/v14/internal/hashing/hash_string_go1.19.go index b9ce84de..f38eb5c5 100644 --- a/vendor/github.com/apache/arrow/go/v12/parquet/tools.go +++ b/vendor/github.com/apache/arrow/go/v14/internal/hashing/hash_string_go1.19.go @@ -14,13 +14,24 @@ // See the License for the specific language governing permissions and // limitations under the License.
-// +build tools +//go:build !go1.20 && !tinygo -package tools +package hashing import ( - _ "github.com/klauspost/asmfmt/cmd/asmfmt" - _ "github.com/minio/asm2plan9s" - _ "github.com/minio/c2goasm" - _ "golang.org/x/tools/cmd/stringer" + "reflect" + "unsafe" ) + +func hashString(val string, alg uint64) uint64 { + if val == "" { + return Hash([]byte{}, alg) + } + // highly efficient way to get byte slice without copy before + // the introduction of unsafe.StringData in go1.20 + // (https://stackoverflow.com/questions/59209493/how-to-use-unsafe-get-a-byte-slice-from-a-string-without-memory-copy) + const MaxInt32 = 1<<31 - 1 + buf := (*[MaxInt32]byte)(unsafe.Pointer((*reflect.StringHeader)( + unsafe.Pointer(&val)).Data))[: len(val)&MaxInt32 : len(val)&MaxInt32] + return Hash(buf, alg) +} diff --git a/vendor/github.com/apache/arrow/go/v12/internal/hashing/types.tmpldata b/vendor/github.com/apache/arrow/go/v14/internal/hashing/types.tmpldata similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/internal/hashing/types.tmpldata rename to vendor/github.com/apache/arrow/go/v14/internal/hashing/types.tmpldata diff --git a/vendor/github.com/apache/arrow/go/v12/internal/hashing/xxh3_memo_table.gen.go b/vendor/github.com/apache/arrow/go/v14/internal/hashing/xxh3_memo_table.gen.go similarity index 98% rename from vendor/github.com/apache/arrow/go/v12/internal/hashing/xxh3_memo_table.gen.go rename to vendor/github.com/apache/arrow/go/v14/internal/hashing/xxh3_memo_table.gen.go index 1ab3bf22..cc996552 100644 --- a/vendor/github.com/apache/arrow/go/v12/internal/hashing/xxh3_memo_table.gen.go +++ b/vendor/github.com/apache/arrow/go/v14/internal/hashing/xxh3_memo_table.gen.go @@ -21,9 +21,9 @@ package hashing import ( "math" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/internal/utils" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/internal/utils" ) type payloadInt8 struct { @@ -298,6 +298,11 @@ func (s *Int8MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err e return } +// GetOrInsertBytes is unimplemented +func (s *Int8MemoTable) GetOrInsertBytes(val []byte) (idx int, found bool, err error) { + panic("unimplemented") +} + type payloadUint8 struct { val uint8 memoIdx int32 @@ -570,6 +575,11 @@ func (s *Uint8MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err return } +// GetOrInsertBytes is unimplemented +func (s *Uint8MemoTable) GetOrInsertBytes(val []byte) (idx int, found bool, err error) { + panic("unimplemented") +} + type payloadInt16 struct { val int16 memoIdx int32 @@ -842,6 +852,11 @@ func (s *Int16MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err return } +// GetOrInsertBytes is unimplemented +func (s *Int16MemoTable) GetOrInsertBytes(val []byte) (idx int, found bool, err error) { + panic("unimplemented") +} + type payloadUint16 struct { val uint16 memoIdx int32 @@ -1114,6 +1129,11 @@ func (s *Uint16MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err return } +// GetOrInsertBytes is unimplemented +func (s *Uint16MemoTable) GetOrInsertBytes(val []byte) (idx int, found bool, err error) { + panic("unimplemented") +} + type payloadInt32 struct { val int32 memoIdx int32 @@ -1386,6 +1406,11 @@ func (s *Int32MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err return } +// GetOrInsertBytes is unimplemented +func (s *Int32MemoTable) GetOrInsertBytes(val []byte) 
(idx int, found bool, err error) { + panic("unimplemented") +} + type payloadInt64 struct { val int64 memoIdx int32 @@ -1658,6 +1683,11 @@ func (s *Int64MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err return } +// GetOrInsertBytes is unimplemented +func (s *Int64MemoTable) GetOrInsertBytes(val []byte) (idx int, found bool, err error) { + panic("unimplemented") +} + type payloadUint32 struct { val uint32 memoIdx int32 @@ -1930,6 +1960,11 @@ func (s *Uint32MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err return } +// GetOrInsertBytes is unimplemented +func (s *Uint32MemoTable) GetOrInsertBytes(val []byte) (idx int, found bool, err error) { + panic("unimplemented") +} + type payloadUint64 struct { val uint64 memoIdx int32 @@ -2202,6 +2237,11 @@ func (s *Uint64MemoTable) GetOrInsert(val interface{}) (idx int, found bool, err return } +// GetOrInsertBytes is unimplemented +func (s *Uint64MemoTable) GetOrInsertBytes(val []byte) (idx int, found bool, err error) { + panic("unimplemented") +} + type payloadFloat32 struct { val float32 memoIdx int32 @@ -2493,6 +2533,11 @@ func (s *Float32MemoTable) GetOrInsert(val interface{}) (idx int, found bool, er return } +// GetOrInsertBytes is unimplemented +func (s *Float32MemoTable) GetOrInsertBytes(val []byte) (idx int, found bool, err error) { + panic("unimplemented") +} + type payloadFloat64 struct { val float64 memoIdx int32 @@ -2781,3 +2826,8 @@ func (s *Float64MemoTable) GetOrInsert(val interface{}) (idx int, found bool, er } return } + +// GetOrInsertBytes is unimplemented +func (s *Float64MemoTable) GetOrInsertBytes(val []byte) (idx int, found bool, err error) { + panic("unimplemented") +} diff --git a/vendor/github.com/apache/arrow/go/v12/internal/hashing/xxh3_memo_table.gen.go.tmpl b/vendor/github.com/apache/arrow/go/v14/internal/hashing/xxh3_memo_table.gen.go.tmpl similarity index 97% rename from vendor/github.com/apache/arrow/go/v12/internal/hashing/xxh3_memo_table.gen.go.tmpl rename to vendor/github.com/apache/arrow/go/v14/internal/hashing/xxh3_memo_table.gen.go.tmpl index 60665788..25164341 100644 --- a/vendor/github.com/apache/arrow/go/v12/internal/hashing/xxh3_memo_table.gen.go.tmpl +++ b/vendor/github.com/apache/arrow/go/v14/internal/hashing/xxh3_memo_table.gen.go.tmpl @@ -17,8 +17,8 @@ package hashing import ( - "github.com/apache/arrow/go/v12/arrow/bitutil" - "github.com/apache/arrow/go/v12/internal/utils" + "github.com/apache/arrow/go/v14/arrow/bitutil" + "github.com/apache/arrow/go/v14/internal/utils" ) {{range .In}} @@ -340,4 +340,10 @@ func (s *{{.Name}}MemoTable) GetOrInsert(val interface{}) (idx int, found bool, } return } + + +// GetOrInsertBytes is unimplemented +func (s *{{.Name}}MemoTable) GetOrInsertBytes(val []byte) (idx int, found bool, err error) { + panic("unimplemented") +} {{end}} diff --git a/vendor/github.com/apache/arrow/go/v12/internal/hashing/xxh3_memo_table.go b/vendor/github.com/apache/arrow/go/v14/internal/hashing/xxh3_memo_table.go similarity index 80% rename from vendor/github.com/apache/arrow/go/v12/internal/hashing/xxh3_memo_table.go rename to vendor/github.com/apache/arrow/go/v14/internal/hashing/xxh3_memo_table.go index fe4f37f2..81994f0a 100644 --- a/vendor/github.com/apache/arrow/go/v12/internal/hashing/xxh3_memo_table.go +++ b/vendor/github.com/apache/arrow/go/v14/internal/hashing/xxh3_memo_table.go @@ -22,13 +22,8 @@ package hashing import ( "bytes" "math" - "math/bits" "reflect" "unsafe" - - "github.com/apache/arrow/go/v12/parquet" - - "github.com/zeebo/xxh3" ) 
//go:generate go run ../../arrow/_tools/tmpl/main.go -i -data=types.tmpldata xxh3_memo_table.gen.go.tmpl @@ -37,6 +32,10 @@ type TypeTraits interface { BytesRequired(n int) int } +type ByteSlice interface { + Bytes() []byte +} + // MemoTable interface for hash tables and dictionary encoding. // // Values will remember the order they are inserted to generate a valid @@ -54,6 +53,12 @@ type MemoTable interface { // the table (if false, the value was inserted). An error is returned // if val is not the appropriate type for the table. GetOrInsert(val interface{}) (idx int, existed bool, err error) + // GetOrInsertBytes returns the index of the specified value in the table, + // and a boolean indicating whether or not the value was found in + // the table (if false, the value was inserted). An error is returned + // if val is not the appropriate type for the table. This function is intended to be used by + // the BinaryMemoTable to prevent unnecessary allocations of the data when converting from a []byte to interface{}. + GetOrInsertBytes(val []byte) (idx int, existed bool, err error) // GetOrInsertNull returns the index of the null value in the table, // inserting one if it hasn't already been inserted. It returns a boolean // indicating if the null value already existed or not in the table. @@ -76,78 +81,6 @@ type NumericMemoTable interface { WriteOutSubsetLE(offset int, out []byte) } -func hashInt(val uint64, alg uint64) uint64 { - // Two of xxhash's prime multipliers (which are chosen for their - // bit dispersion properties) - var multipliers = [2]uint64{11400714785074694791, 14029467366897019727} - // Multiplying by the prime number mixes the low bits into the high bits, - // then byte-swapping (which is a single CPU instruction) allows the - // combined high and low bits to participate in the initial hash table index. - return bits.ReverseBytes64(multipliers[alg] * val) -} - -func hashFloat32(val float32, alg uint64) uint64 { - // grab the raw byte pattern of the - bt := *(*[4]byte)(unsafe.Pointer(&val)) - x := uint64(*(*uint32)(unsafe.Pointer(&bt[0]))) - hx := hashInt(x, alg) - hy := hashInt(x, alg^1) - return 4 ^ hx ^ hy -} - -func hashFloat64(val float64, alg uint64) uint64 { - bt := *(*[8]byte)(unsafe.Pointer(&val)) - hx := hashInt(uint64(*(*uint32)(unsafe.Pointer(&bt[4]))), alg) - hy := hashInt(uint64(*(*uint32)(unsafe.Pointer(&bt[0]))), alg^1) - return 8 ^ hx ^ hy -} - -func hashString(val string, alg uint64) uint64 { - buf := *(*[]byte)(unsafe.Pointer(&val)) - (*reflect.SliceHeader)(unsafe.Pointer(&buf)).Cap = len(val) - return hash(buf, alg) -} - -// prime constants used for slightly increasing the hash quality further -var exprimes = [2]uint64{1609587929392839161, 9650029242287828579} - -// for smaller amounts of bytes this is faster than even calling into -// xxh3 to do the hash, so we specialize in order to get the benefits -// of that performance.
-func hash(b []byte, alg uint64) uint64 { - n := uint32(len(b)) - if n <= 16 { - switch { - case n > 8: - // 8 < length <= 16 - // apply same principle as above, but as two 64-bit ints - x := *(*uint64)(unsafe.Pointer(&b[n-8])) - y := *(*uint64)(unsafe.Pointer(&b[0])) - hx := hashInt(x, alg) - hy := hashInt(y, alg^1) - return uint64(n) ^ hx ^ hy - case n >= 4: - // 4 < length <= 8 - // we can read the bytes as two overlapping 32-bit ints, apply different - // hash functions to each in parallel - // then xor the results - x := *(*uint32)(unsafe.Pointer(&b[n-4])) - y := *(*uint32)(unsafe.Pointer(&b[0])) - hx := hashInt(uint64(x), alg) - hy := hashInt(uint64(y), alg^1) - return uint64(n) ^ hx ^ hy - case n > 0: - x := uint32((n << 24) ^ (uint32(b[0]) << 16) ^ (uint32(b[n/2]) << 8) ^ uint32(b[n-1])) - return hashInt(uint64(x), alg) - case n == 0: - return 1 - } - } - - // increase differentiation enough to improve hash quality - return xxh3.Hash(b) + exprimes[alg] -} - const ( sentinel uint64 = 0 loadFactor int64 = 2 @@ -241,16 +174,14 @@ func (s *BinaryMemoTable) Size() int { } // helper function to easily return a byte slice for any given value -// regardless of the type if it's a []byte, parquet.ByteArray, -// parquet.FixedLenByteArray or string. +// regardless of the type if it's a []byte, string, or fulfills the +// ByteSlice interface. func (BinaryMemoTable) valAsByteSlice(val interface{}) []byte { switch v := val.(type) { case []byte: return v - case parquet.ByteArray: - return *(*[]byte)(unsafe.Pointer(&v)) - case parquet.FixedLenByteArray: - return *(*[]byte)(unsafe.Pointer(&v)) + case ByteSlice: + return v.Bytes() case string: var out []byte h := (*reflect.StringHeader)(unsafe.Pointer(&v)) @@ -270,11 +201,9 @@ func (BinaryMemoTable) getHash(val interface{}) uint64 { case string: return hashString(v, 0) case []byte: - return hash(v, 0) - case parquet.ByteArray: - return hash(*(*[]byte)(unsafe.Pointer(&v)), 0) - case parquet.FixedLenByteArray: - return hash(*(*[]byte)(unsafe.Pointer(&v)), 0) + return Hash(v, 0) + case ByteSlice: + return Hash(v.Bytes(), 0) default: panic("invalid type for binarymemotable") } @@ -288,10 +217,8 @@ func (b *BinaryMemoTable) appendVal(val interface{}) { b.builder.AppendString(v) case []byte: b.builder.Append(v) - case parquet.ByteArray: - b.builder.Append(*(*[]byte)(unsafe.Pointer(&v))) - case parquet.FixedLenByteArray: - b.builder.Append(*(*[]byte)(unsafe.Pointer(&v))) + case ByteSlice: + b.builder.Append(v.Bytes()) } } @@ -310,6 +237,22 @@ func (b *BinaryMemoTable) Get(val interface{}) (int, bool) { return KeyNotFound, false } +// GetOrInsertBytes returns the index of the given value in the table, if not found +// it is inserted into the table. The return value 'found' indicates whether the value +// was found in the table (true) or inserted (false) along with any possible error. +func (b *BinaryMemoTable) GetOrInsertBytes(val []byte) (idx int, found bool, err error) { + h := Hash(val, 0) + p, found := b.lookup(h, val) + if found { + idx = int(p.payload.val) + } else { + idx = b.Size() + b.builder.Append(val) + b.tbl.Insert(p, h, int32(idx), -1) + } + return +} + // GetOrInsert returns the index of the given value in the table, if not found // it is inserted into the table. The return value 'found' indicates whether the value // was found in the table (true) or inserted (false) along with any possible error. 
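The hunks above also explain why the hash functions were duplicated into hash_funcs.go earlier in this patch: xxh3_memo_table.go drops its parquet import entirely, and the new ByteSlice interface presumably lets parquet's byte-array types plug back in through a Bytes() method without the hashing package knowing about them. GetOrInsertBytes itself exists so callers already holding a []byte can skip the interface{} boxing of GetOrInsert; the generated numeric tables only stub it with a panic, while BinaryMemoTable implements it for real. A sketch of the intended call pattern against the interface, written as if it lived inside the internal hashing package (dictEncode is a hypothetical helper, not part of the patch):

    // dictEncode dictionary-encodes values: each distinct byte string is
    // mapped to the index at which it was first inserted into tbl.
    // GetOrInsertBytes takes the []byte directly, avoiding the interface{}
    // conversion (and its allocation) that GetOrInsert would force per call.
    func dictEncode(tbl MemoTable, values [][]byte) ([]int, error) {
        indices := make([]int, 0, len(values))
        for _, v := range values {
            idx, _, err := tbl.GetOrInsertBytes(v)
            if err != nil {
                return nil, err
            }
            indices = append(indices, idx)
        }
        return indices, nil
    }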
@@ -339,6 +282,10 @@ func (b *BinaryMemoTable) GetOrInsertNull() (idx int, found bool) { return } +func (b *BinaryMemoTable) Value(i int) []byte { + return b.builder.Value(i) +} + // helper function to get the offset into the builder data for a given // index value. func (b *BinaryMemoTable) findOffset(idx int) uintptr { diff --git a/vendor/github.com/apache/arrow/go/v14/internal/json/json.go b/vendor/github.com/apache/arrow/go/v14/internal/json/json.go new file mode 100644 index 00000000..319b12c5 --- /dev/null +++ b/vendor/github.com/apache/arrow/go/v14/internal/json/json.go @@ -0,0 +1,51 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !tinygo +// +build !tinygo + +package json + +import ( + "io" + + "github.com/goccy/go-json" +) + +type Decoder = json.Decoder +type Encoder = json.Encoder +type Marshaler = json.Marshaler +type Delim = json.Delim +type UnmarshalTypeError = json.UnmarshalTypeError +type Number = json.Number +type Unmarshaler = json.Unmarshaler +type RawMessage = json.RawMessage + +func Marshal(v interface{}) ([]byte, error) { + return json.Marshal(v) +} + +func Unmarshal(data []byte, v interface{}) error { + return json.Unmarshal(data, v) +} + +func NewDecoder(r io.Reader) *Decoder { + return json.NewDecoder(r) +} + +func NewEncoder(w io.Writer) *Encoder { + return json.NewEncoder(w) +} diff --git a/vendor/github.com/apache/arrow/go/v12/parquet/internal/debug/assert_on.go b/vendor/github.com/apache/arrow/go/v14/internal/json/json_tinygo.go similarity index 55% rename from vendor/github.com/apache/arrow/go/v12/parquet/internal/debug/assert_on.go rename to vendor/github.com/apache/arrow/go/v14/internal/json/json_tinygo.go index 188e6831..8e4f447b 100644 --- a/vendor/github.com/apache/arrow/go/v12/parquet/internal/debug/assert_on.go +++ b/vendor/github.com/apache/arrow/go/v14/internal/json/json_tinygo.go @@ -14,15 +14,38 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build assert +//go:build tinygo +// +build tinygo -package debug +package json -// Assert will panic with msg if cond is false. 
-// -// msg should be a string or fmt.Stringer -func Assert(cond bool, msg interface{}) { - if !cond { - panic(msg) - } +import ( + "io" + + "encoding/json" +) + +type Decoder = json.Decoder +type Encoder = json.Encoder +type Marshaler = json.Marshaler +type Delim = json.Delim +type UnmarshalTypeError = json.UnmarshalTypeError +type Number = json.Number +type Unmarshaler = json.Unmarshaler +type RawMessage = json.RawMessage + +func Marshal(v interface{}) ([]byte, error) { + return json.Marshal(v) +} + +func Unmarshal(data []byte, v interface{}) error { + return json.Unmarshal(data, v) +} + +func NewDecoder(r io.Reader) *Decoder { + return json.NewDecoder(r) +} + +func NewEncoder(w io.Writer) *Encoder { + return json.NewEncoder(w) } diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/Makefile b/vendor/github.com/apache/arrow/go/v14/internal/utils/Makefile similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/internal/utils/Makefile rename to vendor/github.com/apache/arrow/go/v14/internal/utils/Makefile diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/buf_reader.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/buf_reader.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/internal/utils/buf_reader.go rename to vendor/github.com/apache/arrow/go/v14/internal/utils/buf_reader.go diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/endians_default.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/endians_default.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/internal/utils/endians_default.go rename to vendor/github.com/apache/arrow/go/v14/internal/utils/endians_default.go diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/endians_s390x.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/endians_s390x.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/internal/utils/endians_s390x.go rename to vendor/github.com/apache/arrow/go/v14/internal/utils/endians_s390x.go diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/math.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/math.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/internal/utils/math.go rename to vendor/github.com/apache/arrow/go/v14/internal/utils/math.go diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/internal/utils/min_max.go rename to vendor/github.com/apache/arrow/go/v14/internal/utils/min_max.go diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_amd64.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_amd64.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_amd64.go rename to vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_amd64.go diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_arm64.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_arm64.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_arm64.go rename to vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_arm64.go diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_avx2_amd64.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_avx2_amd64.go similarity index 100% rename 
from vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_avx2_amd64.go rename to vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_avx2_amd64.go diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_avx2_amd64.s b/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_avx2_amd64.s similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_avx2_amd64.s rename to vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_avx2_amd64.s diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_neon_arm64.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_neon_arm64.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_neon_arm64.go rename to vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_neon_arm64.go diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_neon_arm64.s b/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_neon_arm64.s similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_neon_arm64.s rename to vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_neon_arm64.s diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_noasm.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_noasm.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_noasm.go rename to vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_noasm.go diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_ppc64le.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_ppc64le.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_ppc64le.go rename to vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_ppc64le.go diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_s390x.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_s390x.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_s390x.go rename to vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_s390x.go diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_sse4_amd64.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_sse4_amd64.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_sse4_amd64.go rename to vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_sse4_amd64.go diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_sse4_amd64.s b/vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_sse4_amd64.s similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/internal/utils/min_max_sse4_amd64.s rename to vendor/github.com/apache/arrow/go/v14/internal/utils/min_max_sse4_amd64.s diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints.go rename to vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints.go diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints.go.tmpl b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints.go.tmpl similarity index 100% rename from 
vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints.go.tmpl rename to vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints.go.tmpl diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints.tmpldata b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints.tmpldata similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints.tmpldata rename to vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints.tmpldata diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_amd64.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_amd64.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_amd64.go rename to vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_amd64.go diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_amd64.go.tmpl b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_amd64.go.tmpl similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_amd64.go.tmpl rename to vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_amd64.go.tmpl diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_arm64.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_arm64.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_arm64.go rename to vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_arm64.go diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_avx2_amd64.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_avx2_amd64.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_avx2_amd64.go rename to vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_avx2_amd64.go diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_avx2_amd64.s b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_avx2_amd64.s similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_avx2_amd64.s rename to vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_avx2_amd64.s diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_def.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_def.go similarity index 99% rename from vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_def.go rename to vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_def.go index c9e1c84d..cc3b0abb 100644 --- a/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_def.go +++ b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_def.go @@ -19,7 +19,7 @@ package utils import ( "errors" - "github.com/apache/arrow/go/v12/arrow" + "github.com/apache/arrow/go/v14/arrow" ) //go:generate go run ../../arrow/_tools/tmpl -i -data=transpose_ints.tmpldata -d arch=avx2 transpose_ints_simd.go.tmpl=transpose_ints_avx2_amd64.go diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_noasm.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_noasm.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_noasm.go rename to 
vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_noasm.go diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_noasm.go.tmpl b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_noasm.go.tmpl similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_noasm.go.tmpl rename to vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_noasm.go.tmpl diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_ppc64le.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_ppc64le.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_ppc64le.go rename to vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_ppc64le.go diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_s390x.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_s390x.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_s390x.go rename to vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_s390x.go diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_s390x.go.tmpl b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_s390x.go.tmpl similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_s390x.go.tmpl rename to vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_s390x.go.tmpl diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_simd.go.tmpl b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_simd.go.tmpl similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_simd.go.tmpl rename to vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_simd.go.tmpl diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_sse4_amd64.go b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_sse4_amd64.go similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_sse4_amd64.go rename to vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_sse4_amd64.go diff --git a/vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_sse4_amd64.s b/vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_sse4_amd64.s similarity index 100% rename from vendor/github.com/apache/arrow/go/v12/internal/utils/transpose_ints_sse4_amd64.s rename to vendor/github.com/apache/arrow/go/v14/internal/utils/transpose_ints_sse4_amd64.s diff --git a/vendor/github.com/apache/thrift/NOTICE b/vendor/github.com/apache/thrift/NOTICE deleted file mode 100644 index 37824e7f..00000000 --- a/vendor/github.com/apache/thrift/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -Apache Thrift -Copyright (C) 2006 - 2019, The Apache Software Foundation - -This product includes software developed at -The Apache Software Foundation (http://www.apache.org/). 
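One addition above deserves a usage note before the thrift removals that follow: the new internal/json package is a build-tag shim that aliases github.com/goccy/go-json under normal builds and falls back to encoding/json under tinygo, so callers keep a single import path either way. A hypothetical caller inside the module (internal packages cannot be imported from outside it):

    package example

    import "github.com/apache/arrow/go/v14/internal/json"

    type point struct {
        X int `json:"x"`
        Y int `json:"y"`
    }

    // roundTrip marshals and unmarshals through the shim; which JSON
    // implementation actually runs is decided at build time by the tags.
    func roundTrip(p point) (point, error) {
        b, err := json.Marshal(p)
        if err != nil {
            return point{}, err
        }
        var out point
        err = json.Unmarshal(b, &out)
        return out, err
    }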
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/application_exception.go b/vendor/github.com/apache/thrift/lib/go/thrift/application_exception.go deleted file mode 100644 index ed85a645..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/application_exception.go +++ /dev/null @@ -1,183 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "context" -) - -const ( - UNKNOWN_APPLICATION_EXCEPTION = 0 - UNKNOWN_METHOD = 1 - INVALID_MESSAGE_TYPE_EXCEPTION = 2 - WRONG_METHOD_NAME = 3 - BAD_SEQUENCE_ID = 4 - MISSING_RESULT = 5 - INTERNAL_ERROR = 6 - PROTOCOL_ERROR = 7 - INVALID_TRANSFORM = 8 - INVALID_PROTOCOL = 9 - UNSUPPORTED_CLIENT_TYPE = 10 -) - -var defaultApplicationExceptionMessage = map[int32]string{ - UNKNOWN_APPLICATION_EXCEPTION: "unknown application exception", - UNKNOWN_METHOD: "unknown method", - INVALID_MESSAGE_TYPE_EXCEPTION: "invalid message type", - WRONG_METHOD_NAME: "wrong method name", - BAD_SEQUENCE_ID: "bad sequence ID", - MISSING_RESULT: "missing result", - INTERNAL_ERROR: "unknown internal error", - PROTOCOL_ERROR: "unknown protocol error", - INVALID_TRANSFORM: "Invalid transform", - INVALID_PROTOCOL: "Invalid protocol", - UNSUPPORTED_CLIENT_TYPE: "Unsupported client type", -} - -// Application level Thrift exception -type TApplicationException interface { - TException - TypeId() int32 - Read(ctx context.Context, iprot TProtocol) error - Write(ctx context.Context, oprot TProtocol) error -} - -type tApplicationException struct { - message string - type_ int32 -} - -var _ TApplicationException = (*tApplicationException)(nil) - -func (tApplicationException) TExceptionType() TExceptionType { - return TExceptionTypeApplication -} - -func (e tApplicationException) Error() string { - if e.message != "" { - return e.message - } - return defaultApplicationExceptionMessage[e.type_] -} - -func NewTApplicationException(type_ int32, message string) TApplicationException { - return &tApplicationException{message, type_} -} - -func (p *tApplicationException) TypeId() int32 { - return p.type_ -} - -func (p *tApplicationException) Read(ctx context.Context, iprot TProtocol) error { - // TODO: this should really be generated by the compiler - _, err := iprot.ReadStructBegin(ctx) - if err != nil { - return err - } - - message := "" - type_ := int32(UNKNOWN_APPLICATION_EXCEPTION) - - for { - _, ttype, id, err := iprot.ReadFieldBegin(ctx) - if err != nil { - return err - } - if ttype == STOP { - break - } - switch id { - case 1: - if ttype == STRING { - if message, err = iprot.ReadString(ctx); err != nil { - return err - } - } else { - if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil { - return err - } - } - case 2: - if ttype == I32 { - if type_, err = iprot.ReadI32(ctx); 
err != nil { - return err - } - } else { - if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil { - return err - } - } - default: - if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil { - return err - } - } - if err = iprot.ReadFieldEnd(ctx); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(ctx); err != nil { - return err - } - - p.message = message - p.type_ = type_ - - return nil -} - -func (p *tApplicationException) Write(ctx context.Context, oprot TProtocol) (err error) { - err = oprot.WriteStructBegin(ctx, "TApplicationException") - if err != nil { - return - } - if len(p.Error()) > 0 { - err = oprot.WriteFieldBegin(ctx, "message", STRING, 1) - if err != nil { - return - } - err = oprot.WriteString(ctx, p.Error()) - if err != nil { - return - } - err = oprot.WriteFieldEnd(ctx) - if err != nil { - return - } - } - err = oprot.WriteFieldBegin(ctx, "type", I32, 2) - if err != nil { - return - } - err = oprot.WriteI32(ctx, p.type_) - if err != nil { - return - } - err = oprot.WriteFieldEnd(ctx) - if err != nil { - return - } - err = oprot.WriteFieldStop(ctx) - if err != nil { - return - } - err = oprot.WriteStructEnd(ctx) - return -} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/binary_protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/binary_protocol.go deleted file mode 100644 index 3ed6608e..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/binary_protocol.go +++ /dev/null @@ -1,548 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "bytes" - "context" - "encoding/binary" - "fmt" - "io" - "math" -) - -type TBinaryProtocol struct { - trans TRichTransport - origTransport TTransport - cfg *TConfiguration - buffer [64]byte -} - -type TBinaryProtocolFactory struct { - cfg *TConfiguration -} - -// Deprecated: Use NewTBinaryProtocolConf instead. -func NewTBinaryProtocolTransport(t TTransport) *TBinaryProtocol { - return NewTBinaryProtocolConf(t, &TConfiguration{ - noPropagation: true, - }) -} - -// Deprecated: Use NewTBinaryProtocolConf instead. -func NewTBinaryProtocol(t TTransport, strictRead, strictWrite bool) *TBinaryProtocol { - return NewTBinaryProtocolConf(t, &TConfiguration{ - TBinaryStrictRead: &strictRead, - TBinaryStrictWrite: &strictWrite, - - noPropagation: true, - }) -} - -func NewTBinaryProtocolConf(t TTransport, conf *TConfiguration) *TBinaryProtocol { - PropagateTConfiguration(t, conf) - p := &TBinaryProtocol{ - origTransport: t, - cfg: conf, - } - if et, ok := t.(TRichTransport); ok { - p.trans = et - } else { - p.trans = NewTRichTransport(t) - } - return p -} - -// Deprecated: Use NewTBinaryProtocolFactoryConf instead. 
-func NewTBinaryProtocolFactoryDefault() *TBinaryProtocolFactory { - return NewTBinaryProtocolFactoryConf(&TConfiguration{ - noPropagation: true, - }) -} - -// Deprecated: Use NewTBinaryProtocolFactoryConf instead. -func NewTBinaryProtocolFactory(strictRead, strictWrite bool) *TBinaryProtocolFactory { - return NewTBinaryProtocolFactoryConf(&TConfiguration{ - TBinaryStrictRead: &strictRead, - TBinaryStrictWrite: &strictWrite, - - noPropagation: true, - }) -} - -func NewTBinaryProtocolFactoryConf(conf *TConfiguration) *TBinaryProtocolFactory { - return &TBinaryProtocolFactory{ - cfg: conf, - } -} - -func (p *TBinaryProtocolFactory) GetProtocol(t TTransport) TProtocol { - return NewTBinaryProtocolConf(t, p.cfg) -} - -func (p *TBinaryProtocolFactory) SetTConfiguration(conf *TConfiguration) { - p.cfg = conf -} - -/** - * Writing Methods - */ - -func (p *TBinaryProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error { - if p.cfg.GetTBinaryStrictWrite() { - version := uint32(VERSION_1) | uint32(typeId) - e := p.WriteI32(ctx, int32(version)) - if e != nil { - return e - } - e = p.WriteString(ctx, name) - if e != nil { - return e - } - e = p.WriteI32(ctx, seqId) - return e - } else { - e := p.WriteString(ctx, name) - if e != nil { - return e - } - e = p.WriteByte(ctx, int8(typeId)) - if e != nil { - return e - } - e = p.WriteI32(ctx, seqId) - return e - } - return nil -} - -func (p *TBinaryProtocol) WriteMessageEnd(ctx context.Context) error { - return nil -} - -func (p *TBinaryProtocol) WriteStructBegin(ctx context.Context, name string) error { - return nil -} - -func (p *TBinaryProtocol) WriteStructEnd(ctx context.Context) error { - return nil -} - -func (p *TBinaryProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error { - e := p.WriteByte(ctx, int8(typeId)) - if e != nil { - return e - } - e = p.WriteI16(ctx, id) - return e -} - -func (p *TBinaryProtocol) WriteFieldEnd(ctx context.Context) error { - return nil -} - -func (p *TBinaryProtocol) WriteFieldStop(ctx context.Context) error { - e := p.WriteByte(ctx, STOP) - return e -} - -func (p *TBinaryProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error { - e := p.WriteByte(ctx, int8(keyType)) - if e != nil { - return e - } - e = p.WriteByte(ctx, int8(valueType)) - if e != nil { - return e - } - e = p.WriteI32(ctx, int32(size)) - return e -} - -func (p *TBinaryProtocol) WriteMapEnd(ctx context.Context) error { - return nil -} - -func (p *TBinaryProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error { - e := p.WriteByte(ctx, int8(elemType)) - if e != nil { - return e - } - e = p.WriteI32(ctx, int32(size)) - return e -} - -func (p *TBinaryProtocol) WriteListEnd(ctx context.Context) error { - return nil -} - -func (p *TBinaryProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { - e := p.WriteByte(ctx, int8(elemType)) - if e != nil { - return e - } - e = p.WriteI32(ctx, int32(size)) - return e -} - -func (p *TBinaryProtocol) WriteSetEnd(ctx context.Context) error { - return nil -} - -func (p *TBinaryProtocol) WriteBool(ctx context.Context, value bool) error { - if value { - return p.WriteByte(ctx, 1) - } - return p.WriteByte(ctx, 0) -} - -func (p *TBinaryProtocol) WriteByte(ctx context.Context, value int8) error { - e := p.trans.WriteByte(byte(value)) - return NewTProtocolException(e) -} - -func (p *TBinaryProtocol) WriteI16(ctx context.Context, value int16) error { - v := p.buffer[0:2] - 
binary.BigEndian.PutUint16(v, uint16(value)) - _, e := p.trans.Write(v) - return NewTProtocolException(e) -} - -func (p *TBinaryProtocol) WriteI32(ctx context.Context, value int32) error { - v := p.buffer[0:4] - binary.BigEndian.PutUint32(v, uint32(value)) - _, e := p.trans.Write(v) - return NewTProtocolException(e) -} - -func (p *TBinaryProtocol) WriteI64(ctx context.Context, value int64) error { - v := p.buffer[0:8] - binary.BigEndian.PutUint64(v, uint64(value)) - _, err := p.trans.Write(v) - return NewTProtocolException(err) -} - -func (p *TBinaryProtocol) WriteDouble(ctx context.Context, value float64) error { - return p.WriteI64(ctx, int64(math.Float64bits(value))) -} - -func (p *TBinaryProtocol) WriteString(ctx context.Context, value string) error { - e := p.WriteI32(ctx, int32(len(value))) - if e != nil { - return e - } - _, err := p.trans.WriteString(value) - return NewTProtocolException(err) -} - -func (p *TBinaryProtocol) WriteBinary(ctx context.Context, value []byte) error { - e := p.WriteI32(ctx, int32(len(value))) - if e != nil { - return e - } - _, err := p.trans.Write(value) - return NewTProtocolException(err) -} - -/** - * Reading methods - */ - -func (p *TBinaryProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) { - size, e := p.ReadI32(ctx) - if e != nil { - return "", typeId, 0, NewTProtocolException(e) - } - if size < 0 { - typeId = TMessageType(size & 0x0ff) - version := int64(int64(size) & VERSION_MASK) - if version != VERSION_1 { - return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Bad version in ReadMessageBegin")) - } - name, e = p.ReadString(ctx) - if e != nil { - return name, typeId, seqId, NewTProtocolException(e) - } - seqId, e = p.ReadI32(ctx) - if e != nil { - return name, typeId, seqId, NewTProtocolException(e) - } - return name, typeId, seqId, nil - } - if p.cfg.GetTBinaryStrictRead() { - return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Missing version in ReadMessageBegin")) - } - name, e2 := p.readStringBody(size) - if e2 != nil { - return name, typeId, seqId, e2 - } - b, e3 := p.ReadByte(ctx) - if e3 != nil { - return name, typeId, seqId, e3 - } - typeId = TMessageType(b) - seqId, e4 := p.ReadI32(ctx) - if e4 != nil { - return name, typeId, seqId, e4 - } - return name, typeId, seqId, nil -} - -func (p *TBinaryProtocol) ReadMessageEnd(ctx context.Context) error { - return nil -} - -func (p *TBinaryProtocol) ReadStructBegin(ctx context.Context) (name string, err error) { - return -} - -func (p *TBinaryProtocol) ReadStructEnd(ctx context.Context) error { - return nil -} - -func (p *TBinaryProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, seqId int16, err error) { - t, err := p.ReadByte(ctx) - typeId = TType(t) - if err != nil { - return name, typeId, seqId, err - } - if t != STOP { - seqId, err = p.ReadI16(ctx) - } - return name, typeId, seqId, err -} - -func (p *TBinaryProtocol) ReadFieldEnd(ctx context.Context) error { - return nil -} - -func (p *TBinaryProtocol) ReadMapBegin(ctx context.Context) (kType, vType TType, size int, err error) { - k, e := p.ReadByte(ctx) - if e != nil { - err = NewTProtocolException(e) - return - } - kType = TType(k) - v, e := p.ReadByte(ctx) - if e != nil { - err = NewTProtocolException(e) - return - } - vType = TType(v) - size32, e := p.ReadI32(ctx) - if e != nil { - err = NewTProtocolException(e) - return - } - err = checkSizeForProtocol(size32, p.cfg) - if err != nil { - return - } 
- size = int(size32) - return kType, vType, size, nil -} - -func (p *TBinaryProtocol) ReadMapEnd(ctx context.Context) error { - return nil -} - -func (p *TBinaryProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) { - b, e := p.ReadByte(ctx) - if e != nil { - err = NewTProtocolException(e) - return - } - elemType = TType(b) - size32, e := p.ReadI32(ctx) - if e != nil { - err = NewTProtocolException(e) - return - } - err = checkSizeForProtocol(size32, p.cfg) - if err != nil { - return - } - size = int(size32) - - return -} - -func (p *TBinaryProtocol) ReadListEnd(ctx context.Context) error { - return nil -} - -func (p *TBinaryProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) { - b, e := p.ReadByte(ctx) - if e != nil { - err = NewTProtocolException(e) - return - } - elemType = TType(b) - size32, e := p.ReadI32(ctx) - if e != nil { - err = NewTProtocolException(e) - return - } - err = checkSizeForProtocol(size32, p.cfg) - if err != nil { - return - } - size = int(size32) - return elemType, size, nil -} - -func (p *TBinaryProtocol) ReadSetEnd(ctx context.Context) error { - return nil -} - -func (p *TBinaryProtocol) ReadBool(ctx context.Context) (bool, error) { - b, e := p.ReadByte(ctx) - v := true - if b != 1 { - v = false - } - return v, e -} - -func (p *TBinaryProtocol) ReadByte(ctx context.Context) (int8, error) { - v, err := p.trans.ReadByte() - return int8(v), err -} - -func (p *TBinaryProtocol) ReadI16(ctx context.Context) (value int16, err error) { - buf := p.buffer[0:2] - err = p.readAll(ctx, buf) - value = int16(binary.BigEndian.Uint16(buf)) - return value, err -} - -func (p *TBinaryProtocol) ReadI32(ctx context.Context) (value int32, err error) { - buf := p.buffer[0:4] - err = p.readAll(ctx, buf) - value = int32(binary.BigEndian.Uint32(buf)) - return value, err -} - -func (p *TBinaryProtocol) ReadI64(ctx context.Context) (value int64, err error) { - buf := p.buffer[0:8] - err = p.readAll(ctx, buf) - value = int64(binary.BigEndian.Uint64(buf)) - return value, err -} - -func (p *TBinaryProtocol) ReadDouble(ctx context.Context) (value float64, err error) { - buf := p.buffer[0:8] - err = p.readAll(ctx, buf) - value = math.Float64frombits(binary.BigEndian.Uint64(buf)) - return value, err -} - -func (p *TBinaryProtocol) ReadString(ctx context.Context) (value string, err error) { - size, e := p.ReadI32(ctx) - if e != nil { - return "", e - } - err = checkSizeForProtocol(size, p.cfg) - if err != nil { - return - } - if size == 0 { - return "", nil - } - if size < int32(len(p.buffer)) { - // Avoid allocation on small reads - buf := p.buffer[:size] - read, e := io.ReadFull(p.trans, buf) - return string(buf[:read]), NewTProtocolException(e) - } - - return p.readStringBody(size) -} - -func (p *TBinaryProtocol) ReadBinary(ctx context.Context) ([]byte, error) { - size, e := p.ReadI32(ctx) - if e != nil { - return nil, e - } - if err := checkSizeForProtocol(size, p.cfg); err != nil { - return nil, err - } - - buf, err := safeReadBytes(size, p.trans) - return buf, NewTProtocolException(err) -} - -func (p *TBinaryProtocol) Flush(ctx context.Context) (err error) { - return NewTProtocolException(p.trans.Flush(ctx)) -} - -func (p *TBinaryProtocol) Skip(ctx context.Context, fieldType TType) (err error) { - return SkipDefaultDepth(ctx, p, fieldType) -} - -func (p *TBinaryProtocol) Transport() TTransport { - return p.origTransport -} - -func (p *TBinaryProtocol) readAll(ctx context.Context, buf []byte) (err error) { - var read int - _, 
deadlineSet := ctx.Deadline() - for { - read, err = io.ReadFull(p.trans, buf) - if deadlineSet && read == 0 && isTimeoutError(err) && ctx.Err() == nil { - // This is I/O timeout without anything read, - // and we still have time left, keep retrying. - continue - } - // For anything else, don't retry - break - } - return NewTProtocolException(err) -} - -func (p *TBinaryProtocol) readStringBody(size int32) (value string, err error) { - buf, err := safeReadBytes(size, p.trans) - return string(buf), NewTProtocolException(err) -} - -func (p *TBinaryProtocol) SetTConfiguration(conf *TConfiguration) { - PropagateTConfiguration(p.trans, conf) - PropagateTConfiguration(p.origTransport, conf) - p.cfg = conf -} - -var ( - _ TConfigurationSetter = (*TBinaryProtocolFactory)(nil) - _ TConfigurationSetter = (*TBinaryProtocol)(nil) -) - -// This function is shared between TBinaryProtocol and TCompactProtocol. -// -// It tries to read size bytes from trans, in a way that prevents large -// allocations when size is insanely large (mostly caused by malformed message). -func safeReadBytes(size int32, trans io.Reader) ([]byte, error) { - if size < 0 { - return nil, nil - } - - buf := new(bytes.Buffer) - _, err := io.CopyN(buf, trans, int64(size)) - return buf.Bytes(), err -} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/buf_pool.go b/vendor/github.com/apache/thrift/lib/go/thrift/buf_pool.go deleted file mode 100644 index 9708ea0e..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/buf_pool.go +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "bytes" - "sync" -) - -var bufPool = sync.Pool{ - New: func() interface{} { - return new(bytes.Buffer) - }, -} - -// getBufFromPool gets a buffer out of the pool and guarantees that it's reset -// before return. -func getBufFromPool() *bytes.Buffer { - buf := bufPool.Get().(*bytes.Buffer) - buf.Reset() - return buf -} - -// returnBufToPool returns a buffer to the pool, and sets it to nil to avoid -// accidental usage after it's returned. -// -// You usually want to use it this way: -// -// buf := getBufFromPool() -// defer returnBufToPool(&buf) -// // use buf -func returnBufToPool(buf **bytes.Buffer) { - bufPool.Put(*buf) - *buf = nil -} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/buffered_transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/buffered_transport.go deleted file mode 100644 index aa551b4a..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/buffered_transport.go +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "bufio" - "context" -) - -type TBufferedTransportFactory struct { - size int -} - -type TBufferedTransport struct { - bufio.ReadWriter - tp TTransport -} - -func (p *TBufferedTransportFactory) GetTransport(trans TTransport) (TTransport, error) { - return NewTBufferedTransport(trans, p.size), nil -} - -func NewTBufferedTransportFactory(bufferSize int) *TBufferedTransportFactory { - return &TBufferedTransportFactory{size: bufferSize} -} - -func NewTBufferedTransport(trans TTransport, bufferSize int) *TBufferedTransport { - return &TBufferedTransport{ - ReadWriter: bufio.ReadWriter{ - Reader: bufio.NewReaderSize(trans, bufferSize), - Writer: bufio.NewWriterSize(trans, bufferSize), - }, - tp: trans, - } -} - -func (p *TBufferedTransport) IsOpen() bool { - return p.tp.IsOpen() -} - -func (p *TBufferedTransport) Open() (err error) { - return p.tp.Open() -} - -func (p *TBufferedTransport) Close() (err error) { - return p.tp.Close() -} - -func (p *TBufferedTransport) Read(b []byte) (int, error) { - n, err := p.ReadWriter.Read(b) - if err != nil { - p.ReadWriter.Reader.Reset(p.tp) - } - return n, err -} - -func (p *TBufferedTransport) Write(b []byte) (int, error) { - n, err := p.ReadWriter.Write(b) - if err != nil { - p.ReadWriter.Writer.Reset(p.tp) - } - return n, err -} - -func (p *TBufferedTransport) Flush(ctx context.Context) error { - if err := p.ReadWriter.Flush(); err != nil { - p.ReadWriter.Writer.Reset(p.tp) - return err - } - return p.tp.Flush(ctx) -} - -func (p *TBufferedTransport) RemainingBytes() (num_bytes uint64) { - return p.tp.RemainingBytes() -} - -// SetTConfiguration implements TConfigurationSetter for propagation. -func (p *TBufferedTransport) SetTConfiguration(conf *TConfiguration) { - PropagateTConfiguration(p.tp, conf) -} - -var _ TConfigurationSetter = (*TBufferedTransport)(nil) diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/client.go b/vendor/github.com/apache/thrift/lib/go/thrift/client.go deleted file mode 100644 index ea2c01fd..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/client.go +++ /dev/null @@ -1,109 +0,0 @@ -package thrift - -import ( - "context" - "fmt" -) - -// ResponseMeta represents the metadata attached to the response. -type ResponseMeta struct { - // The headers in the response, if any. - // If the underlying transport/protocol is not THeader, this will always be nil. - Headers THeaderMap -} - -type TClient interface { - Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) -} - -type TStandardClient struct { - seqId int32 - iprot, oprot TProtocol -} - -// TStandardClient implements TClient, and uses the standard message format for Thrift. -// It is not safe for concurrent use. 
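As context for the constructor that follows, here is a minimal sketch of wiring a TStandardClient. TMemoryBuffer and NewTBinaryProtocolConf are other exported pieces of this same package; the wiring shown is illustrative, not prescriptive:

    package example

    import "github.com/apache/thrift/lib/go/thrift"

    func newClient() thrift.TClient {
        // An in-memory transport keeps the sketch self-contained; real
        // code would typically dial a TSocket or THttpClient instead.
        trans := thrift.NewTMemoryBuffer()
        proto := thrift.NewTBinaryProtocolConf(trans, &thrift.TConfiguration{})
        // Input and output protocols may share one transport; Call then
        // drives Send/Recv with an auto-incremented sequence id.
        return thrift.NewTStandardClient(proto, proto)
    }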
-func NewTStandardClient(inputProtocol, outputProtocol TProtocol) *TStandardClient { - return &TStandardClient{ - iprot: inputProtocol, - oprot: outputProtocol, - } -} - -func (p *TStandardClient) Send(ctx context.Context, oprot TProtocol, seqId int32, method string, args TStruct) error { - // Set headers from context object on THeaderProtocol - if headerProt, ok := oprot.(*THeaderProtocol); ok { - headerProt.ClearWriteHeaders() - for _, key := range GetWriteHeaderList(ctx) { - if value, ok := GetHeader(ctx, key); ok { - headerProt.SetWriteHeader(key, value) - } - } - } - - if err := oprot.WriteMessageBegin(ctx, method, CALL, seqId); err != nil { - return err - } - if err := args.Write(ctx, oprot); err != nil { - return err - } - if err := oprot.WriteMessageEnd(ctx); err != nil { - return err - } - return oprot.Flush(ctx) -} - -func (p *TStandardClient) Recv(ctx context.Context, iprot TProtocol, seqId int32, method string, result TStruct) error { - rMethod, rTypeId, rSeqId, err := iprot.ReadMessageBegin(ctx) - if err != nil { - return err - } - - if method != rMethod { - return NewTApplicationException(WRONG_METHOD_NAME, fmt.Sprintf("%s: wrong method name", method)) - } else if seqId != rSeqId { - return NewTApplicationException(BAD_SEQUENCE_ID, fmt.Sprintf("%s: out of order sequence response", method)) - } else if rTypeId == EXCEPTION { - var exception tApplicationException - if err := exception.Read(ctx, iprot); err != nil { - return err - } - - if err := iprot.ReadMessageEnd(ctx); err != nil { - return err - } - - return &exception - } else if rTypeId != REPLY { - return NewTApplicationException(INVALID_MESSAGE_TYPE_EXCEPTION, fmt.Sprintf("%s: invalid message type", method)) - } - - if err := result.Read(ctx, iprot); err != nil { - return err - } - - return iprot.ReadMessageEnd(ctx) -} - -func (p *TStandardClient) Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) { - p.seqId++ - seqId := p.seqId - - if err := p.Send(ctx, p.oprot, seqId, method, args); err != nil { - return ResponseMeta{}, err - } - - // method is oneway - if result == nil { - return ResponseMeta{}, nil - } - - err := p.Recv(ctx, p.iprot, seqId, method, result) - var headers THeaderMap - if hp, ok := p.iprot.(*THeaderProtocol); ok { - headers = hp.transport.readHeaders - } - return ResponseMeta{ - Headers: headers, - }, err -} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/compact_protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/compact_protocol.go deleted file mode 100644 index ff3999c0..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/compact_protocol.go +++ /dev/null @@ -1,846 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "context" - "encoding/binary" - "errors" - "fmt" - "io" - "math" -) - -const ( - COMPACT_PROTOCOL_ID = 0x082 - COMPACT_VERSION = 1 - COMPACT_VERSION_MASK = 0x1f - COMPACT_TYPE_MASK = 0x0E0 - COMPACT_TYPE_BITS = 0x07 - COMPACT_TYPE_SHIFT_AMOUNT = 5 -) - -type tCompactType byte - -const ( - COMPACT_BOOLEAN_TRUE = 0x01 - COMPACT_BOOLEAN_FALSE = 0x02 - COMPACT_BYTE = 0x03 - COMPACT_I16 = 0x04 - COMPACT_I32 = 0x05 - COMPACT_I64 = 0x06 - COMPACT_DOUBLE = 0x07 - COMPACT_BINARY = 0x08 - COMPACT_LIST = 0x09 - COMPACT_SET = 0x0A - COMPACT_MAP = 0x0B - COMPACT_STRUCT = 0x0C -) - -var ( - ttypeToCompactType map[TType]tCompactType -) - -func init() { - ttypeToCompactType = map[TType]tCompactType{ - STOP: STOP, - BOOL: COMPACT_BOOLEAN_TRUE, - BYTE: COMPACT_BYTE, - I16: COMPACT_I16, - I32: COMPACT_I32, - I64: COMPACT_I64, - DOUBLE: COMPACT_DOUBLE, - STRING: COMPACT_BINARY, - LIST: COMPACT_LIST, - SET: COMPACT_SET, - MAP: COMPACT_MAP, - STRUCT: COMPACT_STRUCT, - } -} - -type TCompactProtocolFactory struct { - cfg *TConfiguration -} - -// Deprecated: Use NewTCompactProtocolFactoryConf instead. -func NewTCompactProtocolFactory() *TCompactProtocolFactory { - return NewTCompactProtocolFactoryConf(&TConfiguration{ - noPropagation: true, - }) -} - -func NewTCompactProtocolFactoryConf(conf *TConfiguration) *TCompactProtocolFactory { - return &TCompactProtocolFactory{ - cfg: conf, - } -} - -func (p *TCompactProtocolFactory) GetProtocol(trans TTransport) TProtocol { - return NewTCompactProtocolConf(trans, p.cfg) -} - -func (p *TCompactProtocolFactory) SetTConfiguration(conf *TConfiguration) { - p.cfg = conf -} - -type TCompactProtocol struct { - trans TRichTransport - origTransport TTransport - - cfg *TConfiguration - - // Used to keep track of the last field for the current and previous structs, - // so we can do the delta stuff. - lastField []int - lastFieldId int - - // If we encounter a boolean field begin, save the TField here so it can - // have the value incorporated. - booleanFieldName string - booleanFieldId int16 - booleanFieldPending bool - - // If we read a field header, and it's a boolean field, save the boolean - // value here so that readBool can use it. - boolValue bool - boolValueIsNotNull bool - buffer [64]byte -} - -// Deprecated: Use NewTCompactProtocolConf instead. -func NewTCompactProtocol(trans TTransport) *TCompactProtocol { - return NewTCompactProtocolConf(trans, &TConfiguration{ - noPropagation: true, - }) -} - -func NewTCompactProtocolConf(trans TTransport, conf *TConfiguration) *TCompactProtocol { - PropagateTConfiguration(trans, conf) - p := &TCompactProtocol{ - origTransport: trans, - cfg: conf, - } - if et, ok := trans.(TRichTransport); ok { - p.trans = et - } else { - p.trans = NewTRichTransport(trans) - } - - return p -} - -// -// Public Writing methods. -// - -// Write a message header to the wire. Compact Protocol messages contain the -// protocol version so we can migrate forwards in the future if need be. 
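The two-byte header that comment describes can be shown concretely. compactHeader below is a hypothetical standalone helper, not part of the library, mirroring what WriteMessageBegin assembles:

    package example

    // compactHeader packs byte 0 as the protocol id and byte 1 as the
    // version (low 5 bits) plus the message type (high 3 bits); a varint
    // sequence id and the method name follow on the wire.
    func compactHeader(typeID byte) [2]byte {
        const protocolID, version = 0x82, 0x01
        return [2]byte{protocolID, (version & 0x1f) | (typeID<<5)&0xe0}
    }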
-func (p *TCompactProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error { - err := p.writeByteDirect(COMPACT_PROTOCOL_ID) - if err != nil { - return NewTProtocolException(err) - } - err = p.writeByteDirect((COMPACT_VERSION & COMPACT_VERSION_MASK) | ((byte(typeId) << COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_MASK)) - if err != nil { - return NewTProtocolException(err) - } - _, err = p.writeVarint32(seqid) - if err != nil { - return NewTProtocolException(err) - } - e := p.WriteString(ctx, name) - return e - -} - -func (p *TCompactProtocol) WriteMessageEnd(ctx context.Context) error { return nil } - -// Write a struct begin. This doesn't actually put anything on the wire. We -// use it as an opportunity to put special placeholder markers on the field -// stack so we can get the field id deltas correct. -func (p *TCompactProtocol) WriteStructBegin(ctx context.Context, name string) error { - p.lastField = append(p.lastField, p.lastFieldId) - p.lastFieldId = 0 - return nil -} - -// Write a struct end. This doesn't actually put anything on the wire. We use -// this as an opportunity to pop the last field from the current struct off -// of the field stack. -func (p *TCompactProtocol) WriteStructEnd(ctx context.Context) error { - if len(p.lastField) <= 0 { - return NewTProtocolExceptionWithType(INVALID_DATA, errors.New("WriteStructEnd called without matching WriteStructBegin call before")) - } - p.lastFieldId = p.lastField[len(p.lastField)-1] - p.lastField = p.lastField[:len(p.lastField)-1] - return nil -} - -func (p *TCompactProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error { - if typeId == BOOL { - // we want to possibly include the value, so we'll wait. - p.booleanFieldName, p.booleanFieldId, p.booleanFieldPending = name, id, true - return nil - } - _, err := p.writeFieldBeginInternal(ctx, name, typeId, id, 0xFF) - return NewTProtocolException(err) -} - -// The workhorse of writeFieldBegin. It has the option of doing a -// 'type override' of the type header. This is used specifically in the -// boolean field case. -func (p *TCompactProtocol) writeFieldBeginInternal(ctx context.Context, name string, typeId TType, id int16, typeOverride byte) (int, error) { - // short lastField = lastField_.pop(); - - // if there's a type override, use that. 
- var typeToWrite byte - if typeOverride == 0xFF { - typeToWrite = byte(p.getCompactType(typeId)) - } else { - typeToWrite = typeOverride - } - // check if we can use delta encoding for the field id - fieldId := int(id) - written := 0 - if fieldId > p.lastFieldId && fieldId-p.lastFieldId <= 15 { - // write them together - err := p.writeByteDirect(byte((fieldId-p.lastFieldId)<<4) | typeToWrite) - if err != nil { - return 0, err - } - } else { - // write them separate - err := p.writeByteDirect(typeToWrite) - if err != nil { - return 0, err - } - err = p.WriteI16(ctx, id) - written = 1 + 2 - if err != nil { - return 0, err - } - } - - p.lastFieldId = fieldId - return written, nil -} - -func (p *TCompactProtocol) WriteFieldEnd(ctx context.Context) error { return nil } - -func (p *TCompactProtocol) WriteFieldStop(ctx context.Context) error { - err := p.writeByteDirect(STOP) - return NewTProtocolException(err) -} - -func (p *TCompactProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error { - if size == 0 { - err := p.writeByteDirect(0) - return NewTProtocolException(err) - } - _, err := p.writeVarint32(int32(size)) - if err != nil { - return NewTProtocolException(err) - } - err = p.writeByteDirect(byte(p.getCompactType(keyType))<<4 | byte(p.getCompactType(valueType))) - return NewTProtocolException(err) -} - -func (p *TCompactProtocol) WriteMapEnd(ctx context.Context) error { return nil } - -// Write a list header. -func (p *TCompactProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error { - _, err := p.writeCollectionBegin(elemType, size) - return NewTProtocolException(err) -} - -func (p *TCompactProtocol) WriteListEnd(ctx context.Context) error { return nil } - -// Write a set header. -func (p *TCompactProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { - _, err := p.writeCollectionBegin(elemType, size) - return NewTProtocolException(err) -} - -func (p *TCompactProtocol) WriteSetEnd(ctx context.Context) error { return nil } - -func (p *TCompactProtocol) WriteBool(ctx context.Context, value bool) error { - v := byte(COMPACT_BOOLEAN_FALSE) - if value { - v = byte(COMPACT_BOOLEAN_TRUE) - } - if p.booleanFieldPending { - // we haven't written the field header yet - _, err := p.writeFieldBeginInternal(ctx, p.booleanFieldName, BOOL, p.booleanFieldId, v) - p.booleanFieldPending = false - return NewTProtocolException(err) - } - // we're not part of a field, so just write the value. - err := p.writeByteDirect(v) - return NewTProtocolException(err) -} - -// Write a byte. Nothing to see here! -func (p *TCompactProtocol) WriteByte(ctx context.Context, value int8) error { - err := p.writeByteDirect(byte(value)) - return NewTProtocolException(err) -} - -// Write an I16 as a zigzag varint. -func (p *TCompactProtocol) WriteI16(ctx context.Context, value int16) error { - _, err := p.writeVarint32(p.int32ToZigzag(int32(value))) - return NewTProtocolException(err) -} - -// Write an i32 as a zigzag varint. -func (p *TCompactProtocol) WriteI32(ctx context.Context, value int32) error { - _, err := p.writeVarint32(p.int32ToZigzag(value)) - return NewTProtocolException(err) -} - -// Write an i64 as a zigzag varint. -func (p *TCompactProtocol) WriteI64(ctx context.Context, value int64) error { - _, err := p.writeVarint64(p.int64ToZigzag(value)) - return NewTProtocolException(err) -} - -// Write a double to the wire as 8 bytes. 
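One detail worth calling out before the next function: the compact protocol writes doubles little-endian, whereas TBinaryProtocol above writes them big-endian via WriteI64. A standalone round-trip sketch of the compact encoding:

    package example

    import (
        "encoding/binary"
        "math"
    )

    // Eight bytes, little-endian IEEE-754, matching WriteDouble/ReadDouble.
    func encodeDouble(v float64) [8]byte {
        var b [8]byte
        binary.LittleEndian.PutUint64(b[:], math.Float64bits(v))
        return b
    }

    func decodeDouble(b [8]byte) float64 {
        return math.Float64frombits(binary.LittleEndian.Uint64(b[:]))
    }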
-func (p *TCompactProtocol) WriteDouble(ctx context.Context, value float64) error { - buf := p.buffer[0:8] - binary.LittleEndian.PutUint64(buf, math.Float64bits(value)) - _, err := p.trans.Write(buf) - return NewTProtocolException(err) -} - -// Write a string to the wire with a varint size preceding. -func (p *TCompactProtocol) WriteString(ctx context.Context, value string) error { - _, e := p.writeVarint32(int32(len(value))) - if e != nil { - return NewTProtocolException(e) - } - if len(value) == 0 { - return nil - } - _, e = p.trans.WriteString(value) - return e -} - -// Write a byte array, using a varint for the size. -func (p *TCompactProtocol) WriteBinary(ctx context.Context, bin []byte) error { - _, e := p.writeVarint32(int32(len(bin))) - if e != nil { - return NewTProtocolException(e) - } - if len(bin) > 0 { - _, e = p.trans.Write(bin) - return NewTProtocolException(e) - } - return nil -} - -// -// Reading methods. -// - -// Read a message header. -func (p *TCompactProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) { - var protocolId byte - - _, deadlineSet := ctx.Deadline() - for { - protocolId, err = p.readByteDirect() - if deadlineSet && isTimeoutError(err) && ctx.Err() == nil { - // keep retrying I/O timeout errors since we still have - // time left - continue - } - // For anything else, don't retry - break - } - if err != nil { - return - } - - if protocolId != COMPACT_PROTOCOL_ID { - e := fmt.Errorf("Expected protocol id %02x but got %02x", COMPACT_PROTOCOL_ID, protocolId) - return "", typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, e) - } - - versionAndType, err := p.readByteDirect() - if err != nil { - return - } - - version := versionAndType & COMPACT_VERSION_MASK - typeId = TMessageType((versionAndType >> COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_BITS) - if version != COMPACT_VERSION { - e := fmt.Errorf("Expected version %02x but got %02x", COMPACT_VERSION, version) - err = NewTProtocolExceptionWithType(BAD_VERSION, e) - return - } - seqId, e := p.readVarint32() - if e != nil { - err = NewTProtocolException(e) - return - } - name, err = p.ReadString(ctx) - return -} - -func (p *TCompactProtocol) ReadMessageEnd(ctx context.Context) error { return nil } - -// Read a struct begin. There's nothing on the wire for this, but it is our -// opportunity to push a new struct begin marker onto the field stack. -func (p *TCompactProtocol) ReadStructBegin(ctx context.Context) (name string, err error) { - p.lastField = append(p.lastField, p.lastFieldId) - p.lastFieldId = 0 - return -} - -// Doesn't actually consume any wire data, just removes the last field for -// this struct from the field stack. -func (p *TCompactProtocol) ReadStructEnd(ctx context.Context) error { - // consume the last field we read off the wire. - if len(p.lastField) <= 0 { - return NewTProtocolExceptionWithType(INVALID_DATA, errors.New("ReadStructEnd called without matching ReadStructBegin call before")) - } - p.lastFieldId = p.lastField[len(p.lastField)-1] - p.lastField = p.lastField[:len(p.lastField)-1] - return nil -} - -// Read a field header off the wire. -func (p *TCompactProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error) { - t, err := p.readByteDirect() - if err != nil { - return - } - - // if it's a stop, then we can return immediately, as the struct is over. - if (t & 0x0f) == STOP { - return "", STOP, 0, nil - } - - // mask off the 4 MSB of the type header. it could contain a field id delta. 
-	modifier := int16((t & 0xf0) >> 4)
-	if modifier == 0 {
-		// not a delta. look ahead for the zigzag varint field id.
-		id, err = p.ReadI16(ctx)
-		if err != nil {
-			return
-		}
-	} else {
-		// has a delta. add the delta to the last read field id.
-		id = int16(p.lastFieldId) + modifier
-	}
-	typeId, e := p.getTType(tCompactType(t & 0x0f))
-	if e != nil {
-		err = NewTProtocolException(e)
-		return
-	}
-
-	// if this happens to be a boolean field, the value is encoded in the type
-	if p.isBoolType(t) {
-		// save the boolean value in a special instance variable.
-		p.boolValue = (byte(t)&0x0f == COMPACT_BOOLEAN_TRUE)
-		p.boolValueIsNotNull = true
-	}
-
-	// push the new field onto the field stack so we can keep the deltas going.
-	p.lastFieldId = int(id)
-	return
-}
-
-func (p *TCompactProtocol) ReadFieldEnd(ctx context.Context) error { return nil }
-
-// Read a map header off the wire. If the size is zero, skip reading the key
-// and value type. This means that 0-length maps will yield TMaps without the
-// "correct" types.
-func (p *TCompactProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) {
-	size32, e := p.readVarint32()
-	if e != nil {
-		err = NewTProtocolException(e)
-		return
-	}
-	err = checkSizeForProtocol(size32, p.cfg)
-	if err != nil {
-		return
-	}
-	size = int(size32)
-
-	keyAndValueType := byte(STOP)
-	if size != 0 {
-		keyAndValueType, err = p.readByteDirect()
-		if err != nil {
-			return
-		}
-	}
-	keyType, _ = p.getTType(tCompactType(keyAndValueType >> 4))
-	valueType, _ = p.getTType(tCompactType(keyAndValueType & 0xf))
-	return
-}
-
-func (p *TCompactProtocol) ReadMapEnd(ctx context.Context) error { return nil }
-
-// Read a list header off the wire. If the list size is 0-14, the size will
-// be packed into the element type header. If it's a longer list, the 4 MSB
-// of the element type header will be 0xF, and a varint will follow with the
-// true size.
-func (p *TCompactProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) {
-	size_and_type, err := p.readByteDirect()
-	if err != nil {
-		return
-	}
-	size = int((size_and_type >> 4) & 0x0f)
-	if size == 15 {
-		size2, e := p.readVarint32()
-		if e != nil {
-			err = NewTProtocolException(e)
-			return
-		}
-		size = int(size2)
-	}
-	err = checkSizeForProtocol(int32(size), p.cfg)
-	if err != nil {
-		return
-	}
-	elemType, e := p.getTType(tCompactType(size_and_type))
-	if e != nil {
-		err = NewTProtocolException(e)
-		return
-	}
-	return
-}
-
-func (p *TCompactProtocol) ReadListEnd(ctx context.Context) error { return nil }
-
-// Read a set header off the wire. If the set size is 0-14, the size will
-// be packed into the element type header. If it's a longer set, the 4 MSB
-// of the element type header will be 0xF, and a varint will follow with the
-// true size.
-func (p *TCompactProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) {
-	return p.ReadListBegin(ctx)
-}
-
-func (p *TCompactProtocol) ReadSetEnd(ctx context.Context) error { return nil }
-
-// Read a boolean off the wire. If this is a boolean field, the value should
-// already have been read during readFieldBegin, so we'll just consume the
-// pre-stored value. Otherwise, read a byte.
-func (p *TCompactProtocol) ReadBool(ctx context.Context) (value bool, err error) {
-	if p.boolValueIsNotNull {
-		p.boolValueIsNotNull = false
-		return p.boolValue, nil
-	}
-	v, err := p.readByteDirect()
-	return v == COMPACT_BOOLEAN_TRUE, err
-}
-
-// Read a single byte off the wire.
Nothing interesting here. -func (p *TCompactProtocol) ReadByte(ctx context.Context) (int8, error) { - v, err := p.readByteDirect() - if err != nil { - return 0, NewTProtocolException(err) - } - return int8(v), err -} - -// Read an i16 from the wire as a zigzag varint. -func (p *TCompactProtocol) ReadI16(ctx context.Context) (value int16, err error) { - v, err := p.ReadI32(ctx) - return int16(v), err -} - -// Read an i32 from the wire as a zigzag varint. -func (p *TCompactProtocol) ReadI32(ctx context.Context) (value int32, err error) { - v, e := p.readVarint32() - if e != nil { - return 0, NewTProtocolException(e) - } - value = p.zigzagToInt32(v) - return value, nil -} - -// Read an i64 from the wire as a zigzag varint. -func (p *TCompactProtocol) ReadI64(ctx context.Context) (value int64, err error) { - v, e := p.readVarint64() - if e != nil { - return 0, NewTProtocolException(e) - } - value = p.zigzagToInt64(v) - return value, nil -} - -// No magic here - just read a double off the wire. -func (p *TCompactProtocol) ReadDouble(ctx context.Context) (value float64, err error) { - longBits := p.buffer[0:8] - _, e := io.ReadFull(p.trans, longBits) - if e != nil { - return 0.0, NewTProtocolException(e) - } - return math.Float64frombits(p.bytesToUint64(longBits)), nil -} - -// Reads a []byte (via readBinary), and then UTF-8 decodes it. -func (p *TCompactProtocol) ReadString(ctx context.Context) (value string, err error) { - length, e := p.readVarint32() - if e != nil { - return "", NewTProtocolException(e) - } - err = checkSizeForProtocol(length, p.cfg) - if err != nil { - return - } - if length == 0 { - return "", nil - } - if length < int32(len(p.buffer)) { - // Avoid allocation on small reads - buf := p.buffer[:length] - read, e := io.ReadFull(p.trans, buf) - return string(buf[:read]), NewTProtocolException(e) - } - - buf, e := safeReadBytes(length, p.trans) - return string(buf), NewTProtocolException(e) -} - -// Read a []byte from the wire. -func (p *TCompactProtocol) ReadBinary(ctx context.Context) (value []byte, err error) { - length, e := p.readVarint32() - if e != nil { - return nil, NewTProtocolException(e) - } - err = checkSizeForProtocol(length, p.cfg) - if err != nil { - return - } - if length == 0 { - return []byte{}, nil - } - - buf, e := safeReadBytes(length, p.trans) - return buf, NewTProtocolException(e) -} - -func (p *TCompactProtocol) Flush(ctx context.Context) (err error) { - return NewTProtocolException(p.trans.Flush(ctx)) -} - -func (p *TCompactProtocol) Skip(ctx context.Context, fieldType TType) (err error) { - return SkipDefaultDepth(ctx, p, fieldType) -} - -func (p *TCompactProtocol) Transport() TTransport { - return p.origTransport -} - -// -// Internal writing methods -// - -// Abstract method for writing the start of lists and sets. List and sets on -// the wire differ only by the type indicator. -func (p *TCompactProtocol) writeCollectionBegin(elemType TType, size int) (int, error) { - if size <= 14 { - return 1, p.writeByteDirect(byte(int32(size<<4) | int32(p.getCompactType(elemType)))) - } - err := p.writeByteDirect(0xf0 | byte(p.getCompactType(elemType))) - if err != nil { - return 0, err - } - m, err := p.writeVarint32(int32(size)) - return 1 + m, err -} - -// Write an i32 as a varint. Results in 1-5 bytes on the wire. -// TODO(pomack): make a permanent buffer like writeVarint64? 
-func (p *TCompactProtocol) writeVarint32(n int32) (int, error) { - i32buf := p.buffer[0:5] - idx := 0 - for { - if (n & ^0x7F) == 0 { - i32buf[idx] = byte(n) - idx++ - // p.writeByteDirect(byte(n)); - break - // return; - } else { - i32buf[idx] = byte((n & 0x7F) | 0x80) - idx++ - // p.writeByteDirect(byte(((n & 0x7F) | 0x80))); - u := uint32(n) - n = int32(u >> 7) - } - } - return p.trans.Write(i32buf[0:idx]) -} - -// Write an i64 as a varint. Results in 1-10 bytes on the wire. -func (p *TCompactProtocol) writeVarint64(n int64) (int, error) { - varint64out := p.buffer[0:10] - idx := 0 - for { - if (n & ^0x7F) == 0 { - varint64out[idx] = byte(n) - idx++ - break - } else { - varint64out[idx] = byte((n & 0x7F) | 0x80) - idx++ - u := uint64(n) - n = int64(u >> 7) - } - } - return p.trans.Write(varint64out[0:idx]) -} - -// Convert l into a zigzag long. This allows negative numbers to be -// represented compactly as a varint. -func (p *TCompactProtocol) int64ToZigzag(l int64) int64 { - return (l << 1) ^ (l >> 63) -} - -// Convert l into a zigzag long. This allows negative numbers to be -// represented compactly as a varint. -func (p *TCompactProtocol) int32ToZigzag(n int32) int32 { - return (n << 1) ^ (n >> 31) -} - -// Writes a byte without any possibility of all that field header nonsense. -// Used internally by other writing methods that know they need to write a byte. -func (p *TCompactProtocol) writeByteDirect(b byte) error { - return p.trans.WriteByte(b) -} - -// -// Internal reading methods -// - -// Read an i32 from the wire as a varint. The MSB of each byte is set -// if there is another byte to follow. This can read up to 5 bytes. -func (p *TCompactProtocol) readVarint32() (int32, error) { - // if the wire contains the right stuff, this will just truncate the i64 we - // read and get us the right sign. - v, err := p.readVarint64() - return int32(v), err -} - -// Read an i64 from the wire as a proper varint. The MSB of each byte is set -// if there is another byte to follow. This can read up to 10 bytes. -func (p *TCompactProtocol) readVarint64() (int64, error) { - shift := uint(0) - result := int64(0) - for { - b, err := p.readByteDirect() - if err != nil { - return 0, err - } - result |= int64(b&0x7f) << shift - if (b & 0x80) != 0x80 { - break - } - shift += 7 - } - return result, nil -} - -// Read a byte, unlike ReadByte that reads Thrift-byte that is i8. -func (p *TCompactProtocol) readByteDirect() (byte, error) { - return p.trans.ReadByte() -} - -// -// encoding helpers -// - -// Convert from zigzag int to int. -func (p *TCompactProtocol) zigzagToInt32(n int32) int32 { - u := uint32(n) - return int32(u>>1) ^ -(n & 1) -} - -// Convert from zigzag long to long. -func (p *TCompactProtocol) zigzagToInt64(n int64) int64 { - u := uint64(n) - return int64(u>>1) ^ -(n & 1) -} - -// Note that it's important that the mask bytes are long literals, -// otherwise they'll default to ints, and when you shift an int left 56 bits, -// you just get a messed up int. -func (p *TCompactProtocol) bytesToUint64(b []byte) uint64 { - return binary.LittleEndian.Uint64(b) -} - -// -// type testing and converting -// - -func (p *TCompactProtocol) isBoolType(b byte) bool { - return (b&0x0f) == COMPACT_BOOLEAN_TRUE || (b&0x0f) == COMPACT_BOOLEAN_FALSE -} - -// Given a tCompactType constant, convert it to its corresponding -// TType value. 
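The zigzag transform used by the varint writers above is easiest to see with concrete values. This standalone sketch mirrors int32ToZigzag and zigzagToInt32:

    package main

    import "fmt"

    func zigzag32(n int32) uint32   { return uint32((n << 1) ^ (n >> 31)) }
    func unzigzag32(u uint32) int32 { return int32(u>>1) ^ -int32(u&1) }

    func main() {
        // 0, -1, 1, -2, 2 map to 0, 1, 2, 3, 4: small magnitudes stay
        // small, so the varint that follows stays short even for negatives.
        for _, n := range []int32{0, -1, 1, -2, 2} {
            fmt.Println(n, "->", zigzag32(n), "->", unzigzag32(zigzag32(n)))
        }
    }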
-func (p *TCompactProtocol) getTType(t tCompactType) (TType, error) { - switch byte(t) & 0x0f { - case STOP: - return STOP, nil - case COMPACT_BOOLEAN_FALSE, COMPACT_BOOLEAN_TRUE: - return BOOL, nil - case COMPACT_BYTE: - return BYTE, nil - case COMPACT_I16: - return I16, nil - case COMPACT_I32: - return I32, nil - case COMPACT_I64: - return I64, nil - case COMPACT_DOUBLE: - return DOUBLE, nil - case COMPACT_BINARY: - return STRING, nil - case COMPACT_LIST: - return LIST, nil - case COMPACT_SET: - return SET, nil - case COMPACT_MAP: - return MAP, nil - case COMPACT_STRUCT: - return STRUCT, nil - } - return STOP, NewTProtocolException(fmt.Errorf("don't know what type: %v", t&0x0f)) -} - -// Given a TType value, find the appropriate TCompactProtocol.Types constant. -func (p *TCompactProtocol) getCompactType(t TType) tCompactType { - return ttypeToCompactType[t] -} - -func (p *TCompactProtocol) SetTConfiguration(conf *TConfiguration) { - PropagateTConfiguration(p.trans, conf) - PropagateTConfiguration(p.origTransport, conf) - p.cfg = conf -} - -var ( - _ TConfigurationSetter = (*TCompactProtocolFactory)(nil) - _ TConfigurationSetter = (*TCompactProtocol)(nil) -) diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/configuration.go b/vendor/github.com/apache/thrift/lib/go/thrift/configuration.go deleted file mode 100644 index de27edd6..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/configuration.go +++ /dev/null @@ -1,378 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "crypto/tls" - "fmt" - "time" -) - -// Default TConfiguration values. -const ( - DEFAULT_MAX_MESSAGE_SIZE = 100 * 1024 * 1024 - DEFAULT_MAX_FRAME_SIZE = 16384000 - - DEFAULT_TBINARY_STRICT_READ = false - DEFAULT_TBINARY_STRICT_WRITE = true - - DEFAULT_CONNECT_TIMEOUT = 0 - DEFAULT_SOCKET_TIMEOUT = 0 -) - -// TConfiguration defines some configurations shared between TTransport, -// TProtocol, TTransportFactory, TProtocolFactory, and other implementations. -// -// When constructing TConfiguration, you only need to specify the non-default -// fields. All zero values have sane default values. -// -// Not all configurations defined are applicable to all implementations. -// Implementations are free to ignore the configurations not applicable to them. -// -// All functions attached to this type are nil-safe. -// -// See [1] for spec. -// -// NOTE: When using TConfiguration, fill in all the configurations you want to -// set across the stack, not only the ones you want to set in the immediate -// TTransport/TProtocol. 
-// -// For example, say you want to migrate this old code into using TConfiguration: -// -// sccket, err := thrift.NewTSocketTimeout("host:port", time.Second, time.Second) -// transFactory := thrift.NewTFramedTransportFactoryMaxLength( -// thrift.NewTTransportFactory(), -// 1024 * 1024 * 256, -// ) -// protoFactory := thrift.NewTBinaryProtocolFactory(true, true) -// -// This is the wrong way to do it because in the end the TConfiguration used by -// socket and transFactory will be overwritten by the one used by protoFactory -// because of TConfiguration propagation: -// -// // bad example, DO NOT USE -// sccket := thrift.NewTSocketConf("host:port", &thrift.TConfiguration{ -// ConnectTimeout: time.Second, -// SocketTimeout: time.Second, -// }) -// transFactory := thrift.NewTFramedTransportFactoryConf( -// thrift.NewTTransportFactory(), -// &thrift.TConfiguration{ -// MaxFrameSize: 1024 * 1024 * 256, -// }, -// ) -// protoFactory := thrift.NewTBinaryProtocolFactoryConf(&thrift.TConfiguration{ -// TBinaryStrictRead: thrift.BoolPtr(true), -// TBinaryStrictWrite: thrift.BoolPtr(true), -// }) -// -// This is the correct way to do it: -// -// conf := &thrift.TConfiguration{ -// ConnectTimeout: time.Second, -// SocketTimeout: time.Second, -// -// MaxFrameSize: 1024 * 1024 * 256, -// -// TBinaryStrictRead: thrift.BoolPtr(true), -// TBinaryStrictWrite: thrift.BoolPtr(true), -// } -// sccket := thrift.NewTSocketConf("host:port", conf) -// transFactory := thrift.NewTFramedTransportFactoryConf(thrift.NewTTransportFactory(), conf) -// protoFactory := thrift.NewTBinaryProtocolFactoryConf(conf) -// -// [1]: https://github.com/apache/thrift/blob/master/doc/specs/thrift-tconfiguration.md -type TConfiguration struct { - // If <= 0, DEFAULT_MAX_MESSAGE_SIZE will be used instead. - MaxMessageSize int32 - - // If <= 0, DEFAULT_MAX_FRAME_SIZE will be used instead. - // - // Also if MaxMessageSize < MaxFrameSize, - // MaxMessageSize will be used instead. - MaxFrameSize int32 - - // Connect and socket timeouts to be used by TSocket and TSSLSocket. - // - // 0 means no timeout. - // - // If <0, DEFAULT_CONNECT_TIMEOUT and DEFAULT_SOCKET_TIMEOUT will be - // used. - ConnectTimeout time.Duration - SocketTimeout time.Duration - - // TLS config to be used by TSSLSocket. - TLSConfig *tls.Config - - // Strict read/write configurations for TBinaryProtocol. - // - // BoolPtr helper function is available to use literal values. - TBinaryStrictRead *bool - TBinaryStrictWrite *bool - - // The wrapped protocol id to be used in THeader transport/protocol. - // - // THeaderProtocolIDPtr and THeaderProtocolIDPtrMust helper functions - // are provided to help filling this value. - THeaderProtocolID *THeaderProtocolID - - // Used internally by deprecated constructors, to avoid overriding - // underlying TTransport/TProtocol's cfg by accidental propagations. - // - // For external users this is always false. - noPropagation bool -} - -// GetMaxMessageSize returns the max message size an implementation should -// follow. -// -// It's nil-safe. DEFAULT_MAX_MESSAGE_SIZE will be returned if tc is nil. -func (tc *TConfiguration) GetMaxMessageSize() int32 { - if tc == nil || tc.MaxMessageSize <= 0 { - return DEFAULT_MAX_MESSAGE_SIZE - } - return tc.MaxMessageSize -} - -// GetMaxFrameSize returns the max frame size an implementation should follow. -// -// It's nil-safe. DEFAULT_MAX_FRAME_SIZE will be returned if tc is nil. 
-// -// If the configured max message size is smaller than the configured max frame -// size, the smaller one will be returned instead. -func (tc *TConfiguration) GetMaxFrameSize() int32 { - if tc == nil { - return DEFAULT_MAX_FRAME_SIZE - } - maxFrameSize := tc.MaxFrameSize - if maxFrameSize <= 0 { - maxFrameSize = DEFAULT_MAX_FRAME_SIZE - } - if maxMessageSize := tc.GetMaxMessageSize(); maxMessageSize < maxFrameSize { - return maxMessageSize - } - return maxFrameSize -} - -// GetConnectTimeout returns the connect timeout should be used by TSocket and -// TSSLSocket. -// -// It's nil-safe. If tc is nil, DEFAULT_CONNECT_TIMEOUT will be returned instead. -func (tc *TConfiguration) GetConnectTimeout() time.Duration { - if tc == nil || tc.ConnectTimeout < 0 { - return DEFAULT_CONNECT_TIMEOUT - } - return tc.ConnectTimeout -} - -// GetSocketTimeout returns the socket timeout should be used by TSocket and -// TSSLSocket. -// -// It's nil-safe. If tc is nil, DEFAULT_SOCKET_TIMEOUT will be returned instead. -func (tc *TConfiguration) GetSocketTimeout() time.Duration { - if tc == nil || tc.SocketTimeout < 0 { - return DEFAULT_SOCKET_TIMEOUT - } - return tc.SocketTimeout -} - -// GetTLSConfig returns the tls config should be used by TSSLSocket. -// -// It's nil-safe. If tc is nil, nil will be returned instead. -func (tc *TConfiguration) GetTLSConfig() *tls.Config { - if tc == nil { - return nil - } - return tc.TLSConfig -} - -// GetTBinaryStrictRead returns the strict read configuration TBinaryProtocol -// should follow. -// -// It's nil-safe. DEFAULT_TBINARY_STRICT_READ will be returned if either tc or -// tc.TBinaryStrictRead is nil. -func (tc *TConfiguration) GetTBinaryStrictRead() bool { - if tc == nil || tc.TBinaryStrictRead == nil { - return DEFAULT_TBINARY_STRICT_READ - } - return *tc.TBinaryStrictRead -} - -// GetTBinaryStrictWrite returns the strict read configuration TBinaryProtocol -// should follow. -// -// It's nil-safe. DEFAULT_TBINARY_STRICT_WRITE will be returned if either tc or -// tc.TBinaryStrictWrite is nil. -func (tc *TConfiguration) GetTBinaryStrictWrite() bool { - if tc == nil || tc.TBinaryStrictWrite == nil { - return DEFAULT_TBINARY_STRICT_WRITE - } - return *tc.TBinaryStrictWrite -} - -// GetTHeaderProtocolID returns the THeaderProtocolID should be used by -// THeaderProtocol clients (for servers, they always use the same one as the -// client instead). -// -// It's nil-safe. If either tc or tc.THeaderProtocolID is nil, -// THeaderProtocolDefault will be returned instead. -// THeaderProtocolDefault will also be returned if configured value is invalid. -func (tc *TConfiguration) GetTHeaderProtocolID() THeaderProtocolID { - if tc == nil || tc.THeaderProtocolID == nil { - return THeaderProtocolDefault - } - protoID := *tc.THeaderProtocolID - if err := protoID.Validate(); err != nil { - return THeaderProtocolDefault - } - return protoID -} - -// THeaderProtocolIDPtr validates and returns the pointer to id. -// -// If id is not a valid THeaderProtocolID, a pointer to THeaderProtocolDefault -// and the validation error will be returned. -func THeaderProtocolIDPtr(id THeaderProtocolID) (*THeaderProtocolID, error) { - err := id.Validate() - if err != nil { - id = THeaderProtocolDefault - } - return &id, err -} - -// THeaderProtocolIDPtrMust validates and returns the pointer to id. -// -// It's similar to THeaderProtocolIDPtr, but it panics on validation errors -// instead of returning them. 
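A short illustration of the nil-safety contract the getters above document; the defaults shown come from the DEFAULT_* constants at the top of this file:

    package main

    import "github.com/apache/thrift/lib/go/thrift"

    func main() {
        var cfg *thrift.TConfiguration // deliberately nil
        // The getters fall back to the documented defaults rather than
        // dereferencing nil, so call sites need no guards.
        _ = cfg.GetMaxMessageSize()     // DEFAULT_MAX_MESSAGE_SIZE
        _ = cfg.GetTBinaryStrictWrite() // DEFAULT_TBINARY_STRICT_WRITE (true)
    }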
-func THeaderProtocolIDPtrMust(id THeaderProtocolID) *THeaderProtocolID { - ptr, err := THeaderProtocolIDPtr(id) - if err != nil { - panic(err) - } - return ptr -} - -// TConfigurationSetter is an optional interface TProtocol, TTransport, -// TProtocolFactory, TTransportFactory, and other implementations can implement. -// -// It's intended to be called during intializations. -// The behavior of calling SetTConfiguration on a TTransport/TProtocol in the -// middle of a message is undefined: -// It may or may not change the behavior of the current processing message, -// and it may even cause the current message to fail. -// -// Note for implementations: SetTConfiguration might be called multiple times -// with the same value in quick successions due to the implementation of the -// propagation. Implementations should make SetTConfiguration as simple as -// possible (usually just overwrite the stored configuration and propagate it to -// the wrapped TTransports/TProtocols). -type TConfigurationSetter interface { - SetTConfiguration(*TConfiguration) -} - -// PropagateTConfiguration propagates cfg to impl if impl implements -// TConfigurationSetter and cfg is non-nil, otherwise it does nothing. -// -// NOTE: nil cfg is not propagated. If you want to propagate a TConfiguration -// with everything being default value, use &TConfiguration{} explicitly instead. -func PropagateTConfiguration(impl interface{}, cfg *TConfiguration) { - if cfg == nil || cfg.noPropagation { - return - } - - if setter, ok := impl.(TConfigurationSetter); ok { - setter.SetTConfiguration(cfg) - } -} - -func checkSizeForProtocol(size int32, cfg *TConfiguration) error { - if size < 0 { - return NewTProtocolExceptionWithType( - NEGATIVE_SIZE, - fmt.Errorf("negative size: %d", size), - ) - } - if size > cfg.GetMaxMessageSize() { - return NewTProtocolExceptionWithType( - SIZE_LIMIT, - fmt.Errorf("size exceeded max allowed: %d", size), - ) - } - return nil -} - -type tTransportFactoryConf struct { - delegate TTransportFactory - cfg *TConfiguration -} - -func (f *tTransportFactoryConf) GetTransport(orig TTransport) (TTransport, error) { - trans, err := f.delegate.GetTransport(orig) - if err == nil { - PropagateTConfiguration(orig, f.cfg) - PropagateTConfiguration(trans, f.cfg) - } - return trans, err -} - -func (f *tTransportFactoryConf) SetTConfiguration(cfg *TConfiguration) { - PropagateTConfiguration(f.delegate, f.cfg) - f.cfg = cfg -} - -// TTransportFactoryConf wraps a TTransportFactory to propagate -// TConfiguration on the factory's GetTransport calls. -func TTransportFactoryConf(delegate TTransportFactory, conf *TConfiguration) TTransportFactory { - return &tTransportFactoryConf{ - delegate: delegate, - cfg: conf, - } -} - -type tProtocolFactoryConf struct { - delegate TProtocolFactory - cfg *TConfiguration -} - -func (f *tProtocolFactoryConf) GetProtocol(trans TTransport) TProtocol { - proto := f.delegate.GetProtocol(trans) - PropagateTConfiguration(trans, f.cfg) - PropagateTConfiguration(proto, f.cfg) - return proto -} - -func (f *tProtocolFactoryConf) SetTConfiguration(cfg *TConfiguration) { - PropagateTConfiguration(f.delegate, f.cfg) - f.cfg = cfg -} - -// TProtocolFactoryConf wraps a TProtocolFactory to propagate -// TConfiguration on the factory's GetProtocol calls. 
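To make the TConfigurationSetter contract above concrete, here is a hypothetical wrapping transport (countingTransport is invented for illustration) that stores the configuration and propagates it inward, as the note on SetTConfiguration recommends:

    package example

    import "github.com/apache/thrift/lib/go/thrift"

    // countingTransport wraps another transport; only the config
    // handling is shown here.
    type countingTransport struct {
        thrift.TTransport
        cfg *thrift.TConfiguration
    }

    // SetTConfiguration stays cheap: store the value, then propagate it
    // to the wrapped transport.
    func (t *countingTransport) SetTConfiguration(cfg *thrift.TConfiguration) {
        t.cfg = cfg
        thrift.PropagateTConfiguration(t.TTransport, cfg)
    }

    var _ thrift.TConfigurationSetter = (*countingTransport)(nil)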
-func TProtocolFactoryConf(delegate TProtocolFactory, conf *TConfiguration) TProtocolFactory { - return &tProtocolFactoryConf{ - delegate: delegate, - cfg: conf, - } -} - -var ( - _ TConfigurationSetter = (*tTransportFactoryConf)(nil) - _ TConfigurationSetter = (*tProtocolFactoryConf)(nil) -) diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/context.go b/vendor/github.com/apache/thrift/lib/go/thrift/context.go deleted file mode 100644 index d15c1bcf..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/context.go +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import "context" - -var defaultCtx = context.Background() diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/debug_protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/debug_protocol.go deleted file mode 100644 index fdf9bfec..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/debug_protocol.go +++ /dev/null @@ -1,447 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "context" - "fmt" -) - -type TDebugProtocol struct { - // Required. The actual TProtocol to do the read/write. - Delegate TProtocol - - // Optional. The logger and prefix to log all the args/return values - // from Delegate TProtocol calls. - // - // If Logger is nil, StdLogger using stdlib log package with os.Stderr - // will be used. If disable logging is desired, set Logger to NopLogger - // explicitly instead of leaving it as nil/unset. - Logger Logger - LogPrefix string - - // Optional. An TProtocol to duplicate everything read/written from Delegate. - // - // A typical use case of this is to use TSimpleJSONProtocol wrapping - // TMemoryBuffer in a middleware to json logging requests/responses. - // - // This feature is not available from TDebugProtocolFactory. In order to - // use it you have to construct TDebugProtocol directly, or set DuplicateTo - // field after getting a TDebugProtocol from the factory. 
- DuplicateTo TProtocol -} - -type TDebugProtocolFactory struct { - Underlying TProtocolFactory - LogPrefix string - Logger Logger -} - -// NewTDebugProtocolFactory creates a TDebugProtocolFactory. -// -// Deprecated: Please use NewTDebugProtocolFactoryWithLogger or the struct -// itself instead. This version will use the default logger from standard -// library. -func NewTDebugProtocolFactory(underlying TProtocolFactory, logPrefix string) *TDebugProtocolFactory { - return &TDebugProtocolFactory{ - Underlying: underlying, - LogPrefix: logPrefix, - Logger: StdLogger(nil), - } -} - -// NewTDebugProtocolFactoryWithLogger creates a TDebugProtocolFactory. -func NewTDebugProtocolFactoryWithLogger(underlying TProtocolFactory, logPrefix string, logger Logger) *TDebugProtocolFactory { - return &TDebugProtocolFactory{ - Underlying: underlying, - LogPrefix: logPrefix, - Logger: logger, - } -} - -func (t *TDebugProtocolFactory) GetProtocol(trans TTransport) TProtocol { - return &TDebugProtocol{ - Delegate: t.Underlying.GetProtocol(trans), - LogPrefix: t.LogPrefix, - Logger: fallbackLogger(t.Logger), - } -} - -func (tdp *TDebugProtocol) logf(format string, v ...interface{}) { - fallbackLogger(tdp.Logger)(fmt.Sprintf(format, v...)) -} - -func (tdp *TDebugProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error { - err := tdp.Delegate.WriteMessageBegin(ctx, name, typeId, seqid) - tdp.logf("%sWriteMessageBegin(name=%#v, typeId=%#v, seqid=%#v) => %#v", tdp.LogPrefix, name, typeId, seqid, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteMessageBegin(ctx, name, typeId, seqid) - } - return err -} -func (tdp *TDebugProtocol) WriteMessageEnd(ctx context.Context) error { - err := tdp.Delegate.WriteMessageEnd(ctx) - tdp.logf("%sWriteMessageEnd() => %#v", tdp.LogPrefix, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteMessageEnd(ctx) - } - return err -} -func (tdp *TDebugProtocol) WriteStructBegin(ctx context.Context, name string) error { - err := tdp.Delegate.WriteStructBegin(ctx, name) - tdp.logf("%sWriteStructBegin(name=%#v) => %#v", tdp.LogPrefix, name, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteStructBegin(ctx, name) - } - return err -} -func (tdp *TDebugProtocol) WriteStructEnd(ctx context.Context) error { - err := tdp.Delegate.WriteStructEnd(ctx) - tdp.logf("%sWriteStructEnd() => %#v", tdp.LogPrefix, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteStructEnd(ctx) - } - return err -} -func (tdp *TDebugProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error { - err := tdp.Delegate.WriteFieldBegin(ctx, name, typeId, id) - tdp.logf("%sWriteFieldBegin(name=%#v, typeId=%#v, id%#v) => %#v", tdp.LogPrefix, name, typeId, id, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteFieldBegin(ctx, name, typeId, id) - } - return err -} -func (tdp *TDebugProtocol) WriteFieldEnd(ctx context.Context) error { - err := tdp.Delegate.WriteFieldEnd(ctx) - tdp.logf("%sWriteFieldEnd() => %#v", tdp.LogPrefix, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteFieldEnd(ctx) - } - return err -} -func (tdp *TDebugProtocol) WriteFieldStop(ctx context.Context) error { - err := tdp.Delegate.WriteFieldStop(ctx) - tdp.logf("%sWriteFieldStop() => %#v", tdp.LogPrefix, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteFieldStop(ctx) - } - return err -} -func (tdp *TDebugProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error { - err := tdp.Delegate.WriteMapBegin(ctx, 
keyType, valueType, size) - tdp.logf("%sWriteMapBegin(keyType=%#v, valueType=%#v, size=%#v) => %#v", tdp.LogPrefix, keyType, valueType, size, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteMapBegin(ctx, keyType, valueType, size) - } - return err -} -func (tdp *TDebugProtocol) WriteMapEnd(ctx context.Context) error { - err := tdp.Delegate.WriteMapEnd(ctx) - tdp.logf("%sWriteMapEnd() => %#v", tdp.LogPrefix, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteMapEnd(ctx) - } - return err -} -func (tdp *TDebugProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error { - err := tdp.Delegate.WriteListBegin(ctx, elemType, size) - tdp.logf("%sWriteListBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteListBegin(ctx, elemType, size) - } - return err -} -func (tdp *TDebugProtocol) WriteListEnd(ctx context.Context) error { - err := tdp.Delegate.WriteListEnd(ctx) - tdp.logf("%sWriteListEnd() => %#v", tdp.LogPrefix, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteListEnd(ctx) - } - return err -} -func (tdp *TDebugProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { - err := tdp.Delegate.WriteSetBegin(ctx, elemType, size) - tdp.logf("%sWriteSetBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteSetBegin(ctx, elemType, size) - } - return err -} -func (tdp *TDebugProtocol) WriteSetEnd(ctx context.Context) error { - err := tdp.Delegate.WriteSetEnd(ctx) - tdp.logf("%sWriteSetEnd() => %#v", tdp.LogPrefix, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteSetEnd(ctx) - } - return err -} -func (tdp *TDebugProtocol) WriteBool(ctx context.Context, value bool) error { - err := tdp.Delegate.WriteBool(ctx, value) - tdp.logf("%sWriteBool(value=%#v) => %#v", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteBool(ctx, value) - } - return err -} -func (tdp *TDebugProtocol) WriteByte(ctx context.Context, value int8) error { - err := tdp.Delegate.WriteByte(ctx, value) - tdp.logf("%sWriteByte(value=%#v) => %#v", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteByte(ctx, value) - } - return err -} -func (tdp *TDebugProtocol) WriteI16(ctx context.Context, value int16) error { - err := tdp.Delegate.WriteI16(ctx, value) - tdp.logf("%sWriteI16(value=%#v) => %#v", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteI16(ctx, value) - } - return err -} -func (tdp *TDebugProtocol) WriteI32(ctx context.Context, value int32) error { - err := tdp.Delegate.WriteI32(ctx, value) - tdp.logf("%sWriteI32(value=%#v) => %#v", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteI32(ctx, value) - } - return err -} -func (tdp *TDebugProtocol) WriteI64(ctx context.Context, value int64) error { - err := tdp.Delegate.WriteI64(ctx, value) - tdp.logf("%sWriteI64(value=%#v) => %#v", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteI64(ctx, value) - } - return err -} -func (tdp *TDebugProtocol) WriteDouble(ctx context.Context, value float64) error { - err := tdp.Delegate.WriteDouble(ctx, value) - tdp.logf("%sWriteDouble(value=%#v) => %#v", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteDouble(ctx, value) - } - return err -} -func (tdp *TDebugProtocol) WriteString(ctx context.Context, value string) error { - err := tdp.Delegate.WriteString(ctx, 
value) - tdp.logf("%sWriteString(value=%#v) => %#v", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteString(ctx, value) - } - return err -} -func (tdp *TDebugProtocol) WriteBinary(ctx context.Context, value []byte) error { - err := tdp.Delegate.WriteBinary(ctx, value) - tdp.logf("%sWriteBinary(value=%#v) => %#v", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteBinary(ctx, value) - } - return err -} - -func (tdp *TDebugProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqid int32, err error) { - name, typeId, seqid, err = tdp.Delegate.ReadMessageBegin(ctx) - tdp.logf("%sReadMessageBegin() (name=%#v, typeId=%#v, seqid=%#v, err=%#v)", tdp.LogPrefix, name, typeId, seqid, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteMessageBegin(ctx, name, typeId, seqid) - } - return -} -func (tdp *TDebugProtocol) ReadMessageEnd(ctx context.Context) (err error) { - err = tdp.Delegate.ReadMessageEnd(ctx) - tdp.logf("%sReadMessageEnd() err=%#v", tdp.LogPrefix, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteMessageEnd(ctx) - } - return -} -func (tdp *TDebugProtocol) ReadStructBegin(ctx context.Context) (name string, err error) { - name, err = tdp.Delegate.ReadStructBegin(ctx) - tdp.logf("%sReadStructBegin() (name=%#v, err=%#v)", tdp.LogPrefix, name, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteStructBegin(ctx, name) - } - return -} -func (tdp *TDebugProtocol) ReadStructEnd(ctx context.Context) (err error) { - err = tdp.Delegate.ReadStructEnd(ctx) - tdp.logf("%sReadStructEnd() err=%#v", tdp.LogPrefix, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteStructEnd(ctx) - } - return -} -func (tdp *TDebugProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error) { - name, typeId, id, err = tdp.Delegate.ReadFieldBegin(ctx) - tdp.logf("%sReadFieldBegin() (name=%#v, typeId=%#v, id=%#v, err=%#v)", tdp.LogPrefix, name, typeId, id, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteFieldBegin(ctx, name, typeId, id) - } - return -} -func (tdp *TDebugProtocol) ReadFieldEnd(ctx context.Context) (err error) { - err = tdp.Delegate.ReadFieldEnd(ctx) - tdp.logf("%sReadFieldEnd() err=%#v", tdp.LogPrefix, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteFieldEnd(ctx) - } - return -} -func (tdp *TDebugProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) { - keyType, valueType, size, err = tdp.Delegate.ReadMapBegin(ctx) - tdp.logf("%sReadMapBegin() (keyType=%#v, valueType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, keyType, valueType, size, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteMapBegin(ctx, keyType, valueType, size) - } - return -} -func (tdp *TDebugProtocol) ReadMapEnd(ctx context.Context) (err error) { - err = tdp.Delegate.ReadMapEnd(ctx) - tdp.logf("%sReadMapEnd() err=%#v", tdp.LogPrefix, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteMapEnd(ctx) - } - return -} -func (tdp *TDebugProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) { - elemType, size, err = tdp.Delegate.ReadListBegin(ctx) - tdp.logf("%sReadListBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteListBegin(ctx, elemType, size) - } - return -} -func (tdp *TDebugProtocol) ReadListEnd(ctx context.Context) (err error) { - err = tdp.Delegate.ReadListEnd(ctx) - tdp.logf("%sReadListEnd() err=%#v",
tdp.LogPrefix, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteListEnd(ctx) - } - return -} -func (tdp *TDebugProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) { - elemType, size, err = tdp.Delegate.ReadSetBegin(ctx) - tdp.logf("%sReadSetBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteSetBegin(ctx, elemType, size) - } - return -} -func (tdp *TDebugProtocol) ReadSetEnd(ctx context.Context) (err error) { - err = tdp.Delegate.ReadSetEnd(ctx) - tdp.logf("%sReadSetEnd() err=%#v", tdp.LogPrefix, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteSetEnd(ctx) - } - return -} -func (tdp *TDebugProtocol) ReadBool(ctx context.Context) (value bool, err error) { - value, err = tdp.Delegate.ReadBool(ctx) - tdp.logf("%sReadBool() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteBool(ctx, value) - } - return -} -func (tdp *TDebugProtocol) ReadByte(ctx context.Context) (value int8, err error) { - value, err = tdp.Delegate.ReadByte(ctx) - tdp.logf("%sReadByte() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteByte(ctx, value) - } - return -} -func (tdp *TDebugProtocol) ReadI16(ctx context.Context) (value int16, err error) { - value, err = tdp.Delegate.ReadI16(ctx) - tdp.logf("%sReadI16() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteI16(ctx, value) - } - return -} -func (tdp *TDebugProtocol) ReadI32(ctx context.Context) (value int32, err error) { - value, err = tdp.Delegate.ReadI32(ctx) - tdp.logf("%sReadI32() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteI32(ctx, value) - } - return -} -func (tdp *TDebugProtocol) ReadI64(ctx context.Context) (value int64, err error) { - value, err = tdp.Delegate.ReadI64(ctx) - tdp.logf("%sReadI64() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteI64(ctx, value) - } - return -} -func (tdp *TDebugProtocol) ReadDouble(ctx context.Context) (value float64, err error) { - value, err = tdp.Delegate.ReadDouble(ctx) - tdp.logf("%sReadDouble() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteDouble(ctx, value) - } - return -} -func (tdp *TDebugProtocol) ReadString(ctx context.Context) (value string, err error) { - value, err = tdp.Delegate.ReadString(ctx) - tdp.logf("%sReadString() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteString(ctx, value) - } - return -} -func (tdp *TDebugProtocol) ReadBinary(ctx context.Context) (value []byte, err error) { - value, err = tdp.Delegate.ReadBinary(ctx) - tdp.logf("%sReadBinary() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.WriteBinary(ctx, value) - } - return -} -func (tdp *TDebugProtocol) Skip(ctx context.Context, fieldType TType) (err error) { - err = tdp.Delegate.Skip(ctx, fieldType) - tdp.logf("%sSkip(fieldType=%#v) (err=%#v)", tdp.LogPrefix, fieldType, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.Skip(ctx, fieldType) - } - return -} -func (tdp *TDebugProtocol) Flush(ctx context.Context) (err error) { - err = tdp.Delegate.Flush(ctx) - tdp.logf("%sFlush() (err=%#v)", tdp.LogPrefix, err) - if tdp.DuplicateTo != nil { - tdp.DuplicateTo.Flush(ctx) - } - return -} - -func 
(tdp *TDebugProtocol) Transport() TTransport { - return tdp.Delegate.Transport() -} - -// SetTConfiguration implements TConfigurationSetter for propagation. -func (tdp *TDebugProtocol) SetTConfiguration(conf *TConfiguration) { - PropagateTConfiguration(tdp.Delegate, conf) - PropagateTConfiguration(tdp.DuplicateTo, conf) -} - -var _ TConfigurationSetter = (*TDebugProtocol)(nil) diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/deserializer.go b/vendor/github.com/apache/thrift/lib/go/thrift/deserializer.go deleted file mode 100644 index cefc7ecd..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/deserializer.go +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "context" - "sync" -) - -type TDeserializer struct { - Transport *TMemoryBuffer - Protocol TProtocol -} - -func NewTDeserializer() *TDeserializer { - transport := NewTMemoryBufferLen(1024) - protocol := NewTBinaryProtocolTransport(transport) - - return &TDeserializer{ - Transport: transport, - Protocol: protocol, - } -} - -func (t *TDeserializer) ReadString(ctx context.Context, msg TStruct, s string) (err error) { - t.Transport.Reset() - - err = nil - if _, err = t.Transport.Write([]byte(s)); err != nil { - return - } - if err = msg.Read(ctx, t.Protocol); err != nil { - return - } - return -} - -func (t *TDeserializer) Read(ctx context.Context, msg TStruct, b []byte) (err error) { - t.Transport.Reset() - - err = nil - if _, err = t.Transport.Write(b); err != nil { - return - } - if err = msg.Read(ctx, t.Protocol); err != nil { - return - } - return -} - -// TDeserializerPool is the thread-safe version of TDeserializer, -// it uses a resource pool of TDeserializer under the hood. -// -// It must be initialized with either NewTDeserializerPool or -// NewTDeserializerPoolSizeFactory. -type TDeserializerPool struct { - pool sync.Pool -} - -// NewTDeserializerPool creates a new TDeserializerPool. -// -// NewTDeserializer can be used as the arg here. -func NewTDeserializerPool(f func() *TDeserializer) *TDeserializerPool { - return &TDeserializerPool{ - pool: sync.Pool{ - New: func() interface{} { - return f() - }, - }, - } -} - -// NewTDeserializerPoolSizeFactory creates a new TDeserializerPool with -// the given size and protocol factory. -// -// Note that the size is not the limit. The TMemoryBuffer underneath can grow -// larger than that. It just dictates the initial size.
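// For example (an illustrative sketch; MyThriftStruct stands in for any
// compiler-generated TStruct, and payload for its serialized bytes):
//
//	pool := NewTDeserializerPoolSizeFactory(1024, NewTBinaryProtocolFactoryConf(nil))
//	msg := &MyThriftStruct{}
//	if err := pool.Read(context.Background(), msg, payload); err != nil {
//		// handle decode error
//	}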
-func NewTDeserializerPoolSizeFactory(size int, factory TProtocolFactory) *TDeserializerPool { - return &TDeserializerPool{ - pool: sync.Pool{ - New: func() interface{} { - transport := NewTMemoryBufferLen(size) - protocol := factory.GetProtocol(transport) - - return &TDeserializer{ - Transport: transport, - Protocol: protocol, - } - }, - }, - } -} - -func (t *TDeserializerPool) ReadString(ctx context.Context, msg TStruct, s string) error { - d := t.pool.Get().(*TDeserializer) - defer t.pool.Put(d) - return d.ReadString(ctx, msg, s) -} - -func (t *TDeserializerPool) Read(ctx context.Context, msg TStruct, b []byte) error { - d := t.pool.Get().(*TDeserializer) - defer t.pool.Put(d) - return d.Read(ctx, msg, b) -} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/exception.go b/vendor/github.com/apache/thrift/lib/go/thrift/exception.go deleted file mode 100644 index 53bf862e..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/exception.go +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "errors" -) - -// TException is the generic Thrift exception interface. -type TException interface { - error - - TExceptionType() TExceptionType -} - -// PrependError prepends additional information to an error without losing the Thrift exception interface. -func PrependError(prepend string, err error) error { - msg := prepend + err.Error() - - var te TException - if errors.As(err, &te) { - switch te.TExceptionType() { - case TExceptionTypeTransport: - if t, ok := err.(TTransportException); ok { - return prependTTransportException(prepend, t) - } - case TExceptionTypeProtocol: - if t, ok := err.(TProtocolException); ok { - return prependTProtocolException(prepend, t) - } - case TExceptionTypeApplication: - var t TApplicationException - if errors.As(err, &t) { - return NewTApplicationException(t.TypeId(), msg) - } - } - - return wrappedTException{ - err: err, - msg: msg, - tExceptionType: te.TExceptionType(), - } - } - - return errors.New(msg) -} - -// TExceptionType is an enum type to categorize different "subclasses" of TExceptions. -type TExceptionType byte - -// TExceptionType values -const ( - TExceptionTypeUnknown TExceptionType = iota - TExceptionTypeCompiled // TExceptions defined in thrift files and generated by thrift compiler - TExceptionTypeApplication // TApplicationExceptions - TExceptionTypeProtocol // TProtocolExceptions - TExceptionTypeTransport // TTransportExceptions -) - -// WrapTException wraps an error into TException. -// -// If err is nil or already TException, it's returned as-is. -// Otherwise it will be wrapped into TException with TExceptionType() returning -// TExceptionTypeUnknown, and Unwrap() returning the original error.
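// A small usage sketch (illustrative only):
//
//	err := WrapTException(errors.New("boom"))
//	var te TException
//	if errors.As(err, &te) {
//		// te.TExceptionType() is TExceptionTypeUnknown here,
//		// and errors.Unwrap(err) yields the original error.
//	}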
-func WrapTException(err error) TException { - if err == nil { - return nil - } - - if te, ok := err.(TException); ok { - return te - } - - return wrappedTException{ - err: err, - msg: err.Error(), - tExceptionType: TExceptionTypeUnknown, - } -} - -type wrappedTException struct { - err error - msg string - tExceptionType TExceptionType -} - -func (w wrappedTException) Error() string { - return w.msg -} - -func (w wrappedTException) TExceptionType() TExceptionType { - return w.tExceptionType -} - -func (w wrappedTException) Unwrap() error { - return w.err -} - -var _ TException = wrappedTException{} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/framed_transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/framed_transport.go deleted file mode 100644 index c8bd35e3..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/framed_transport.go +++ /dev/null @@ -1,250 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "bufio" - "bytes" - "context" - "encoding/binary" - "fmt" - "io" -) - -// Deprecated: Use DEFAULT_MAX_FRAME_SIZE instead. -const DEFAULT_MAX_LENGTH = 16384000 - -type TFramedTransport struct { - transport TTransport - - cfg *TConfiguration - - writeBuf *bytes.Buffer - - reader *bufio.Reader - readBuf *bytes.Buffer - - buffer [4]byte -} - -type tFramedTransportFactory struct { - factory TTransportFactory - cfg *TConfiguration -} - -// Deprecated: Use NewTFramedTransportFactoryConf instead. -func NewTFramedTransportFactory(factory TTransportFactory) TTransportFactory { - return NewTFramedTransportFactoryConf(factory, &TConfiguration{ - MaxFrameSize: DEFAULT_MAX_LENGTH, - - noPropagation: true, - }) -} - -// Deprecated: Use NewTFramedTransportFactoryConf instead. -func NewTFramedTransportFactoryMaxLength(factory TTransportFactory, maxLength uint32) TTransportFactory { - return NewTFramedTransportFactoryConf(factory, &TConfiguration{ - MaxFrameSize: int32(maxLength), - - noPropagation: true, - }) -} - -func NewTFramedTransportFactoryConf(factory TTransportFactory, conf *TConfiguration) TTransportFactory { - PropagateTConfiguration(factory, conf) - return &tFramedTransportFactory{ - factory: factory, - cfg: conf, - } -} - -func (p *tFramedTransportFactory) GetTransport(base TTransport) (TTransport, error) { - PropagateTConfiguration(base, p.cfg) - tt, err := p.factory.GetTransport(base) - if err != nil { - return nil, err - } - return NewTFramedTransportConf(tt, p.cfg), nil -} - -func (p *tFramedTransportFactory) SetTConfiguration(cfg *TConfiguration) { - PropagateTConfiguration(p.factory, cfg) - p.cfg = cfg -} - -// Deprecated: Use NewTFramedTransportConf instead. 
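// A minimal configured sketch for the framed transport below (sock stands for
// any raw TTransport, e.g. a TSocket; illustrative only):
//
//	conf := &TConfiguration{MaxFrameSize: 1024 * 1024}
//	trans := NewTFramedTransportConf(sock, conf)
//	// Incoming frames larger than conf.MaxFrameSize are rejected on read.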
-func NewTFramedTransport(transport TTransport) *TFramedTransport { - return NewTFramedTransportConf(transport, &TConfiguration{ - MaxFrameSize: DEFAULT_MAX_LENGTH, - - noPropagation: true, - }) -} - -// Deprecated: Use NewTFramedTransportConf instead. -func NewTFramedTransportMaxLength(transport TTransport, maxLength uint32) *TFramedTransport { - return NewTFramedTransportConf(transport, &TConfiguration{ - MaxFrameSize: int32(maxLength), - - noPropagation: true, - }) -} - -func NewTFramedTransportConf(transport TTransport, conf *TConfiguration) *TFramedTransport { - PropagateTConfiguration(transport, conf) - return &TFramedTransport{ - transport: transport, - reader: bufio.NewReader(transport), - cfg: conf, - } -} - -func (p *TFramedTransport) Open() error { - return p.transport.Open() -} - -func (p *TFramedTransport) IsOpen() bool { - return p.transport.IsOpen() -} - -func (p *TFramedTransport) Close() error { - return p.transport.Close() -} - -func (p *TFramedTransport) Read(buf []byte) (read int, err error) { - defer func() { - // Make sure we return the read buffer back to pool - // after we finished reading from it. - if p.readBuf != nil && p.readBuf.Len() == 0 { - returnBufToPool(&p.readBuf) - } - }() - - if p.readBuf != nil { - - read, err = p.readBuf.Read(buf) - if err != io.EOF { - return - } - - // For bytes.Buffer.Read, EOF would only happen when read is zero, - // but still, do a sanity check, - // in case that behavior is changed in a future version of go stdlib. - // When that happens, just return nil error, - // and let the caller call Read again to read the next frame. - if read > 0 { - return read, nil - } - } - - // Reaching here means that the last Read finished the last frame, - // so we need to read the next frame into readBuf now. 
- if err = p.readFrame(); err != nil { - return read, err - } - newRead, err := p.Read(buf[read:]) - return read + newRead, err -} - -func (p *TFramedTransport) ReadByte() (c byte, err error) { - buf := p.buffer[:1] - _, err = p.Read(buf) - if err != nil { - return - } - c = buf[0] - return -} - -func (p *TFramedTransport) ensureWriteBufferBeforeWrite() { - if p.writeBuf == nil { - p.writeBuf = getBufFromPool() - } -} - -func (p *TFramedTransport) Write(buf []byte) (int, error) { - p.ensureWriteBufferBeforeWrite() - n, err := p.writeBuf.Write(buf) - return n, NewTTransportExceptionFromError(err) -} - -func (p *TFramedTransport) WriteByte(c byte) error { - p.ensureWriteBufferBeforeWrite() - return p.writeBuf.WriteByte(c) -} - -func (p *TFramedTransport) WriteString(s string) (n int, err error) { - p.ensureWriteBufferBeforeWrite() - return p.writeBuf.WriteString(s) -} - -func (p *TFramedTransport) Flush(ctx context.Context) error { - defer returnBufToPool(&p.writeBuf) - size := p.writeBuf.Len() - buf := p.buffer[:4] - binary.BigEndian.PutUint32(buf, uint32(size)) - _, err := p.transport.Write(buf) - if err != nil { - return NewTTransportExceptionFromError(err) - } - if size > 0 { - if _, err := io.Copy(p.transport, p.writeBuf); err != nil { - return NewTTransportExceptionFromError(err) - } - } - err = p.transport.Flush(ctx) - return NewTTransportExceptionFromError(err) -} - -func (p *TFramedTransport) readFrame() error { - if p.readBuf != nil { - returnBufToPool(&p.readBuf) - } - p.readBuf = getBufFromPool() - - buf := p.buffer[:4] - if _, err := io.ReadFull(p.reader, buf); err != nil { - return err - } - size := binary.BigEndian.Uint32(buf) - if size > uint32(p.cfg.GetMaxFrameSize()) { - return NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, fmt.Sprintf("Incorrect frame size (%d)", size)) - } - _, err := io.CopyN(p.readBuf, p.reader, int64(size)) - return NewTTransportExceptionFromError(err) -} - -func (p *TFramedTransport) RemainingBytes() (num_bytes uint64) { - if p.readBuf == nil { - return 0 - } - return uint64(p.readBuf.Len()) -} - -// SetTConfiguration implements TConfigurationSetter. -func (p *TFramedTransport) SetTConfiguration(cfg *TConfiguration) { - PropagateTConfiguration(p.transport, cfg) - p.cfg = cfg -} - -var ( - _ TConfigurationSetter = (*tFramedTransportFactory)(nil) - _ TConfigurationSetter = (*TFramedTransport)(nil) -) diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/header_context.go b/vendor/github.com/apache/thrift/lib/go/thrift/header_context.go deleted file mode 100644 index ac9bd488..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/header_context.go +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "context" -) - -// See https://godoc.org/context#WithValue on why we need the unexported typedefs. -type ( - headerKey string - headerKeyList int -) - -// Values for headerKeyList. -const ( - headerKeyListRead headerKeyList = iota - headerKeyListWrite -) - -// SetHeader sets a header in the context. -func SetHeader(ctx context.Context, key, value string) context.Context { - return context.WithValue( - ctx, - headerKey(key), - value, - ) -} - -// UnsetHeader unsets a previously set header in the context. -func UnsetHeader(ctx context.Context, key string) context.Context { - return context.WithValue( - ctx, - headerKey(key), - nil, - ) -} - -// GetHeader returns the value of the given header from the context. -func GetHeader(ctx context.Context, key string) (value string, ok bool) { - if v := ctx.Value(headerKey(key)); v != nil { - value, ok = v.(string) - } - return -} - -// SetReadHeaderList sets the key list of read THeaders in the context. -func SetReadHeaderList(ctx context.Context, keys []string) context.Context { - return context.WithValue( - ctx, - headerKeyListRead, - keys, - ) -} - -// GetReadHeaderList returns the key list of read THeaders from the context. -func GetReadHeaderList(ctx context.Context) []string { - if v := ctx.Value(headerKeyListRead); v != nil { - if value, ok := v.([]string); ok { - return value - } - } - return nil -} - -// SetWriteHeaderList sets the key list of THeaders to write in the context. -func SetWriteHeaderList(ctx context.Context, keys []string) context.Context { - return context.WithValue( - ctx, - headerKeyListWrite, - keys, - ) -} - -// GetWriteHeaderList returns the key list of THeaders to write from the context. -func GetWriteHeaderList(ctx context.Context) []string { - if v := ctx.Value(headerKeyListWrite); v != nil { - if value, ok := v.([]string); ok { - return value - } - } - return nil -} - -// AddReadTHeaderToContext adds all the THeader headers into the context. -func AddReadTHeaderToContext(ctx context.Context, headers THeaderMap) context.Context { - keys := make([]string, 0, len(headers)) - for key, value := range headers { - ctx = SetHeader(ctx, key, value) - keys = append(keys, key) - } - return SetReadHeaderList(ctx, keys) -} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/header_protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/header_protocol.go deleted file mode 100644 index 878041f8..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/header_protocol.go +++ /dev/null @@ -1,351 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License.
- */ - -package thrift - -import ( - "context" - "errors" -) - -// THeaderProtocol is a thrift protocol that implements THeader: -// https://github.com/apache/thrift/blob/master/doc/specs/HeaderFormat.md -// -// It supports either binary or compact protocol as the wrapped protocol. -// -// Most of the THeader handling happens inside THeaderTransport. -type THeaderProtocol struct { - transport *THeaderTransport - - // Will be initialized on first read/write. - protocol TProtocol - - cfg *TConfiguration -} - -// Deprecated: Use NewTHeaderProtocolConf instead. -func NewTHeaderProtocol(trans TTransport) *THeaderProtocol { - return newTHeaderProtocolConf(trans, &TConfiguration{ - noPropagation: true, - }) -} - -// NewTHeaderProtocolConf creates a new THeaderProtocol from the underlying -// transport with given TConfiguration. -// -// The passed in transport will be wrapped with THeaderTransport. -// -// Note that THeaderTransport handles frame and zlib by itself, -// so the underlying transport should be a raw socket transport (TSocket or TSSLSocket), -// instead of rich transports like TZlibTransport or TFramedTransport. -func NewTHeaderProtocolConf(trans TTransport, conf *TConfiguration) *THeaderProtocol { - return newTHeaderProtocolConf(trans, conf) -} - -func newTHeaderProtocolConf(trans TTransport, cfg *TConfiguration) *THeaderProtocol { - t := NewTHeaderTransportConf(trans, cfg) - p, _ := t.cfg.GetTHeaderProtocolID().GetProtocol(t) - PropagateTConfiguration(p, cfg) - return &THeaderProtocol{ - transport: t, - protocol: p, - cfg: cfg, - } -} - -type tHeaderProtocolFactory struct { - cfg *TConfiguration -} - -func (f tHeaderProtocolFactory) GetProtocol(trans TTransport) TProtocol { - return newTHeaderProtocolConf(trans, f.cfg) -} - -func (f *tHeaderProtocolFactory) SetTConfiguration(cfg *TConfiguration) { - f.cfg = cfg -} - -// Deprecated: Use NewTHeaderProtocolFactoryConf instead. -func NewTHeaderProtocolFactory() TProtocolFactory { - return NewTHeaderProtocolFactoryConf(&TConfiguration{ - noPropagation: true, - }) -} - -// NewTHeaderProtocolFactoryConf creates a factory for THeader with given -// TConfiguration. -func NewTHeaderProtocolFactoryConf(conf *TConfiguration) TProtocolFactory { - return tHeaderProtocolFactory{ - cfg: conf, - } -} - -// Transport returns the underlying transport. -// -// It's guaranteed to be of type *THeaderTransport. -func (p *THeaderProtocol) Transport() TTransport { - return p.transport -} - -// GetReadHeaders returns the THeaderMap read from transport. -func (p *THeaderProtocol) GetReadHeaders() THeaderMap { - return p.transport.GetReadHeaders() -} - -// SetWriteHeader sets a header for write. -func (p *THeaderProtocol) SetWriteHeader(key, value string) { - p.transport.SetWriteHeader(key, value) -} - -// ClearWriteHeaders clears all write headers previously set. -func (p *THeaderProtocol) ClearWriteHeaders() { - p.transport.ClearWriteHeaders() -} - -// AddTransform adds a transform for writing.
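// A hedged client-side sketch of the calls above (sock is a placeholder for a
// raw TSocket or TSSLSocket, and THeaderProtocolIDPtrMust is assumed to be
// the helper that yields a *THeaderProtocolID for TConfiguration):
//
//	conf := &TConfiguration{THeaderProtocolID: THeaderProtocolIDPtrMust(THeaderProtocolCompact)}
//	proto := NewTHeaderProtocolConf(sock, conf)
//	proto.SetWriteHeader("request-id", "42")
//	_ = proto.AddTransform(TransformZlib) // zlib-compress written frames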
-func (p *THeaderProtocol) AddTransform(transform THeaderTransformID) error { - return p.transport.AddTransform(transform) -} - -func (p *THeaderProtocol) Flush(ctx context.Context) error { - return p.transport.Flush(ctx) -} - -func (p *THeaderProtocol) WriteMessageBegin(ctx context.Context, name string, typeID TMessageType, seqID int32) error { - newProto, err := p.transport.Protocol().GetProtocol(p.transport) - if err != nil { - return err - } - PropagateTConfiguration(newProto, p.cfg) - p.protocol = newProto - p.transport.SequenceID = seqID - return p.protocol.WriteMessageBegin(ctx, name, typeID, seqID) -} - -func (p *THeaderProtocol) WriteMessageEnd(ctx context.Context) error { - if err := p.protocol.WriteMessageEnd(ctx); err != nil { - return err - } - return p.transport.Flush(ctx) -} - -func (p *THeaderProtocol) WriteStructBegin(ctx context.Context, name string) error { - return p.protocol.WriteStructBegin(ctx, name) -} - -func (p *THeaderProtocol) WriteStructEnd(ctx context.Context) error { - return p.protocol.WriteStructEnd(ctx) -} - -func (p *THeaderProtocol) WriteFieldBegin(ctx context.Context, name string, typeID TType, id int16) error { - return p.protocol.WriteFieldBegin(ctx, name, typeID, id) -} - -func (p *THeaderProtocol) WriteFieldEnd(ctx context.Context) error { - return p.protocol.WriteFieldEnd(ctx) -} - -func (p *THeaderProtocol) WriteFieldStop(ctx context.Context) error { - return p.protocol.WriteFieldStop(ctx) -} - -func (p *THeaderProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error { - return p.protocol.WriteMapBegin(ctx, keyType, valueType, size) -} - -func (p *THeaderProtocol) WriteMapEnd(ctx context.Context) error { - return p.protocol.WriteMapEnd(ctx) -} - -func (p *THeaderProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error { - return p.protocol.WriteListBegin(ctx, elemType, size) -} - -func (p *THeaderProtocol) WriteListEnd(ctx context.Context) error { - return p.protocol.WriteListEnd(ctx) -} - -func (p *THeaderProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { - return p.protocol.WriteSetBegin(ctx, elemType, size) -} - -func (p *THeaderProtocol) WriteSetEnd(ctx context.Context) error { - return p.protocol.WriteSetEnd(ctx) -} - -func (p *THeaderProtocol) WriteBool(ctx context.Context, value bool) error { - return p.protocol.WriteBool(ctx, value) -} - -func (p *THeaderProtocol) WriteByte(ctx context.Context, value int8) error { - return p.protocol.WriteByte(ctx, value) -} - -func (p *THeaderProtocol) WriteI16(ctx context.Context, value int16) error { - return p.protocol.WriteI16(ctx, value) -} - -func (p *THeaderProtocol) WriteI32(ctx context.Context, value int32) error { - return p.protocol.WriteI32(ctx, value) -} - -func (p *THeaderProtocol) WriteI64(ctx context.Context, value int64) error { - return p.protocol.WriteI64(ctx, value) -} - -func (p *THeaderProtocol) WriteDouble(ctx context.Context, value float64) error { - return p.protocol.WriteDouble(ctx, value) -} - -func (p *THeaderProtocol) WriteString(ctx context.Context, value string) error { - return p.protocol.WriteString(ctx, value) -} - -func (p *THeaderProtocol) WriteBinary(ctx context.Context, value []byte) error { - return p.protocol.WriteBinary(ctx, value) -} - -// ReadFrame calls underlying THeaderTransport's ReadFrame function. 
-func (p *THeaderProtocol) ReadFrame(ctx context.Context) error { - return p.transport.ReadFrame(ctx) -} - -func (p *THeaderProtocol) ReadMessageBegin(ctx context.Context) (name string, typeID TMessageType, seqID int32, err error) { - if err = p.transport.ReadFrame(ctx); err != nil { - return - } - - var newProto TProtocol - newProto, err = p.transport.Protocol().GetProtocol(p.transport) - if err != nil { - var tAppExc TApplicationException - if !errors.As(err, &tAppExc) { - return - } - if e := p.protocol.WriteMessageBegin(ctx, "", EXCEPTION, seqID); e != nil { - return - } - if e := tAppExc.Write(ctx, p.protocol); e != nil { - return - } - if e := p.protocol.WriteMessageEnd(ctx); e != nil { - return - } - if e := p.transport.Flush(ctx); e != nil { - return - } - return - } - PropagateTConfiguration(newProto, p.cfg) - p.protocol = newProto - - return p.protocol.ReadMessageBegin(ctx) -} - -func (p *THeaderProtocol) ReadMessageEnd(ctx context.Context) error { - return p.protocol.ReadMessageEnd(ctx) -} - -func (p *THeaderProtocol) ReadStructBegin(ctx context.Context) (name string, err error) { - return p.protocol.ReadStructBegin(ctx) -} - -func (p *THeaderProtocol) ReadStructEnd(ctx context.Context) error { - return p.protocol.ReadStructEnd(ctx) -} - -func (p *THeaderProtocol) ReadFieldBegin(ctx context.Context) (name string, typeID TType, id int16, err error) { - return p.protocol.ReadFieldBegin(ctx) -} - -func (p *THeaderProtocol) ReadFieldEnd(ctx context.Context) error { - return p.protocol.ReadFieldEnd(ctx) -} - -func (p *THeaderProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) { - return p.protocol.ReadMapBegin(ctx) -} - -func (p *THeaderProtocol) ReadMapEnd(ctx context.Context) error { - return p.protocol.ReadMapEnd(ctx) -} - -func (p *THeaderProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) { - return p.protocol.ReadListBegin(ctx) -} - -func (p *THeaderProtocol) ReadListEnd(ctx context.Context) error { - return p.protocol.ReadListEnd(ctx) -} - -func (p *THeaderProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) { - return p.protocol.ReadSetBegin(ctx) -} - -func (p *THeaderProtocol) ReadSetEnd(ctx context.Context) error { - return p.protocol.ReadSetEnd(ctx) -} - -func (p *THeaderProtocol) ReadBool(ctx context.Context) (value bool, err error) { - return p.protocol.ReadBool(ctx) -} - -func (p *THeaderProtocol) ReadByte(ctx context.Context) (value int8, err error) { - return p.protocol.ReadByte(ctx) -} - -func (p *THeaderProtocol) ReadI16(ctx context.Context) (value int16, err error) { - return p.protocol.ReadI16(ctx) -} - -func (p *THeaderProtocol) ReadI32(ctx context.Context) (value int32, err error) { - return p.protocol.ReadI32(ctx) -} - -func (p *THeaderProtocol) ReadI64(ctx context.Context) (value int64, err error) { - return p.protocol.ReadI64(ctx) -} - -func (p *THeaderProtocol) ReadDouble(ctx context.Context) (value float64, err error) { - return p.protocol.ReadDouble(ctx) -} - -func (p *THeaderProtocol) ReadString(ctx context.Context) (value string, err error) { - return p.protocol.ReadString(ctx) -} - -func (p *THeaderProtocol) ReadBinary(ctx context.Context) (value []byte, err error) { - return p.protocol.ReadBinary(ctx) -} - -func (p *THeaderProtocol) Skip(ctx context.Context, fieldType TType) error { - return p.protocol.Skip(ctx, fieldType) -} - -// SetTConfiguration implements TConfigurationSetter. 
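// For example, limits can be tightened after construction by propagating a
// new TConfiguration down the stack (illustrative sketch; proto is any
// protocol built above):
//
//	conf := &TConfiguration{MaxFrameSize: 256 * 1024, MaxMessageSize: 256 * 1024}
//	PropagateTConfiguration(proto, conf) // walks every TConfigurationSetter in the stack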
-func (p *THeaderProtocol) SetTConfiguration(cfg *TConfiguration) { - PropagateTConfiguration(p.transport, cfg) - PropagateTConfiguration(p.protocol, cfg) - p.cfg = cfg -} - -var ( - _ TConfigurationSetter = (*tHeaderProtocolFactory)(nil) - _ TConfigurationSetter = (*THeaderProtocol)(nil) -) diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/header_transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/header_transport.go deleted file mode 100644 index 5ec04548..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/header_transport.go +++ /dev/null @@ -1,816 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "bufio" - "bytes" - "compress/zlib" - "context" - "encoding/binary" - "errors" - "fmt" - "io" -) - -// Size in bytes for 32-bit ints. -const size32 = 4 - -type headerMeta struct { - MagicFlags uint32 - SequenceID int32 - HeaderLength uint16 -} - -const headerMetaSize = 10 - -type clientType int - -const ( - clientUnknown clientType = iota - clientHeaders - clientFramedBinary - clientUnframedBinary - clientFramedCompact - clientUnframedCompact -) - -// Constants defined in THeader format: -// https://github.com/apache/thrift/blob/master/doc/specs/HeaderFormat.md -const ( - THeaderHeaderMagic uint32 = 0x0fff0000 - THeaderHeaderMask uint32 = 0xffff0000 - THeaderFlagsMask uint32 = 0x0000ffff - THeaderMaxFrameSize uint32 = 0x3fffffff -) - -// THeaderMap is the type of the header map in THeader transport. -type THeaderMap map[string]string - -// THeaderProtocolID is the wrapped protocol id used in THeader. -type THeaderProtocolID int32 - -// Supported THeaderProtocolID values. -const ( - THeaderProtocolBinary THeaderProtocolID = 0x00 - THeaderProtocolCompact THeaderProtocolID = 0x02 - THeaderProtocolDefault = THeaderProtocolBinary -) - -// Declared globally to avoid repetitive allocations, not really used. -var globalMemoryBuffer = NewTMemoryBuffer() - -// Validate checks whether the THeaderProtocolID is a valid/supported one. -func (id THeaderProtocolID) Validate() error { - _, err := id.GetProtocol(globalMemoryBuffer) - return err -} - -// GetProtocol gets the corresponding TProtocol from the wrapped protocol id. -func (id THeaderProtocolID) GetProtocol(trans TTransport) (TProtocol, error) { - switch id { - default: - return nil, NewTApplicationException( - INVALID_PROTOCOL, - fmt.Sprintf("THeader protocol id %d not supported", id), - ) - case THeaderProtocolBinary: - return NewTBinaryProtocolTransport(trans), nil - case THeaderProtocolCompact: - return NewTCompactProtocol(trans), nil - } -} - -// THeaderTransformID defines the numeric id of the transform used. -type THeaderTransformID int32 - -// THeaderTransformID values. 
-// -// Values not defined here are not currently supported, namely HMAC and Snappy. -const ( - TransformNone THeaderTransformID = iota // 0, no special handling - TransformZlib // 1, zlib -) - -var supportedTransformIDs = map[THeaderTransformID]bool{ - TransformNone: true, - TransformZlib: true, -} - -// TransformReader is an io.ReadCloser that handles transforms reading. -type TransformReader struct { - io.Reader - - closers []io.Closer -} - -var _ io.ReadCloser = (*TransformReader)(nil) - -// NewTransformReaderWithCapacity initializes a TransformReader with expected -// closers capacity. -// -// If you don't know the closers capacity beforehand, just using -// -// &TransformReader{Reader: baseReader} -// -// directly would be sufficient. -func NewTransformReaderWithCapacity(baseReader io.Reader, capacity int) *TransformReader { - return &TransformReader{ - Reader: baseReader, - closers: make([]io.Closer, 0, capacity), - } -} - -// Close calls the underlying closers in appropriate order, -// stops at and returns the first error encountered. -func (tr *TransformReader) Close() error { - // Call closers in reversed order - for i := len(tr.closers) - 1; i >= 0; i-- { - if err := tr.closers[i].Close(); err != nil { - return err - } - } - return nil -} - -// AddTransform adds a transform. -func (tr *TransformReader) AddTransform(id THeaderTransformID) error { - switch id { - default: - return NewTApplicationException( - INVALID_TRANSFORM, - fmt.Sprintf("THeaderTransformID %d not supported", id), - ) - case TransformNone: - // no-op - case TransformZlib: - readCloser, err := zlib.NewReader(tr.Reader) - if err != nil { - return err - } - tr.Reader = readCloser - tr.closers = append(tr.closers, readCloser) - } - return nil -} - -// TransformWriter is an io.WriteCloser that handles transforms writing. -type TransformWriter struct { - io.Writer - - closers []io.Closer -} - -var _ io.WriteCloser = (*TransformWriter)(nil) - -// NewTransformWriter creates a new TransformWriter with base writer and transforms. -func NewTransformWriter(baseWriter io.Writer, transforms []THeaderTransformID) (io.WriteCloser, error) { - writer := &TransformWriter{ - Writer: baseWriter, - closers: make([]io.Closer, 0, len(transforms)), - } - for _, id := range transforms { - if err := writer.AddTransform(id); err != nil { - return nil, err - } - } - return writer, nil -} - -// Close calls the underlying closers in appropriate order, -// stops at and returns the first error encountered. -func (tw *TransformWriter) Close() error { - // Call closers in reversed order - for i := len(tw.closers) - 1; i >= 0; i-- { - if err := tw.closers[i].Close(); err != nil { - return err - } - } - return nil -} - -// AddTransform adds a transform. -func (tw *TransformWriter) AddTransform(id THeaderTransformID) error { - switch id { - default: - return NewTApplicationException( - INVALID_TRANSFORM, - fmt.Sprintf("THeaderTransformID %d not supported", id), - ) - case TransformNone: - // no-op - case TransformZlib: - writeCloser := zlib.NewWriter(tw.Writer) - tw.Writer = writeCloser - tw.closers = append(tw.closers, writeCloser) - } - return nil -} - -// THeaderInfoType is the type id of the info headers. -type THeaderInfoType int32 - -// Supported THeaderInfoType values. -const ( - _ THeaderInfoType = iota // Skip 0 - InfoKeyValue // 1 - // Rest of the info types are not supported. -) - -// THeaderTransport is a Transport mode that implements THeader.
-// -// Note that THeaderTransport handles frame and zlib by itself, -// so the underlying transport should be a raw socket transport (TSocket or TSSLSocket), -// instead of rich transports like TZlibTransport or TFramedTransport. -type THeaderTransport struct { - SequenceID int32 - Flags uint32 - - transport TTransport - - // THeaderMap for read and write - readHeaders THeaderMap - writeHeaders THeaderMap - - // Reading related variables. - reader *bufio.Reader - // When frame is detected, we read the frame fully into frameBuffer. - frameBuffer *bytes.Buffer - // When it's non-nil, Read should read from frameReader instead of - // reader, and EOF error indicates end of frame instead of end of all - // transport. - frameReader io.ReadCloser - - // Writing related variables - writeBuffer *bytes.Buffer - writeTransforms []THeaderTransformID - - clientType clientType - protocolID THeaderProtocolID - cfg *TConfiguration - - // buffer is used in the following scenarios to avoid repetitive - // allocations, while 4 is big enough for all those scenarios: - // - // * header padding (max size 4) - // * write the frame size (size 4) - buffer [4]byte -} - -var _ TTransport = (*THeaderTransport)(nil) - -// Deprecated: Use NewTHeaderTransportConf instead. -func NewTHeaderTransport(trans TTransport) *THeaderTransport { - return NewTHeaderTransportConf(trans, &TConfiguration{ - noPropagation: true, - }) -} - -// NewTHeaderTransportConf creates THeaderTransport from the -// underlying transport, with given TConfiguration attached. -// -// If trans is already a *THeaderTransport, it will be returned as is, -// but with TConfiguration overridden by the value passed in. -// -// The protocol ID in TConfiguration is only useful for client transports. -// For servers, -// the protocol ID will be overridden again to the one set by the client, -// to ensure that servers always speak the same dialect as the client. -func NewTHeaderTransportConf(trans TTransport, conf *TConfiguration) *THeaderTransport { - if ht, ok := trans.(*THeaderTransport); ok { - ht.SetTConfiguration(conf) - return ht - } - PropagateTConfiguration(trans, conf) - return &THeaderTransport{ - transport: trans, - reader: bufio.NewReader(trans), - writeHeaders: make(THeaderMap), - protocolID: conf.GetTHeaderProtocolID(), - cfg: conf, - } -} - -// Open calls the underlying transport's Open function. -func (t *THeaderTransport) Open() error { - return t.transport.Open() -} - -// IsOpen calls the underlying transport's IsOpen function. -func (t *THeaderTransport) IsOpen() bool { - return t.transport.IsOpen() -} - -// ReadFrame tries to read the frame header, guess the client type, and handle -// unframed clients. -func (t *THeaderTransport) ReadFrame(ctx context.Context) error { - if !t.needReadFrame() { - // No need to read frame, skipping. - return nil - } - - // Peek and handle the first 32 bits. - // They could either be the length field of a framed message, - // or the first bytes of an unframed message. - var buf []byte - var err error - // This is also usually the first read from a connection, - // so handle retries around socket timeouts.
- _, deadlineSet := ctx.Deadline() - for { - buf, err = t.reader.Peek(size32) - if deadlineSet && isTimeoutError(err) && ctx.Err() == nil { - // This is I/O timeout and we still have time, - // continue trying - continue - } - // For anything else, do not retry - break - } - if err != nil { - return err - } - - frameSize := binary.BigEndian.Uint32(buf) - if frameSize&VERSION_MASK == VERSION_1 { - t.clientType = clientUnframedBinary - return nil - } - if buf[0] == COMPACT_PROTOCOL_ID && buf[1]&COMPACT_VERSION_MASK == COMPACT_VERSION { - t.clientType = clientUnframedCompact - return nil - } - - // At this point it should be a framed message, - // sanity check on frameSize then discard the peeked part. - if frameSize > THeaderMaxFrameSize || frameSize > uint32(t.cfg.GetMaxFrameSize()) { - return NewTProtocolExceptionWithType( - SIZE_LIMIT, - errors.New("frame too large"), - ) - } - t.reader.Discard(size32) - - // Read the frame fully into frameBuffer. - if t.frameBuffer == nil { - t.frameBuffer = getBufFromPool() - } - _, err = io.CopyN(t.frameBuffer, t.reader, int64(frameSize)) - if err != nil { - return err - } - t.frameReader = io.NopCloser(t.frameBuffer) - - // Peek and handle the next 32 bits. - buf = t.frameBuffer.Bytes()[:size32] - version := binary.BigEndian.Uint32(buf) - if version&THeaderHeaderMask == THeaderHeaderMagic { - t.clientType = clientHeaders - return t.parseHeaders(ctx, frameSize) - } - if version&VERSION_MASK == VERSION_1 { - t.clientType = clientFramedBinary - return nil - } - if buf[0] == COMPACT_PROTOCOL_ID && buf[1]&COMPACT_VERSION_MASK == COMPACT_VERSION { - t.clientType = clientFramedCompact - return nil - } - if err := t.endOfFrame(); err != nil { - return err - } - return NewTProtocolExceptionWithType( - NOT_IMPLEMENTED, - errors.New("unsupported client transport type"), - ) -} - -// endOfFrame does end of frame handling. -// -// It closes frameReader, and also resets frame related states. -func (t *THeaderTransport) endOfFrame() error { - defer func() { - returnBufToPool(&t.frameBuffer) - t.frameReader = nil - }() - return t.frameReader.Close() -} - -func (t *THeaderTransport) parseHeaders(ctx context.Context, frameSize uint32) error { - if t.clientType != clientHeaders { - return nil - } - - var err error - var meta headerMeta - if err = binary.Read(t.frameBuffer, binary.BigEndian, &meta); err != nil { - return err - } - frameSize -= headerMetaSize - t.Flags = meta.MagicFlags & THeaderFlagsMask - t.SequenceID = meta.SequenceID - headerLength := int64(meta.HeaderLength) * 4 - if int64(frameSize) < headerLength { - return NewTProtocolExceptionWithType( - SIZE_LIMIT, - errors.New("header size is larger than the whole frame"), - ) - } - headerBuf := NewTMemoryBuffer() - _, err = io.CopyN(headerBuf, t.frameBuffer, headerLength) - if err != nil { - return err - } - hp := NewTCompactProtocol(headerBuf) - hp.SetTConfiguration(t.cfg) - - // At this point the header is already read into headerBuf, - // and t.frameBuffer starts from the actual payload. 
- protoID, err := hp.readVarint32() - if err != nil { - return err - } - t.protocolID = THeaderProtocolID(protoID) - - var transformCount int32 - transformCount, err = hp.readVarint32() - if err != nil { - return err - } - if transformCount > 0 { - reader := NewTransformReaderWithCapacity( - t.frameBuffer, - int(transformCount), - ) - t.frameReader = reader - transformIDs := make([]THeaderTransformID, transformCount) - for i := 0; i < int(transformCount); i++ { - id, err := hp.readVarint32() - if err != nil { - return err - } - transformIDs[i] = THeaderTransformID(id) - } - // The transform IDs on the wire were added based on the order of - // writing, so on the reading side we need to reverse the order. - for i := transformCount - 1; i >= 0; i-- { - id := transformIDs[i] - if err := reader.AddTransform(id); err != nil { - return err - } - } - } - - // The info part does not use the transforms yet, so it's - // important to continue using headerBuf. - headers := make(THeaderMap) - for { - infoType, err := hp.readVarint32() - if errors.Is(err, io.EOF) { - break - } - if err != nil { - return err - } - if THeaderInfoType(infoType) == InfoKeyValue { - count, err := hp.readVarint32() - if err != nil { - return err - } - for i := 0; i < int(count); i++ { - key, err := hp.ReadString(ctx) - if err != nil { - return err - } - value, err := hp.ReadString(ctx) - if err != nil { - return err - } - headers[key] = value - } - } else { - // Skip reading the info section on the first - // unsupported info type. - break - } - } - t.readHeaders = headers - - return nil -} - -func (t *THeaderTransport) needReadFrame() bool { - if t.clientType == clientUnknown { - // This is a new connection that's never read before. - return true - } - if t.isFramed() && t.frameReader == nil { - // We just finished the last frame. - return true - } - return false -} - -func (t *THeaderTransport) Read(p []byte) (read int, err error) { - // Here using context.Background instead of a context passed in is safe. - // First is that there's no way to pass context into this function. - // Then, in 99% of the cases when this Read is called, the frame is - // already read into frameReader. ReadFrame here is more of preventing - // bugs that didn't call ReadFrame before calling Read. - err = t.ReadFrame(context.Background()) - if err != nil { - return - } - if t.frameReader != nil { - read, err = t.frameReader.Read(p) - if err == nil && t.frameBuffer.Len() <= 0 { - // the last Read finished the frame, do endOfFrame - // handling here. - err = t.endOfFrame() - } else if err == io.EOF { - err = t.endOfFrame() - if err != nil { - return - } - if read == 0 { - // Try to read the next frame when we hit EOF - // (end of frame) immediately. - // When we got here, it means the last read - // finished the previous frame, but didn't - // do endOfFrame handling yet. - // We have to read the next frame here, - // as otherwise we would return 0 and nil, - // which is a case not handled well by most - // protocol implementations. - return t.Read(p) - } - } - return - } - return t.reader.Read(p) -} - -// Write writes data to the write buffer. -// -// You need to call Flush to actually write them to the transport. -func (t *THeaderTransport) Write(p []byte) (int, error) { - if t.writeBuffer == nil { - t.writeBuffer = getBufFromPool() - } - return t.writeBuffer.Write(p) -} - -// Flush writes the appropriate header and the write buffer to the underlying transport.
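// For reference, the frame this produces for THeader clients looks roughly
// like the following (a sketch of the layout implemented below, per the
// THeader spec linked above):
//
//	| 4B frame size | 4B magic+flags | 4B sequence id | 2B header words |
//	| header block: protocol id, transforms, info key/values (varints)  |
//	| zero padding to a 4-byte boundary | transformed payload           |
//
// where "header words" is the padded header block length in 4-byte units.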
-func (t *THeaderTransport) Flush(ctx context.Context) error { - if t.writeBuffer == nil || t.writeBuffer.Len() == 0 { - return nil - } - - defer returnBufToPool(&t.writeBuffer) - - switch t.clientType { - default: - fallthrough - case clientUnknown: - t.clientType = clientHeaders - fallthrough - case clientHeaders: - headers := NewTMemoryBuffer() - hp := NewTCompactProtocol(headers) - hp.SetTConfiguration(t.cfg) - if _, err := hp.writeVarint32(int32(t.protocolID)); err != nil { - return NewTTransportExceptionFromError(err) - } - if _, err := hp.writeVarint32(int32(len(t.writeTransforms))); err != nil { - return NewTTransportExceptionFromError(err) - } - for _, transform := range t.writeTransforms { - if _, err := hp.writeVarint32(int32(transform)); err != nil { - return NewTTransportExceptionFromError(err) - } - } - if len(t.writeHeaders) > 0 { - if _, err := hp.writeVarint32(int32(InfoKeyValue)); err != nil { - return NewTTransportExceptionFromError(err) - } - if _, err := hp.writeVarint32(int32(len(t.writeHeaders))); err != nil { - return NewTTransportExceptionFromError(err) - } - for key, value := range t.writeHeaders { - if err := hp.WriteString(ctx, key); err != nil { - return NewTTransportExceptionFromError(err) - } - if err := hp.WriteString(ctx, value); err != nil { - return NewTTransportExceptionFromError(err) - } - } - } - padding := 4 - headers.Len()%4 - if padding < 4 { - buf := t.buffer[:padding] - for i := range buf { - buf[i] = 0 - } - if _, err := headers.Write(buf); err != nil { - return NewTTransportExceptionFromError(err) - } - } - - payload := getBufFromPool() - defer returnBufToPool(&payload) - meta := headerMeta{ - MagicFlags: THeaderHeaderMagic + t.Flags&THeaderFlagsMask, - SequenceID: t.SequenceID, - HeaderLength: uint16(headers.Len() / 4), - } - if err := binary.Write(payload, binary.BigEndian, meta); err != nil { - return NewTTransportExceptionFromError(err) - } - if _, err := io.Copy(payload, headers); err != nil { - return NewTTransportExceptionFromError(err) - } - - writer, err := NewTransformWriter(payload, t.writeTransforms) - if err != nil { - return NewTTransportExceptionFromError(err) - } - if _, err := io.Copy(writer, t.writeBuffer); err != nil { - return NewTTransportExceptionFromError(err) - } - if err := writer.Close(); err != nil { - return NewTTransportExceptionFromError(err) - } - - // First write frame length - buf := t.buffer[:size32] - binary.BigEndian.PutUint32(buf, uint32(payload.Len())) - if _, err := t.transport.Write(buf); err != nil { - return NewTTransportExceptionFromError(err) - } - // Then write the payload - if _, err := io.Copy(t.transport, payload); err != nil { - return NewTTransportExceptionFromError(err) - } - - case clientFramedBinary, clientFramedCompact: - buf := t.buffer[:size32] - binary.BigEndian.PutUint32(buf, uint32(t.writeBuffer.Len())) - if _, err := t.transport.Write(buf); err != nil { - return NewTTransportExceptionFromError(err) - } - fallthrough - case clientUnframedBinary, clientUnframedCompact: - if _, err := io.Copy(t.transport, t.writeBuffer); err != nil { - return NewTTransportExceptionFromError(err) - } - } - - select { - default: - case <-ctx.Done(): - return NewTTransportExceptionFromError(ctx.Err()) - } - - return t.transport.Flush(ctx) -} - -// Close closes the transport, along with its underlying transport. 
-func (t *THeaderTransport) Close() error { - if err := t.Flush(context.Background()); err != nil { - return err - } - return t.transport.Close() -} - -// RemainingBytes calls underlying transport's RemainingBytes. -// -// Even in framed cases, because of all the possible compression transforms -// involved, the remaining frame size is likely to be different from the actual -// remaining readable bytes, so we don't bother to keep tracking the remaining -// frame size by ourselves and just use the underlying transport's -// RemainingBytes directly. -func (t *THeaderTransport) RemainingBytes() uint64 { - return t.transport.RemainingBytes() -} - -// GetReadHeaders returns the THeaderMap read from transport. -func (t *THeaderTransport) GetReadHeaders() THeaderMap { - return t.readHeaders -} - -// SetWriteHeader sets a header for write. -func (t *THeaderTransport) SetWriteHeader(key, value string) { - t.writeHeaders[key] = value -} - -// ClearWriteHeaders clears all write headers previously set. -func (t *THeaderTransport) ClearWriteHeaders() { - t.writeHeaders = make(THeaderMap) -} - -// AddTransform add a transform for writing. -func (t *THeaderTransport) AddTransform(transform THeaderTransformID) error { - if !supportedTransformIDs[transform] { - return NewTProtocolExceptionWithType( - NOT_IMPLEMENTED, - fmt.Errorf("THeaderTransformID %d not supported", transform), - ) - } - t.writeTransforms = append(t.writeTransforms, transform) - return nil -} - -// Protocol returns the wrapped protocol id used in this THeaderTransport. -func (t *THeaderTransport) Protocol() THeaderProtocolID { - switch t.clientType { - default: - return t.protocolID - case clientFramedBinary, clientUnframedBinary: - return THeaderProtocolBinary - case clientFramedCompact, clientUnframedCompact: - return THeaderProtocolCompact - } -} - -func (t *THeaderTransport) isFramed() bool { - switch t.clientType { - default: - return false - case clientHeaders, clientFramedBinary, clientFramedCompact: - return true - } -} - -// SetTConfiguration implements TConfigurationSetter. -func (t *THeaderTransport) SetTConfiguration(cfg *TConfiguration) { - PropagateTConfiguration(t.transport, cfg) - t.cfg = cfg -} - -// THeaderTransportFactory is a TTransportFactory implementation to create -// THeaderTransport. -// -// It also implements TConfigurationSetter. -type THeaderTransportFactory struct { - // The underlying factory, could be nil. - Factory TTransportFactory - - cfg *TConfiguration -} - -// Deprecated: Use NewTHeaderTransportFactoryConf instead. -func NewTHeaderTransportFactory(factory TTransportFactory) TTransportFactory { - return NewTHeaderTransportFactoryConf(factory, &TConfiguration{ - noPropagation: true, - }) -} - -// NewTHeaderTransportFactoryConf creates a new *THeaderTransportFactory with -// the given *TConfiguration. -func NewTHeaderTransportFactoryConf(factory TTransportFactory, conf *TConfiguration) TTransportFactory { - return &THeaderTransportFactory{ - Factory: factory, - - cfg: conf, - } -} - -// GetTransport implements TTransportFactory. -func (f *THeaderTransportFactory) GetTransport(trans TTransport) (TTransport, error) { - if f.Factory != nil { - t, err := f.Factory.GetTransport(trans) - if err != nil { - return nil, err - } - return NewTHeaderTransportConf(t, f.cfg), nil - } - return NewTHeaderTransportConf(trans, f.cfg), nil -} - -// SetTConfiguration implements TConfigurationSetter. 
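For orientation, a minimal sketch of driving the write side of the THeaderTransport above from application code. It assumes a connected thrift.TTransport supplied by the caller and that TransformZlib is among the supported transform IDs; neither is defined in this hunk.

package main

import (
	"context"
	"fmt"

	"github.com/apache/thrift/lib/go/thrift"
)

// headerRoundTrip sends one payload with a custom header and a zlib
// transform, then inspects the headers decoded from the peer's reply.
func headerRoundTrip(base thrift.TTransport) error {
	ctx := context.Background()
	trans := thrift.NewTHeaderTransportConf(base, &thrift.TConfiguration{})

	trans.SetWriteHeader("request-id", "42") // lands in the INFO key/value section
	// TransformZlib is assumed to be one of the supportedTransformIDs.
	if err := trans.AddTransform(thrift.TransformZlib); err != nil {
		return err
	}
	if _, err := trans.Write([]byte("payload")); err != nil {
		return err
	}
	// Flush builds the frame: compact-encoded protocol ID, transforms and
	// key/value headers padded to a 4-byte boundary, all preceded by the
	// 4-byte frame length.
	if err := trans.Flush(ctx); err != nil {
		return err
	}

	// Once a response frame has been read, the decoded headers are here:
	fmt.Println(trans.GetReadHeaders())
	return nil
}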
-func (f *THeaderTransportFactory) SetTConfiguration(cfg *TConfiguration) { - PropagateTConfiguration(f.Factory, f.cfg) - f.cfg = cfg -} - -var ( - _ TConfigurationSetter = (*THeaderTransportFactory)(nil) - _ TConfigurationSetter = (*THeaderTransport)(nil) -) diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/http_client.go b/vendor/github.com/apache/thrift/lib/go/thrift/http_client.go deleted file mode 100644 index ce62c96a..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/http_client.go +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "bytes" - "context" - "errors" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" -) - -// Default to using the shared http client. Library users are -// free to change this global client or specify one through -// THttpClientOptions. -var DefaultHttpClient *http.Client = http.DefaultClient - -type THttpClient struct { - client *http.Client - response *http.Response - url *url.URL - requestBuffer *bytes.Buffer - header http.Header -} - -type THttpClientTransportFactory struct { - options THttpClientOptions - url string -} - -func (p *THttpClientTransportFactory) GetTransport(trans TTransport) (TTransport, error) { - if trans != nil { - t, ok := trans.(*THttpClient) - if ok && t.url != nil { - return NewTHttpClientWithOptions(t.url.String(), p.options) - } - } - return NewTHttpClientWithOptions(p.url, p.options) -} - -type THttpClientOptions struct { - // If nil, DefaultHttpClient is used - Client *http.Client -} - -func NewTHttpClientTransportFactory(url string) *THttpClientTransportFactory { - return NewTHttpClientTransportFactoryWithOptions(url, THttpClientOptions{}) -} - -func NewTHttpClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory { - return &THttpClientTransportFactory{url: url, options: options} -} - -func NewTHttpClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) { - parsedURL, err := url.Parse(urlstr) - if err != nil { - return nil, err - } - buf := make([]byte, 0, 1024) - client := options.Client - if client == nil { - client = DefaultHttpClient - } - httpHeader := map[string][]string{"Content-Type": {"application/x-thrift"}} - return &THttpClient{client: client, url: parsedURL, requestBuffer: bytes.NewBuffer(buf), header: httpHeader}, nil -} - -func NewTHttpClient(urlstr string) (TTransport, error) { - return NewTHttpClientWithOptions(urlstr, THttpClientOptions{}) -} - -// Set the HTTP Header for this specific Thrift Transport -// It is important that you first assert the TTransport as a THttpClient type -// like so: -// -// httpTrans := trans.(THttpClient) -// httpTrans.SetHeader("User-Agent","Thrift Client 1.0") 
-func (p *THttpClient) SetHeader(key string, value string) { - p.header.Add(key, value) -} - -// Get the HTTP Header represented by the supplied Header Key for this specific Thrift Transport -// It is important that you first assert the TTransport as a THttpClient type -// like so: -// -// httpTrans := trans.(THttpClient) -// hdrValue := httpTrans.GetHeader("User-Agent") -func (p *THttpClient) GetHeader(key string) string { - return p.header.Get(key) -} - -// Deletes the HTTP Header given a Header Key for this specific Thrift Transport -// It is important that you first assert the TTransport as a THttpClient type -// like so: -// -// httpTrans := trans.(THttpClient) -// httpTrans.DelHeader("User-Agent") -func (p *THttpClient) DelHeader(key string) { - p.header.Del(key) -} - -func (p *THttpClient) Open() error { - // do nothing - return nil -} - -func (p *THttpClient) IsOpen() bool { - return p.response != nil || p.requestBuffer != nil -} - -func (p *THttpClient) closeResponse() error { - var err error - if p.response != nil && p.response.Body != nil { - // The docs specify that if keepalive is enabled and the response body is not - // read to completion the connection will never be returned to the pool and - // reused. Errors are being ignored here because if the connection is invalid - // and this fails for some reason, the Close() method will do any remaining - // cleanup. - io.Copy(ioutil.Discard, p.response.Body) - - err = p.response.Body.Close() - } - - p.response = nil - return err -} - -func (p *THttpClient) Close() error { - if p.requestBuffer != nil { - p.requestBuffer.Reset() - p.requestBuffer = nil - } - return p.closeResponse() -} - -func (p *THttpClient) Read(buf []byte) (int, error) { - if p.response == nil { - return 0, NewTTransportException(NOT_OPEN, "Response buffer is empty, no request.") - } - n, err := p.response.Body.Read(buf) - if n > 0 && (err == nil || errors.Is(err, io.EOF)) { - return n, nil - } - return n, NewTTransportExceptionFromError(err) -} - -func (p *THttpClient) ReadByte() (c byte, err error) { - if p.response == nil { - return 0, NewTTransportException(NOT_OPEN, "Response buffer is empty, no request.") - } - return readByte(p.response.Body) -} - -func (p *THttpClient) Write(buf []byte) (int, error) { - if p.requestBuffer == nil { - return 0, NewTTransportException(NOT_OPEN, "Request buffer is nil, connection may have been closed.") - } - return p.requestBuffer.Write(buf) -} - -func (p *THttpClient) WriteByte(c byte) error { - if p.requestBuffer == nil { - return NewTTransportException(NOT_OPEN, "Request buffer is nil, connection may have been closed.") - } - return p.requestBuffer.WriteByte(c) -} - -func (p *THttpClient) WriteString(s string) (n int, err error) { - if p.requestBuffer == nil { - return 0, NewTTransportException(NOT_OPEN, "Request buffer is nil, connection may have been closed.") - } - return p.requestBuffer.WriteString(s) -} - -func (p *THttpClient) Flush(ctx context.Context) error { - // Close any previous response body to avoid leaking connections. - p.closeResponse() - - // Give up the ownership of the current request buffer to http request, - // and create a new buffer for the next request. 
- buf := p.requestBuffer - p.requestBuffer = new(bytes.Buffer) - req, err := http.NewRequest("POST", p.url.String(), buf) - if err != nil { - return NewTTransportExceptionFromError(err) - } - req.Header = p.header - if ctx != nil { - req = req.WithContext(ctx) - } - response, err := p.client.Do(req) - if err != nil { - return NewTTransportExceptionFromError(err) - } - if response.StatusCode != http.StatusOK { - // Close the response to avoid leaking file descriptors. closeResponse does - // more than just call Close(), so temporarily assign it and reuse the logic. - p.response = response - p.closeResponse() - - // TODO(pomack) log bad response - return NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, "HTTP Response code: "+strconv.Itoa(response.StatusCode)) - } - p.response = response - return nil -} - -func (p *THttpClient) RemainingBytes() (num_bytes uint64) { - len := p.response.ContentLength - if len >= 0 { - return uint64(len) - } - - const maxSize = ^uint64(0) - return maxSize // the truth is, we just don't know unless framed is used -} - -// Deprecated: Use NewTHttpClientTransportFactory instead. -func NewTHttpPostClientTransportFactory(url string) *THttpClientTransportFactory { - return NewTHttpClientTransportFactoryWithOptions(url, THttpClientOptions{}) -} - -// Deprecated: Use NewTHttpClientTransportFactoryWithOptions instead. -func NewTHttpPostClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory { - return NewTHttpClientTransportFactoryWithOptions(url, options) -} - -// Deprecated: Use NewTHttpClientWithOptions instead. -func NewTHttpPostClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) { - return NewTHttpClientWithOptions(urlstr, options) -} - -// Deprecated: Use NewTHttpClient instead. -func NewTHttpPostClient(urlstr string) (TTransport, error) { - return NewTHttpClientWithOptions(urlstr, THttpClientOptions{}) -} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/http_transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/http_transport.go deleted file mode 100644 index bc692276..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/http_transport.go +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "compress/gzip" - "io" - "net/http" - "strings" - "sync" -) - -// NewThriftHandlerFunc is a function that create a ready to use Apache Thrift Handler function -func NewThriftHandlerFunc(processor TProcessor, - inPfactory, outPfactory TProtocolFactory) func(w http.ResponseWriter, r *http.Request) { - - return gz(func(w http.ResponseWriter, r *http.Request) { - w.Header().Add("Content-Type", "application/x-thrift") - - transport := NewStreamTransport(r.Body, w) - processor.Process(r.Context(), inPfactory.GetProtocol(transport), outPfactory.GetProtocol(transport)) - }) -} - -// gz transparently compresses the HTTP response if the client supports it. -func gz(handler http.HandlerFunc) http.HandlerFunc { - sp := &sync.Pool{ - New: func() interface{} { - return gzip.NewWriter(nil) - }, - } - - return func(w http.ResponseWriter, r *http.Request) { - if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") { - handler(w, r) - return - } - w.Header().Set("Content-Encoding", "gzip") - gz := sp.Get().(*gzip.Writer) - gz.Reset(w) - defer func() { - _ = gz.Close() - sp.Put(gz) - }() - gzw := gzipResponseWriter{Writer: gz, ResponseWriter: w} - handler(gzw, r) - } -} - -type gzipResponseWriter struct { - io.Writer - http.ResponseWriter -} - -func (w gzipResponseWriter) Write(b []byte) (int, error) { - return w.Writer.Write(b) -} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/iostream_transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/iostream_transport.go deleted file mode 100644 index 1c477990..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/iostream_transport.go +++ /dev/null @@ -1,222 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
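A hedged sketch of mounting the HTTP handler above on a standard net/http server; the processor is assumed to come from generated code, and the TJSONProtocolFactory removed later in this patch stands in for any TProtocolFactory. The gz wrapper compresses responses transparently for clients that send Accept-Encoding: gzip; the matching client side is the THttpClient removed above, which POSTs each flushed request buffer with Content-Type application/x-thrift.

package main

import (
	"net/http"

	"github.com/apache/thrift/lib/go/thrift"
)

// serveThrift exposes a Thrift processor at /thrift over HTTP.
func serveThrift(processor thrift.TProcessor) error {
	pf := thrift.NewTJSONProtocolFactory()
	http.HandleFunc("/thrift", thrift.NewThriftHandlerFunc(processor, pf, pf))
	return http.ListenAndServe(":8080", nil)
}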
- */ - -package thrift - -import ( - "bufio" - "context" - "io" -) - -// StreamTransport is a Transport made of an io.Reader and/or an io.Writer -type StreamTransport struct { - io.Reader - io.Writer - isReadWriter bool - closed bool -} - -type StreamTransportFactory struct { - Reader io.Reader - Writer io.Writer - isReadWriter bool -} - -func (p *StreamTransportFactory) GetTransport(trans TTransport) (TTransport, error) { - if trans != nil { - t, ok := trans.(*StreamTransport) - if ok { - if t.isReadWriter { - return NewStreamTransportRW(t.Reader.(io.ReadWriter)), nil - } - if t.Reader != nil && t.Writer != nil { - return NewStreamTransport(t.Reader, t.Writer), nil - } - if t.Reader != nil && t.Writer == nil { - return NewStreamTransportR(t.Reader), nil - } - if t.Reader == nil && t.Writer != nil { - return NewStreamTransportW(t.Writer), nil - } - return &StreamTransport{}, nil - } - } - if p.isReadWriter { - return NewStreamTransportRW(p.Reader.(io.ReadWriter)), nil - } - if p.Reader != nil && p.Writer != nil { - return NewStreamTransport(p.Reader, p.Writer), nil - } - if p.Reader != nil && p.Writer == nil { - return NewStreamTransportR(p.Reader), nil - } - if p.Reader == nil && p.Writer != nil { - return NewStreamTransportW(p.Writer), nil - } - return &StreamTransport{}, nil -} - -func NewStreamTransportFactory(reader io.Reader, writer io.Writer, isReadWriter bool) *StreamTransportFactory { - return &StreamTransportFactory{Reader: reader, Writer: writer, isReadWriter: isReadWriter} -} - -func NewStreamTransport(r io.Reader, w io.Writer) *StreamTransport { - return &StreamTransport{Reader: bufio.NewReader(r), Writer: bufio.NewWriter(w)} -} - -func NewStreamTransportR(r io.Reader) *StreamTransport { - return &StreamTransport{Reader: bufio.NewReader(r)} -} - -func NewStreamTransportW(w io.Writer) *StreamTransport { - return &StreamTransport{Writer: bufio.NewWriter(w)} -} - -func NewStreamTransportRW(rw io.ReadWriter) *StreamTransport { - bufrw := bufio.NewReadWriter(bufio.NewReader(rw), bufio.NewWriter(rw)) - return &StreamTransport{Reader: bufrw, Writer: bufrw, isReadWriter: true} -} - -func (p *StreamTransport) IsOpen() bool { - return !p.closed -} - -// implicitly opened on creation, can't be reopened once closed -func (p *StreamTransport) Open() error { - if !p.closed { - return NewTTransportException(ALREADY_OPEN, "StreamTransport already open.") - } else { - return NewTTransportException(NOT_OPEN, "cannot reopen StreamTransport.") - } -} - -// Closes both the input and output streams. -func (p *StreamTransport) Close() error { - if p.closed { - return NewTTransportException(NOT_OPEN, "StreamTransport already closed.") - } - p.closed = true - closedReader := false - if p.Reader != nil { - c, ok := p.Reader.(io.Closer) - if ok { - e := c.Close() - closedReader = true - if e != nil { - return e - } - } - p.Reader = nil - } - if p.Writer != nil && (!closedReader || !p.isReadWriter) { - c, ok := p.Writer.(io.Closer) - if ok { - e := c.Close() - if e != nil { - return e - } - } - p.Writer = nil - } - return nil -} - -// Flushes the underlying output stream if not null. 
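As a usage note, one way to wire the StreamTransport above to a process's standard streams, e.g. for Thrift over pipes; the processor and protocol factory are assumed to come from elsewhere. Because NewStreamTransport wraps both sides in bufio, responses only reach stdout when the protocol flushes the transport.

package main

import (
	"context"
	"os"

	"github.com/apache/thrift/lib/go/thrift"
)

// serveStdio handles requests from stdin and writes responses to stdout
// until the client stops (EOF surfaces as an error from Process).
func serveStdio(processor thrift.TProcessor, pf thrift.TProtocolFactory) error {
	trans := thrift.NewStreamTransport(os.Stdin, os.Stdout)
	defer trans.Close()
	proto := pf.GetProtocol(trans)
	for {
		ok, err := processor.Process(context.Background(), proto, proto)
		if err != nil {
			return err
		}
		if !ok {
			return nil
		}
	}
}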
-func (p *StreamTransport) Flush(ctx context.Context) error { - if p.Writer == nil { - return NewTTransportException(NOT_OPEN, "Cannot flush null outputStream") - } - f, ok := p.Writer.(Flusher) - if ok { - err := f.Flush() - if err != nil { - return NewTTransportExceptionFromError(err) - } - } - return nil -} - -func (p *StreamTransport) Read(c []byte) (n int, err error) { - n, err = p.Reader.Read(c) - if err != nil { - err = NewTTransportExceptionFromError(err) - } - return -} - -func (p *StreamTransport) ReadByte() (c byte, err error) { - f, ok := p.Reader.(io.ByteReader) - if ok { - c, err = f.ReadByte() - } else { - c, err = readByte(p.Reader) - } - if err != nil { - err = NewTTransportExceptionFromError(err) - } - return -} - -func (p *StreamTransport) Write(c []byte) (n int, err error) { - n, err = p.Writer.Write(c) - if err != nil { - err = NewTTransportExceptionFromError(err) - } - return -} - -func (p *StreamTransport) WriteByte(c byte) (err error) { - f, ok := p.Writer.(io.ByteWriter) - if ok { - err = f.WriteByte(c) - } else { - err = writeByte(p.Writer, c) - } - if err != nil { - err = NewTTransportExceptionFromError(err) - } - return -} - -func (p *StreamTransport) WriteString(s string) (n int, err error) { - f, ok := p.Writer.(stringWriter) - if ok { - n, err = f.WriteString(s) - } else { - n, err = p.Writer.Write([]byte(s)) - } - if err != nil { - err = NewTTransportExceptionFromError(err) - } - return -} - -func (p *StreamTransport) RemainingBytes() (num_bytes uint64) { - const maxSize = ^uint64(0) - return maxSize // the truth is, we just don't know unless framed is used -} - -// SetTConfiguration implements TConfigurationSetter for propagation. -func (p *StreamTransport) SetTConfiguration(conf *TConfiguration) { - PropagateTConfiguration(p.Reader, conf) - PropagateTConfiguration(p.Writer, conf) -} - -var _ TConfigurationSetter = (*StreamTransport)(nil) diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/json_protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/json_protocol.go deleted file mode 100644 index d248ecfe..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/json_protocol.go +++ /dev/null @@ -1,564 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "context" - "encoding/base64" - "fmt" -) - -const ( - THRIFT_JSON_PROTOCOL_VERSION = 1 -) - -// for references to _ParseContext see tsimplejson_protocol.go - -// JSON protocol implementation for thrift. 
-// Utilizes Simple JSON protocol -// -type TJSONProtocol struct { - *TSimpleJSONProtocol -} - -// Constructor -func NewTJSONProtocol(t TTransport) *TJSONProtocol { - v := &TJSONProtocol{TSimpleJSONProtocol: NewTSimpleJSONProtocol(t)} - v.parseContextStack.push(_CONTEXT_IN_TOPLEVEL) - v.dumpContext.push(_CONTEXT_IN_TOPLEVEL) - return v -} - -// Factory -type TJSONProtocolFactory struct{} - -func (p *TJSONProtocolFactory) GetProtocol(trans TTransport) TProtocol { - return NewTJSONProtocol(trans) -} - -func NewTJSONProtocolFactory() *TJSONProtocolFactory { - return &TJSONProtocolFactory{} -} - -func (p *TJSONProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error { - p.resetContextStack() // THRIFT-3735 - if e := p.OutputListBegin(); e != nil { - return e - } - if e := p.WriteI32(ctx, THRIFT_JSON_PROTOCOL_VERSION); e != nil { - return e - } - if e := p.WriteString(ctx, name); e != nil { - return e - } - if e := p.WriteByte(ctx, int8(typeId)); e != nil { - return e - } - if e := p.WriteI32(ctx, seqId); e != nil { - return e - } - return nil -} - -func (p *TJSONProtocol) WriteMessageEnd(ctx context.Context) error { - return p.OutputListEnd() -} - -func (p *TJSONProtocol) WriteStructBegin(ctx context.Context, name string) error { - if e := p.OutputObjectBegin(); e != nil { - return e - } - return nil -} - -func (p *TJSONProtocol) WriteStructEnd(ctx context.Context) error { - return p.OutputObjectEnd() -} - -func (p *TJSONProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error { - if e := p.WriteI16(ctx, id); e != nil { - return e - } - if e := p.OutputObjectBegin(); e != nil { - return e - } - s, e1 := p.TypeIdToString(typeId) - if e1 != nil { - return e1 - } - if e := p.WriteString(ctx, s); e != nil { - return e - } - return nil -} - -func (p *TJSONProtocol) WriteFieldEnd(ctx context.Context) error { - return p.OutputObjectEnd() -} - -func (p *TJSONProtocol) WriteFieldStop(ctx context.Context) error { return nil } - -func (p *TJSONProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error { - if e := p.OutputListBegin(); e != nil { - return e - } - s, e1 := p.TypeIdToString(keyType) - if e1 != nil { - return e1 - } - if e := p.WriteString(ctx, s); e != nil { - return e - } - s, e1 = p.TypeIdToString(valueType) - if e1 != nil { - return e1 - } - if e := p.WriteString(ctx, s); e != nil { - return e - } - if e := p.WriteI64(ctx, int64(size)); e != nil { - return e - } - return p.OutputObjectBegin() -} - -func (p *TJSONProtocol) WriteMapEnd(ctx context.Context) error { - if e := p.OutputObjectEnd(); e != nil { - return e - } - return p.OutputListEnd() -} - -func (p *TJSONProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error { - return p.OutputElemListBegin(elemType, size) -} - -func (p *TJSONProtocol) WriteListEnd(ctx context.Context) error { - return p.OutputListEnd() -} - -func (p *TJSONProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { - return p.OutputElemListBegin(elemType, size) -} - -func (p *TJSONProtocol) WriteSetEnd(ctx context.Context) error { - return p.OutputListEnd() -} - -func (p *TJSONProtocol) WriteBool(ctx context.Context, b bool) error { - if b { - return p.WriteI32(ctx, 1) - } - return p.WriteI32(ctx, 0) -} - -func (p *TJSONProtocol) WriteByte(ctx context.Context, b int8) error { - return p.WriteI32(ctx, int32(b)) -} - -func (p *TJSONProtocol) WriteI16(ctx context.Context, v int16) error { - return p.WriteI32(ctx, 
int32(v)) -} - -func (p *TJSONProtocol) WriteI32(ctx context.Context, v int32) error { - return p.OutputI64(int64(v)) -} - -func (p *TJSONProtocol) WriteI64(ctx context.Context, v int64) error { - return p.OutputI64(int64(v)) -} - -func (p *TJSONProtocol) WriteDouble(ctx context.Context, v float64) error { - return p.OutputF64(v) -} - -func (p *TJSONProtocol) WriteString(ctx context.Context, v string) error { - return p.OutputString(v) -} - -func (p *TJSONProtocol) WriteBinary(ctx context.Context, v []byte) error { - // JSON library only takes in a string, - // not an arbitrary byte array, to ensure bytes are transmitted - // efficiently we must convert this into a valid JSON string - // therefore we use base64 encoding to avoid excessive escaping/quoting - if e := p.OutputPreValue(); e != nil { - return e - } - if _, e := p.write(JSON_QUOTE_BYTES); e != nil { - return NewTProtocolException(e) - } - writer := base64.NewEncoder(base64.StdEncoding, p.writer) - if _, e := writer.Write(v); e != nil { - p.writer.Reset(p.trans) // THRIFT-3735 - return NewTProtocolException(e) - } - if e := writer.Close(); e != nil { - return NewTProtocolException(e) - } - if _, e := p.write(JSON_QUOTE_BYTES); e != nil { - return NewTProtocolException(e) - } - return p.OutputPostValue() -} - -// Reading methods. -func (p *TJSONProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) { - p.resetContextStack() // THRIFT-3735 - if isNull, err := p.ParseListBegin(); isNull || err != nil { - return name, typeId, seqId, err - } - version, err := p.ReadI32(ctx) - if err != nil { - return name, typeId, seqId, err - } - if version != THRIFT_JSON_PROTOCOL_VERSION { - e := fmt.Errorf("Unknown Protocol version %d, expected version %d", version, THRIFT_JSON_PROTOCOL_VERSION) - return name, typeId, seqId, NewTProtocolExceptionWithType(INVALID_DATA, e) - - } - if name, err = p.ReadString(ctx); err != nil { - return name, typeId, seqId, err - } - bTypeId, err := p.ReadByte(ctx) - typeId = TMessageType(bTypeId) - if err != nil { - return name, typeId, seqId, err - } - if seqId, err = p.ReadI32(ctx); err != nil { - return name, typeId, seqId, err - } - return name, typeId, seqId, nil -} - -func (p *TJSONProtocol) ReadMessageEnd(ctx context.Context) error { - err := p.ParseListEnd() - return err -} - -func (p *TJSONProtocol) ReadStructBegin(ctx context.Context) (name string, err error) { - _, err = p.ParseObjectStart() - return "", err -} - -func (p *TJSONProtocol) ReadStructEnd(ctx context.Context) error { - return p.ParseObjectEnd() -} - -func (p *TJSONProtocol) ReadFieldBegin(ctx context.Context) (string, TType, int16, error) { - b, _ := p.reader.Peek(1) - if len(b) < 1 || b[0] == JSON_RBRACE[0] || b[0] == JSON_RBRACKET[0] { - return "", STOP, -1, nil - } - fieldId, err := p.ReadI16(ctx) - if err != nil { - return "", STOP, fieldId, err - } - if _, err = p.ParseObjectStart(); err != nil { - return "", STOP, fieldId, err - } - sType, err := p.ReadString(ctx) - if err != nil { - return "", STOP, fieldId, err - } - fType, err := p.StringToTypeId(sType) - return "", fType, fieldId, err -} - -func (p *TJSONProtocol) ReadFieldEnd(ctx context.Context) error { - return p.ParseObjectEnd() -} - -func (p *TJSONProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, e error) { - if isNull, e := p.ParseListBegin(); isNull || e != nil { - return VOID, VOID, 0, e - } - - // read keyType - sKeyType, e := p.ReadString(ctx) - if e != nil { - return keyType, 
valueType, size, e - } - keyType, e = p.StringToTypeId(sKeyType) - if e != nil { - return keyType, valueType, size, e - } - - // read valueType - sValueType, e := p.ReadString(ctx) - if e != nil { - return keyType, valueType, size, e - } - valueType, e = p.StringToTypeId(sValueType) - if e != nil { - return keyType, valueType, size, e - } - - // read size - iSize, err := p.ReadI64(ctx) - if err != nil { - return keyType, valueType, size, err - } - err = checkSizeForProtocol(int32(iSize), p.cfg) - if err != nil { - return keyType, valueType, 0, err - } - size = int(iSize) - - _, e = p.ParseObjectStart() - return keyType, valueType, size, e -} - -func (p *TJSONProtocol) ReadMapEnd(ctx context.Context) error { - e := p.ParseObjectEnd() - if e != nil { - return e - } - return p.ParseListEnd() -} - -func (p *TJSONProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, e error) { - return p.ParseElemListBegin() -} - -func (p *TJSONProtocol) ReadListEnd(ctx context.Context) error { - return p.ParseListEnd() -} - -func (p *TJSONProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, e error) { - return p.ParseElemListBegin() -} - -func (p *TJSONProtocol) ReadSetEnd(ctx context.Context) error { - return p.ParseListEnd() -} - -func (p *TJSONProtocol) ReadBool(ctx context.Context) (bool, error) { - value, err := p.ReadI32(ctx) - return (value != 0), err -} - -func (p *TJSONProtocol) ReadByte(ctx context.Context) (int8, error) { - v, err := p.ReadI64(ctx) - return int8(v), err -} - -func (p *TJSONProtocol) ReadI16(ctx context.Context) (int16, error) { - v, err := p.ReadI64(ctx) - return int16(v), err -} - -func (p *TJSONProtocol) ReadI32(ctx context.Context) (int32, error) { - v, err := p.ReadI64(ctx) - return int32(v), err -} - -func (p *TJSONProtocol) ReadI64(ctx context.Context) (int64, error) { - v, _, err := p.ParseI64() - return v, err -} - -func (p *TJSONProtocol) ReadDouble(ctx context.Context) (float64, error) { - v, _, err := p.ParseF64() - return v, err -} - -func (p *TJSONProtocol) ReadString(ctx context.Context) (string, error) { - var v string - if err := p.ParsePreValue(); err != nil { - return v, err - } - f, _ := p.reader.Peek(1) - if len(f) > 0 && f[0] == JSON_QUOTE { - p.reader.ReadByte() - value, err := p.ParseStringBody() - v = value - if err != nil { - return v, err - } - } else if len(f) > 0 && f[0] == JSON_NULL[0] { - b := make([]byte, len(JSON_NULL)) - _, err := p.reader.Read(b) - if err != nil { - return v, NewTProtocolException(err) - } - if string(b) != string(JSON_NULL) { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b)) - return v, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } else { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f)) - return v, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - return v, p.ParsePostValue() -} - -func (p *TJSONProtocol) ReadBinary(ctx context.Context) ([]byte, error) { - var v []byte - if err := p.ParsePreValue(); err != nil { - return nil, err - } - f, _ := p.reader.Peek(1) - if len(f) > 0 && f[0] == JSON_QUOTE { - p.reader.ReadByte() - value, err := p.ParseBase64EncodedBody() - v = value - if err != nil { - return v, err - } - } else if len(f) > 0 && f[0] == JSON_NULL[0] { - b := make([]byte, len(JSON_NULL)) - _, err := p.reader.Read(b) - if err != nil { - return v, NewTProtocolException(err) - } - if string(b) != string(JSON_NULL) { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", 
string(b)) - return v, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } else { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f)) - return v, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - - return v, p.ParsePostValue() -} - -func (p *TJSONProtocol) Flush(ctx context.Context) (err error) { - err = p.writer.Flush() - if err == nil { - err = p.trans.Flush(ctx) - } - return NewTProtocolException(err) -} - -func (p *TJSONProtocol) Skip(ctx context.Context, fieldType TType) (err error) { - return SkipDefaultDepth(ctx, p, fieldType) -} - -func (p *TJSONProtocol) Transport() TTransport { - return p.trans -} - -func (p *TJSONProtocol) OutputElemListBegin(elemType TType, size int) error { - if e := p.OutputListBegin(); e != nil { - return e - } - s, e1 := p.TypeIdToString(elemType) - if e1 != nil { - return e1 - } - if e := p.OutputString(s); e != nil { - return e - } - if e := p.OutputI64(int64(size)); e != nil { - return e - } - return nil -} - -func (p *TJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error) { - if isNull, e := p.ParseListBegin(); isNull || e != nil { - return VOID, 0, e - } - // We don't really use the ctx in ReadString implementation, - // so this is safe for now. - // We might want to add context to ParseElemListBegin if we start to use - // ctx in ReadString implementation in the future. - sElemType, err := p.ReadString(context.Background()) - if err != nil { - return VOID, size, err - } - elemType, err = p.StringToTypeId(sElemType) - if err != nil { - return elemType, size, err - } - nSize, _, err := p.ParseI64() - if err != nil { - return elemType, 0, err - } - err = checkSizeForProtocol(int32(nSize), p.cfg) - if err != nil { - return elemType, 0, err - } - size = int(nSize) - return elemType, size, nil -} - -func (p *TJSONProtocol) TypeIdToString(fieldType TType) (string, error) { - switch byte(fieldType) { - case BOOL: - return "tf", nil - case BYTE: - return "i8", nil - case I16: - return "i16", nil - case I32: - return "i32", nil - case I64: - return "i64", nil - case DOUBLE: - return "dbl", nil - case STRING: - return "str", nil - case STRUCT: - return "rec", nil - case MAP: - return "map", nil - case SET: - return "set", nil - case LIST: - return "lst", nil - } - - e := fmt.Errorf("Unknown fieldType: %d", int(fieldType)) - return "", NewTProtocolExceptionWithType(INVALID_DATA, e) -} - -func (p *TJSONProtocol) StringToTypeId(fieldType string) (TType, error) { - switch fieldType { - case "tf": - return TType(BOOL), nil - case "i8": - return TType(BYTE), nil - case "i16": - return TType(I16), nil - case "i32": - return TType(I32), nil - case "i64": - return TType(I64), nil - case "dbl": - return TType(DOUBLE), nil - case "str": - return TType(STRING), nil - case "rec": - return TType(STRUCT), nil - case "map": - return TType(MAP), nil - case "set": - return TType(SET), nil - case "lst": - return TType(LIST), nil - } - - e := fmt.Errorf("Unknown type identifier: %s", fieldType) - return TType(STOP), NewTProtocolExceptionWithType(INVALID_DATA, e) -} - -var _ TConfigurationSetter = (*TJSONProtocol)(nil) diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/logger.go b/vendor/github.com/apache/thrift/lib/go/thrift/logger.go deleted file mode 100644 index c42aac99..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/logger.go +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
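To make the message envelope concrete, a small self-contained sketch (using only identifiers from this package) that writes a CALL envelope with TJSONProtocol into a TMemoryBuffer and reads it back; on the wire it is the JSON array [1,"ping",1,7].

package main

import (
	"context"
	"fmt"

	"github.com/apache/thrift/lib/go/thrift"
)

func jsonEnvelopeDemo() error {
	ctx := context.Background()
	buf := thrift.NewTMemoryBuffer()

	out := thrift.NewTJSONProtocol(buf)
	// Version-1 envelope: [version, name, message type, sequence ID].
	if err := out.WriteMessageBegin(ctx, "ping", thrift.CALL, 7); err != nil {
		return err
	}
	if err := out.WriteMessageEnd(ctx); err != nil {
		return err
	}
	if err := out.Flush(ctx); err != nil {
		return err
	}
	fmt.Println(buf.String()) // [1,"ping",1,7]

	in := thrift.NewTJSONProtocol(buf)
	name, typeID, seqID, err := in.ReadMessageBegin(ctx)
	if err != nil {
		return err
	}
	fmt.Println(name, typeID, seqID) // ping 1 7
	return in.ReadMessageEnd(ctx)
}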
See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
-	"log"
-	"os"
-	"testing"
-)
-
-// Logger is a simple wrapper of a logging function.
-//
-// In reality users might use different logging libraries, and those are not
-// always compatible with each other.
-//
-// Logger is meant to be a simple common ground that whatever logging library
-// is actually in use can easily be wrapped into.
-//
-// See https://issues.apache.org/jira/browse/THRIFT-4985 for the design
-// discussion behind it.
-type Logger func(msg string)
-
-// NopLogger is a Logger implementation that does nothing.
-func NopLogger(msg string) {}
-
-// StdLogger wraps the stdlib log package into a Logger.
-//
-// If the logger passed in is nil, it falls back to stderr with default flags.
-func StdLogger(logger *log.Logger) Logger {
-	if logger == nil {
-		logger = log.New(os.Stderr, "", log.LstdFlags)
-	}
-	return func(msg string) {
-		logger.Print(msg)
-	}
-}
-
-// TestLogger is a Logger implementation that can be used in test code.
-//
-// It fails the test when being called.
-func TestLogger(tb testing.TB) Logger {
-	return func(msg string) {
-		tb.Errorf("logger called with msg: %q", msg)
-	}
-}
-
-func fallbackLogger(logger Logger) Logger {
-	if logger == nil {
-		return StdLogger(nil)
-	}
-	return logger
-}
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/memory_buffer.go b/vendor/github.com/apache/thrift/lib/go/thrift/memory_buffer.go
deleted file mode 100644
index 5936d273..00000000
--- a/vendor/github.com/apache/thrift/lib/go/thrift/memory_buffer.go
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
-	"bytes"
-	"context"
-)
-
-// Memory buffer-based implementation of the TTransport interface.
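Since Logger is just a function type, adapting another logging backend is a one-liner. A hedged sketch using the standard library's log/slog (Go 1.21+) as the backend; nothing here is specific to slog beyond the adapter body.

package main

import (
	"log/slog"

	"github.com/apache/thrift/lib/go/thrift"
)

// slogLogger adapts a *slog.Logger to thrift.Logger. The library invokes a
// Logger with pre-formatted messages, typically for server-side errors.
func slogLogger(l *slog.Logger) thrift.Logger {
	return func(msg string) {
		l.Error(msg)
	}
}

func main() {
	logger := slogLogger(slog.Default())
	logger("transport error") // normally called by the library, not by you

	_ = thrift.StdLogger(nil) // stderr with default flags, per the fallback above
}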
-type TMemoryBuffer struct { - *bytes.Buffer - size int -} - -type TMemoryBufferTransportFactory struct { - size int -} - -func (p *TMemoryBufferTransportFactory) GetTransport(trans TTransport) (TTransport, error) { - if trans != nil { - t, ok := trans.(*TMemoryBuffer) - if ok && t.size > 0 { - return NewTMemoryBufferLen(t.size), nil - } - } - return NewTMemoryBufferLen(p.size), nil -} - -func NewTMemoryBufferTransportFactory(size int) *TMemoryBufferTransportFactory { - return &TMemoryBufferTransportFactory{size: size} -} - -func NewTMemoryBuffer() *TMemoryBuffer { - return &TMemoryBuffer{Buffer: &bytes.Buffer{}, size: 0} -} - -func NewTMemoryBufferLen(size int) *TMemoryBuffer { - buf := make([]byte, 0, size) - return &TMemoryBuffer{Buffer: bytes.NewBuffer(buf), size: size} -} - -func (p *TMemoryBuffer) IsOpen() bool { - return true -} - -func (p *TMemoryBuffer) Open() error { - return nil -} - -func (p *TMemoryBuffer) Close() error { - p.Buffer.Reset() - return nil -} - -// Flushing a memory buffer is a no-op -func (p *TMemoryBuffer) Flush(ctx context.Context) error { - return nil -} - -func (p *TMemoryBuffer) RemainingBytes() (num_bytes uint64) { - return uint64(p.Buffer.Len()) -} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/messagetype.go b/vendor/github.com/apache/thrift/lib/go/thrift/messagetype.go deleted file mode 100644 index 25ab2e98..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/messagetype.go +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -// Message type constants in the Thrift protocol. -type TMessageType int32 - -const ( - INVALID_TMESSAGE_TYPE TMessageType = 0 - CALL TMessageType = 1 - REPLY TMessageType = 2 - EXCEPTION TMessageType = 3 - ONEWAY TMessageType = 4 -) diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/middleware.go b/vendor/github.com/apache/thrift/lib/go/thrift/middleware.go deleted file mode 100644 index 8a788df0..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/middleware.go +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import "context" - -// ProcessorMiddleware is a function that can be passed to WrapProcessor to wrap the -// TProcessorFunctions for that TProcessor. -// -// Middlewares are passed in the name of the function as set in the processor -// map of the TProcessor. -type ProcessorMiddleware func(name string, next TProcessorFunction) TProcessorFunction - -// WrapProcessor takes an existing TProcessor and wraps each of its inner -// TProcessorFunctions with the middlewares passed in and returns it. -// -// Middlewares will be called in the order that they are defined: -// -// 1. Middlewares[0] -// 2. Middlewares[1] -// ... -// N. Middlewares[n] -func WrapProcessor(processor TProcessor, middlewares ...ProcessorMiddleware) TProcessor { - for name, processorFunc := range processor.ProcessorMap() { - wrapped := processorFunc - // Add middlewares in reverse so the first in the list is the outermost. - for i := len(middlewares) - 1; i >= 0; i-- { - wrapped = middlewares[i](name, wrapped) - } - processor.AddToProcessorMap(name, wrapped) - } - return processor -} - -// WrappedTProcessorFunction is a convenience struct that implements the -// TProcessorFunction interface that can be used when implementing custom -// Middleware. -type WrappedTProcessorFunction struct { - // Wrapped is called by WrappedTProcessorFunction.Process and should be a - // "wrapped" call to a base TProcessorFunc.Process call. - Wrapped func(ctx context.Context, seqId int32, in, out TProtocol) (bool, TException) -} - -// Process implements the TProcessorFunction interface using p.Wrapped. -func (p WrappedTProcessorFunction) Process(ctx context.Context, seqID int32, in, out TProtocol) (bool, TException) { - return p.Wrapped(ctx, seqID, in, out) -} - -// verify that WrappedTProcessorFunction implements TProcessorFunction -var ( - _ TProcessorFunction = WrappedTProcessorFunction{} - _ TProcessorFunction = (*WrappedTProcessorFunction)(nil) -) - -// ClientMiddleware can be passed to WrapClient in order to wrap TClient calls -// with custom middleware. -type ClientMiddleware func(TClient) TClient - -// WrappedTClient is a convenience struct that implements the TClient interface -// using inner Wrapped function. -// -// This is provided to aid in developing ClientMiddleware. -type WrappedTClient struct { - Wrapped func(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) -} - -// Call implements the TClient interface by calling and returning c.Wrapped. -func (c WrappedTClient) Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) { - return c.Wrapped(ctx, method, args, result) -} - -// verify that WrappedTClient implements TClient -var ( - _ TClient = WrappedTClient{} - _ TClient = (*WrappedTClient)(nil) -) - -// WrapClient wraps the given TClient in the given middlewares. -// -// Middlewares will be called in the order that they are defined: -// -// 1. Middlewares[0] -// 2. Middlewares[1] -// ... -// N. Middlewares[n] -func WrapClient(client TClient, middlewares ...ClientMiddleware) TClient { - // Add middlewares in reverse so the first in the list is the outermost. 
- for i := len(middlewares) - 1; i >= 0; i-- { - client = middlewares[i](client) - } - return client -} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol.go deleted file mode 100644 index cacbf6be..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol.go +++ /dev/null @@ -1,237 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "context" - "fmt" - "strings" -) - -/* -TMultiplexedProtocol is a protocol-independent concrete decorator -that allows a Thrift client to communicate with a multiplexing Thrift server, -by prepending the service name to the function name during function calls. - -NOTE: THIS IS NOT USED BY SERVERS. On the server, use TMultiplexedProcessor to handle request -from a multiplexing client. - -This example uses a single socket transport to invoke two services: - -socket := thrift.NewTSocketFromAddrTimeout(addr, TIMEOUT) -transport := thrift.NewTFramedTransport(socket) -protocol := thrift.NewTBinaryProtocolTransport(transport) - -mp := thrift.NewTMultiplexedProtocol(protocol, "Calculator") -service := Calculator.NewCalculatorClient(mp) - -mp2 := thrift.NewTMultiplexedProtocol(protocol, "WeatherReport") -service2 := WeatherReport.NewWeatherReportClient(mp2) - -err := transport.Open() -if err != nil { - t.Fatal("Unable to open client socket", err) -} - -fmt.Println(service.Add(2,2)) -fmt.Println(service2.GetTemperature()) -*/ - -type TMultiplexedProtocol struct { - TProtocol - serviceName string -} - -const MULTIPLEXED_SEPARATOR = ":" - -func NewTMultiplexedProtocol(protocol TProtocol, serviceName string) *TMultiplexedProtocol { - return &TMultiplexedProtocol{ - TProtocol: protocol, - serviceName: serviceName, - } -} - -func (t *TMultiplexedProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error { - if typeId == CALL || typeId == ONEWAY { - return t.TProtocol.WriteMessageBegin(ctx, t.serviceName+MULTIPLEXED_SEPARATOR+name, typeId, seqid) - } else { - return t.TProtocol.WriteMessageBegin(ctx, name, typeId, seqid) - } -} - -/* -TMultiplexedProcessor is a TProcessor allowing -a single TServer to provide multiple services. 
- -To do so, you instantiate the processor and then register additional -processors with it, as shown in the following example: - -var processor = thrift.NewTMultiplexedProcessor() - -firstProcessor := -processor.RegisterProcessor("FirstService", firstProcessor) - -processor.registerProcessor( - "Calculator", - Calculator.NewCalculatorProcessor(&CalculatorHandler{}), -) - -processor.registerProcessor( - "WeatherReport", - WeatherReport.NewWeatherReportProcessor(&WeatherReportHandler{}), -) - -serverTransport, err := thrift.NewTServerSocketTimeout(addr, TIMEOUT) -if err != nil { - t.Fatal("Unable to create server socket", err) -} -server := thrift.NewTSimpleServer2(processor, serverTransport) -server.Serve(); -*/ - -type TMultiplexedProcessor struct { - serviceProcessorMap map[string]TProcessor - DefaultProcessor TProcessor -} - -func NewTMultiplexedProcessor() *TMultiplexedProcessor { - return &TMultiplexedProcessor{ - serviceProcessorMap: make(map[string]TProcessor), - } -} - -// ProcessorMap returns a mapping of "{ProcessorName}{MULTIPLEXED_SEPARATOR}{FunctionName}" -// to TProcessorFunction for any registered processors. If there is also a -// DefaultProcessor, the keys for the methods on that processor will simply be -// "{FunctionName}". If the TMultiplexedProcessor has both a DefaultProcessor and -// other registered processors, then the keys will be a mix of both formats. -// -// The implementation differs with other TProcessors in that the map returned is -// a new map, while most TProcessors just return their internal mapping directly. -// This means that edits to the map returned by this implementation of ProcessorMap -// will not affect the underlying mapping within the TMultiplexedProcessor. -func (t *TMultiplexedProcessor) ProcessorMap() map[string]TProcessorFunction { - processorFuncMap := make(map[string]TProcessorFunction) - for name, processor := range t.serviceProcessorMap { - for method, processorFunc := range processor.ProcessorMap() { - processorFuncName := name + MULTIPLEXED_SEPARATOR + method - processorFuncMap[processorFuncName] = processorFunc - } - } - if t.DefaultProcessor != nil { - for method, processorFunc := range t.DefaultProcessor.ProcessorMap() { - processorFuncMap[method] = processorFunc - } - } - return processorFuncMap -} - -// AddToProcessorMap updates the underlying TProcessor ProccessorMaps depending on -// the format of "name". -// -// If "name" is in the format "{ProcessorName}{MULTIPLEXED_SEPARATOR}{FunctionName}", -// then it sets the given TProcessorFunction on the inner TProcessor with the -// ProcessorName component using the FunctionName component. -// -// If "name" is just in the format "{FunctionName}", that is to say there is no -// MULTIPLEXED_SEPARATOR, and the TMultiplexedProcessor has a DefaultProcessor -// configured, then it will set the given TProcessorFunction on the DefaultProcessor -// using the given name. -// -// If there is not a TProcessor available for the given name, then this function -// does nothing. This can happen when there is no TProcessor registered for -// the given ProcessorName or if all that is given is the FunctionName and there -// is no DefaultProcessor set. 
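A compilable sketch of both sides of the multiplexing setup described above; Calculator and WeatherReport stand in for services from generated code.

package main

import (
	"github.com/apache/thrift/lib/go/thrift"
)

// buildMultiplexedProcessor registers two services plus a fallback for
// clients that do not multiplex. All three processors are assumed to come
// from generated code.
func buildMultiplexedProcessor(calc, weather, legacy thrift.TProcessor) *thrift.TMultiplexedProcessor {
	p := thrift.NewTMultiplexedProcessor()
	p.RegisterProcessor("Calculator", calc)
	p.RegisterProcessor("WeatherReport", weather)
	p.RegisterDefault(legacy) // handles message names without the ":" separator
	return p
}

// newCalculatorProtocol is the client side: every CALL/ONEWAY message name
// goes out as "Calculator:<method>".
func newCalculatorProtocol(base thrift.TProtocol) thrift.TProtocol {
	return thrift.NewTMultiplexedProtocol(base, "Calculator")
}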
-func (t *TMultiplexedProcessor) AddToProcessorMap(name string, processorFunc TProcessorFunction) {
-	components := strings.SplitN(name, MULTIPLEXED_SEPARATOR, 2)
-	if len(components) != 2 {
-		if t.DefaultProcessor != nil && len(components) == 1 {
-			t.DefaultProcessor.AddToProcessorMap(components[0], processorFunc)
-		}
-		return
-	}
-	processorName := components[0]
-	funcName := components[1]
-	if processor, ok := t.serviceProcessorMap[processorName]; ok {
-		processor.AddToProcessorMap(funcName, processorFunc)
-	}
-}
-
-// verify that TMultiplexedProcessor implements TProcessor
-var _ TProcessor = (*TMultiplexedProcessor)(nil)
-
-func (t *TMultiplexedProcessor) RegisterDefault(processor TProcessor) {
-	t.DefaultProcessor = processor
-}
-
-func (t *TMultiplexedProcessor) RegisterProcessor(name string, processor TProcessor) {
-	if t.serviceProcessorMap == nil {
-		t.serviceProcessorMap = make(map[string]TProcessor)
-	}
-	t.serviceProcessorMap[name] = processor
-}
-
-func (t *TMultiplexedProcessor) Process(ctx context.Context, in, out TProtocol) (bool, TException) {
-	name, typeId, seqid, err := in.ReadMessageBegin(ctx)
-	if err != nil {
-		return false, NewTProtocolException(err)
-	}
-	if typeId != CALL && typeId != ONEWAY {
-		return false, NewTProtocolException(fmt.Errorf("Unexpected message type %v", typeId))
-	}
-	// extract the service name
-	v := strings.SplitN(name, MULTIPLEXED_SEPARATOR, 2)
-	if len(v) != 2 {
-		if t.DefaultProcessor != nil {
-			smb := NewStoredMessageProtocol(in, name, typeId, seqid)
-			return t.DefaultProcessor.Process(ctx, smb, out)
-		}
-		return false, NewTProtocolException(fmt.Errorf(
-			"Service name not found in message name: %s. Did you forget to use a TMultiplexedProtocol in your client?",
-			name,
-		))
-	}
-	actualProcessor, ok := t.serviceProcessorMap[v[0]]
-	if !ok {
-		return false, NewTProtocolException(fmt.Errorf(
-			"Service name not found: %s. Did you forget to call RegisterProcessor()?",
-			v[0],
-		))
-	}
-	smb := NewStoredMessageProtocol(in, v[1], typeId, seqid)
-	return actualProcessor.Process(ctx, smb, out)
-}
-
-// Protocol that uses the stored message for ReadMessageBegin.
-type storedMessageProtocol struct {
-	TProtocol
-	name   string
-	typeId TMessageType
-	seqid  int32
-}
-
-func NewStoredMessageProtocol(protocol TProtocol, name string, typeId TMessageType, seqid int32) *storedMessageProtocol {
-	return &storedMessageProtocol{protocol, name, typeId, seqid}
-}
-
-func (s *storedMessageProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqid int32, err error) {
-	return s.name, s.typeId, s.seqid, nil
-}
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/numeric.go b/vendor/github.com/apache/thrift/lib/go/thrift/numeric.go
deleted file mode 100644
index e4512d20..00000000
--- a/vendor/github.com/apache/thrift/lib/go/thrift/numeric.go
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "math" - "strconv" -) - -type Numeric interface { - Int64() int64 - Int32() int32 - Int16() int16 - Byte() byte - Int() int - Float64() float64 - Float32() float32 - String() string - isNull() bool -} - -type numeric struct { - iValue int64 - dValue float64 - sValue string - isNil bool -} - -var ( - INFINITY Numeric - NEGATIVE_INFINITY Numeric - NAN Numeric - ZERO Numeric - NUMERIC_NULL Numeric -) - -func NewNumericFromDouble(dValue float64) Numeric { - if math.IsInf(dValue, 1) { - return INFINITY - } - if math.IsInf(dValue, -1) { - return NEGATIVE_INFINITY - } - if math.IsNaN(dValue) { - return NAN - } - iValue := int64(dValue) - sValue := strconv.FormatFloat(dValue, 'g', 10, 64) - isNil := false - return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} -} - -func NewNumericFromI64(iValue int64) Numeric { - dValue := float64(iValue) - sValue := strconv.FormatInt(iValue, 10) - isNil := false - return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} -} - -func NewNumericFromI32(iValue int32) Numeric { - dValue := float64(iValue) - sValue := strconv.FormatInt(int64(iValue), 10) - isNil := false - return &numeric{iValue: int64(iValue), dValue: dValue, sValue: sValue, isNil: isNil} -} - -func NewNumericFromString(sValue string) Numeric { - if sValue == INFINITY.String() { - return INFINITY - } - if sValue == NEGATIVE_INFINITY.String() { - return NEGATIVE_INFINITY - } - if sValue == NAN.String() { - return NAN - } - iValue, _ := strconv.ParseInt(sValue, 10, 64) - dValue, _ := strconv.ParseFloat(sValue, 64) - isNil := len(sValue) == 0 - return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} -} - -func NewNumericFromJSONString(sValue string, isNull bool) Numeric { - if isNull { - return NewNullNumeric() - } - if sValue == JSON_INFINITY { - return INFINITY - } - if sValue == JSON_NEGATIVE_INFINITY { - return NEGATIVE_INFINITY - } - if sValue == JSON_NAN { - return NAN - } - iValue, _ := strconv.ParseInt(sValue, 10, 64) - dValue, _ := strconv.ParseFloat(sValue, 64) - return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNull} -} - -func NewNullNumeric() Numeric { - return &numeric{iValue: 0, dValue: 0.0, sValue: "", isNil: true} -} - -func (p *numeric) Int64() int64 { - return p.iValue -} - -func (p *numeric) Int32() int32 { - return int32(p.iValue) -} - -func (p *numeric) Int16() int16 { - return int16(p.iValue) -} - -func (p *numeric) Byte() byte { - return byte(p.iValue) -} - -func (p *numeric) Int() int { - return int(p.iValue) -} - -func (p *numeric) Float64() float64 { - return p.dValue -} - -func (p *numeric) Float32() float32 { - return float32(p.dValue) -} - -func (p *numeric) String() string { - return p.sValue -} - -func (p *numeric) isNull() bool { - return p.isNil -} - -func init() { - INFINITY = &numeric{iValue: 0, dValue: math.Inf(1), sValue: "Infinity", isNil: false} - NEGATIVE_INFINITY = &numeric{iValue: 0, dValue: math.Inf(-1), sValue: "-Infinity", isNil: false} - NAN = &numeric{iValue: 0, dValue: math.NaN(), sValue: "NaN", isNil: false} - 
ZERO = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: false} - NUMERIC_NULL = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: true} -} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/pointerize.go b/vendor/github.com/apache/thrift/lib/go/thrift/pointerize.go deleted file mode 100644 index fb564ea8..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/pointerize.go +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -/////////////////////////////////////////////////////////////////////////////// -// This file is home to helpers that convert from various base types to -// respective pointer types. This is necessary because Go does not permit -// references to constants, nor can a pointer type to base type be allocated -// and initialized in a single expression. -// -// E.g., this is not allowed: -// -// var ip *int = &5 -// -// But this *is* allowed: -// -// func IntPtr(i int) *int { return &i } -// var ip *int = IntPtr(5) -// -// Since pointers to base types are commonplace as [optional] fields in -// exported thrift structs, we factor such helpers here. -/////////////////////////////////////////////////////////////////////////////// - -func Float32Ptr(v float32) *float32 { return &v } -func Float64Ptr(v float64) *float64 { return &v } -func IntPtr(v int) *int { return &v } -func Int8Ptr(v int8) *int8 { return &v } -func Int16Ptr(v int16) *int16 { return &v } -func Int32Ptr(v int32) *int32 { return &v } -func Int64Ptr(v int64) *int64 { return &v } -func StringPtr(v string) *string { return &v } -func Uint32Ptr(v uint32) *uint32 { return &v } -func Uint64Ptr(v uint64) *uint64 { return &v } -func BoolPtr(v bool) *bool { return &v } -func ByteSlicePtr(v []byte) *[]byte { return &v } diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/processor_factory.go b/vendor/github.com/apache/thrift/lib/go/thrift/processor_factory.go deleted file mode 100644 index 245a3ccf..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/processor_factory.go +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import "context" - -// A processor is a generic object which operates upon an input stream and -// writes to some output stream. -type TProcessor interface { - Process(ctx context.Context, in, out TProtocol) (bool, TException) - - // ProcessorMap returns a map of thrift method names to TProcessorFunctions. - ProcessorMap() map[string]TProcessorFunction - - // AddToProcessorMap adds the given TProcessorFunction to the internal - // processor map at the given key. - // - // If one is already set at the given key, it will be replaced with the new - // TProcessorFunction. - AddToProcessorMap(string, TProcessorFunction) -} - -type TProcessorFunction interface { - Process(ctx context.Context, seqId int32, in, out TProtocol) (bool, TException) -} - -// The default processor factory just returns a singleton -// instance. -type TProcessorFactory interface { - GetProcessor(trans TTransport) TProcessor -} - -type tProcessorFactory struct { - processor TProcessor -} - -func NewTProcessorFactory(p TProcessor) TProcessorFactory { - return &tProcessorFactory{processor: p} -} - -func (p *tProcessorFactory) GetProcessor(trans TTransport) TProcessor { - return p.processor -} - -/** - * The default processor factory just returns a singleton - * instance. - */ -type TProcessorFunctionFactory interface { - GetProcessorFunction(trans TTransport) TProcessorFunction -} - -type tProcessorFunctionFactory struct { - processor TProcessorFunction -} - -func NewTProcessorFunctionFactory(p TProcessorFunction) TProcessorFunctionFactory { - return &tProcessorFunctionFactory{processor: p} -} - -func (p *tProcessorFunctionFactory) GetProcessorFunction(trans TTransport) TProcessorFunction { - return p.processor -} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/protocol.go deleted file mode 100644 index 8543b5fd..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/protocol.go +++ /dev/null @@ -1,184 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "context" - "errors" - "fmt" -) - -const ( - VERSION_MASK = 0xffff0000 - VERSION_1 = 0x80010000 -) - -type TProtocol interface { - WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error - WriteMessageEnd(ctx context.Context) error - WriteStructBegin(ctx context.Context, name string) error - WriteStructEnd(ctx context.Context) error - WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error - WriteFieldEnd(ctx context.Context) error - WriteFieldStop(ctx context.Context) error - WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error - WriteMapEnd(ctx context.Context) error - WriteListBegin(ctx context.Context, elemType TType, size int) error - WriteListEnd(ctx context.Context) error - WriteSetBegin(ctx context.Context, elemType TType, size int) error - WriteSetEnd(ctx context.Context) error - WriteBool(ctx context.Context, value bool) error - WriteByte(ctx context.Context, value int8) error - WriteI16(ctx context.Context, value int16) error - WriteI32(ctx context.Context, value int32) error - WriteI64(ctx context.Context, value int64) error - WriteDouble(ctx context.Context, value float64) error - WriteString(ctx context.Context, value string) error - WriteBinary(ctx context.Context, value []byte) error - - ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqid int32, err error) - ReadMessageEnd(ctx context.Context) error - ReadStructBegin(ctx context.Context) (name string, err error) - ReadStructEnd(ctx context.Context) error - ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error) - ReadFieldEnd(ctx context.Context) error - ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) - ReadMapEnd(ctx context.Context) error - ReadListBegin(ctx context.Context) (elemType TType, size int, err error) - ReadListEnd(ctx context.Context) error - ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) - ReadSetEnd(ctx context.Context) error - ReadBool(ctx context.Context) (value bool, err error) - ReadByte(ctx context.Context) (value int8, err error) - ReadI16(ctx context.Context) (value int16, err error) - ReadI32(ctx context.Context) (value int32, err error) - ReadI64(ctx context.Context) (value int64, err error) - ReadDouble(ctx context.Context) (value float64, err error) - ReadString(ctx context.Context) (value string, err error) - ReadBinary(ctx context.Context) (value []byte, err error) - - Skip(ctx context.Context, fieldType TType) (err error) - Flush(ctx context.Context) (err error) - - Transport() TTransport -} - -// The maximum recursive depth the skip() function will traverse -const DEFAULT_RECURSION_DEPTH = 64 - -// Skips over the next data element from the provided input TProtocol object. -func SkipDefaultDepth(ctx context.Context, prot TProtocol, typeId TType) (err error) { - return Skip(ctx, prot, typeId, DEFAULT_RECURSION_DEPTH) -} - -// Skips over the next data element from the provided input TProtocol object. 
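// A minimal usage sketch, not part of the original file (hypothetical
// readTolerantStruct name, assuming only identifiers defined in this
// package): callers typically pair ReadFieldBegin with SkipDefaultDepth so
// that unrecognized fields are consumed and discarded rather than leaving
// the stream mis-positioned. Here only field id 1 (an I64) is "known".
//
//	func readTolerantStruct(ctx context.Context, p TProtocol) error {
//		if _, err := p.ReadStructBegin(ctx); err != nil {
//			return err
//		}
//		for {
//			_, typeId, fieldId, err := p.ReadFieldBegin(ctx)
//			if err != nil {
//				return err
//			}
//			if typeId == STOP {
//				break
//			}
//			if fieldId == 1 && typeId == I64 {
//				if _, err := p.ReadI64(ctx); err != nil {
//					return err
//				}
//			} else if err := SkipDefaultDepth(ctx, p, typeId); err != nil {
//				return err
//			}
//			if err := p.ReadFieldEnd(ctx); err != nil {
//				return err
//			}
//		}
//		return p.ReadStructEnd(ctx)
//	}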
-func Skip(ctx context.Context, self TProtocol, fieldType TType, maxDepth int) (err error) { - - if maxDepth <= 0 { - return NewTProtocolExceptionWithType(DEPTH_LIMIT, errors.New("Depth limit exceeded")) - } - - switch fieldType { - case BOOL: - _, err = self.ReadBool(ctx) - return - case BYTE: - _, err = self.ReadByte(ctx) - return - case I16: - _, err = self.ReadI16(ctx) - return - case I32: - _, err = self.ReadI32(ctx) - return - case I64: - _, err = self.ReadI64(ctx) - return - case DOUBLE: - _, err = self.ReadDouble(ctx) - return - case STRING: - _, err = self.ReadString(ctx) - return - case STRUCT: - if _, err = self.ReadStructBegin(ctx); err != nil { - return err - } - for { - _, typeId, _, err := self.ReadFieldBegin(ctx) - if err != nil { - return err - } - if typeId == STOP { - break - } - err = Skip(ctx, self, typeId, maxDepth-1) - if err != nil { - return err - } - self.ReadFieldEnd(ctx) - } - return self.ReadStructEnd(ctx) - case MAP: - keyType, valueType, size, err := self.ReadMapBegin(ctx) - if err != nil { - return err - } - for i := 0; i < size; i++ { - err := Skip(ctx, self, keyType, maxDepth-1) - if err != nil { - return err - } - - err = Skip(ctx, self, valueType, maxDepth-1) - if err != nil { - return err - } - } - return self.ReadMapEnd(ctx) - case SET: - elemType, size, err := self.ReadSetBegin(ctx) - if err != nil { - return err - } - for i := 0; i < size; i++ { - err := Skip(ctx, self, elemType, maxDepth-1) - if err != nil { - return err - } - } - return self.ReadSetEnd(ctx) - case LIST: - elemType, size, err := self.ReadListBegin(ctx) - if err != nil { - return err - } - for i := 0; i < size; i++ { - err := Skip(ctx, self, elemType, maxDepth-1) - if err != nil { - return err - } - } - return self.ReadListEnd(ctx) - default: - return NewTProtocolExceptionWithType(INVALID_DATA, fmt.Errorf("Unknown data type %d", fieldType)) - } - return nil -} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/protocol_exception.go b/vendor/github.com/apache/thrift/lib/go/thrift/protocol_exception.go deleted file mode 100644 index 9dcf4bfd..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/protocol_exception.go +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "encoding/base64" - "errors" -) - -// Thrift Protocol exception -type TProtocolException interface { - TException - TypeId() int -} - -const ( - UNKNOWN_PROTOCOL_EXCEPTION = 0 - INVALID_DATA = 1 - NEGATIVE_SIZE = 2 - SIZE_LIMIT = 3 - BAD_VERSION = 4 - NOT_IMPLEMENTED = 5 - DEPTH_LIMIT = 6 -) - -type tProtocolException struct { - typeId int - err error - msg string -} - -var _ TProtocolException = (*tProtocolException)(nil) - -func (tProtocolException) TExceptionType() TExceptionType { - return TExceptionTypeProtocol -} - -func (p *tProtocolException) TypeId() int { - return p.typeId -} - -func (p *tProtocolException) String() string { - return p.msg -} - -func (p *tProtocolException) Error() string { - return p.msg -} - -func (p *tProtocolException) Unwrap() error { - return p.err -} - -func NewTProtocolException(err error) TProtocolException { - if err == nil { - return nil - } - - if e, ok := err.(TProtocolException); ok { - return e - } - - if errors.As(err, new(base64.CorruptInputError)) { - return NewTProtocolExceptionWithType(INVALID_DATA, err) - } - - return NewTProtocolExceptionWithType(UNKNOWN_PROTOCOL_EXCEPTION, err) -} - -func NewTProtocolExceptionWithType(errType int, err error) TProtocolException { - if err == nil { - return nil - } - return &tProtocolException{ - typeId: errType, - err: err, - msg: err.Error(), - } -} - -func prependTProtocolException(prepend string, err TProtocolException) TProtocolException { - return &tProtocolException{ - typeId: err.TypeId(), - err: err, - msg: prepend + err.Error(), - } -} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/protocol_factory.go b/vendor/github.com/apache/thrift/lib/go/thrift/protocol_factory.go deleted file mode 100644 index c40f796d..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/protocol_factory.go +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -// Factory interface for constructing protocol instances. -type TProtocolFactory interface { - GetProtocol(trans TTransport) TProtocol -} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/response_helper.go b/vendor/github.com/apache/thrift/lib/go/thrift/response_helper.go deleted file mode 100644 index d884c6ac..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/response_helper.go +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "context" -) - -// See https://godoc.org/context#WithValue on why do we need the unexported typedefs. -type responseHelperKey struct{} - -// TResponseHelper defines a object with a set of helper functions that can be -// retrieved from the context object passed into server handler functions. -// -// Use GetResponseHelper to retrieve the injected TResponseHelper implementation -// from the context object. -// -// The zero value of TResponseHelper is valid with all helper functions being -// no-op. -type TResponseHelper struct { - // THeader related functions - *THeaderResponseHelper -} - -// THeaderResponseHelper defines THeader related TResponseHelper functions. -// -// The zero value of *THeaderResponseHelper is valid with all helper functions -// being no-op. -type THeaderResponseHelper struct { - proto *THeaderProtocol -} - -// NewTHeaderResponseHelper creates a new THeaderResponseHelper from the -// underlying TProtocol. -func NewTHeaderResponseHelper(proto TProtocol) *THeaderResponseHelper { - if hp, ok := proto.(*THeaderProtocol); ok { - return &THeaderResponseHelper{ - proto: hp, - } - } - return nil -} - -// SetHeader sets a response header. -// -// It's no-op if the underlying protocol/transport does not support THeader. -func (h *THeaderResponseHelper) SetHeader(key, value string) { - if h != nil && h.proto != nil { - h.proto.SetWriteHeader(key, value) - } -} - -// ClearHeaders clears all the response headers previously set. -// -// It's no-op if the underlying protocol/transport does not support THeader. -func (h *THeaderResponseHelper) ClearHeaders() { - if h != nil && h.proto != nil { - h.proto.ClearWriteHeaders() - } -} - -// GetResponseHelper retrieves the TResponseHelper implementation injected into -// the context object. -// -// If no helper was found in the context object, a nop helper with ok == false -// will be returned. -func GetResponseHelper(ctx context.Context) (helper TResponseHelper, ok bool) { - if v := ctx.Value(responseHelperKey{}); v != nil { - helper, ok = v.(TResponseHelper) - } - return -} - -// SetResponseHelper injects TResponseHelper into the context object. -func SetResponseHelper(ctx context.Context, helper TResponseHelper) context.Context { - return context.WithValue(ctx, responseHelperKey{}, helper) -} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/rich_transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/rich_transport.go deleted file mode 100644 index 83fdf29f..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/rich_transport.go +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "errors" - "io" -) - -type RichTransport struct { - TTransport -} - -// Wraps Transport to provide TRichTransport interface -func NewTRichTransport(trans TTransport) *RichTransport { - return &RichTransport{trans} -} - -func (r *RichTransport) ReadByte() (c byte, err error) { - return readByte(r.TTransport) -} - -func (r *RichTransport) WriteByte(c byte) error { - return writeByte(r.TTransport, c) -} - -func (r *RichTransport) WriteString(s string) (n int, err error) { - return r.Write([]byte(s)) -} - -func (r *RichTransport) RemainingBytes() (num_bytes uint64) { - return r.TTransport.RemainingBytes() -} - -func readByte(r io.Reader) (c byte, err error) { - v := [1]byte{0} - n, err := r.Read(v[0:1]) - if n > 0 && (err == nil || errors.Is(err, io.EOF)) { - return v[0], nil - } - if n > 0 && err != nil { - return v[0], err - } - if err != nil { - return 0, err - } - return v[0], nil -} - -func writeByte(w io.Writer, c byte) error { - v := [1]byte{c} - _, err := w.Write(v[0:1]) - return err -} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/serializer.go b/vendor/github.com/apache/thrift/lib/go/thrift/serializer.go deleted file mode 100644 index c4497909..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/serializer.go +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "context" - "sync" -) - -type TSerializer struct { - Transport *TMemoryBuffer - Protocol TProtocol -} - -type TStruct interface { - Write(ctx context.Context, p TProtocol) error - Read(ctx context.Context, p TProtocol) error -} - -func NewTSerializer() *TSerializer { - transport := NewTMemoryBufferLen(1024) - protocol := NewTBinaryProtocolTransport(transport) - - return &TSerializer{ - Transport: transport, - Protocol: protocol, - } -} - -func (t *TSerializer) WriteString(ctx context.Context, msg TStruct) (s string, err error) { - t.Transport.Reset() - - if err = msg.Write(ctx, t.Protocol); err != nil { - return - } - - if err = t.Protocol.Flush(ctx); err != nil { - return - } - if err = t.Transport.Flush(ctx); err != nil { - return - } - - return t.Transport.String(), nil -} - -func (t *TSerializer) Write(ctx context.Context, msg TStruct) (b []byte, err error) { - t.Transport.Reset() - - if err = msg.Write(ctx, t.Protocol); err != nil { - return - } - - if err = t.Protocol.Flush(ctx); err != nil { - return - } - - if err = t.Transport.Flush(ctx); err != nil { - return - } - - b = append(b, t.Transport.Bytes()...) - return -} - -// TSerializerPool is the thread-safe version of TSerializer, it uses resource -// pool of TSerializer under the hood. -// -// It must be initialized with either NewTSerializerPool or -// NewTSerializerPoolSizeFactory. -type TSerializerPool struct { - pool sync.Pool -} - -// NewTSerializerPool creates a new TSerializerPool. -// -// NewTSerializer can be used as the arg here. -func NewTSerializerPool(f func() *TSerializer) *TSerializerPool { - return &TSerializerPool{ - pool: sync.Pool{ - New: func() interface{} { - return f() - }, - }, - } -} - -// NewTSerializerPoolSizeFactory creates a new TSerializerPool with the given -// size and protocol factory. -// -// Note that the size is not the limit. The TMemoryBuffer underneath can grow -// larger than that. It just dictates the initial size. -func NewTSerializerPoolSizeFactory(size int, factory TProtocolFactory) *TSerializerPool { - return &TSerializerPool{ - pool: sync.Pool{ - New: func() interface{} { - transport := NewTMemoryBufferLen(size) - protocol := factory.GetProtocol(transport) - - return &TSerializer{ - Transport: transport, - Protocol: protocol, - } - }, - }, - } -} - -func (t *TSerializerPool) WriteString(ctx context.Context, msg TStruct) (string, error) { - s := t.pool.Get().(*TSerializer) - defer t.pool.Put(s) - return s.WriteString(ctx, msg) -} - -func (t *TSerializerPool) Write(ctx context.Context, msg TStruct) ([]byte, error) { - s := t.pool.Get().(*TSerializer) - defer t.pool.Put(s) - return s.Write(ctx, msg) -} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/server.go b/vendor/github.com/apache/thrift/lib/go/thrift/server.go deleted file mode 100644 index f813fa35..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/server.go +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -type TServer interface { - ProcessorFactory() TProcessorFactory - ServerTransport() TServerTransport - InputTransportFactory() TTransportFactory - OutputTransportFactory() TTransportFactory - InputProtocolFactory() TProtocolFactory - OutputProtocolFactory() TProtocolFactory - - // Starts the server - Serve() error - // Stops the server. This is optional on a per-implementation basis. Not - // all servers are required to be cleanly stoppable. - Stop() error -} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/server_socket.go b/vendor/github.com/apache/thrift/lib/go/thrift/server_socket.go deleted file mode 100644 index 7dd24ae3..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/server_socket.go +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "net" - "sync" - "time" -) - -type TServerSocket struct { - listener net.Listener - addr net.Addr - clientTimeout time.Duration - - // Protects the interrupted value to make it thread safe. 
- mu sync.RWMutex - interrupted bool -} - -func NewTServerSocket(listenAddr string) (*TServerSocket, error) { - return NewTServerSocketTimeout(listenAddr, 0) -} - -func NewTServerSocketTimeout(listenAddr string, clientTimeout time.Duration) (*TServerSocket, error) { - addr, err := net.ResolveTCPAddr("tcp", listenAddr) - if err != nil { - return nil, err - } - return &TServerSocket{addr: addr, clientTimeout: clientTimeout}, nil -} - -// Creates a TServerSocket from a net.Addr -func NewTServerSocketFromAddrTimeout(addr net.Addr, clientTimeout time.Duration) *TServerSocket { - return &TServerSocket{addr: addr, clientTimeout: clientTimeout} -} - -func (p *TServerSocket) Listen() error { - p.mu.Lock() - defer p.mu.Unlock() - if p.IsListening() { - return nil - } - l, err := net.Listen(p.addr.Network(), p.addr.String()) - if err != nil { - return err - } - p.listener = l - return nil -} - -func (p *TServerSocket) Accept() (TTransport, error) { - p.mu.RLock() - interrupted := p.interrupted - p.mu.RUnlock() - - if interrupted { - return nil, errTransportInterrupted - } - - p.mu.Lock() - listener := p.listener - p.mu.Unlock() - if listener == nil { - return nil, NewTTransportException(NOT_OPEN, "No underlying server socket") - } - - conn, err := listener.Accept() - if err != nil { - return nil, NewTTransportExceptionFromError(err) - } - return NewTSocketFromConnTimeout(conn, p.clientTimeout), nil -} - -// Checks whether the socket is listening. -func (p *TServerSocket) IsListening() bool { - return p.listener != nil -} - -// Connects the socket, creating a new socket object if necessary. -func (p *TServerSocket) Open() error { - p.mu.Lock() - defer p.mu.Unlock() - if p.IsListening() { - return NewTTransportException(ALREADY_OPEN, "Server socket already open") - } - if l, err := net.Listen(p.addr.Network(), p.addr.String()); err != nil { - return err - } else { - p.listener = l - } - return nil -} - -func (p *TServerSocket) Addr() net.Addr { - if p.listener != nil { - return p.listener.Addr() - } - return p.addr -} - -func (p *TServerSocket) Close() error { - var err error - p.mu.Lock() - if p.IsListening() { - err = p.listener.Close() - p.listener = nil - } - p.mu.Unlock() - return err -} - -func (p *TServerSocket) Interrupt() error { - p.mu.Lock() - p.interrupted = true - p.mu.Unlock() - p.Close() - - return nil -} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/server_transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/server_transport.go deleted file mode 100644 index 51c40b64..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/server_transport.go +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -// Server transport. Object which provides client transports. 
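// A minimal accept-loop sketch, not part of the original file (hypothetical
// serve and handleClient names, assuming only the interface declared just
// below): a server listens once, then hands each accepted TTransport to a
// per-connection goroutine.
//
//	func serve(st TServerTransport) error {
//		if err := st.Listen(); err != nil {
//			return err
//		}
//		for {
//			client, err := st.Accept()
//			if err != nil {
//				return err
//			}
//			// handleClient would wrap client in transport/protocol
//			// factories and drive a TProcessor; it is hypothetical here.
//			go handleClient(client)
//		}
//	}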
-type TServerTransport interface { - Listen() error - Accept() (TTransport, error) - Close() error - - // Optional method implementation. This signals to the server transport - // that it should break out of any accept() or listen() that it is currently - // blocked on. This method, if implemented, MUST be thread safe, as it may - // be called from a different thread context than the other TServerTransport - // methods. - Interrupt() error -} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/simple_json_protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/simple_json_protocol.go deleted file mode 100644 index c9c450b8..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/simple_json_protocol.go +++ /dev/null @@ -1,1334 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "bufio" - "bytes" - "context" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "math" - "strconv" -) - -type _ParseContext int - -const ( - _CONTEXT_INVALID _ParseContext = iota - _CONTEXT_IN_TOPLEVEL // 1 - _CONTEXT_IN_LIST_FIRST // 2 - _CONTEXT_IN_LIST // 3 - _CONTEXT_IN_OBJECT_FIRST // 4 - _CONTEXT_IN_OBJECT_NEXT_KEY // 5 - _CONTEXT_IN_OBJECT_NEXT_VALUE // 6 -) - -func (p _ParseContext) String() string { - switch p { - case _CONTEXT_IN_TOPLEVEL: - return "TOPLEVEL" - case _CONTEXT_IN_LIST_FIRST: - return "LIST-FIRST" - case _CONTEXT_IN_LIST: - return "LIST" - case _CONTEXT_IN_OBJECT_FIRST: - return "OBJECT-FIRST" - case _CONTEXT_IN_OBJECT_NEXT_KEY: - return "OBJECT-NEXT-KEY" - case _CONTEXT_IN_OBJECT_NEXT_VALUE: - return "OBJECT-NEXT-VALUE" - } - return "UNKNOWN-PARSE-CONTEXT" -} - -type jsonContextStack []_ParseContext - -func (s *jsonContextStack) push(v _ParseContext) { - *s = append(*s, v) -} - -func (s jsonContextStack) peek() (v _ParseContext, ok bool) { - l := len(s) - if l <= 0 { - return - } - return s[l-1], true -} - -func (s *jsonContextStack) pop() (v _ParseContext, ok bool) { - l := len(*s) - if l <= 0 { - return - } - v = (*s)[l-1] - *s = (*s)[0 : l-1] - return v, true -} - -var errEmptyJSONContextStack = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Unexpected empty json protocol context stack")) - -// Simple JSON protocol implementation for thrift. -// -// This protocol produces/consumes a simple output format -// suitable for parsing by scripting languages. It should not be -// confused with the full-featured TJSONProtocol. 
-// -type TSimpleJSONProtocol struct { - trans TTransport - - cfg *TConfiguration - - parseContextStack jsonContextStack - dumpContext jsonContextStack - - writer *bufio.Writer - reader *bufio.Reader -} - -// Deprecated: Use NewTSimpleJSONProtocolConf instead.: -func NewTSimpleJSONProtocol(t TTransport) *TSimpleJSONProtocol { - return NewTSimpleJSONProtocolConf(t, &TConfiguration{ - noPropagation: true, - }) -} - -func NewTSimpleJSONProtocolConf(t TTransport, conf *TConfiguration) *TSimpleJSONProtocol { - PropagateTConfiguration(t, conf) - v := &TSimpleJSONProtocol{ - trans: t, - cfg: conf, - writer: bufio.NewWriter(t), - reader: bufio.NewReader(t), - } - v.parseContextStack.push(_CONTEXT_IN_TOPLEVEL) - v.dumpContext.push(_CONTEXT_IN_TOPLEVEL) - return v -} - -// Factory -type TSimpleJSONProtocolFactory struct { - cfg *TConfiguration -} - -func (p *TSimpleJSONProtocolFactory) GetProtocol(trans TTransport) TProtocol { - return NewTSimpleJSONProtocolConf(trans, p.cfg) -} - -// SetTConfiguration implements TConfigurationSetter for propagation. -func (p *TSimpleJSONProtocolFactory) SetTConfiguration(conf *TConfiguration) { - p.cfg = conf -} - -// Deprecated: Use NewTSimpleJSONProtocolFactoryConf instead. -func NewTSimpleJSONProtocolFactory() *TSimpleJSONProtocolFactory { - return &TSimpleJSONProtocolFactory{ - cfg: &TConfiguration{ - noPropagation: true, - }, - } -} - -func NewTSimpleJSONProtocolFactoryConf(conf *TConfiguration) *TSimpleJSONProtocolFactory { - return &TSimpleJSONProtocolFactory{ - cfg: conf, - } -} - -var ( - JSON_COMMA []byte - JSON_COLON []byte - JSON_LBRACE []byte - JSON_RBRACE []byte - JSON_LBRACKET []byte - JSON_RBRACKET []byte - JSON_QUOTE byte - JSON_QUOTE_BYTES []byte - JSON_NULL []byte - JSON_TRUE []byte - JSON_FALSE []byte - JSON_INFINITY string - JSON_NEGATIVE_INFINITY string - JSON_NAN string - JSON_INFINITY_BYTES []byte - JSON_NEGATIVE_INFINITY_BYTES []byte - JSON_NAN_BYTES []byte -) - -func init() { - JSON_COMMA = []byte{','} - JSON_COLON = []byte{':'} - JSON_LBRACE = []byte{'{'} - JSON_RBRACE = []byte{'}'} - JSON_LBRACKET = []byte{'['} - JSON_RBRACKET = []byte{']'} - JSON_QUOTE = '"' - JSON_QUOTE_BYTES = []byte{'"'} - JSON_NULL = []byte{'n', 'u', 'l', 'l'} - JSON_TRUE = []byte{'t', 'r', 'u', 'e'} - JSON_FALSE = []byte{'f', 'a', 'l', 's', 'e'} - JSON_INFINITY = "Infinity" - JSON_NEGATIVE_INFINITY = "-Infinity" - JSON_NAN = "NaN" - JSON_INFINITY_BYTES = []byte{'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'} - JSON_NEGATIVE_INFINITY_BYTES = []byte{'-', 'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'} - JSON_NAN_BYTES = []byte{'N', 'a', 'N'} -} - -func jsonQuote(s string) string { - b, _ := json.Marshal(s) - s1 := string(b) - return s1 -} - -func jsonUnquote(s string) (string, bool) { - s1 := new(string) - err := json.Unmarshal([]byte(s), s1) - return *s1, err == nil -} - -func mismatch(expected, actual string) error { - return fmt.Errorf("Expected '%s' but found '%s' while parsing JSON.", expected, actual) -} - -func (p *TSimpleJSONProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error { - p.resetContextStack() // THRIFT-3735 - if e := p.OutputListBegin(); e != nil { - return e - } - if e := p.WriteString(ctx, name); e != nil { - return e - } - if e := p.WriteByte(ctx, int8(typeId)); e != nil { - return e - } - if e := p.WriteI32(ctx, seqId); e != nil { - return e - } - return nil -} - -func (p *TSimpleJSONProtocol) WriteMessageEnd(ctx context.Context) error { - return p.OutputListEnd() -} - -func (p *TSimpleJSONProtocol) 
WriteStructBegin(ctx context.Context, name string) error { - if e := p.OutputObjectBegin(); e != nil { - return e - } - return nil -} - -func (p *TSimpleJSONProtocol) WriteStructEnd(ctx context.Context) error { - return p.OutputObjectEnd() -} - -func (p *TSimpleJSONProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error { - if e := p.WriteString(ctx, name); e != nil { - return e - } - return nil -} - -func (p *TSimpleJSONProtocol) WriteFieldEnd(ctx context.Context) error { - return nil -} - -func (p *TSimpleJSONProtocol) WriteFieldStop(ctx context.Context) error { return nil } - -func (p *TSimpleJSONProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error { - if e := p.OutputListBegin(); e != nil { - return e - } - if e := p.WriteByte(ctx, int8(keyType)); e != nil { - return e - } - if e := p.WriteByte(ctx, int8(valueType)); e != nil { - return e - } - return p.WriteI32(ctx, int32(size)) -} - -func (p *TSimpleJSONProtocol) WriteMapEnd(ctx context.Context) error { - return p.OutputListEnd() -} - -func (p *TSimpleJSONProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error { - return p.OutputElemListBegin(elemType, size) -} - -func (p *TSimpleJSONProtocol) WriteListEnd(ctx context.Context) error { - return p.OutputListEnd() -} - -func (p *TSimpleJSONProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error { - return p.OutputElemListBegin(elemType, size) -} - -func (p *TSimpleJSONProtocol) WriteSetEnd(ctx context.Context) error { - return p.OutputListEnd() -} - -func (p *TSimpleJSONProtocol) WriteBool(ctx context.Context, b bool) error { - return p.OutputBool(b) -} - -func (p *TSimpleJSONProtocol) WriteByte(ctx context.Context, b int8) error { - return p.WriteI32(ctx, int32(b)) -} - -func (p *TSimpleJSONProtocol) WriteI16(ctx context.Context, v int16) error { - return p.WriteI32(ctx, int32(v)) -} - -func (p *TSimpleJSONProtocol) WriteI32(ctx context.Context, v int32) error { - return p.OutputI64(int64(v)) -} - -func (p *TSimpleJSONProtocol) WriteI64(ctx context.Context, v int64) error { - return p.OutputI64(int64(v)) -} - -func (p *TSimpleJSONProtocol) WriteDouble(ctx context.Context, v float64) error { - return p.OutputF64(v) -} - -func (p *TSimpleJSONProtocol) WriteString(ctx context.Context, v string) error { - return p.OutputString(v) -} - -func (p *TSimpleJSONProtocol) WriteBinary(ctx context.Context, v []byte) error { - // JSON library only takes in a string, - // not an arbitrary byte array, to ensure bytes are transmitted - // efficiently we must convert this into a valid JSON string - // therefore we use base64 encoding to avoid excessive escaping/quoting - if e := p.OutputPreValue(); e != nil { - return e - } - if _, e := p.write(JSON_QUOTE_BYTES); e != nil { - return NewTProtocolException(e) - } - writer := base64.NewEncoder(base64.StdEncoding, p.writer) - if _, e := writer.Write(v); e != nil { - p.writer.Reset(p.trans) // THRIFT-3735 - return NewTProtocolException(e) - } - if e := writer.Close(); e != nil { - return NewTProtocolException(e) - } - if _, e := p.write(JSON_QUOTE_BYTES); e != nil { - return NewTProtocolException(e) - } - return p.OutputPostValue() -} - -// Reading methods. 
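// A minimal round-trip sketch, not part of the original file (hypothetical
// roundTripSimpleJSON name, assuming only APIs defined in this package):
// write a [name, type, seq] message envelope, flush it into a TMemoryBuffer,
// then consume it with the reading methods below. Note the file's own caveat
// that simplejson output is not generally meant to be parsed back into
// thrift; the message envelope round-trips here purely as an illustration.
//
//	func roundTripSimpleJSON(ctx context.Context) error {
//		buf := NewTMemoryBufferLen(1024)
//		w := NewTSimpleJSONProtocolConf(buf, &TConfiguration{})
//		if err := w.WriteMessageBegin(ctx, "ping", CALL, 1); err != nil {
//			return err
//		}
//		if err := w.WriteMessageEnd(ctx); err != nil {
//			return err
//		}
//		if err := w.Flush(ctx); err != nil {
//			return err
//		}
//		// Fresh protocol over the same buffer for the read side.
//		r := NewTSimpleJSONProtocolConf(buf, &TConfiguration{})
//		if _, _, _, err := r.ReadMessageBegin(ctx); err != nil {
//			return err
//		}
//		return r.ReadMessageEnd(ctx)
//	}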
-func (p *TSimpleJSONProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) { - p.resetContextStack() // THRIFT-3735 - if isNull, err := p.ParseListBegin(); isNull || err != nil { - return name, typeId, seqId, err - } - if name, err = p.ReadString(ctx); err != nil { - return name, typeId, seqId, err - } - bTypeId, err := p.ReadByte(ctx) - typeId = TMessageType(bTypeId) - if err != nil { - return name, typeId, seqId, err - } - if seqId, err = p.ReadI32(ctx); err != nil { - return name, typeId, seqId, err - } - return name, typeId, seqId, nil -} - -func (p *TSimpleJSONProtocol) ReadMessageEnd(ctx context.Context) error { - return p.ParseListEnd() -} - -func (p *TSimpleJSONProtocol) ReadStructBegin(ctx context.Context) (name string, err error) { - _, err = p.ParseObjectStart() - return "", err -} - -func (p *TSimpleJSONProtocol) ReadStructEnd(ctx context.Context) error { - return p.ParseObjectEnd() -} - -func (p *TSimpleJSONProtocol) ReadFieldBegin(ctx context.Context) (string, TType, int16, error) { - if err := p.ParsePreValue(); err != nil { - return "", STOP, 0, err - } - b, _ := p.reader.Peek(1) - if len(b) > 0 { - switch b[0] { - case JSON_RBRACE[0]: - return "", STOP, 0, nil - case JSON_QUOTE: - p.reader.ReadByte() - name, err := p.ParseStringBody() - // simplejson is not meant to be read back into thrift - // - see http://wiki.apache.org/thrift/ThriftUsageJava - // - use JSON instead - if err != nil { - return name, STOP, 0, err - } - return name, STOP, -1, p.ParsePostValue() - } - e := fmt.Errorf("Expected \"}\" or '\"', but found: '%s'", string(b)) - return "", STOP, 0, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - return "", STOP, 0, NewTProtocolException(io.EOF) -} - -func (p *TSimpleJSONProtocol) ReadFieldEnd(ctx context.Context) error { - return nil -} - -func (p *TSimpleJSONProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, e error) { - if isNull, e := p.ParseListBegin(); isNull || e != nil { - return VOID, VOID, 0, e - } - - // read keyType - bKeyType, e := p.ReadByte(ctx) - keyType = TType(bKeyType) - if e != nil { - return keyType, valueType, size, e - } - - // read valueType - bValueType, e := p.ReadByte(ctx) - valueType = TType(bValueType) - if e != nil { - return keyType, valueType, size, e - } - - // read size - iSize, err := p.ReadI64(ctx) - if err != nil { - return keyType, valueType, 0, err - } - err = checkSizeForProtocol(int32(size), p.cfg) - if err != nil { - return keyType, valueType, 0, err - } - size = int(iSize) - return keyType, valueType, size, err -} - -func (p *TSimpleJSONProtocol) ReadMapEnd(ctx context.Context) error { - return p.ParseListEnd() -} - -func (p *TSimpleJSONProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, e error) { - return p.ParseElemListBegin() -} - -func (p *TSimpleJSONProtocol) ReadListEnd(ctx context.Context) error { - return p.ParseListEnd() -} - -func (p *TSimpleJSONProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, e error) { - return p.ParseElemListBegin() -} - -func (p *TSimpleJSONProtocol) ReadSetEnd(ctx context.Context) error { - return p.ParseListEnd() -} - -func (p *TSimpleJSONProtocol) ReadBool(ctx context.Context) (bool, error) { - var value bool - - if err := p.ParsePreValue(); err != nil { - return value, err - } - f, _ := p.reader.Peek(1) - if len(f) > 0 { - switch f[0] { - case JSON_TRUE[0]: - b := make([]byte, len(JSON_TRUE)) - _, err := p.reader.Read(b) - if err != nil { - return 
false, NewTProtocolException(err) - } - if string(b) == string(JSON_TRUE) { - value = true - } else { - e := fmt.Errorf("Expected \"true\" but found: %s", string(b)) - return value, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - case JSON_FALSE[0]: - b := make([]byte, len(JSON_FALSE)) - _, err := p.reader.Read(b) - if err != nil { - return false, NewTProtocolException(err) - } - if string(b) == string(JSON_FALSE) { - value = false - } else { - e := fmt.Errorf("Expected \"false\" but found: %s", string(b)) - return value, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - case JSON_NULL[0]: - b := make([]byte, len(JSON_NULL)) - _, err := p.reader.Read(b) - if err != nil { - return false, NewTProtocolException(err) - } - if string(b) == string(JSON_NULL) { - value = false - } else { - e := fmt.Errorf("Expected \"null\" but found: %s", string(b)) - return value, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - default: - e := fmt.Errorf("Expected \"true\", \"false\", or \"null\" but found: %s", string(f)) - return value, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } - return value, p.ParsePostValue() -} - -func (p *TSimpleJSONProtocol) ReadByte(ctx context.Context) (int8, error) { - v, err := p.ReadI64(ctx) - return int8(v), err -} - -func (p *TSimpleJSONProtocol) ReadI16(ctx context.Context) (int16, error) { - v, err := p.ReadI64(ctx) - return int16(v), err -} - -func (p *TSimpleJSONProtocol) ReadI32(ctx context.Context) (int32, error) { - v, err := p.ReadI64(ctx) - return int32(v), err -} - -func (p *TSimpleJSONProtocol) ReadI64(ctx context.Context) (int64, error) { - v, _, err := p.ParseI64() - return v, err -} - -func (p *TSimpleJSONProtocol) ReadDouble(ctx context.Context) (float64, error) { - v, _, err := p.ParseF64() - return v, err -} - -func (p *TSimpleJSONProtocol) ReadString(ctx context.Context) (string, error) { - var v string - if err := p.ParsePreValue(); err != nil { - return v, err - } - f, _ := p.reader.Peek(1) - if len(f) > 0 && f[0] == JSON_QUOTE { - p.reader.ReadByte() - value, err := p.ParseStringBody() - v = value - if err != nil { - return v, err - } - } else if len(f) > 0 && f[0] == JSON_NULL[0] { - b := make([]byte, len(JSON_NULL)) - _, err := p.reader.Read(b) - if err != nil { - return v, NewTProtocolException(err) - } - if string(b) != string(JSON_NULL) { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b)) - return v, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } else { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f)) - return v, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - return v, p.ParsePostValue() -} - -func (p *TSimpleJSONProtocol) ReadBinary(ctx context.Context) ([]byte, error) { - var v []byte - if err := p.ParsePreValue(); err != nil { - return nil, err - } - f, _ := p.reader.Peek(1) - if len(f) > 0 && f[0] == JSON_QUOTE { - p.reader.ReadByte() - value, err := p.ParseBase64EncodedBody() - v = value - if err != nil { - return v, err - } - } else if len(f) > 0 && f[0] == JSON_NULL[0] { - b := make([]byte, len(JSON_NULL)) - _, err := p.reader.Read(b) - if err != nil { - return v, NewTProtocolException(err) - } - if string(b) != string(JSON_NULL) { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b)) - return v, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } else { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f)) - return v, 
NewTProtocolExceptionWithType(INVALID_DATA, e) - } - - return v, p.ParsePostValue() -} - -func (p *TSimpleJSONProtocol) Flush(ctx context.Context) (err error) { - return NewTProtocolException(p.writer.Flush()) -} - -func (p *TSimpleJSONProtocol) Skip(ctx context.Context, fieldType TType) (err error) { - return SkipDefaultDepth(ctx, p, fieldType) -} - -func (p *TSimpleJSONProtocol) Transport() TTransport { - return p.trans -} - -func (p *TSimpleJSONProtocol) OutputPreValue() error { - cxt, ok := p.dumpContext.peek() - if !ok { - return errEmptyJSONContextStack - } - switch cxt { - case _CONTEXT_IN_LIST, _CONTEXT_IN_OBJECT_NEXT_KEY: - if _, e := p.write(JSON_COMMA); e != nil { - return NewTProtocolException(e) - } - case _CONTEXT_IN_OBJECT_NEXT_VALUE: - if _, e := p.write(JSON_COLON); e != nil { - return NewTProtocolException(e) - } - } - return nil -} - -func (p *TSimpleJSONProtocol) OutputPostValue() error { - cxt, ok := p.dumpContext.peek() - if !ok { - return errEmptyJSONContextStack - } - switch cxt { - case _CONTEXT_IN_LIST_FIRST: - p.dumpContext.pop() - p.dumpContext.push(_CONTEXT_IN_LIST) - case _CONTEXT_IN_OBJECT_FIRST: - p.dumpContext.pop() - p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_VALUE) - case _CONTEXT_IN_OBJECT_NEXT_KEY: - p.dumpContext.pop() - p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_VALUE) - case _CONTEXT_IN_OBJECT_NEXT_VALUE: - p.dumpContext.pop() - p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_KEY) - } - return nil -} - -func (p *TSimpleJSONProtocol) OutputBool(value bool) error { - if e := p.OutputPreValue(); e != nil { - return e - } - var v string - if value { - v = string(JSON_TRUE) - } else { - v = string(JSON_FALSE) - } - cxt, ok := p.dumpContext.peek() - if !ok { - return errEmptyJSONContextStack - } - switch cxt { - case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: - v = jsonQuote(v) - } - if e := p.OutputStringData(v); e != nil { - return e - } - return p.OutputPostValue() -} - -func (p *TSimpleJSONProtocol) OutputNull() error { - if e := p.OutputPreValue(); e != nil { - return e - } - if _, e := p.write(JSON_NULL); e != nil { - return NewTProtocolException(e) - } - return p.OutputPostValue() -} - -func (p *TSimpleJSONProtocol) OutputF64(value float64) error { - if e := p.OutputPreValue(); e != nil { - return e - } - var v string - if math.IsNaN(value) { - v = string(JSON_QUOTE) + JSON_NAN + string(JSON_QUOTE) - } else if math.IsInf(value, 1) { - v = string(JSON_QUOTE) + JSON_INFINITY + string(JSON_QUOTE) - } else if math.IsInf(value, -1) { - v = string(JSON_QUOTE) + JSON_NEGATIVE_INFINITY + string(JSON_QUOTE) - } else { - cxt, ok := p.dumpContext.peek() - if !ok { - return errEmptyJSONContextStack - } - v = strconv.FormatFloat(value, 'g', -1, 64) - switch cxt { - case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: - v = string(JSON_QUOTE) + v + string(JSON_QUOTE) - } - } - if e := p.OutputStringData(v); e != nil { - return e - } - return p.OutputPostValue() -} - -func (p *TSimpleJSONProtocol) OutputI64(value int64) error { - if e := p.OutputPreValue(); e != nil { - return e - } - cxt, ok := p.dumpContext.peek() - if !ok { - return errEmptyJSONContextStack - } - v := strconv.FormatInt(value, 10) - switch cxt { - case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: - v = jsonQuote(v) - } - if e := p.OutputStringData(v); e != nil { - return e - } - return p.OutputPostValue() -} - -func (p *TSimpleJSONProtocol) OutputString(s string) error { - if e := p.OutputPreValue(); e != nil { - return e - } - if e := p.OutputStringData(jsonQuote(s)); e 
!= nil { - return e - } - return p.OutputPostValue() -} - -func (p *TSimpleJSONProtocol) OutputStringData(s string) error { - _, e := p.write([]byte(s)) - return NewTProtocolException(e) -} - -func (p *TSimpleJSONProtocol) OutputObjectBegin() error { - if e := p.OutputPreValue(); e != nil { - return e - } - if _, e := p.write(JSON_LBRACE); e != nil { - return NewTProtocolException(e) - } - p.dumpContext.push(_CONTEXT_IN_OBJECT_FIRST) - return nil -} - -func (p *TSimpleJSONProtocol) OutputObjectEnd() error { - if _, e := p.write(JSON_RBRACE); e != nil { - return NewTProtocolException(e) - } - _, ok := p.dumpContext.pop() - if !ok { - return errEmptyJSONContextStack - } - if e := p.OutputPostValue(); e != nil { - return e - } - return nil -} - -func (p *TSimpleJSONProtocol) OutputListBegin() error { - if e := p.OutputPreValue(); e != nil { - return e - } - if _, e := p.write(JSON_LBRACKET); e != nil { - return NewTProtocolException(e) - } - p.dumpContext.push(_CONTEXT_IN_LIST_FIRST) - return nil -} - -func (p *TSimpleJSONProtocol) OutputListEnd() error { - if _, e := p.write(JSON_RBRACKET); e != nil { - return NewTProtocolException(e) - } - _, ok := p.dumpContext.pop() - if !ok { - return errEmptyJSONContextStack - } - if e := p.OutputPostValue(); e != nil { - return e - } - return nil -} - -func (p *TSimpleJSONProtocol) OutputElemListBegin(elemType TType, size int) error { - if e := p.OutputListBegin(); e != nil { - return e - } - if e := p.OutputI64(int64(elemType)); e != nil { - return e - } - if e := p.OutputI64(int64(size)); e != nil { - return e - } - return nil -} - -func (p *TSimpleJSONProtocol) ParsePreValue() error { - if e := p.readNonSignificantWhitespace(); e != nil { - return NewTProtocolException(e) - } - cxt, ok := p.parseContextStack.peek() - if !ok { - return errEmptyJSONContextStack - } - b, _ := p.reader.Peek(1) - switch cxt { - case _CONTEXT_IN_LIST: - if len(b) > 0 { - switch b[0] { - case JSON_RBRACKET[0]: - return nil - case JSON_COMMA[0]: - p.reader.ReadByte() - if e := p.readNonSignificantWhitespace(); e != nil { - return NewTProtocolException(e) - } - return nil - default: - e := fmt.Errorf("Expected \"]\" or \",\" in list context, but found \"%s\"", string(b)) - return NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } - case _CONTEXT_IN_OBJECT_NEXT_KEY: - if len(b) > 0 { - switch b[0] { - case JSON_RBRACE[0]: - return nil - case JSON_COMMA[0]: - p.reader.ReadByte() - if e := p.readNonSignificantWhitespace(); e != nil { - return NewTProtocolException(e) - } - return nil - default: - e := fmt.Errorf("Expected \"}\" or \",\" in object context, but found \"%s\"", string(b)) - return NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } - case _CONTEXT_IN_OBJECT_NEXT_VALUE: - if len(b) > 0 { - switch b[0] { - case JSON_COLON[0]: - p.reader.ReadByte() - if e := p.readNonSignificantWhitespace(); e != nil { - return NewTProtocolException(e) - } - return nil - default: - e := fmt.Errorf("Expected \":\" in object context, but found \"%s\"", string(b)) - return NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } - } - return nil -} - -func (p *TSimpleJSONProtocol) ParsePostValue() error { - if e := p.readNonSignificantWhitespace(); e != nil { - return NewTProtocolException(e) - } - cxt, ok := p.parseContextStack.peek() - if !ok { - return errEmptyJSONContextStack - } - switch cxt { - case _CONTEXT_IN_LIST_FIRST: - p.parseContextStack.pop() - p.parseContextStack.push(_CONTEXT_IN_LIST) - case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: - 
p.parseContextStack.pop() - p.parseContextStack.push(_CONTEXT_IN_OBJECT_NEXT_VALUE) - case _CONTEXT_IN_OBJECT_NEXT_VALUE: - p.parseContextStack.pop() - p.parseContextStack.push(_CONTEXT_IN_OBJECT_NEXT_KEY) - } - return nil -} - -func (p *TSimpleJSONProtocol) readNonSignificantWhitespace() error { - for { - b, _ := p.reader.Peek(1) - if len(b) < 1 { - return nil - } - switch b[0] { - case ' ', '\r', '\n', '\t': - p.reader.ReadByte() - continue - } - break - } - return nil -} - -func (p *TSimpleJSONProtocol) ParseStringBody() (string, error) { - line, err := p.reader.ReadString(JSON_QUOTE) - if err != nil { - return "", NewTProtocolException(err) - } - l := len(line) - // count number of escapes to see if we need to keep going - i := 1 - for ; i < l; i++ { - if line[l-i-1] != '\\' { - break - } - } - if i&0x01 == 1 { - v, ok := jsonUnquote(string(JSON_QUOTE) + line) - if !ok { - return "", NewTProtocolException(err) - } - return v, nil - } - s, err := p.ParseQuotedStringBody() - if err != nil { - return "", NewTProtocolException(err) - } - str := string(JSON_QUOTE) + line + s - v, ok := jsonUnquote(str) - if !ok { - e := fmt.Errorf("Unable to parse as JSON string %s", str) - return "", NewTProtocolExceptionWithType(INVALID_DATA, e) - } - return v, nil -} - -func (p *TSimpleJSONProtocol) ParseQuotedStringBody() (string, error) { - line, err := p.reader.ReadString(JSON_QUOTE) - if err != nil { - return "", NewTProtocolException(err) - } - l := len(line) - // count number of escapes to see if we need to keep going - i := 1 - for ; i < l; i++ { - if line[l-i-1] != '\\' { - break - } - } - if i&0x01 == 1 { - return line, nil - } - s, err := p.ParseQuotedStringBody() - if err != nil { - return "", NewTProtocolException(err) - } - v := line + s - return v, nil -} - -func (p *TSimpleJSONProtocol) ParseBase64EncodedBody() ([]byte, error) { - line, err := p.reader.ReadBytes(JSON_QUOTE) - if err != nil { - return line, NewTProtocolException(err) - } - line2 := line[0 : len(line)-1] - l := len(line2) - if (l % 4) != 0 { - pad := 4 - (l % 4) - fill := [...]byte{'=', '=', '='} - line2 = append(line2, fill[:pad]...) 
- l = len(line2) - } - output := make([]byte, base64.StdEncoding.DecodedLen(l)) - n, err := base64.StdEncoding.Decode(output, line2) - return output[0:n], NewTProtocolException(err) -} - -func (p *TSimpleJSONProtocol) ParseI64() (int64, bool, error) { - if err := p.ParsePreValue(); err != nil { - return 0, false, err - } - var value int64 - var isnull bool - if p.safePeekContains(JSON_NULL) { - p.reader.Read(make([]byte, len(JSON_NULL))) - isnull = true - } else { - num, err := p.readNumeric() - isnull = (num == nil) - if !isnull { - value = num.Int64() - } - if err != nil { - return value, isnull, err - } - } - return value, isnull, p.ParsePostValue() -} - -func (p *TSimpleJSONProtocol) ParseF64() (float64, bool, error) { - if err := p.ParsePreValue(); err != nil { - return 0, false, err - } - var value float64 - var isnull bool - if p.safePeekContains(JSON_NULL) { - p.reader.Read(make([]byte, len(JSON_NULL))) - isnull = true - } else { - num, err := p.readNumeric() - isnull = (num == nil) - if !isnull { - value = num.Float64() - } - if err != nil { - return value, isnull, err - } - } - return value, isnull, p.ParsePostValue() -} - -func (p *TSimpleJSONProtocol) ParseObjectStart() (bool, error) { - if err := p.ParsePreValue(); err != nil { - return false, err - } - var b []byte - b, err := p.reader.Peek(1) - if err != nil { - return false, err - } - if len(b) > 0 && b[0] == JSON_LBRACE[0] { - p.reader.ReadByte() - p.parseContextStack.push(_CONTEXT_IN_OBJECT_FIRST) - return false, nil - } else if p.safePeekContains(JSON_NULL) { - return true, nil - } - e := fmt.Errorf("Expected '{' or null, but found '%s'", string(b)) - return false, NewTProtocolExceptionWithType(INVALID_DATA, e) -} - -func (p *TSimpleJSONProtocol) ParseObjectEnd() error { - if isNull, err := p.readIfNull(); isNull || err != nil { - return err - } - cxt, _ := p.parseContextStack.peek() - if (cxt != _CONTEXT_IN_OBJECT_FIRST) && (cxt != _CONTEXT_IN_OBJECT_NEXT_KEY) { - e := fmt.Errorf("Expected to be in the Object Context, but not in Object Context (%d)", cxt) - return NewTProtocolExceptionWithType(INVALID_DATA, e) - } - line, err := p.reader.ReadString(JSON_RBRACE[0]) - if err != nil { - return NewTProtocolException(err) - } - for _, char := range line { - switch char { - default: - e := fmt.Errorf("Expecting end of object \"}\", but found: \"%s\"", line) - return NewTProtocolExceptionWithType(INVALID_DATA, e) - case ' ', '\n', '\r', '\t', '}': - // do nothing - } - } - p.parseContextStack.pop() - return p.ParsePostValue() -} - -func (p *TSimpleJSONProtocol) ParseListBegin() (isNull bool, err error) { - if e := p.ParsePreValue(); e != nil { - return false, e - } - var b []byte - b, err = p.reader.Peek(1) - if err != nil { - return false, err - } - if len(b) >= 1 && b[0] == JSON_LBRACKET[0] { - p.parseContextStack.push(_CONTEXT_IN_LIST_FIRST) - p.reader.ReadByte() - isNull = false - } else if p.safePeekContains(JSON_NULL) { - isNull = true - } else { - err = fmt.Errorf("Expected \"null\" or \"[\", received %q", b) - } - return isNull, NewTProtocolExceptionWithType(INVALID_DATA, err) -} - -func (p *TSimpleJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error) { - if isNull, e := p.ParseListBegin(); isNull || e != nil { - return VOID, 0, e - } - bElemType, _, err := p.ParseI64() - elemType = TType(bElemType) - if err != nil { - return elemType, size, err - } - nSize, _, err := p.ParseI64() - if err != nil { - return elemType, 0, err - } - err = checkSizeForProtocol(int32(nSize), p.cfg) - if err != nil { - 
return elemType, 0, err - } - size = int(nSize) - return elemType, size, nil -} - -func (p *TSimpleJSONProtocol) ParseListEnd() error { - if isNull, err := p.readIfNull(); isNull || err != nil { - return err - } - cxt, _ := p.parseContextStack.peek() - if cxt != _CONTEXT_IN_LIST { - e := fmt.Errorf("Expected to be in the List Context, but not in List Context (%d)", cxt) - return NewTProtocolExceptionWithType(INVALID_DATA, e) - } - line, err := p.reader.ReadString(JSON_RBRACKET[0]) - if err != nil { - return NewTProtocolException(err) - } - for _, char := range line { - switch char { - default: - e := fmt.Errorf("Expecting end of list \"]\", but found: \"%v\"", line) - return NewTProtocolExceptionWithType(INVALID_DATA, e) - case ' ', '\n', '\r', '\t', rune(JSON_RBRACKET[0]): - // do nothing - } - } - p.parseContextStack.pop() - if cxt, ok := p.parseContextStack.peek(); !ok { - return errEmptyJSONContextStack - } else if cxt == _CONTEXT_IN_TOPLEVEL { - return nil - } - return p.ParsePostValue() -} - -func (p *TSimpleJSONProtocol) readIfNull() (bool, error) { - cont := true - for cont { - b, _ := p.reader.Peek(1) - if len(b) < 1 { - return false, nil - } - switch b[0] { - default: - return false, nil - case JSON_NULL[0]: - cont = false - case ' ', '\n', '\r', '\t': - p.reader.ReadByte() - } - } - if p.safePeekContains(JSON_NULL) { - p.reader.Read(make([]byte, len(JSON_NULL))) - return true, nil - } - return false, nil -} - -func (p *TSimpleJSONProtocol) readQuoteIfNext() { - b, _ := p.reader.Peek(1) - if len(b) > 0 && b[0] == JSON_QUOTE { - p.reader.ReadByte() - } -} - -func (p *TSimpleJSONProtocol) readNumeric() (Numeric, error) { - isNull, err := p.readIfNull() - if isNull || err != nil { - return NUMERIC_NULL, err - } - hasDecimalPoint := false - nextCanBeSign := true - hasE := false - MAX_LEN := 40 - buf := bytes.NewBuffer(make([]byte, 0, MAX_LEN)) - continueFor := true - inQuotes := false - for continueFor { - c, err := p.reader.ReadByte() - if err != nil { - if err == io.EOF { - break - } - return NUMERIC_NULL, NewTProtocolException(err) - } - switch c { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - buf.WriteByte(c) - nextCanBeSign = false - case '.': - if hasDecimalPoint { - e := fmt.Errorf("Unable to parse number with multiple decimal points '%s.'", buf.String()) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - if hasE { - e := fmt.Errorf("Unable to parse number with decimal points in the exponent '%s.'", buf.String()) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - buf.WriteByte(c) - hasDecimalPoint, nextCanBeSign = true, false - case 'e', 'E': - if hasE { - e := fmt.Errorf("Unable to parse number with multiple exponents '%s%c'", buf.String(), c) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - buf.WriteByte(c) - hasE, nextCanBeSign = true, true - case '-', '+': - if !nextCanBeSign { - e := fmt.Errorf("Negative sign within number") - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - buf.WriteByte(c) - nextCanBeSign = false - case ' ', 0, '\t', '\n', '\r', JSON_RBRACE[0], JSON_RBRACKET[0], JSON_COMMA[0], JSON_COLON[0]: - p.reader.UnreadByte() - continueFor = false - case JSON_NAN[0]: - if buf.Len() == 0 { - buffer := make([]byte, len(JSON_NAN)) - buffer[0] = c - _, e := p.reader.Read(buffer[1:]) - if e != nil { - return NUMERIC_NULL, NewTProtocolException(e) - } - if JSON_NAN != string(buffer) { - e := mismatch(JSON_NAN, string(buffer)) - return NUMERIC_NULL, 
NewTProtocolExceptionWithType(INVALID_DATA, e) - } - if inQuotes { - p.readQuoteIfNext() - } - return NAN, nil - } else { - e := fmt.Errorf("Unable to parse number starting with character '%c'", c) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - case JSON_INFINITY[0]: - if buf.Len() == 0 || (buf.Len() == 1 && buf.Bytes()[0] == '+') { - buffer := make([]byte, len(JSON_INFINITY)) - buffer[0] = c - _, e := p.reader.Read(buffer[1:]) - if e != nil { - return NUMERIC_NULL, NewTProtocolException(e) - } - if JSON_INFINITY != string(buffer) { - e := mismatch(JSON_INFINITY, string(buffer)) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - if inQuotes { - p.readQuoteIfNext() - } - return INFINITY, nil - } else if buf.Len() == 1 && buf.Bytes()[0] == JSON_NEGATIVE_INFINITY[0] { - buffer := make([]byte, len(JSON_NEGATIVE_INFINITY)) - buffer[0] = JSON_NEGATIVE_INFINITY[0] - buffer[1] = c - _, e := p.reader.Read(buffer[2:]) - if e != nil { - return NUMERIC_NULL, NewTProtocolException(e) - } - if JSON_NEGATIVE_INFINITY != string(buffer) { - e := mismatch(JSON_NEGATIVE_INFINITY, string(buffer)) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - if inQuotes { - p.readQuoteIfNext() - } - return NEGATIVE_INFINITY, nil - } else { - e := fmt.Errorf("Unable to parse number starting with character '%c' due to existing buffer %s", c, buf.String()) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - case JSON_QUOTE: - if !inQuotes { - inQuotes = true - } - default: - e := fmt.Errorf("Unable to parse number starting with character '%c'", c) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } - if buf.Len() == 0 { - e := fmt.Errorf("Unable to parse number from empty string ''") - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - return NewNumericFromJSONString(buf.String(), false), nil -} - -// Safely peeks into the buffer, reading only what is necessary -func (p *TSimpleJSONProtocol) safePeekContains(b []byte) bool { - for i := 0; i < len(b); i++ { - a, _ := p.reader.Peek(i + 1) - if len(a) < (i+1) || a[i] != b[i] { - return false - } - } - return true -} - -// Reset the context stack to its initial state. -func (p *TSimpleJSONProtocol) resetContextStack() { - p.parseContextStack = jsonContextStack{_CONTEXT_IN_TOPLEVEL} - p.dumpContext = jsonContextStack{_CONTEXT_IN_TOPLEVEL} -} - -func (p *TSimpleJSONProtocol) write(b []byte) (int, error) { - n, err := p.writer.Write(b) - if err != nil { - p.writer.Reset(p.trans) // THRIFT-3735 - } - return n, err -} - -// SetTConfiguration implements TConfigurationSetter for propagation. -func (p *TSimpleJSONProtocol) SetTConfiguration(conf *TConfiguration) { - PropagateTConfiguration(p.trans, conf) - p.cfg = conf -} - -var ( - _ TConfigurationSetter = (*TSimpleJSONProtocol)(nil) - _ TConfigurationSetter = (*TSimpleJSONProtocolFactory)(nil) -) diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/simple_server.go b/vendor/github.com/apache/thrift/lib/go/thrift/simple_server.go deleted file mode 100644 index 02863ec4..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/simple_server.go +++ /dev/null @@ -1,336 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "errors" - "fmt" - "io" - "sync" - "sync/atomic" - "time" -) - -// ErrAbandonRequest is a special error server handler implementations can -// return to indicate that the request has been abandoned. -// -// TSimpleServer will check for this error, and close the client connection -// instead of writing the response/error back to the client. -// -// It shall only be used when the server handler implementation knows that the -// client already abandoned the request (by checking that the passed-in context -// is already canceled, for example). -var ErrAbandonRequest = errors.New("request abandoned") - -// ServerConnectivityCheckInterval defines the ticker interval used by -// connectivity check in thrift-compiled TProcessorFunc implementations. -// -// It's defined as a variable instead of a constant, so that thrift server -// implementations can change its value to control the behavior. -// -// If it's changed to <=0, the feature will be disabled. -var ServerConnectivityCheckInterval = time.Millisecond * 5 - -/* - * This is not a typical TSimpleServer as it is not blocked after accepting a socket. - * It is more like a TThreadedServer that can handle different connections in different goroutines. - * This works when the Go client implements a connection pool on its side. 
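To make the ErrAbandonRequest contract concrete, here is a minimal handler sketch. The echoHandler type and Echo method are hypothetical; only thrift.ErrAbandonRequest comes from this package.

package sketch

import (
	"context"

	"github.com/apache/thrift/lib/go/thrift"
)

type echoHandler struct{}

// Echo gives up early when the caller has already gone away, so that
// TSimpleServer closes the connection instead of writing a response
// nobody is waiting for.
func (h *echoHandler) Echo(ctx context.Context, msg string) (string, error) {
	if ctx.Err() != nil {
		// Hypothetical usage site: the context was canceled upstream.
		return "", thrift.ErrAbandonRequest
	}
	return msg, nil
}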
- */ -type TSimpleServer struct { - closed int32 - wg sync.WaitGroup - mu sync.Mutex - - processorFactory TProcessorFactory - serverTransport TServerTransport - inputTransportFactory TTransportFactory - outputTransportFactory TTransportFactory - inputProtocolFactory TProtocolFactory - outputProtocolFactory TProtocolFactory - - // Headers to auto forward in THeaderProtocol - forwardHeaders []string - - logger Logger -} - -func NewTSimpleServer2(processor TProcessor, serverTransport TServerTransport) *TSimpleServer { - return NewTSimpleServerFactory2(NewTProcessorFactory(processor), serverTransport) -} - -func NewTSimpleServer4(processor TProcessor, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer { - return NewTSimpleServerFactory4(NewTProcessorFactory(processor), - serverTransport, - transportFactory, - protocolFactory, - ) -} - -func NewTSimpleServer6(processor TProcessor, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer { - return NewTSimpleServerFactory6(NewTProcessorFactory(processor), - serverTransport, - inputTransportFactory, - outputTransportFactory, - inputProtocolFactory, - outputProtocolFactory, - ) -} - -func NewTSimpleServerFactory2(processorFactory TProcessorFactory, serverTransport TServerTransport) *TSimpleServer { - return NewTSimpleServerFactory6(processorFactory, - serverTransport, - NewTTransportFactory(), - NewTTransportFactory(), - NewTBinaryProtocolFactoryDefault(), - NewTBinaryProtocolFactoryDefault(), - ) -} - -func NewTSimpleServerFactory4(processorFactory TProcessorFactory, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer { - return NewTSimpleServerFactory6(processorFactory, - serverTransport, - transportFactory, - transportFactory, - protocolFactory, - protocolFactory, - ) -} - -func NewTSimpleServerFactory6(processorFactory TProcessorFactory, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer { - return &TSimpleServer{ - processorFactory: processorFactory, - serverTransport: serverTransport, - inputTransportFactory: inputTransportFactory, - outputTransportFactory: outputTransportFactory, - inputProtocolFactory: inputProtocolFactory, - outputProtocolFactory: outputProtocolFactory, - } -} - -func (p *TSimpleServer) ProcessorFactory() TProcessorFactory { - return p.processorFactory -} - -func (p *TSimpleServer) ServerTransport() TServerTransport { - return p.serverTransport -} - -func (p *TSimpleServer) InputTransportFactory() TTransportFactory { - return p.inputTransportFactory -} - -func (p *TSimpleServer) OutputTransportFactory() TTransportFactory { - return p.outputTransportFactory -} - -func (p *TSimpleServer) InputProtocolFactory() TProtocolFactory { - return p.inputProtocolFactory -} - -func (p *TSimpleServer) OutputProtocolFactory() TProtocolFactory { - return p.outputProtocolFactory -} - -func (p *TSimpleServer) Listen() error { - return p.serverTransport.Listen() -} - -// SetForwardHeaders sets the list of header keys that will be auto forwarded -// while using THeaderProtocol. 
-// -// "forward" means that when the server is also a client to other upstream -// thrift servers, the context object user gets in the processor functions will -// have both read and write headers set, with write headers being forwarded. -// Users can always override the write headers by calling SetWriteHeaderList -// before calling thrift client functions. -func (p *TSimpleServer) SetForwardHeaders(headers []string) { - size := len(headers) - if size == 0 { - p.forwardHeaders = nil - return - } - - keys := make([]string, size) - copy(keys, headers) - p.forwardHeaders = keys -} - -// SetLogger sets the logger used by this TSimpleServer. -// -// If no logger was set before Serve is called, a default logger using standard -// log library will be used. -func (p *TSimpleServer) SetLogger(logger Logger) { - p.logger = logger -} - -func (p *TSimpleServer) innerAccept() (int32, error) { - client, err := p.serverTransport.Accept() - p.mu.Lock() - defer p.mu.Unlock() - closed := atomic.LoadInt32(&p.closed) - if closed != 0 { - return closed, nil - } - if err != nil { - return 0, err - } - if client != nil { - p.wg.Add(1) - go func() { - defer p.wg.Done() - if err := p.processRequests(client); err != nil { - p.logger(fmt.Sprintf("error processing request: %v", err)) - } - }() - } - return 0, nil -} - -func (p *TSimpleServer) AcceptLoop() error { - for { - closed, err := p.innerAccept() - if err != nil { - return err - } - if closed != 0 { - return nil - } - } -} - -func (p *TSimpleServer) Serve() error { - p.logger = fallbackLogger(p.logger) - - err := p.Listen() - if err != nil { - return err - } - p.AcceptLoop() - return nil -} - -func (p *TSimpleServer) Stop() error { - p.mu.Lock() - defer p.mu.Unlock() - if atomic.LoadInt32(&p.closed) != 0 { - return nil - } - atomic.StoreInt32(&p.closed, 1) - p.serverTransport.Interrupt() - p.wg.Wait() - return nil -} - -// If err is actually EOF or NOT_OPEN, return nil, otherwise return err as-is. -func treatEOFErrorsAsNil(err error) error { - if err == nil { - return nil - } - if errors.Is(err, io.EOF) { - return nil - } - var te TTransportException - // NOT_OPEN returned by processor.Process is usually caused by client - // abandoning the connection (e.g. client side time out, or just client - // closes connections from the pool because of shutting down). - // Those logs will be very noisy, so suppress those logs as well. - if errors.As(err, &te) && (te.TypeId() == END_OF_FILE || te.TypeId() == NOT_OPEN) { - return nil - } - return err -} - -func (p *TSimpleServer) processRequests(client TTransport) (err error) { - defer func() { - err = treatEOFErrorsAsNil(err) - }() - - processor := p.processorFactory.GetProcessor(client) - inputTransport, err := p.inputTransportFactory.GetTransport(client) - if err != nil { - return err - } - inputProtocol := p.inputProtocolFactory.GetProtocol(inputTransport) - var outputTransport TTransport - var outputProtocol TProtocol - - // for THeaderProtocol, we must use the same protocol instance for - // input and output so that the response is in the same dialect that - // the server detected the request was in. 
- headerProtocol, ok := inputProtocol.(*THeaderProtocol) - if ok { - outputProtocol = inputProtocol - } else { - oTrans, err := p.outputTransportFactory.GetTransport(client) - if err != nil { - return err - } - outputTransport = oTrans - outputProtocol = p.outputProtocolFactory.GetProtocol(outputTransport) - } - - if inputTransport != nil { - defer inputTransport.Close() - } - if outputTransport != nil { - defer outputTransport.Close() - } - for { - if atomic.LoadInt32(&p.closed) != 0 { - return nil - } - - ctx := SetResponseHelper( - defaultCtx, - TResponseHelper{ - THeaderResponseHelper: NewTHeaderResponseHelper(outputProtocol), - }, - ) - if headerProtocol != nil { - // We need to call ReadFrame here, otherwise we won't - // get any headers on the AddReadTHeaderToContext call. - // - // ReadFrame is safe to be called multiple times so it - // won't break when it's called again later when we - // actually start to read the message. - if err := headerProtocol.ReadFrame(ctx); err != nil { - return err - } - ctx = AddReadTHeaderToContext(ctx, headerProtocol.GetReadHeaders()) - ctx = SetWriteHeaderList(ctx, p.forwardHeaders) - } - - ok, err := processor.Process(ctx, inputProtocol, outputProtocol) - if errors.Is(err, ErrAbandonRequest) { - return client.Close() - } - if errors.As(err, new(TTransportException)) && err != nil { - return err - } - var tae TApplicationException - if errors.As(err, &tae) && tae.TypeId() == UNKNOWN_METHOD { - continue - } - if !ok { - break - } - } - return nil -} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/socket.go b/vendor/github.com/apache/thrift/lib/go/thrift/socket.go deleted file mode 100644 index cba7c0f7..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/socket.go +++ /dev/null @@ -1,241 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "context" - "net" - "time" -) - -type TSocket struct { - conn *socketConn - addr net.Addr - cfg *TConfiguration -} - -// tcpAddr is a naive implementation of net.Addr that does nothing extra. -type tcpAddr string - -var _ net.Addr = tcpAddr("") - -func (ta tcpAddr) Network() string { - return "tcp" -} - -func (ta tcpAddr) String() string { - return string(ta) -} - -// Deprecated: Use NewTSocketConf instead. -func NewTSocket(hostPort string) (*TSocket, error) { - return NewTSocketConf(hostPort, &TConfiguration{ - noPropagation: true, - }), nil -} - -// NewTSocketConf creates a net.Conn-backed TTransport, given a host and port. 
-// -// Example: -// -// trans, err := thrift.NewTSocketConf("localhost:9090", &TConfiguration{ -// ConnectTimeout: time.Second, // Use 0 for no timeout -// SocketTimeout: time.Second, // Use 0 for no timeout -// }) -func NewTSocketConf(hostPort string, conf *TConfiguration) *TSocket { - return NewTSocketFromAddrConf(tcpAddr(hostPort), conf) -} - -// Deprecated: Use NewTSocketConf instead. -func NewTSocketTimeout(hostPort string, connTimeout time.Duration, soTimeout time.Duration) (*TSocket, error) { - return NewTSocketConf(hostPort, &TConfiguration{ - ConnectTimeout: connTimeout, - SocketTimeout: soTimeout, - - noPropagation: true, - }), nil -} - -// NewTSocketFromAddrConf creates a TSocket from a net.Addr -func NewTSocketFromAddrConf(addr net.Addr, conf *TConfiguration) *TSocket { - return &TSocket{ - addr: addr, - cfg: conf, - } -} - -// Deprecated: Use NewTSocketFromAddrConf instead. -func NewTSocketFromAddrTimeout(addr net.Addr, connTimeout time.Duration, soTimeout time.Duration) *TSocket { - return NewTSocketFromAddrConf(addr, &TConfiguration{ - ConnectTimeout: connTimeout, - SocketTimeout: soTimeout, - - noPropagation: true, - }) -} - -// NewTSocketFromConnConf creates a TSocket from an existing net.Conn. -func NewTSocketFromConnConf(conn net.Conn, conf *TConfiguration) *TSocket { - return &TSocket{ - conn: wrapSocketConn(conn), - addr: conn.RemoteAddr(), - cfg: conf, - } -} - -// Deprecated: Use NewTSocketFromConnConf instead. -func NewTSocketFromConnTimeout(conn net.Conn, socketTimeout time.Duration) *TSocket { - return NewTSocketFromConnConf(conn, &TConfiguration{ - SocketTimeout: socketTimeout, - - noPropagation: true, - }) -} - -// SetTConfiguration implements TConfigurationSetter. -// -// It can be used to set connect and socket timeouts. -func (p *TSocket) SetTConfiguration(conf *TConfiguration) { - p.cfg = conf -} - -// Sets the connect timeout -func (p *TSocket) SetConnTimeout(timeout time.Duration) error { - if p.cfg == nil { - p.cfg = &TConfiguration{ - noPropagation: true, - } - } - p.cfg.ConnectTimeout = timeout - return nil -} - -// Sets the socket timeout -func (p *TSocket) SetSocketTimeout(timeout time.Duration) error { - if p.cfg == nil { - p.cfg = &TConfiguration{ - noPropagation: true, - } - } - p.cfg.SocketTimeout = timeout - return nil -} - -func (p *TSocket) pushDeadline(read, write bool) { - var t time.Time - if timeout := p.cfg.GetSocketTimeout(); timeout > 0 { - t = time.Now().Add(time.Duration(timeout)) - } - if read && write { - p.conn.SetDeadline(t) - } else if read { - p.conn.SetReadDeadline(t) - } else if write { - p.conn.SetWriteDeadline(t) - } -} - -// Connects the socket, creating a new socket object if necessary. 
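Putting the constructor and Open together, a minimal dial sketch using the TConfiguration path (address and timeout values are illustrative):

package sketch

import (
	"time"

	"github.com/apache/thrift/lib/go/thrift"
)

// dial opens a plain TCP thrift transport with explicit timeouts.
func dial() (*thrift.TSocket, error) {
	sock := thrift.NewTSocketConf("localhost:9090", &thrift.TConfiguration{
		ConnectTimeout: 5 * time.Second, // illustrative values
		SocketTimeout:  5 * time.Second,
	})
	if err := sock.Open(); err != nil {
		return nil, err
	}
	return sock, nil
}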
-func (p *TSocket) Open() error { - if p.conn.isValid() { - return NewTTransportException(ALREADY_OPEN, "Socket already connected.") - } - if p.addr == nil { - return NewTTransportException(NOT_OPEN, "Cannot open nil address.") - } - if len(p.addr.Network()) == 0 { - return NewTTransportException(NOT_OPEN, "Cannot open bad network name.") - } - if len(p.addr.String()) == 0 { - return NewTTransportException(NOT_OPEN, "Cannot open bad address.") - } - var err error - if p.conn, err = createSocketConnFromReturn(net.DialTimeout( - p.addr.Network(), - p.addr.String(), - p.cfg.GetConnectTimeout(), - )); err != nil { - return &tTransportException{ - typeId: NOT_OPEN, - err: err, - msg: err.Error(), - } - } - p.addr = p.conn.RemoteAddr() - return nil -} - -// Retrieve the underlying net.Conn -func (p *TSocket) Conn() net.Conn { - return p.conn -} - -// Returns true if the connection is open -func (p *TSocket) IsOpen() bool { - return p.conn.IsOpen() -} - -// Closes the socket. -func (p *TSocket) Close() error { - return p.conn.Close() -} - -//Returns the remote address of the socket. -func (p *TSocket) Addr() net.Addr { - return p.addr -} - -func (p *TSocket) Read(buf []byte) (int, error) { - if !p.conn.isValid() { - return 0, NewTTransportException(NOT_OPEN, "Connection not open") - } - p.pushDeadline(true, false) - // NOTE: Calling any of p.IsOpen, p.conn.read0, or p.conn.IsOpen between - // p.pushDeadline and p.conn.Read could cause the deadline set inside - // p.pushDeadline being reset, thus need to be avoided. - n, err := p.conn.Read(buf) - return n, NewTTransportExceptionFromError(err) -} - -func (p *TSocket) Write(buf []byte) (int, error) { - if !p.conn.isValid() { - return 0, NewTTransportException(NOT_OPEN, "Connection not open") - } - p.pushDeadline(false, true) - return p.conn.Write(buf) -} - -func (p *TSocket) Flush(ctx context.Context) error { - return nil -} - -func (p *TSocket) Interrupt() error { - if !p.conn.isValid() { - return nil - } - return p.conn.Close() -} - -func (p *TSocket) RemainingBytes() (num_bytes uint64) { - const maxSize = ^uint64(0) - return maxSize // the truth is, we just don't know unless framed is used -} - -var _ TConfigurationSetter = (*TSocket)(nil) diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/socket_conn.go b/vendor/github.com/apache/thrift/lib/go/thrift/socket_conn.go deleted file mode 100644 index bbb5b7d1..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/socket_conn.go +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "errors" - "net" - "sync/atomic" -) - -// socketConn is a wrapped net.Conn that tries to do connectivity check. 
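The connectivity check this wrapper performs reduces to a non-blocking MSG_PEEK, as implemented in checkConn further down. The same technique on a bare *net.TCPConn looks roughly like this; a unix-only sketch, not part of this package:

package sketch

import (
	"errors"
	"net"
	"syscall"
	"time"
)

// peerClosed reports whether the remote side has closed a TCP connection,
// using the same non-blocking MSG_PEEK probe that socketConn.checkConn uses.
func peerClosed(conn *net.TCPConn) (bool, error) {
	// Clear any previously set read deadline; the peek below never blocks.
	conn.SetReadDeadline(time.Time{})
	rc, err := conn.SyscallConn()
	if err != nil {
		return false, err
	}
	var n int
	var rerr error
	var buf [1]byte
	if err := rc.Read(func(fd uintptr) bool {
		n, _, rerr = syscall.Recvfrom(int(fd), buf[:], syscall.MSG_PEEK|syscall.MSG_DONTWAIT)
		return true
	}); err != nil {
		return false, err
	}
	switch {
	case n > 0:
		return false, nil // data is waiting: definitely still open
	case errors.Is(rerr, syscall.EAGAIN) || errors.Is(rerr, syscall.EWOULDBLOCK):
		return false, nil // open but idle
	case rerr != nil:
		return false, rerr
	default:
		return true, nil // zero bytes and no error: peer sent FIN
	}
}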
-type socketConn struct { - net.Conn - - buffer [1]byte - closed int32 -} - -var _ net.Conn = (*socketConn)(nil) - -// createSocketConnFromReturn is a language sugar to help create socketConn from -// return values of functions like net.Dial, tls.Dial, net.Listener.Accept, etc. -func createSocketConnFromReturn(conn net.Conn, err error) (*socketConn, error) { - if err != nil { - return nil, err - } - return &socketConn{ - Conn: conn, - }, nil -} - -// wrapSocketConn wraps an existing net.Conn into *socketConn. -func wrapSocketConn(conn net.Conn) *socketConn { - // In case conn is already wrapped, - // return it as-is and avoid double wrapping. - if sc, ok := conn.(*socketConn); ok { - return sc - } - - return &socketConn{ - Conn: conn, - } -} - -// isValid checks whether there's a valid connection. -// -// It's nil safe, and returns false if sc itself is nil, or if the underlying -// connection is nil. -// -// It's the same as the previous implementation of TSocket.IsOpen and -// TSSLSocket.IsOpen before we added connectivity check. -func (sc *socketConn) isValid() bool { - return sc != nil && sc.Conn != nil && atomic.LoadInt32(&sc.closed) == 0 -} - -// IsOpen checks whether the connection is open. -// -// It's nil safe, and returns false if sc itself is nil, or if the underlying -// connection is nil. -// -// Otherwise, it tries to do a connectivity check and returns the result. -// -// It also has the side effect of resetting the previously set read deadline on -// the socket. As a result, it shouldn't be called between setting read deadline -// and doing actual read. -func (sc *socketConn) IsOpen() bool { - if !sc.isValid() { - return false - } - if err := sc.checkConn(); err != nil { - if !errors.Is(err, net.ErrClosed) { - // The connectivity check failed and the error is not - // that the connection is already closed, we need to - // close the connection explicitly here to avoid - // connection leaks. - sc.Close() - } - return false - } - return true -} - -// Read implements io.Reader. -// -// On Windows, it behaves the same as the underlying net.Conn.Read. -// -// On non-Windows, it treats len(p) == 0 as a connectivity check instead of -// readability check, which means instead of blocking until there's something to -// read (readability check), or always return (0, nil) (the default behavior of -// go's stdlib implementation on non-Windows), it never blocks, and will return -// an error if the connection is lost. -func (sc *socketConn) Read(p []byte) (n int, err error) { - if len(p) == 0 { - return 0, sc.read0() - } - - return sc.Conn.Read(p) -} - -func (sc *socketConn) Close() error { - if !sc.isValid() { - // Already closed - return net.ErrClosed - } - atomic.StoreInt32(&sc.closed, 1) - return sc.Conn.Close() -} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/socket_non_unix_conn.go b/vendor/github.com/apache/thrift/lib/go/thrift/socket_non_unix_conn.go deleted file mode 100644 index 75ed91dd..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/socket_non_unix_conn.go +++ /dev/null @@ -1,35 +0,0 @@ -//go:build windows || wasm -// +build windows wasm - -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -func (sc *socketConn) read0() error { - // On non-unix platforms, we fallback to the default behavior of reading 0 bytes. - var p []byte - _, err := sc.Conn.Read(p) - return err -} - -func (sc *socketConn) checkConn() error { - // On non-unix platforms, we always return nil for this check. - return nil -} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/socket_unix_conn.go b/vendor/github.com/apache/thrift/lib/go/thrift/socket_unix_conn.go deleted file mode 100644 index ac0dce9e..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/socket_unix_conn.go +++ /dev/null @@ -1,84 +0,0 @@ -//go:build !windows && !wasm -// +build !windows,!wasm - -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "errors" - "io" - "syscall" - "time" -) - -// We rely on this variable to be the zero time, -// but define it as global variable to avoid repetitive allocations. -// Please DO NOT mutate this variable in any way. -var zeroTime time.Time - -func (sc *socketConn) read0() error { - return sc.checkConn() -} - -func (sc *socketConn) checkConn() error { - syscallConn, ok := sc.Conn.(syscall.Conn) - if !ok { - // No way to check, return nil - return nil - } - - // The reading about to be done here is non-blocking so we don't really - // need a read deadline. We just need to clear the previously set read - // deadline, if any. - sc.Conn.SetReadDeadline(zeroTime) - - rc, err := syscallConn.SyscallConn() - if err != nil { - return err - } - - var n int - - if readErr := rc.Read(func(fd uintptr) bool { - n, _, err = syscall.Recvfrom(int(fd), sc.buffer[:], syscall.MSG_PEEK|syscall.MSG_DONTWAIT) - return true - }); readErr != nil { - return readErr - } - - if n > 0 { - // We got something, which means we are good - return nil - } - - if errors.Is(err, syscall.EAGAIN) || errors.Is(err, syscall.EWOULDBLOCK) { - // This means the connection is still open but we don't have - // anything to read right now. - return nil - } - - if err != nil { - return err - } - - // At this point, it means the other side already closed the connection. 
- return io.EOF -} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/ssl_server_socket.go b/vendor/github.com/apache/thrift/lib/go/thrift/ssl_server_socket.go deleted file mode 100644 index 907afca3..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/ssl_server_socket.go +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "crypto/tls" - "net" - "time" -) - -type TSSLServerSocket struct { - listener net.Listener - addr net.Addr - clientTimeout time.Duration - interrupted bool - cfg *tls.Config -} - -func NewTSSLServerSocket(listenAddr string, cfg *tls.Config) (*TSSLServerSocket, error) { - return NewTSSLServerSocketTimeout(listenAddr, cfg, 0) -} - -func NewTSSLServerSocketTimeout(listenAddr string, cfg *tls.Config, clientTimeout time.Duration) (*TSSLServerSocket, error) { - if cfg.MinVersion == 0 { - cfg.MinVersion = tls.VersionTLS10 - } - addr, err := net.ResolveTCPAddr("tcp", listenAddr) - if err != nil { - return nil, err - } - return &TSSLServerSocket{addr: addr, clientTimeout: clientTimeout, cfg: cfg}, nil -} - -func (p *TSSLServerSocket) Listen() error { - if p.IsListening() { - return nil - } - l, err := tls.Listen(p.addr.Network(), p.addr.String(), p.cfg) - if err != nil { - return err - } - p.listener = l - return nil -} - -func (p *TSSLServerSocket) Accept() (TTransport, error) { - if p.interrupted { - return nil, errTransportInterrupted - } - if p.listener == nil { - return nil, NewTTransportException(NOT_OPEN, "No underlying server socket") - } - conn, err := p.listener.Accept() - if err != nil { - return nil, NewTTransportExceptionFromError(err) - } - return NewTSSLSocketFromConnTimeout(conn, p.cfg, p.clientTimeout), nil -} - -// Checks whether the socket is listening. -func (p *TSSLServerSocket) IsListening() bool { - return p.listener != nil -} - -// Connects the socket, creating a new socket object if necessary. 
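A sketch of standing up such a TLS server transport; certificate acquisition and the listen address are illustrative:

package sketch

import (
	"crypto/tls"

	"github.com/apache/thrift/lib/go/thrift"
)

// listenTLS creates and opens a TLS-backed server transport.
func listenTLS(cert tls.Certificate) (*thrift.TSSLServerSocket, error) {
	cfg := &tls.Config{
		Certificates: []tls.Certificate{cert},
		// The constructor only raises MinVersion when it is unset (0),
		// so pin something modern explicitly.
		MinVersion: tls.VersionTLS12,
	}
	sock, err := thrift.NewTSSLServerSocket("0.0.0.0:9090", cfg)
	if err != nil {
		return nil, err
	}
	if err := sock.Listen(); err != nil {
		return nil, err
	}
	return sock, nil
}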
-func (p *TSSLServerSocket) Open() error { - if p.IsListening() { - return NewTTransportException(ALREADY_OPEN, "Server socket already open") - } - if l, err := tls.Listen(p.addr.Network(), p.addr.String(), p.cfg); err != nil { - return err - } else { - p.listener = l - } - return nil -} - -func (p *TSSLServerSocket) Addr() net.Addr { - return p.addr -} - -func (p *TSSLServerSocket) Close() error { - defer func() { - p.listener = nil - }() - if p.IsListening() { - return p.listener.Close() - } - return nil -} - -func (p *TSSLServerSocket) Interrupt() error { - p.interrupted = true - return nil -} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/ssl_socket.go b/vendor/github.com/apache/thrift/lib/go/thrift/ssl_socket.go deleted file mode 100644 index d7ba415e..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/ssl_socket.go +++ /dev/null @@ -1,262 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "context" - "crypto/tls" - "net" - "time" -) - -type TSSLSocket struct { - conn *socketConn - // hostPort contains host:port (e.g. "asdf.com:12345"). The field is - // only valid if addr is nil. - hostPort string - // addr is nil when hostPort is not "", and is only used when the - // TSSLSocket is constructed from a net.Addr. - addr net.Addr - - cfg *TConfiguration -} - -// NewTSSLSocketConf creates a net.Conn-backed TTransport, given a host and port. -// -// Example: -// -// trans := thrift.NewTSSLSocketConf("localhost:9090", &TConfiguration{ -// ConnectTimeout: time.Second, // Use 0 for no timeout -// SocketTimeout: time.Second, // Use 0 for no timeout -// -// TLSConfig: &tls.Config{ -// // Fill in tls config here. -// } -// }) -func NewTSSLSocketConf(hostPort string, conf *TConfiguration) *TSSLSocket { - if cfg := conf.GetTLSConfig(); cfg != nil && cfg.MinVersion == 0 { - cfg.MinVersion = tls.VersionTLS10 - } - return &TSSLSocket{ - hostPort: hostPort, - cfg: conf, - } -} - -// Deprecated: Use NewTSSLSocketConf instead. -func NewTSSLSocket(hostPort string, cfg *tls.Config) (*TSSLSocket, error) { - return NewTSSLSocketConf(hostPort, &TConfiguration{ - TLSConfig: cfg, - - noPropagation: true, - }), nil -} - -// Deprecated: Use NewTSSLSocketConf instead. -func NewTSSLSocketTimeout(hostPort string, cfg *tls.Config, connectTimeout, socketTimeout time.Duration) (*TSSLSocket, error) { - return NewTSSLSocketConf(hostPort, &TConfiguration{ - ConnectTimeout: connectTimeout, - SocketTimeout: socketTimeout, - TLSConfig: cfg, - - noPropagation: true, - }), nil -} - -// NewTSSLSocketFromAddrConf creates a TSSLSocket from a net.Addr. 
-func NewTSSLSocketFromAddrConf(addr net.Addr, conf *TConfiguration) *TSSLSocket { - return &TSSLSocket{ - addr: addr, - cfg: conf, - } -} - -// Deprecated: Use NewTSSLSocketFromAddrConf instead. -func NewTSSLSocketFromAddrTimeout(addr net.Addr, cfg *tls.Config, connectTimeout, socketTimeout time.Duration) *TSSLSocket { - return NewTSSLSocketFromAddrConf(addr, &TConfiguration{ - ConnectTimeout: connectTimeout, - SocketTimeout: socketTimeout, - TLSConfig: cfg, - - noPropagation: true, - }) -} - -// NewTSSLSocketFromConnConf creates a TSSLSocket from an existing net.Conn. -func NewTSSLSocketFromConnConf(conn net.Conn, conf *TConfiguration) *TSSLSocket { - return &TSSLSocket{ - conn: wrapSocketConn(conn), - addr: conn.RemoteAddr(), - cfg: conf, - } -} - -// Deprecated: Use NewTSSLSocketFromConnConf instead. -func NewTSSLSocketFromConnTimeout(conn net.Conn, cfg *tls.Config, socketTimeout time.Duration) *TSSLSocket { - return NewTSSLSocketFromConnConf(conn, &TConfiguration{ - SocketTimeout: socketTimeout, - TLSConfig: cfg, - - noPropagation: true, - }) -} - -// SetTConfiguration implements TConfigurationSetter. -// -// It can be used to change connect and socket timeouts. -func (p *TSSLSocket) SetTConfiguration(conf *TConfiguration) { - p.cfg = conf -} - -// Sets the connect timeout -func (p *TSSLSocket) SetConnTimeout(timeout time.Duration) error { - if p.cfg == nil { - p.cfg = &TConfiguration{} - } - p.cfg.ConnectTimeout = timeout - return nil -} - -// Sets the socket timeout -func (p *TSSLSocket) SetSocketTimeout(timeout time.Duration) error { - if p.cfg == nil { - p.cfg = &TConfiguration{} - } - p.cfg.SocketTimeout = timeout - return nil -} - -func (p *TSSLSocket) pushDeadline(read, write bool) { - var t time.Time - if timeout := p.cfg.GetSocketTimeout(); timeout > 0 { - t = time.Now().Add(time.Duration(timeout)) - } - if read && write { - p.conn.SetDeadline(t) - } else if read { - p.conn.SetReadDeadline(t) - } else if write { - p.conn.SetWriteDeadline(t) - } -} - -// Connects the socket, creating a new socket object if necessary. -func (p *TSSLSocket) Open() error { - var err error - // If we have a hostname, we need to pass the hostname to tls.Dial for - // certificate hostname checks. - if p.hostPort != "" { - if p.conn, err = createSocketConnFromReturn(tls.DialWithDialer( - &net.Dialer{ - Timeout: p.cfg.GetConnectTimeout(), - }, - "tcp", - p.hostPort, - p.cfg.GetTLSConfig(), - )); err != nil { - return &tTransportException{ - typeId: NOT_OPEN, - err: err, - msg: err.Error(), - } - } - } else { - if p.conn.isValid() { - return NewTTransportException(ALREADY_OPEN, "Socket already connected.") - } - if p.addr == nil { - return NewTTransportException(NOT_OPEN, "Cannot open nil address.") - } - if len(p.addr.Network()) == 0 { - return NewTTransportException(NOT_OPEN, "Cannot open bad network name.") - } - if len(p.addr.String()) == 0 { - return NewTTransportException(NOT_OPEN, "Cannot open bad address.") - } - if p.conn, err = createSocketConnFromReturn(tls.DialWithDialer( - &net.Dialer{ - Timeout: p.cfg.GetConnectTimeout(), - }, - p.addr.Network(), - p.addr.String(), - p.cfg.GetTLSConfig(), - )); err != nil { - return &tTransportException{ - typeId: NOT_OPEN, - err: err, - msg: err.Error(), - } - } - } - return nil -} - -// Retrieve the underlying net.Conn -func (p *TSSLSocket) Conn() net.Conn { - return p.conn -} - -// Returns true if the connection is open -func (p *TSSLSocket) IsOpen() bool { - return p.conn.IsOpen() -} - -// Closes the socket. 
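The client-side counterpart, dialing with TConfiguration; endpoint and timeouts are illustrative:

package sketch

import (
	"crypto/tls"
	"time"

	"github.com/apache/thrift/lib/go/thrift"
)

// dialTLS opens a TLS thrift transport. The host:port form matters here:
// it goes through tls.Dial, so it is used for certificate hostname checks.
func dialTLS() (*thrift.TSSLSocket, error) {
	sock := thrift.NewTSSLSocketConf("localhost:9090", &thrift.TConfiguration{
		ConnectTimeout: 5 * time.Second, // illustrative values
		SocketTimeout:  5 * time.Second,
		TLSConfig: &tls.Config{
			MinVersion: tls.VersionTLS12,
		},
	})
	if err := sock.Open(); err != nil {
		return nil, err
	}
	return sock, nil
}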
-func (p *TSSLSocket) Close() error { - return p.conn.Close() -} - -func (p *TSSLSocket) Read(buf []byte) (int, error) { - if !p.conn.isValid() { - return 0, NewTTransportException(NOT_OPEN, "Connection not open") - } - p.pushDeadline(true, false) - // NOTE: Calling any of p.IsOpen, p.conn.read0, or p.conn.IsOpen between - // p.pushDeadline and p.conn.Read could cause the deadline set inside - // p.pushDeadline being reset, thus need to be avoided. - n, err := p.conn.Read(buf) - return n, NewTTransportExceptionFromError(err) -} - -func (p *TSSLSocket) Write(buf []byte) (int, error) { - if !p.conn.isValid() { - return 0, NewTTransportException(NOT_OPEN, "Connection not open") - } - p.pushDeadline(false, true) - return p.conn.Write(buf) -} - -func (p *TSSLSocket) Flush(ctx context.Context) error { - return nil -} - -func (p *TSSLSocket) Interrupt() error { - if !p.conn.isValid() { - return nil - } - return p.conn.Close() -} - -func (p *TSSLSocket) RemainingBytes() (num_bytes uint64) { - const maxSize = ^uint64(0) - return maxSize // the truth is, we just don't know unless framed is used -} - -var _ TConfigurationSetter = (*TSSLSocket)(nil) diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/staticcheck.conf b/vendor/github.com/apache/thrift/lib/go/thrift/staticcheck.conf deleted file mode 100644 index 2ffe850b..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/staticcheck.conf +++ /dev/null @@ -1,4 +0,0 @@ -checks = [ - "inherit", - "-ST1005", # To be consistent with other language libraries we need capitalized error messages. -] diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/transport.go deleted file mode 100644 index ba2738a8..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/transport.go +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "context" - "errors" - "io" -) - -var errTransportInterrupted = errors.New("Transport Interrupted") - -type Flusher interface { - Flush() (err error) -} - -type ContextFlusher interface { - Flush(ctx context.Context) (err error) -} - -type ReadSizeProvider interface { - RemainingBytes() (num_bytes uint64) -} - -// Encapsulates the I/O layer -type TTransport interface { - io.ReadWriteCloser - ContextFlusher - ReadSizeProvider - - // Opens the transport for communication - Open() error - - // Returns true if the transport is open - IsOpen() bool -} - -type stringWriter interface { - WriteString(s string) (n int, err error) -} - -// This is an "enhanced" transport with extra capabilities. You need to use one of these -// to construct a protocol. 
-// Notably, TSocket does not implement this interface, and it is always a mistake to use -// TSocket directly in protocol. -type TRichTransport interface { - io.ReadWriter - io.ByteReader - io.ByteWriter - stringWriter - ContextFlusher - ReadSizeProvider -} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/transport_exception.go b/vendor/github.com/apache/thrift/lib/go/thrift/transport_exception.go deleted file mode 100644 index a51510ed..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/transport_exception.go +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "errors" - "io" -) - -type timeoutable interface { - Timeout() bool -} - -// Thrift Transport exception -type TTransportException interface { - TException - TypeId() int - Err() error -} - -const ( - UNKNOWN_TRANSPORT_EXCEPTION = 0 - NOT_OPEN = 1 - ALREADY_OPEN = 2 - TIMED_OUT = 3 - END_OF_FILE = 4 -) - -type tTransportException struct { - typeId int - err error - msg string -} - -var _ TTransportException = (*tTransportException)(nil) - -func (tTransportException) TExceptionType() TExceptionType { - return TExceptionTypeTransport -} - -func (p *tTransportException) TypeId() int { - return p.typeId -} - -func (p *tTransportException) Error() string { - return p.msg -} - -func (p *tTransportException) Err() error { - return p.err -} - -func (p *tTransportException) Unwrap() error { - return p.err -} - -func (p *tTransportException) Timeout() bool { - return p.typeId == TIMED_OUT || isTimeoutError(p.err) -} - -func NewTTransportException(t int, e string) TTransportException { - return &tTransportException{ - typeId: t, - err: errors.New(e), - msg: e, - } -} - -func NewTTransportExceptionFromError(e error) TTransportException { - if e == nil { - return nil - } - - if t, ok := e.(TTransportException); ok { - return t - } - - te := &tTransportException{ - typeId: UNKNOWN_TRANSPORT_EXCEPTION, - err: e, - msg: e.Error(), - } - - if isTimeoutError(e) { - te.typeId = TIMED_OUT - return te - } - - if errors.Is(e, io.EOF) { - te.typeId = END_OF_FILE - return te - } - - return te -} - -func prependTTransportException(prepend string, e TTransportException) TTransportException { - return &tTransportException{ - typeId: e.TypeId(), - err: e, - msg: prepend + e.Error(), - } -} - -// isTimeoutError returns true when err is an error caused by timeout. -// -// Note that this also includes TTransportException wrapped timeout errors. 
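A sketch of how calling code can branch on these exception type IDs when deciding whether to retry; the retry policy itself is illustrative:

package sketch

import (
	"errors"

	"github.com/apache/thrift/lib/go/thrift"
)

// shouldRetry classifies a transport error: timeouts are retryable, while
// EOF/NOT_OPEN mean the peer went away and a fresh connection is needed.
func shouldRetry(err error) bool {
	var te thrift.TTransportException
	if !errors.As(err, &te) {
		return false
	}
	switch te.TypeId() {
	case thrift.TIMED_OUT:
		return true
	case thrift.END_OF_FILE, thrift.NOT_OPEN:
		return false
	default:
		return false
	}
}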
-func isTimeoutError(err error) bool { - var t timeoutable - if errors.As(err, &t) { - return t.Timeout() - } - return false -} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/transport_factory.go b/vendor/github.com/apache/thrift/lib/go/thrift/transport_factory.go deleted file mode 100644 index c8058079..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/transport_factory.go +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -// Factory class used to create wrapped instance of Transports. -// This is used primarily in servers, which get Transports from -// a ServerTransport and then may want to mutate them (i.e. create -// a BufferedTransport from the underlying base transport) -type TTransportFactory interface { - GetTransport(trans TTransport) (TTransport, error) -} - -type tTransportFactory struct{} - -// Return a wrapped instance of the base Transport. -func (p *tTransportFactory) GetTransport(trans TTransport) (TTransport, error) { - return trans, nil -} - -func NewTTransportFactory() TTransportFactory { - return &tTransportFactory{} -} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/type.go b/vendor/github.com/apache/thrift/lib/go/thrift/type.go deleted file mode 100644 index 4292ffca..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/type.go +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package thrift - -// Type constants in the Thrift protocol -type TType byte - -const ( - STOP = 0 - VOID = 1 - BOOL = 2 - BYTE = 3 - I08 = 3 - DOUBLE = 4 - I16 = 6 - I32 = 8 - I64 = 10 - STRING = 11 - UTF7 = 11 - STRUCT = 12 - MAP = 13 - SET = 14 - LIST = 15 - UTF8 = 16 - UTF16 = 17 - //BINARY = 18 wrong and unused -) - -var typeNames = map[int]string{ - STOP: "STOP", - VOID: "VOID", - BOOL: "BOOL", - BYTE: "BYTE", - DOUBLE: "DOUBLE", - I16: "I16", - I32: "I32", - I64: "I64", - STRING: "STRING", - STRUCT: "STRUCT", - MAP: "MAP", - SET: "SET", - LIST: "LIST", - UTF8: "UTF8", - UTF16: "UTF16", -} - -func (p TType) String() string { - if s, ok := typeNames[int(p)]; ok { - return s - } - return "Unknown" -} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/zlib_transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/zlib_transport.go deleted file mode 100644 index 259943a6..00000000 --- a/vendor/github.com/apache/thrift/lib/go/thrift/zlib_transport.go +++ /dev/null @@ -1,137 +0,0 @@ -/* -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, -* software distributed under the License is distributed on an -* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -* KIND, either express or implied. See the License for the -* specific language governing permissions and limitations -* under the License. - */ - -package thrift - -import ( - "compress/zlib" - "context" - "io" -) - -// TZlibTransportFactory is a factory for TZlibTransport instances -type TZlibTransportFactory struct { - level int - factory TTransportFactory -} - -// TZlibTransport is a TTransport implementation that makes use of zlib compression. -type TZlibTransport struct { - reader io.ReadCloser - transport TTransport - writer *zlib.Writer -} - -// GetTransport constructs a new instance of TZlibTransport -func (p *TZlibTransportFactory) GetTransport(trans TTransport) (TTransport, error) { - if p.factory != nil { - // wrap other factory - var err error - trans, err = p.factory.GetTransport(trans) - if err != nil { - return nil, err - } - } - return NewTZlibTransport(trans, p.level) -} - -// NewTZlibTransportFactory constructs a new instance of TZlibTransportFactory -func NewTZlibTransportFactory(level int) *TZlibTransportFactory { - return &TZlibTransportFactory{level: level, factory: nil} -} - -// NewTZlibTransportFactoryWithFactory constructs a new instance of TZlibTransportFactory -// as a wrapper over an existing transport factory -func NewTZlibTransportFactoryWithFactory(level int, factory TTransportFactory) *TZlibTransportFactory { - return &TZlibTransportFactory{level: level, factory: factory} -} - -// NewTZlibTransport constructs a new instance of TZlibTransport -func NewTZlibTransport(trans TTransport, level int) (*TZlibTransport, error) { - w, err := zlib.NewWriterLevel(trans, level) - if err != nil { - return nil, err - } - - return &TZlibTransport{ - writer: w, - transport: trans, - }, nil - } - -// Close closes the reader and writer (flushing any unwritten data) and closes -// the underlying transport. 
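A sketch of layering this compression onto an existing transport, either directly or server-wide via a wrapping factory; the base transport is assumed to come from elsewhere:

package sketch

import (
	"compress/zlib"

	"github.com/apache/thrift/lib/go/thrift"
)

// wrapZlib compresses writes to base and transparently inflates reads.
func wrapZlib(base thrift.TTransport) (thrift.TTransport, error) {
	return thrift.NewTZlibTransport(base, zlib.BestSpeed)
}

// zlibFactory does the same server-wide by wrapping another factory.
func zlibFactory(inner thrift.TTransportFactory) thrift.TTransportFactory {
	return thrift.NewTZlibTransportFactoryWithFactory(zlib.BestSpeed, inner)
}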
-func (z *TZlibTransport) Close() error { - if z.reader != nil { - if err := z.reader.Close(); err != nil { - return err - } - } - if err := z.writer.Close(); err != nil { - return err - } - return z.transport.Close() -} - -// Flush flushes the writer and its underlying transport. -func (z *TZlibTransport) Flush(ctx context.Context) error { - if err := z.writer.Flush(); err != nil { - return err - } - return z.transport.Flush(ctx) -} - -// IsOpen returns true if the transport is open -func (z *TZlibTransport) IsOpen() bool { - return z.transport.IsOpen() -} - -// Open opens the transport for communication -func (z *TZlibTransport) Open() error { - return z.transport.Open() -} - -func (z *TZlibTransport) Read(p []byte) (int, error) { - if z.reader == nil { - r, err := zlib.NewReader(z.transport) - if err != nil { - return 0, NewTTransportExceptionFromError(err) - } - z.reader = r - } - - return z.reader.Read(p) -} - -// RemainingBytes returns the size in bytes of the data that is still to be -// read. -func (z *TZlibTransport) RemainingBytes() uint64 { - return z.transport.RemainingBytes() -} - -func (z *TZlibTransport) Write(p []byte) (int, error) { - return z.writer.Write(p) -} - -// SetTConfiguration implements TConfigurationSetter for propagation. -func (z *TZlibTransport) SetTConfiguration(conf *TConfiguration) { - PropagateTConfiguration(z.transport, conf) -} - -var _ TConfigurationSetter = (*TZlibTransport)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go index 20153586..2264200c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go @@ -68,6 +68,12 @@ type Config struct { // // See the `aws.EndpointResolverWithOptions` documentation for additional // usage information. + // + // Deprecated: with the release of endpoint resolution v2 in API clients, + // EndpointResolver and EndpointResolverWithOptions are deprecated. + // Providing a value for this field will likely prevent you from using + // newer endpoint-related service features. See API client options + // EndpointResolverV2 and BaseEndpoint. EndpointResolverWithOptions EndpointResolverWithOptions // RetryMaxAttempts specifies the maximum number attempts an API client @@ -132,6 +138,30 @@ type Config struct { // `config.LoadDefaultConfig`. You should not populate this structure // programmatically, or rely on the values here within your applications. RuntimeEnvironment RuntimeEnvironment + + // AppId is an optional application specific identifier that can be set. + // When set it will be appended to the User-Agent header of every request + // in the form of App/{AppId}. This variable is sourced from environment + // variable AWS_SDK_UA_APP_ID or the shared config profile attribute sdk_ua_app_id. + // See https://docs.aws.amazon.com/sdkref/latest/guide/settings-reference.html for + // more information on environment variables and shared config settings. + AppID string + + // BaseEndpoint is an intermediary transfer location to a service specific + // BaseEndpoint on a service's Options. + BaseEndpoint *string + + // DisableRequestCompression toggles if an operation request could be + // compressed or not. Will be set to false by default. 
This variable is sourced from + // environment variable AWS_DISABLE_REQUEST_COMPRESSION or the shared config profile attribute + // disable_request_compression + DisableRequestCompression bool + + // RequestMinCompressSizeBytes sets the inclusive min bytes of a request body that could be + // compressed. Will be set to 10240 by default and must be within 0 and 10485760 bytes inclusively. + // This variable is sourced from environment variable AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES or + // the shared config profile attribute request_min_compression_size_bytes + RequestMinCompressSizeBytes int64 } // NewConfig returns a new Config pointer that can be chained with builder @@ -140,8 +170,7 @@ func NewConfig() *Config { return &Config{} } -// Copy will return a shallow copy of the Config object. If any additional -// configurations are provided they will be merged into the new config returned. +// Copy will return a shallow copy of the Config object. func (c Config) Copy() Config { cp := c return cp diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go index 472cb94b..66d09630 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go @@ -3,4 +3,4 @@ package aws // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.17.7" +const goModuleVersion = "1.24.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go index e6e87ac7..d66f0960 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go @@ -2,6 +2,7 @@ package middleware import ( "context" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/smithy-go/middleware" @@ -42,12 +43,13 @@ func (s RegisterServiceMetadata) HandleInitialize( // service metadata keys for storing and lookup of runtime stack information. type ( - serviceIDKey struct{} - signingNameKey struct{} - signingRegionKey struct{} - regionKey struct{} - operationNameKey struct{} - partitionIDKey struct{} + serviceIDKey struct{} + signingNameKey struct{} + signingRegionKey struct{} + regionKey struct{} + operationNameKey struct{} + partitionIDKey struct{} + requiresLegacyEndpointsKey struct{} ) // GetServiceID retrieves the service id from the context. @@ -63,6 +65,9 @@ func GetServiceID(ctx context.Context) (v string) { // // Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues // to clear all stack values. +// +// Deprecated: This value is unstable. The resolved signing name is available +// in the signer properties object passed to the signer. func GetSigningName(ctx context.Context) (v string) { v, _ = middleware.GetStackValue(ctx, signingNameKey{}).(string) return v @@ -72,6 +77,9 @@ func GetSigningName(ctx context.Context) (v string) { // // Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues // to clear all stack values. +// +// Deprecated: This value is unstable. The resolved signing region is available +// in the signer properties object passed to the signer. func GetSigningRegion(ctx context.Context) (v string) { v, _ = middleware.GetStackValue(ctx, signingRegionKey{}).(string) return v @@ -104,10 +112,32 @@ func GetPartitionID(ctx context.Context) string { return v } -// SetSigningName set or modifies the signing name on the context. 
+// GetRequiresLegacyEndpoints returns the flag used to indicate if legacy
+// endpoint customizations need to be executed.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetRequiresLegacyEndpoints(ctx context.Context) bool {
+	v, _ := middleware.GetStackValue(ctx, requiresLegacyEndpointsKey{}).(bool)
+	return v
+}
+
+// SetRequiresLegacyEndpoints sets or modifies the flag indicating that
+// legacy endpoint customizations are needed.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetRequiresLegacyEndpoints(ctx context.Context, value bool) context.Context {
+	return middleware.WithStackValue(ctx, requiresLegacyEndpointsKey{}, value)
+}
+
+// SetSigningName sets or modifies the sigv4 or sigv4a signing name on the context.
 //
 // Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
 // to clear all stack values.
+//
+// Deprecated: This value is unstable. Use WithSigV4SigningName client option
+// funcs instead.
 func SetSigningName(ctx context.Context, value string) context.Context {
 	return middleware.WithStackValue(ctx, signingNameKey{}, value)
 }
@@ -116,6 +146,9 @@ func SetSigningName(ctx context.Context, value string) context.Context {
 //
 // Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
 // to clear all stack values.
+//
+// Deprecated: This value is unstable. Use WithSigV4SigningRegion client option
+// funcs instead.
 func SetSigningRegion(ctx context.Context, value string) context.Context {
 	return middleware.WithStackValue(ctx, signingRegionKey{}, value)
 }
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/metrics.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/metrics.go
new file mode 100644
index 00000000..b0133f4c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics/metrics.go
@@ -0,0 +1,319 @@
+// Package metrics implements metrics gathering for SDK development purposes.
+//
+// This package is designated as private and is intended for use only by the
+// AWS client runtime. The exported API therein is not considered stable and
+// is subject to breaking changes without notice.
+package metrics
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/aws/smithy-go/middleware"
+)
+
+const (
+	// ServiceIDKey is the key for the service ID metric.
+	ServiceIDKey = "ServiceId"
+	// OperationNameKey is the key for the operation name metric.
+	OperationNameKey = "OperationName"
+	// ClientRequestIDKey is the key for the client request ID metric.
+	ClientRequestIDKey = "ClientRequestId"
+	// APICallDurationKey is the key for the API call duration metric.
+	APICallDurationKey = "ApiCallDuration"
+	// APICallSuccessfulKey is the key for the API call successful metric.
+	APICallSuccessfulKey = "ApiCallSuccessful"
+	// MarshallingDurationKey is the key for the marshalling duration metric.
+	MarshallingDurationKey = "MarshallingDuration"
+	// InThroughputKey is the key for the input throughput metric.
+	InThroughputKey = "InThroughput"
+	// OutThroughputKey is the key for the output throughput metric.
+	OutThroughputKey = "OutThroughput"
+	// RetryCountKey is the key for the retry count metric.
+	RetryCountKey = "RetryCount"
+	// HTTPStatusCodeKey is the key for the HTTP status code metric.
+ HTTPStatusCodeKey = "HttpStatusCode" + // AWSExtendedRequestIDKey is the key for the AWS extended request ID metric. + AWSExtendedRequestIDKey = "AwsExtendedRequestId" + // AWSRequestIDKey is the key for the AWS request ID metric. + AWSRequestIDKey = "AwsRequestId" + // BackoffDelayDurationKey is the key for the backoff delay duration metric. + BackoffDelayDurationKey = "BackoffDelayDuration" + // StreamThroughputKey is the key for the stream throughput metric. + StreamThroughputKey = "Throughput" + // ConcurrencyAcquireDurationKey is the key for the concurrency acquire duration metric. + ConcurrencyAcquireDurationKey = "ConcurrencyAcquireDuration" + // PendingConcurrencyAcquiresKey is the key for the pending concurrency acquires metric. + PendingConcurrencyAcquiresKey = "PendingConcurrencyAcquires" + // SigningDurationKey is the key for the signing duration metric. + SigningDurationKey = "SigningDuration" + // UnmarshallingDurationKey is the key for the unmarshalling duration metric. + UnmarshallingDurationKey = "UnmarshallingDuration" + // TimeToFirstByteKey is the key for the time to first byte metric. + TimeToFirstByteKey = "TimeToFirstByte" + // ServiceCallDurationKey is the key for the service call duration metric. + ServiceCallDurationKey = "ServiceCallDuration" + // EndpointResolutionDurationKey is the key for the endpoint resolution duration metric. + EndpointResolutionDurationKey = "EndpointResolutionDuration" + // AttemptNumberKey is the key for the attempt number metric. + AttemptNumberKey = "AttemptNumber" + // MaxConcurrencyKey is the key for the max concurrency metric. + MaxConcurrencyKey = "MaxConcurrency" + // AvailableConcurrencyKey is the key for the available concurrency metric. + AvailableConcurrencyKey = "AvailableConcurrency" +) + +// MetricPublisher provides the interface to provide custom MetricPublishers. +// PostRequestMetrics will be invoked by the MetricCollection middleware to post request. +// PostStreamMetrics will be invoked by ReadCloserWithMetrics to post stream metrics. +type MetricPublisher interface { + PostRequestMetrics(*MetricData) error + PostStreamMetrics(*MetricData) error +} + +// Serializer provides the interface to provide custom Serializers. +// Serialize will transform any input object in its corresponding string representation. +type Serializer interface { + Serialize(obj interface{}) (string, error) +} + +// DefaultSerializer is an implementation of the Serializer interface. +type DefaultSerializer struct{} + +// Serialize uses the default JSON serializer to obtain the string representation of an object. +func (DefaultSerializer) Serialize(obj interface{}) (string, error) { + bytes, err := json.Marshal(obj) + if err != nil { + return "", err + } + return string(bytes), nil +} + +type metricContextKey struct{} + +// MetricContext contains fields to store metric-related information. +type MetricContext struct { + connectionCounter *SharedConnectionCounter + publisher MetricPublisher + data *MetricData +} + +// MetricData stores the collected metric data. 
+type MetricData struct { + RequestStartTime time.Time + RequestEndTime time.Time + APICallDuration time.Duration + SerializeStartTime time.Time + SerializeEndTime time.Time + MarshallingDuration time.Duration + ResolveEndpointStartTime time.Time + ResolveEndpointEndTime time.Time + EndpointResolutionDuration time.Duration + InThroughput float64 + OutThroughput float64 + RetryCount int + Success uint8 + StatusCode int + ClientRequestID string + ServiceID string + OperationName string + PartitionID string + Region string + RequestContentLength int64 + Stream StreamMetrics + Attempts []AttemptMetrics +} + +// StreamMetrics stores metrics related to streaming data. +type StreamMetrics struct { + ReadDuration time.Duration + ReadBytes int64 + Throughput float64 +} + +// AttemptMetrics stores metrics related to individual attempts. +type AttemptMetrics struct { + ServiceCallStart time.Time + ServiceCallEnd time.Time + ServiceCallDuration time.Duration + FirstByteTime time.Time + TimeToFirstByte time.Duration + ConnRequestedTime time.Time + ConnObtainedTime time.Time + ConcurrencyAcquireDuration time.Duration + CredentialFetchStartTime time.Time + CredentialFetchEndTime time.Time + SignStartTime time.Time + SignEndTime time.Time + SigningDuration time.Duration + DeserializeStartTime time.Time + DeserializeEndTime time.Time + UnMarshallingDuration time.Duration + RetryDelay time.Duration + ResponseContentLength int64 + StatusCode int + RequestID string + ExtendedRequestID string + HTTPClient string + MaxConcurrency int + PendingConnectionAcquires int + AvailableConcurrency int + ActiveRequests int + ReusedConnection bool +} + +// Data returns the MetricData associated with the MetricContext. +func (mc *MetricContext) Data() *MetricData { + return mc.data +} + +// ConnectionCounter returns the SharedConnectionCounter associated with the MetricContext. +func (mc *MetricContext) ConnectionCounter() *SharedConnectionCounter { + return mc.connectionCounter +} + +// Publisher returns the MetricPublisher associated with the MetricContext. +func (mc *MetricContext) Publisher() MetricPublisher { + return mc.publisher +} + +// ComputeRequestMetrics calculates and populates derived metrics based on the collected data. +func (md *MetricData) ComputeRequestMetrics() { + + for idx := range md.Attempts { + attempt := &md.Attempts[idx] + attempt.ConcurrencyAcquireDuration = attempt.ConnObtainedTime.Sub(attempt.ConnRequestedTime) + attempt.SigningDuration = attempt.SignEndTime.Sub(attempt.SignStartTime) + attempt.UnMarshallingDuration = attempt.DeserializeEndTime.Sub(attempt.DeserializeStartTime) + attempt.TimeToFirstByte = attempt.FirstByteTime.Sub(attempt.ServiceCallStart) + attempt.ServiceCallDuration = attempt.ServiceCallEnd.Sub(attempt.ServiceCallStart) + } + + md.APICallDuration = md.RequestEndTime.Sub(md.RequestStartTime) + md.MarshallingDuration = md.SerializeEndTime.Sub(md.SerializeStartTime) + md.EndpointResolutionDuration = md.ResolveEndpointEndTime.Sub(md.ResolveEndpointStartTime) + + md.RetryCount = len(md.Attempts) - 1 + + latestAttempt, err := md.LatestAttempt() + + if err != nil { + fmt.Printf("error retrieving attempts data due to: %s. 
Skipping Throughput metrics", err.Error()) + } else { + + md.StatusCode = latestAttempt.StatusCode + + if md.Success == 1 { + if latestAttempt.ResponseContentLength > 0 && latestAttempt.ServiceCallDuration > 0 { + md.InThroughput = float64(latestAttempt.ResponseContentLength) / latestAttempt.ServiceCallDuration.Seconds() + } + if md.RequestContentLength > 0 && latestAttempt.ServiceCallDuration > 0 { + md.OutThroughput = float64(md.RequestContentLength) / latestAttempt.ServiceCallDuration.Seconds() + } + } + } +} + +// LatestAttempt returns the latest attempt metrics. +// It returns an error if no attempts are initialized. +func (md *MetricData) LatestAttempt() (*AttemptMetrics, error) { + if md.Attempts == nil || len(md.Attempts) == 0 { + return nil, fmt.Errorf("no attempts initialized. NewAttempt() should be called first") + } + return &md.Attempts[len(md.Attempts)-1], nil +} + +// NewAttempt initializes new attempt metrics. +func (md *MetricData) NewAttempt() { + if md.Attempts == nil { + md.Attempts = []AttemptMetrics{} + } + md.Attempts = append(md.Attempts, AttemptMetrics{}) +} + +// SharedConnectionCounter is a counter shared across API calls. +type SharedConnectionCounter struct { + mu sync.Mutex + + activeRequests int + pendingConnectionAcquire int +} + +// ActiveRequests returns the count of active requests. +func (cc *SharedConnectionCounter) ActiveRequests() int { + cc.mu.Lock() + defer cc.mu.Unlock() + + return cc.activeRequests +} + +// PendingConnectionAcquire returns the count of pending connection acquires. +func (cc *SharedConnectionCounter) PendingConnectionAcquire() int { + cc.mu.Lock() + defer cc.mu.Unlock() + + return cc.pendingConnectionAcquire +} + +// AddActiveRequest increments the count of active requests. +func (cc *SharedConnectionCounter) AddActiveRequest() { + cc.mu.Lock() + defer cc.mu.Unlock() + + cc.activeRequests++ +} + +// RemoveActiveRequest decrements the count of active requests. +func (cc *SharedConnectionCounter) RemoveActiveRequest() { + cc.mu.Lock() + defer cc.mu.Unlock() + + cc.activeRequests-- +} + +// AddPendingConnectionAcquire increments the count of pending connection acquires. +func (cc *SharedConnectionCounter) AddPendingConnectionAcquire() { + cc.mu.Lock() + defer cc.mu.Unlock() + + cc.pendingConnectionAcquire++ +} + +// RemovePendingConnectionAcquire decrements the count of pending connection acquires. +func (cc *SharedConnectionCounter) RemovePendingConnectionAcquire() { + cc.mu.Lock() + defer cc.mu.Unlock() + + cc.pendingConnectionAcquire-- +} + +// InitMetricContext initializes the metric context with the provided counter and publisher. +// It returns the updated context. +func InitMetricContext( + ctx context.Context, counter *SharedConnectionCounter, publisher MetricPublisher, +) context.Context { + if middleware.GetStackValue(ctx, metricContextKey{}) == nil { + ctx = middleware.WithStackValue(ctx, metricContextKey{}, &MetricContext{ + connectionCounter: counter, + publisher: publisher, + data: &MetricData{ + Attempts: []AttemptMetrics{}, + Stream: StreamMetrics{}, + }, + }) + } + return ctx +} + +// Context returns the metric context from the given context. +// It returns nil if the metric context is not found. 
+func Context(ctx context.Context) *MetricContext { + mctx := middleware.GetStackValue(ctx, metricContextKey{}) + if mctx == nil { + return nil + } + return mctx.(*MetricContext) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/recursion_detection.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/recursion_detection.go new file mode 100644 index 00000000..3f6aaf23 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/recursion_detection.go @@ -0,0 +1,94 @@ +package middleware + +import ( + "context" + "fmt" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "os" +) + +const envAwsLambdaFunctionName = "AWS_LAMBDA_FUNCTION_NAME" +const envAmznTraceID = "_X_AMZN_TRACE_ID" +const amznTraceIDHeader = "X-Amzn-Trace-Id" + +// AddRecursionDetection adds recursionDetection to the middleware stack +func AddRecursionDetection(stack *middleware.Stack) error { + return stack.Build.Add(&RecursionDetection{}, middleware.After) +} + +// RecursionDetection detects Lambda environment and sets its X-Ray trace ID to request header if absent +// to avoid recursion invocation in Lambda +type RecursionDetection struct{} + +// ID returns the middleware identifier +func (m *RecursionDetection) ID() string { + return "RecursionDetection" +} + +// HandleBuild detects Lambda environment and adds its trace ID to request header if absent +func (m *RecursionDetection) HandleBuild( + ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown request type %T", req) + } + + _, hasLambdaEnv := os.LookupEnv(envAwsLambdaFunctionName) + xAmznTraceID, hasTraceID := os.LookupEnv(envAmznTraceID) + value := req.Header.Get(amznTraceIDHeader) + // only set the X-Amzn-Trace-Id header when it is not set initially, the + // current environment is Lambda and the _X_AMZN_TRACE_ID env variable exists + if value != "" || !hasLambdaEnv || !hasTraceID { + return next.HandleBuild(ctx, in) + } + + req.Header.Set(amznTraceIDHeader, percentEncode(xAmznTraceID)) + return next.HandleBuild(ctx, in) +} + +func percentEncode(s string) string { + upperhex := "0123456789ABCDEF" + hexCount := 0 + for i := 0; i < len(s); i++ { + c := s[i] + if shouldEncode(c) { + hexCount++ + } + } + + if hexCount == 0 { + return s + } + + required := len(s) + 2*hexCount + t := make([]byte, required) + j := 0 + for i := 0; i < len(s); i++ { + if c := s[i]; shouldEncode(c) { + t[j] = '%' + t[j+1] = upperhex[c>>4] + t[j+2] = upperhex[c&15] + j += 3 + } else { + t[j] = c + j++ + } + } + return string(t) +} + +func shouldEncode(c byte) bool { + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { + return false + } + switch c { + case '-', '=', ';', ':', '+', '&', '[', ']', '{', '}', '"', '\'', ',': + return false + default: + return true + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go index 285b2bba..af3447dd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go @@ -59,6 +59,11 @@ func (k SDKAgentKeyType) string() string { const execEnvVar = `AWS_EXECUTION_ENV` +var validChars = map[rune]bool{ + '!': true, '#': true, '$': true, '%': true, '&': true, '\'': true, '*': true, '+': 
true,
+	'-': true, '.': true, '^': true, '_': true, '`': true, '|': true, '~': true,
+}
+
 // requestUserAgent is a build middleware that sets the User-Agent for the request.
 type requestUserAgent struct {
 	sdkAgent, userAgent *smithyhttp.UserAgentBuilder
@@ -178,24 +183,24 @@ func getOrAddRequestUserAgent(stack *middleware.Stack) (*requestUserAgent, error
 // AddUserAgentKey adds the component identified by name to the User-Agent string.
 func (u *requestUserAgent) AddUserAgentKey(key string) {
-	u.userAgent.AddKey(key)
+	u.userAgent.AddKey(strings.Map(rules, key))
 }
 
 // AddUserAgentKeyValue adds the key identified by the given name and value to the User-Agent string.
 func (u *requestUserAgent) AddUserAgentKeyValue(key, value string) {
-	u.userAgent.AddKeyValue(key, value)
+	u.userAgent.AddKeyValue(strings.Map(rules, key), strings.Map(rules, value))
 }
 
 // AddSDKAgentKey adds the component identified by name to the User-Agent string.
 func (u *requestUserAgent) AddSDKAgentKey(keyType SDKAgentKeyType, key string) {
 	// TODO: should target sdkAgent
-	u.userAgent.AddKey(keyType.string() + "/" + key)
+	u.userAgent.AddKey(keyType.string() + "/" + strings.Map(rules, key))
 }
 
 // AddSDKAgentKeyValue adds the key identified by the given name and value to the User-Agent string.
 func (u *requestUserAgent) AddSDKAgentKeyValue(keyType SDKAgentKeyType, key, value string) {
 	// TODO: should target sdkAgent
-	u.userAgent.AddKeyValue(keyType.string()+"/"+key, value)
+	u.userAgent.AddKeyValue(keyType.string(), strings.Map(rules, key)+"#"+strings.Map(rules, value))
 }
 
 // ID returns the name of the middleware.
@@ -241,3 +246,16 @@ func updateHTTPHeader(request *smithyhttp.Request, header string, value string)
 	}
 	request.Header[header] = append(request.Header[header][:0], current)
 }
+
+func rules(r rune) rune {
+	switch {
+	case r >= '0' && r <= '9':
+		return r
+	case r >= 'A' && r <= 'Z' || r >= 'a' && r <= 'z':
+		return r
+	case validChars[r]:
+		return r
+	default:
+		return '-'
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md
index c95d493e..1e1da56b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md
@@ -1,3 +1,39 @@
+# v1.5.4 (2023-12-07)
+
+* No change notes available for this release.
+
+# v1.5.3 (2023-11-30)
+
+* No change notes available for this release.
+
+# v1.5.2 (2023-11-29)
+
+* No change notes available for this release.
+
+# v1.5.1 (2023-11-15)
+
+* No change notes available for this release.
+
+# v1.5.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+
+# v1.4.14 (2023-10-06)
+
+* No change notes available for this release.
+
+# v1.4.13 (2023-08-18)
+
+* No change notes available for this release.
+
+# v1.4.12 (2023-08-07)
+
+* No change notes available for this release.
+
+# v1.4.11 (2023-07-31)
+
+* No change notes available for this release.
+
 # v1.4.10 (2022-12-02)
 
 * No change notes available for this release.
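The user_agent.go hunk above funnels every User-Agent component through the new rules mapping, so characters outside the HTTP token set can no longer corrupt the header. A minimal standalone sketch of that sanitization behavior (the identifiers and sample input here are illustrative, not the vendored ones):

    package main

    import (
        "fmt"
        "strings"
    )

    // allowed mirrors the punctuation permitted in a User-Agent token.
    var allowed = map[rune]bool{
        '!': true, '#': true, '$': true, '%': true, '&': true, '\'': true, '*': true, '+': true,
        '-': true, '.': true, '^': true, '_': true, '`': true, '|': true, '~': true,
    }

    // sanitize keeps alphanumerics and allowed punctuation; everything else
    // (spaces, slashes, parentheses, ...) becomes '-'.
    func sanitize(r rune) rune {
        switch {
        case r >= '0' && r <= '9', r >= 'A' && r <= 'Z', r >= 'a' && r <= 'z':
            return r
        case allowed[r]:
            return r
        default:
            return '-'
        }
    }

    func main() {
        fmt.Println(strings.Map(sanitize, "my app/1.0")) // prints "my-app-1.0"
    }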
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go
index 0ca5492a..6759e90e 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go
@@ -3,4 +3,4 @@ package eventstream
 
 // goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.4.10"
+const goModuleVersion = "1.5.4"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go
index 822fc920..dc703d48 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go
@@ -3,6 +3,7 @@ package retry
 import (
 	"context"
 	"fmt"
+	"github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics"
 	"strconv"
 	"strings"
 	"time"
@@ -225,6 +226,13 @@
 	// that time. Potentially early exit if the sleep is canceled via the
 	// context.
 	retryDelay, reqErr := r.retryer.RetryDelay(attemptNum, err)
+	mctx := metrics.Context(ctx)
+	if mctx != nil {
+		attempt, err := mctx.Data().LatestAttempt()
+		if err == nil {
+			attempt.RetryDelay = retryDelay
+		}
+	}
 	if reqErr != nil {
 		return out, attemptResult, releaseRetryToken, reqErr
 	}
@@ -320,10 +328,12 @@
 		middleware.LogAttempts = options.LogRetryAttempts
 	})
 
-	if err := stack.Finalize.Add(attempt, smithymiddle.After); err != nil {
+	// insert retry before signing, if signing exists
+	if err := stack.Finalize.Insert(attempt, "Signing", smithymiddle.Before); err != nil {
 		return err
 	}
-	if err := stack.Finalize.Add(&MetricsHeader{}, smithymiddle.After); err != nil {
+
+	if err := stack.Finalize.Insert(&MetricsHeader{}, attempt.ID(), smithymiddle.After); err != nil {
 		return err
 	}
 	return nil
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go
index c695e6fe..987affdd 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go
@@ -95,6 +95,21 @@ func (r RetryableConnectionError) IsErrorRetryable(err error) aws.Ternary {
 	var timeoutErr interface{ Timeout() bool }
 	var urlErr *url.Error
 	var netOpErr *net.OpError
+	var dnsError *net.DNSError
+
+	if errors.As(err, &dnsError) {
+		// NXDOMAIN errors should not be retried
+		if dnsError.IsNotFound {
+			return aws.BoolTernary(false)
+		}
+
+		// if !dnsError.Temporary(), error may or may not be temporary,
+		// (i.e. !Temporary() =/=> !retryable) so we should fall through to
+		// remaining checks
+		if dnsError.Temporary() {
+			return aws.BoolTernary(true)
+		}
+	}
 
 	switch {
 	case errors.As(err, &conErr) && conErr.ConnectionError():
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go
index 6777e21e..b0ba4cb2 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go
@@ -54,7 +54,7 @@ type Retryer interface {
 	MaxAttempts() int
 
 	// RetryDelay returns the delay that should be used before retrying the
-	// attempt. Will return error if the if the delay could not be determined.
+	// attempt. Will return error if the delay could not be determined.
RetryDelay(attempt int, opErr error) (time.Duration, error) // GetRetryToken attempts to deduct the retry cost from the retry token pool. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go index 64c4c484..ca738f23 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go @@ -38,6 +38,7 @@ var RequiredSignedHeaders = Rules{ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Expected-Bucket-Owner": struct{}{}, "X-Amz-Grant-Full-control": struct{}{}, "X-Amz-Grant-Read": struct{}{}, "X-Amz-Grant-Read-Acp": struct{}{}, @@ -48,6 +49,7 @@ var RequiredSignedHeaders = Rules{ "X-Amz-Request-Payer": struct{}{}, "X-Amz-Server-Side-Encryption": struct{}{}, "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, + "X-Amz-Server-Side-Encryption-Context": struct{}{}, "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go index 749bda69..f39a369a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go @@ -11,7 +11,9 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics" v4Internal "github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" "github.com/aws/aws-sdk-go-v2/internal/sdk" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" @@ -57,7 +59,7 @@ func (e *SigningError) Unwrap() error { // S3 PutObject API allows unsigned payload signing auth usage when TLS is enabled, and uses this middleware to // dynamically switch between unsigned and signed payload based on TLS state for request. func UseDynamicPayloadSigningMiddleware(stack *middleware.Stack) error { - _, err := stack.Build.Swap(computePayloadHashMiddlewareID, &dynamicPayloadSigningMiddleware{}) + _, err := stack.Finalize.Swap(computePayloadHashMiddlewareID, &dynamicPayloadSigningMiddleware{}) return err } @@ -70,24 +72,22 @@ func (m *dynamicPayloadSigningMiddleware) ID() string { return computePayloadHashMiddlewareID } -// HandleBuild sets a resolver that directs to the payload sha256 compute handler. -func (m *dynamicPayloadSigningMiddleware) HandleBuild( - ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +// HandleFinalize delegates SHA256 computation according to whether the request +// is TLS-enabled. 
+func (m *dynamicPayloadSigningMiddleware) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, ) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) } - // if TLS is enabled, use unsigned payload when supported if req.IsHTTPS() { - return (&unsignedPayload{}).HandleBuild(ctx, in, next) + return (&unsignedPayload{}).HandleFinalize(ctx, in, next) } - - // else fall back to signed payload - return (&computePayloadSHA256{}).HandleBuild(ctx, in, next) + return (&computePayloadSHA256{}).HandleFinalize(ctx, in, next) } // unsignedPayload sets the SigV4 request payload hash to unsigned. @@ -103,7 +103,7 @@ type unsignedPayload struct{} // AddUnsignedPayloadMiddleware adds unsignedPayload to the operation // middleware stack func AddUnsignedPayloadMiddleware(stack *middleware.Stack) error { - return stack.Build.Add(&unsignedPayload{}, middleware.After) + return stack.Finalize.Insert(&unsignedPayload{}, "ResolveEndpointV2", middleware.After) } // ID returns the unsignedPayload identifier @@ -111,23 +111,16 @@ func (m *unsignedPayload) ID() string { return computePayloadHashMiddlewareID } -// HandleBuild sets the payload hash to be an unsigned payload -func (m *unsignedPayload) HandleBuild( - ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +// HandleFinalize sets the payload hash magic value to the unsigned sentinel. +func (m *unsignedPayload) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, ) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { - // This should not compute the content SHA256 if the value is already - // known. (e.g. application pre-computed SHA256 before making API call). - // Does not have any tight coupling to the X-Amz-Content-Sha256 header, if - // that header is provided a middleware must translate it into the context. - contentSHA := GetPayloadHash(ctx) - if len(contentSHA) == 0 { - contentSHA = v4Internal.UnsignedPayload + if GetPayloadHash(ctx) == "" { + ctx = SetPayloadHash(ctx, v4Internal.UnsignedPayload) } - - ctx = SetPayloadHash(ctx, contentSHA) - return next.HandleBuild(ctx, in) + return next.HandleFinalize(ctx, in) } // computePayloadSHA256 computes SHA256 payload hash to sign. 
@@ -143,13 +136,13 @@ type computePayloadSHA256 struct{} // AddComputePayloadSHA256Middleware adds computePayloadSHA256 to the // operation middleware stack func AddComputePayloadSHA256Middleware(stack *middleware.Stack) error { - return stack.Build.Add(&computePayloadSHA256{}, middleware.After) + return stack.Finalize.Insert(&computePayloadSHA256{}, "ResolveEndpointV2", middleware.After) } // RemoveComputePayloadSHA256Middleware removes computePayloadSHA256 from the // operation middleware stack func RemoveComputePayloadSHA256Middleware(stack *middleware.Stack) error { - _, err := stack.Build.Remove(computePayloadHashMiddlewareID) + _, err := stack.Finalize.Remove(computePayloadHashMiddlewareID) return err } @@ -158,12 +151,17 @@ func (m *computePayloadSHA256) ID() string { return computePayloadHashMiddlewareID } -// HandleBuild compute the payload hash for the request payload -func (m *computePayloadSHA256) HandleBuild( - ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +// HandleFinalize computes the payload hash for the request, storing it to the +// context. This is a no-op if a caller has previously set that value. +func (m *computePayloadSHA256) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, ) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { + if GetPayloadHash(ctx) != "" { + return next.HandleFinalize(ctx, in) + } + req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, &HashComputationError{ @@ -171,14 +169,6 @@ func (m *computePayloadSHA256) HandleBuild( } } - // This should not compute the content SHA256 if the value is already - // known. (e.g. application pre-computed SHA256 before making API call) - // Does not have any tight coupling to the X-Amz-Content-Sha256 header, if - // that header is provided a middleware must translate it into the context. - if contentSHA := GetPayloadHash(ctx); len(contentSHA) != 0 { - return next.HandleBuild(ctx, in) - } - hash := sha256.New() if stream := req.GetStream(); stream != nil { _, err = io.Copy(hash, stream) @@ -197,7 +187,7 @@ func (m *computePayloadSHA256) HandleBuild( ctx = SetPayloadHash(ctx, hex.EncodeToString(hash.Sum(nil))) - return next.HandleBuild(ctx, in) + return next.HandleFinalize(ctx, in) } // SwapComputePayloadSHA256ForUnsignedPayloadMiddleware replaces the @@ -206,7 +196,7 @@ func (m *computePayloadSHA256) HandleBuild( // Use this to disable computing the Payload SHA256 checksum and instead use // UNSIGNED-PAYLOAD for the SHA256 value. 
func SwapComputePayloadSHA256ForUnsignedPayloadMiddleware(stack *middleware.Stack) error { - _, err := stack.Build.Swap(computePayloadHashMiddlewareID, &unsignedPayload{}) + _, err := stack.Finalize.Swap(computePayloadHashMiddlewareID, &unsignedPayload{}) return err } @@ -217,13 +207,13 @@ type contentSHA256Header struct{} // AddContentSHA256HeaderMiddleware adds ContentSHA256Header to the // operation middleware stack func AddContentSHA256HeaderMiddleware(stack *middleware.Stack) error { - return stack.Build.Insert(&contentSHA256Header{}, computePayloadHashMiddlewareID, middleware.After) + return stack.Finalize.Insert(&contentSHA256Header{}, computePayloadHashMiddlewareID, middleware.After) } // RemoveContentSHA256HeaderMiddleware removes contentSHA256Header middleware // from the operation middleware stack func RemoveContentSHA256HeaderMiddleware(stack *middleware.Stack) error { - _, err := stack.Build.Remove((*contentSHA256Header)(nil).ID()) + _, err := stack.Finalize.Remove((*contentSHA256Header)(nil).ID()) return err } @@ -232,12 +222,12 @@ func (m *contentSHA256Header) ID() string { return "SigV4ContentSHA256Header" } -// HandleBuild sets the X-Amz-Content-Sha256 header value to the Payload hash +// HandleFinalize sets the X-Amz-Content-Sha256 header value to the Payload hash // stored in the context. -func (m *contentSHA256Header) HandleBuild( - ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +func (m *contentSHA256Header) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, ) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { req, ok := in.Request.(*smithyhttp.Request) if !ok { @@ -245,25 +235,35 @@ func (m *contentSHA256Header) HandleBuild( } req.Header.Set(v4Internal.ContentSHAKey, GetPayloadHash(ctx)) - - return next.HandleBuild(ctx, in) + return next.HandleFinalize(ctx, in) } -// SignHTTPRequestMiddlewareOptions is the configuration options for the SignHTTPRequestMiddleware middleware. +// SignHTTPRequestMiddlewareOptions is the configuration options for +// [SignHTTPRequestMiddleware]. +// +// Deprecated: [SignHTTPRequestMiddleware] is deprecated. type SignHTTPRequestMiddlewareOptions struct { CredentialsProvider aws.CredentialsProvider Signer HTTPSigner LogSigning bool } -// SignHTTPRequestMiddleware is a `FinalizeMiddleware` implementation for SigV4 HTTP Signing +// SignHTTPRequestMiddleware is a `FinalizeMiddleware` implementation for SigV4 +// HTTP Signing. +// +// Deprecated: AWS service clients no longer use this middleware. Signing as an +// SDK operation is now performed through an internal per-service middleware +// which opaquely selects and uses the signer from the resolved auth scheme. type SignHTTPRequestMiddleware struct { credentialsProvider aws.CredentialsProvider signer HTTPSigner logSigning bool } -// NewSignHTTPRequestMiddleware constructs a SignHTTPRequestMiddleware using the given Signer for signing requests +// NewSignHTTPRequestMiddleware constructs a [SignHTTPRequestMiddleware] using +// the given [Signer] for signing requests. +// +// Deprecated: SignHTTPRequestMiddleware is deprecated. 
func NewSignHTTPRequestMiddleware(options SignHTTPRequestMiddlewareOptions) *SignHTTPRequestMiddleware { return &SignHTTPRequestMiddleware{ credentialsProvider: options.CredentialsProvider, @@ -272,12 +272,17 @@ func NewSignHTTPRequestMiddleware(options SignHTTPRequestMiddlewareOptions) *Sig } } -// ID is the SignHTTPRequestMiddleware identifier +// ID is the SignHTTPRequestMiddleware identifier. +// +// Deprecated: SignHTTPRequestMiddleware is deprecated. func (s *SignHTTPRequestMiddleware) ID() string { return "Signing" } -// HandleFinalize will take the provided input and sign the request using the SigV4 authentication scheme +// HandleFinalize will take the provided input and sign the request using the +// SigV4 authentication scheme. +// +// Deprecated: SignHTTPRequestMiddleware is deprecated. func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { @@ -296,16 +301,56 @@ func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middl return out, metadata, &SigningError{Err: fmt.Errorf("computed payload hash missing from context")} } + mctx := metrics.Context(ctx) + + if mctx != nil { + if attempt, err := mctx.Data().LatestAttempt(); err == nil { + attempt.CredentialFetchStartTime = sdk.NowTime() + } + } + credentials, err := s.credentialsProvider.Retrieve(ctx) + + if mctx != nil { + if attempt, err := mctx.Data().LatestAttempt(); err == nil { + attempt.CredentialFetchEndTime = sdk.NowTime() + } + } + if err != nil { return out, metadata, &SigningError{Err: fmt.Errorf("failed to retrieve credentials: %w", err)} } - err = s.signer.SignHTTP(ctx, credentials, req.Request, payloadHash, signingName, signingRegion, sdk.NowTime(), + signerOptions := []func(o *SignerOptions){ func(o *SignerOptions) { o.Logger = middleware.GetLogger(ctx) o.LogSigning = s.logSigning + }, + } + + // existing DisableURIPathEscaping is equivalent in purpose + // to authentication scheme property DisableDoubleEncoding + disableDoubleEncoding, overridden := internalauth.GetDisableDoubleEncoding(ctx) + if overridden { + signerOptions = append(signerOptions, func(o *SignerOptions) { + o.DisableURIPathEscaping = disableDoubleEncoding }) + } + + if mctx != nil { + if attempt, err := mctx.Data().LatestAttempt(); err == nil { + attempt.SignStartTime = sdk.NowTime() + } + } + + err = s.signer.SignHTTP(ctx, credentials, req.Request, payloadHash, signingName, signingRegion, sdk.NowTime(), signerOptions...) + + if mctx != nil { + if attempt, err := mctx.Data().LatestAttempt(); err == nil { + attempt.SignEndTime = sdk.NowTime() + } + } + if err != nil { return out, metadata, &SigningError{Err: fmt.Errorf("failed to sign http request, %w", err)} } @@ -319,17 +364,17 @@ type streamingEventsPayload struct{} // AddStreamingEventsPayload adds the streamingEventsPayload middleware to the stack. 
func AddStreamingEventsPayload(stack *middleware.Stack) error { - return stack.Build.Add(&streamingEventsPayload{}, middleware.After) + return stack.Finalize.Add(&streamingEventsPayload{}, middleware.Before) } func (s *streamingEventsPayload) ID() string { return computePayloadHashMiddlewareID } -func (s *streamingEventsPayload) HandleBuild( - ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +func (s *streamingEventsPayload) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, ) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { contentSHA := GetPayloadHash(ctx) if len(contentSHA) == 0 { @@ -338,7 +383,7 @@ func (s *streamingEventsPayload) HandleBuild( ctx = SetPayloadHash(ctx, contentSHA) - return next.HandleBuild(ctx, in) + return next.HandleFinalize(ctx, in) } // GetSignedRequestSignature attempts to extract the signature of the request. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go index afd069c1..bb61904e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go @@ -68,6 +68,9 @@ import ( const ( signingAlgorithm = "AWS4-HMAC-SHA256" authorizationHeader = "Authorization" + + // Version of signing v4 + Version = "SigV4" ) // HTTPSigner is an interface to a SigV4 signer that can sign HTTP requests @@ -103,6 +106,11 @@ type SignerOptions struct { // This will enable logging of the canonical request, the string to sign, and for presigning the subsequent // presigned URL. LogSigning bool + + // Disables setting the session token on the request as part of signing + // through X-Amz-Security-Token. This is needed for variations of v4 that + // present the token elsewhere. + DisableSessionToken bool } // Signer applies AWS v4 signing to given request. Use this to sign requests @@ -136,6 +144,7 @@ type httpSigner struct { DisableHeaderHoisting bool DisableURIPathEscaping bool + DisableSessionToken bool } func (s *httpSigner) Build() (signedRequest, error) { @@ -284,6 +293,7 @@ func (s Signer) SignHTTP(ctx context.Context, credentials aws.Credentials, r *ht Time: v4Internal.NewSigningTime(signingTime.UTC()), DisableHeaderHoisting: options.DisableHeaderHoisting, DisableURIPathEscaping: options.DisableURIPathEscaping, + DisableSessionToken: options.DisableSessionToken, KeyDerivator: s.keyDerivator, } @@ -335,7 +345,7 @@ func (s Signer) SignHTTP(ctx context.Context, credentials aws.Credentials, r *ht // // expires := 20 * time.Minute // query := req.URL.Query() -// query.Set("X-Amz-Expires", strconv.FormatInt(int64(expires/time.Second), 10) +// query.Set("X-Amz-Expires", strconv.FormatInt(int64(expires/time.Second), 10)) // req.URL.RawQuery = query.Encode() // // This method does not modify the provided request. 
@@ -360,6 +370,7 @@ func (s *Signer) PresignHTTP( IsPreSign: true, DisableHeaderHoisting: options.DisableHeaderHoisting, DisableURIPathEscaping: options.DisableURIPathEscaping, + DisableSessionToken: options.DisableSessionToken, KeyDerivator: s.keyDerivator, } @@ -502,7 +513,8 @@ func (s *httpSigner) setRequiredSigningFields(headers http.Header, query url.Val if s.IsPreSign { query.Set(v4Internal.AmzAlgorithmKey, signingAlgorithm) - if sessionToken := s.Credentials.SessionToken; len(sessionToken) > 0 { + sessionToken := s.Credentials.SessionToken + if !s.DisableSessionToken && len(sessionToken) > 0 { query.Set("X-Amz-Security-Token", sessionToken) } @@ -512,7 +524,7 @@ func (s *httpSigner) setRequiredSigningFields(headers http.Header, query url.Val headers[v4Internal.AmzDateKey] = append(headers[v4Internal.AmzDateKey][:0], amzDate) - if len(s.Credentials.SessionToken) > 0 { + if !s.DisableSessionToken && len(s.Credentials.SessionToken) > 0 { headers[v4Internal.AmzSecurityTokenKey] = append(headers[v4Internal.AmzSecurityTokenKey][:0], s.Credentials.SessionToken) } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md index 7a0981d8..989c4ea3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md @@ -1,3 +1,189 @@ +# v1.16.16 (2024-01-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.15 (2024-01-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.14 (2024-01-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.13 (2023-12-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.12 (2023-12-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.11 (2023-12-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.10 (2023-12-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.9 (2023-12-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.8 (2023-11-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.7 (2023-11-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.6 (2023-11-28.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.5 (2023-11-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.4 (2023-11-21) + +* **Bug Fix**: Don't expect error responses to have a JSON payload in the endpointcreds provider. + +# v1.16.3 (2023-11-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.2 (2023-11-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.1 (2023-11-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.0 (2023-11-14) + +* **Feature**: Add support for dynamic auth token from file and EKS container host in absolute/relative URIs in the HTTP credential provider. + +# v1.15.2 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.1 (2023-11-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.0 (2023-11-01) + +* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.43 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.42 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.41 (2023-10-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.40 (2023-09-22) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.39 (2023-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.38 (2023-09-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.37 (2023-09-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.36 (2023-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.35 (2023-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.34 (2023-08-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.33 (2023-08-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.32 (2023-08-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.31 (2023-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.30 (2023-07-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.29 (2023-07-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.28 (2023-07-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.27 (2023-07-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.26 (2023-06-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.25 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.24 (2023-05-09) + +* No change notes available for this release. 
+ +# v1.13.23 (2023-05-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.22 (2023-05-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.21 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.20 (2023-04-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.19 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.13.18 (2023-03-21) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go index c715edbe..fe92184d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go @@ -3,4 +3,4 @@ package credentials // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.13.18" +const goModuleVersion = "1.16.16" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md index d36e93a7..fe53e9a5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md @@ -1,3 +1,267 @@ +# v1.15.15 (2024-01-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.14 (2024-01-22) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.13 (2024-01-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.12 (2024-01-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.11 (2024-01-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.10 (2024-01-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.9 (2023-12-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.8 (2023-12-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.7 (2023-12-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.6 (2023-12-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.5 (2023-12-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.4 (2023-12-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.3 (2023-11-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.2 (2023-11-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.1 (2023-11-28.3) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.0 (2023-11-28.2) + +* **Feature**: Add S3Express support. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.4 (2023-11-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.3 (2023-11-27) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.2 (2023-11-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.1 (2023-11-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.0 (2023-11-17) + +* **Feature**: **BREAKING CHANGE** Correct nullability of a large number of S3 structure fields. 
See https://github.com/aws/aws-sdk-go-v2/issues/2162. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.9 (2023-11-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.8 (2023-11-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.7 (2023-11-14) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.6 (2023-11-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.5 (2023-11-09.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.4 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.3 (2023-11-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.2 (2023-11-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.1 (2023-11-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.0 (2023-11-01) + +* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.92 (2023-10-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.91 (2023-10-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.90 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.89 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.88 (2023-10-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.87 (2023-09-26) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.86 (2023-09-22) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.85 (2023-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.84 (2023-09-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.83 (2023-09-05) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.82 (2023-08-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.81 (2023-08-23) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.80 (2023-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.79 (2023-08-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.78 (2023-08-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.77 (2023-08-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.76 (2023-08-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.75 (2023-07-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.74 (2023-07-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.73 (2023-07-25) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.72 (2023-07-13) + +* **Dependency Update**: Updated to the latest SDK module 
versions + +# v1.11.71 (2023-06-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.70 (2023-06-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.69 (2023-06-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.68 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.67 (2023-05-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.66 (2023-05-08) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.65 (2023-05-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.64 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.63 (2023-04-19) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.62 (2023-04-10) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.61 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.60 (2023-03-31) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.11.59 (2023-03-21) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/bucket_region.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/bucket_region.go index 2d8bd7e0..a6a9781e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/bucket_region.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/bucket_region.go @@ -65,7 +65,6 @@ func GetBucketRegion(ctx context.Context, client HeadBucketAPIClient, bucket str clientOptionFns := make([]func(*s3.Options), len(optFns)+1) clientOptionFns[0] = func(options *s3.Options) { - options.Credentials = aws.AnonymousCredentials{} options.APIOptions = append(options.APIOptions, captureBucketRegion.RegisterMiddleware) } copy(clientOptionFns[1:], optFns) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/download.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/download.go index 2ebcea58..06070ada 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/download.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/download.go @@ -436,8 +436,8 @@ func (d *downloader) setTotalBytes(resp *s3.GetObjectOutput) { if resp.ContentRange == nil { // ContentRange is nil when the full file contents is provided, and // is not chunked. Use ContentLength instead. 
-		if resp.ContentLength > 0 {
-			d.totalBytes = resp.ContentLength
+		if aws.ToInt64(resp.ContentLength) > 0 {
+			d.totalBytes = aws.ToInt64(resp.ContentLength)
 			return
 		}
 	} else {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go
index 3cd6c2d2..6fe1aa68 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go
@@ -3,4 +3,4 @@
 package manager
 
 // goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.11.59"
+const goModuleVersion = "1.15.15"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/upload.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/upload.go
index d68246c2..d1be506e 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/upload.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/upload.go
@@ -13,8 +13,11 @@ import (
 
 	"github.com/aws/aws-sdk-go-v2/aws/middleware"
 	"github.com/aws/aws-sdk-go-v2/internal/awsutil"
+	internalcontext "github.com/aws/aws-sdk-go-v2/internal/context"
 	"github.com/aws/aws-sdk-go-v2/service/s3"
 	"github.com/aws/aws-sdk-go-v2/service/s3/types"
+	smithymiddleware "github.com/aws/smithy-go/middleware"
+	smithyhttp "github.com/aws/smithy-go/transport/http"
 )
 
 // MaxUploadParts is the maximum allowed number of parts in a multi-part upload
@@ -308,6 +311,9 @@ func (u Uploader) Upload(ctx context.Context, input *s3.PutObjectInput, opts ...
 	clientOptions = append(clientOptions, func(o *s3.Options) {
 		o.APIOptions = append(o.APIOptions,
 			middleware.AddSDKAgentKey(middleware.FeatureMetadata, userAgentKey),
+			func(s *smithymiddleware.Stack) error {
+				return s.Finalize.Insert(&setS3ExpressDefaultChecksum{}, "ResolveEndpointV2", smithymiddleware.After)
+			},
 		)
 	})
 	clientOptions = append(clientOptions, i.cfg.ClientOptions...)
@@ -501,7 +507,7 @@ func (u *uploader) singlePart(r io.ReadSeeker, cleanup func()) (*UploadOutput, e
 
 	return &UploadOutput{
 		Location: locationRecorder.location,
-		BucketKeyEnabled: out.BucketKeyEnabled,
+		BucketKeyEnabled: aws.ToBool(out.BucketKeyEnabled),
 		ChecksumCRC32:    out.ChecksumCRC32,
 		ChecksumCRC32C:   out.ChecksumCRC32C,
 		ChecksumSHA1:     out.ChecksumSHA1,
@@ -568,9 +574,11 @@ type chunk struct {
 // since S3 required this list to be sent in sorted order.
 type completedParts []types.CompletedPart
 
-func (a completedParts) Len() int      { return len(a) }
-func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }
+func (a completedParts) Len() int      { return len(a) }
+func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a completedParts) Less(i, j int) bool {
+	return aws.ToInt32(a[i].PartNumber) < aws.ToInt32(a[j].PartNumber)
+}
 
 // upload will perform a multipart upload using the firstBuf buffer containing
 // the first chunk of data.
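The `aws.ToInt64`, `aws.ToBool`, `aws.ToInt32`, and `aws.Int32` calls threaded through the hunks above all stem from the s3/manager v1.14.0 breaking change recorded in the changelog earlier in this patch: many S3 structure fields became pointers, so scalar reads and writes now go through the `aws` package's pointer helpers. A minimal standalone sketch of the pattern (not part of the patch; the field names in the comments are only examples):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
)

func main() {
	// Wrapping a scalar for a pointer-typed request field (e.g. PartNumber).
	partNumber := aws.Int32(7)
	fmt.Println(aws.ToInt32(partNumber)) // 7

	// Reading a possibly-nil response field (e.g. ContentLength). The To*
	// helpers return the type's zero value for nil instead of panicking.
	var contentLength *int64 // nil when the service omits the header
	fmt.Println(aws.ToInt64(contentLength)) // 0

	// Booleans follow the same pattern (e.g. BucketKeyEnabled).
	fmt.Println(aws.ToBool(nil)) // false
}
```

Because `aws.ToInt32` maps nil to 0, the rewritten `completedParts.Less` above stays total and panic-free even when a `PartNumber` was never set.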
@@ -639,7 +647,7 @@ func (u *multiuploader) upload(firstBuf io.ReadSeeker, cleanup func()) (*UploadO UploadID: u.uploadID, CompletedParts: u.parts, - BucketKeyEnabled: completeOut.BucketKeyEnabled, + BucketKeyEnabled: aws.ToBool(completeOut.BucketKeyEnabled), ChecksumCRC32: completeOut.ChecksumCRC32, ChecksumCRC32C: completeOut.ChecksumCRC32C, ChecksumSHA1: completeOut.ChecksumSHA1, @@ -722,7 +730,7 @@ func (u *multiuploader) send(c chunk) error { // PutObject as they are never valid for individual parts of a // multipart upload. - PartNumber: c.num, + PartNumber: aws.Int32(c.num), UploadId: &u.uploadID, } // TODO should do copy then clear? @@ -734,7 +742,7 @@ func (u *multiuploader) send(c chunk) error { var completed types.CompletedPart awsutil.Copy(&completed, resp) - completed.PartNumber = c.num + completed.PartNumber = aws.Int32(c.num) u.m.Lock() u.parts = append(u.parts, completed) @@ -806,3 +814,42 @@ type readerAtSeeker interface { io.ReaderAt io.ReadSeeker } + +// setS3ExpressDefaultChecksum defaults to CRC32 for S3Express buckets, +// which is required when uploading to those through transfer manager. +type setS3ExpressDefaultChecksum struct{} + +func (*setS3ExpressDefaultChecksum) ID() string { + return "setS3ExpressDefaultChecksum" +} + +func (*setS3ExpressDefaultChecksum) HandleFinalize( + ctx context.Context, in smithymiddleware.FinalizeInput, next smithymiddleware.FinalizeHandler, +) ( + out smithymiddleware.FinalizeOutput, metadata smithymiddleware.Metadata, err error, +) { + const checksumHeader = "x-amz-checksum-algorithm" + + if internalcontext.GetS3Backend(ctx) != internalcontext.S3BackendS3Express { + return next.HandleFinalize(ctx, in) + } + + // If this is CreateMultipartUpload we need to ensure the checksum + // algorithm header is present. Otherwise everything is driven off the + // context setting and we can let it flow from there. + if middleware.GetOperationName(ctx) == "CreateMultipartUpload" { + r, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if internalcontext.GetChecksumInputAlgorithm(ctx) == "" { + r.Header.Set(checksumHeader, "CRC32") + } + return next.HandleFinalize(ctx, in) + } else if internalcontext.GetChecksumInputAlgorithm(ctx) == "" { + ctx = internalcontext.SetChecksumInputAlgorithm(ctx, string(types.ChecksumAlgorithmCrc32)) + } + + return next.HandleFinalize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/auth.go new file mode 100644 index 00000000..0b81db54 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/auth.go @@ -0,0 +1,45 @@ +package auth + +import ( + "github.com/aws/smithy-go/auth" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// HTTPAuthScheme is the SDK's internal implementation of smithyhttp.AuthScheme +// for pre-existing implementations where the signer was added to client +// config. SDK clients will key off of this type and ensure per-operation +// updates to those signers persist on the scheme itself. +type HTTPAuthScheme struct { + schemeID string + signer smithyhttp.Signer +} + +var _ smithyhttp.AuthScheme = (*HTTPAuthScheme)(nil) + +// NewHTTPAuthScheme returns an auth scheme instance with the given config. +func NewHTTPAuthScheme(schemeID string, signer smithyhttp.Signer) *HTTPAuthScheme { + return &HTTPAuthScheme{ + schemeID: schemeID, + signer: signer, + } +} + +// SchemeID identifies the auth scheme. 
+func (s *HTTPAuthScheme) SchemeID() string { + return s.schemeID +} + +// IdentityResolver gets the identity resolver for the auth scheme. +func (s *HTTPAuthScheme) IdentityResolver(o auth.IdentityResolverOptions) auth.IdentityResolver { + return o.GetIdentityResolver(s.schemeID) +} + +// Signer gets the signer for the auth scheme. +func (s *HTTPAuthScheme) Signer() smithyhttp.Signer { + return s.signer +} + +// WithSigner returns a new instance of the auth scheme with the updated signer. +func (s *HTTPAuthScheme) WithSigner(signer smithyhttp.Signer) *HTTPAuthScheme { + return NewHTTPAuthScheme(s.schemeID, signer) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/scheme.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/scheme.go new file mode 100644 index 00000000..bbc2ec06 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/scheme.go @@ -0,0 +1,191 @@ +package auth + +import ( + "context" + "fmt" + + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/middleware" +) + +// SigV4 is a constant representing +// Authentication Scheme Signature Version 4 +const SigV4 = "sigv4" + +// SigV4A is a constant representing +// Authentication Scheme Signature Version 4A +const SigV4A = "sigv4a" + +// SigV4S3Express identifies the S3 S3Express auth scheme. +const SigV4S3Express = "sigv4-s3express" + +// None is a constant representing the +// None Authentication Scheme +const None = "none" + +// SupportedSchemes is a data structure +// that indicates the list of supported AWS +// authentication schemes +var SupportedSchemes = map[string]bool{ + SigV4: true, + SigV4A: true, + SigV4S3Express: true, + None: true, +} + +// AuthenticationScheme is a representation of +// AWS authentication schemes +type AuthenticationScheme interface { + isAuthenticationScheme() +} + +// AuthenticationSchemeV4 is a AWS SigV4 representation +type AuthenticationSchemeV4 struct { + Name string + SigningName *string + SigningRegion *string + DisableDoubleEncoding *bool +} + +func (a *AuthenticationSchemeV4) isAuthenticationScheme() {} + +// AuthenticationSchemeV4A is a AWS SigV4A representation +type AuthenticationSchemeV4A struct { + Name string + SigningName *string + SigningRegionSet []string + DisableDoubleEncoding *bool +} + +func (a *AuthenticationSchemeV4A) isAuthenticationScheme() {} + +// AuthenticationSchemeNone is a representation for the none auth scheme +type AuthenticationSchemeNone struct{} + +func (a *AuthenticationSchemeNone) isAuthenticationScheme() {} + +// NoAuthenticationSchemesFoundError is used in signaling +// that no authentication schemes have been specified. +type NoAuthenticationSchemesFoundError struct{} + +func (e *NoAuthenticationSchemesFoundError) Error() string { + return fmt.Sprint("No authentication schemes specified.") +} + +// UnSupportedAuthenticationSchemeSpecifiedError is used in +// signaling that only unsupported authentication schemes +// were specified. +type UnSupportedAuthenticationSchemeSpecifiedError struct { + UnsupportedSchemes []string +} + +func (e *UnSupportedAuthenticationSchemeSpecifiedError) Error() string { + return fmt.Sprint("Unsupported authentication scheme specified.") +} + +// GetAuthenticationSchemes extracts the relevant authentication scheme data +// into a custom strongly typed Go data structure. 
+func GetAuthenticationSchemes(p *smithy.Properties) ([]AuthenticationScheme, error) { + var result []AuthenticationScheme + if !p.Has("authSchemes") { + return nil, &NoAuthenticationSchemesFoundError{} + } + + authSchemes, _ := p.Get("authSchemes").([]interface{}) + + var unsupportedSchemes []string + for _, scheme := range authSchemes { + authScheme, _ := scheme.(map[string]interface{}) + + version := authScheme["name"].(string) + switch version { + case SigV4, SigV4S3Express: + v4Scheme := AuthenticationSchemeV4{ + Name: version, + SigningName: getSigningName(authScheme), + SigningRegion: getSigningRegion(authScheme), + DisableDoubleEncoding: getDisableDoubleEncoding(authScheme), + } + result = append(result, AuthenticationScheme(&v4Scheme)) + case SigV4A: + v4aScheme := AuthenticationSchemeV4A{ + Name: SigV4A, + SigningName: getSigningName(authScheme), + SigningRegionSet: getSigningRegionSet(authScheme), + DisableDoubleEncoding: getDisableDoubleEncoding(authScheme), + } + result = append(result, AuthenticationScheme(&v4aScheme)) + case None: + noneScheme := AuthenticationSchemeNone{} + result = append(result, AuthenticationScheme(&noneScheme)) + default: + unsupportedSchemes = append(unsupportedSchemes, authScheme["name"].(string)) + continue + } + } + + if len(result) == 0 { + return nil, &UnSupportedAuthenticationSchemeSpecifiedError{ + UnsupportedSchemes: unsupportedSchemes, + } + } + + return result, nil +} + +type disableDoubleEncoding struct{} + +// SetDisableDoubleEncoding sets or modifies the disable double encoding option +// on the context. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. +func SetDisableDoubleEncoding(ctx context.Context, value bool) context.Context { + return middleware.WithStackValue(ctx, disableDoubleEncoding{}, value) +} + +// GetDisableDoubleEncoding retrieves the disable double encoding option +// from the context. +// +// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues +// to clear all stack values. 
+func GetDisableDoubleEncoding(ctx context.Context) (value bool, ok bool) { + value, ok = middleware.GetStackValue(ctx, disableDoubleEncoding{}).(bool) + return value, ok +} + +func getSigningName(authScheme map[string]interface{}) *string { + signingName, ok := authScheme["signingName"].(string) + if !ok || signingName == "" { + return nil + } + return &signingName +} + +func getSigningRegionSet(authScheme map[string]interface{}) []string { + untypedSigningRegionSet, ok := authScheme["signingRegionSet"].([]interface{}) + if !ok { + return nil + } + signingRegionSet := []string{} + for _, item := range untypedSigningRegionSet { + signingRegionSet = append(signingRegionSet, item.(string)) + } + return signingRegionSet +} + +func getSigningRegion(authScheme map[string]interface{}) *string { + signingRegion, ok := authScheme["signingRegion"].(string) + if !ok || signingRegion == "" { + return nil + } + return &signingRegion +} + +func getDisableDoubleEncoding(authScheme map[string]interface{}) *bool { + disableDoubleEncoding, ok := authScheme["disableDoubleEncoding"].(bool) + if !ok { + return nil + } + return &disableDoubleEncoding +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_adapter.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_adapter.go new file mode 100644 index 00000000..f059b5d3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_adapter.go @@ -0,0 +1,43 @@ +package smithy + +import ( + "context" + "fmt" + "time" + + "github.com/aws/smithy-go" + "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/auth/bearer" +) + +// BearerTokenAdapter adapts smithy bearer.Token to smithy auth.Identity. +type BearerTokenAdapter struct { + Token bearer.Token +} + +var _ auth.Identity = (*BearerTokenAdapter)(nil) + +// Expiration returns the time of expiration for the token. +func (v *BearerTokenAdapter) Expiration() time.Time { + return v.Token.Expires +} + +// BearerTokenProviderAdapter adapts smithy bearer.TokenProvider to smithy +// auth.IdentityResolver. +type BearerTokenProviderAdapter struct { + Provider bearer.TokenProvider +} + +var _ (auth.IdentityResolver) = (*BearerTokenProviderAdapter)(nil) + +// GetIdentity retrieves a bearer token using the underlying provider. +func (v *BearerTokenProviderAdapter) GetIdentity(ctx context.Context, _ smithy.Properties) ( + auth.Identity, error, +) { + token, err := v.Provider.RetrieveBearerToken(ctx) + if err != nil { + return nil, fmt.Errorf("get token: %w", err) + } + + return &BearerTokenAdapter{Token: token}, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_signer_adapter.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_signer_adapter.go new file mode 100644 index 00000000..a8828152 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_signer_adapter.go @@ -0,0 +1,35 @@ +package smithy + +import ( + "context" + "fmt" + + "github.com/aws/smithy-go" + "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/auth/bearer" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// BearerTokenSignerAdapter adapts smithy bearer.Signer to smithy http +// auth.Signer. +type BearerTokenSignerAdapter struct { + Signer bearer.Signer +} + +var _ (smithyhttp.Signer) = (*BearerTokenSignerAdapter)(nil) + +// SignRequest signs the request with the provided bearer token. 
+func (v *BearerTokenSignerAdapter) SignRequest(ctx context.Context, r *smithyhttp.Request, identity auth.Identity, _ smithy.Properties) error { + ca, ok := identity.(*BearerTokenAdapter) + if !ok { + return fmt.Errorf("unexpected identity type: %T", identity) + } + + signed, err := v.Signer.SignWithBearerToken(ctx, ca.Token, r) + if err != nil { + return fmt.Errorf("sign request: %w", err) + } + + *r = *signed.(*smithyhttp.Request) + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/credentials_adapter.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/credentials_adapter.go new file mode 100644 index 00000000..f926c4aa --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/credentials_adapter.go @@ -0,0 +1,46 @@ +package smithy + +import ( + "context" + "fmt" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/smithy-go" + "github.com/aws/smithy-go/auth" +) + +// CredentialsAdapter adapts aws.Credentials to auth.Identity. +type CredentialsAdapter struct { + Credentials aws.Credentials +} + +var _ auth.Identity = (*CredentialsAdapter)(nil) + +// Expiration returns the time of expiration for the credentials. +func (v *CredentialsAdapter) Expiration() time.Time { + return v.Credentials.Expires +} + +// CredentialsProviderAdapter adapts aws.CredentialsProvider to auth.IdentityResolver. +type CredentialsProviderAdapter struct { + Provider aws.CredentialsProvider +} + +var _ (auth.IdentityResolver) = (*CredentialsProviderAdapter)(nil) + +// GetIdentity retrieves AWS credentials using the underlying provider. +func (v *CredentialsProviderAdapter) GetIdentity(ctx context.Context, _ smithy.Properties) ( + auth.Identity, error, +) { + if v.Provider == nil { + return &CredentialsAdapter{Credentials: aws.Credentials{}}, nil + } + + creds, err := v.Provider.Retrieve(ctx) + if err != nil { + return nil, fmt.Errorf("get credentials: %w", err) + } + + return &CredentialsAdapter{Credentials: creds}, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/smithy.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/smithy.go new file mode 100644 index 00000000..42b45867 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/smithy.go @@ -0,0 +1,2 @@ +// Package smithy adapts concrete AWS auth and signing types to the generic smithy versions. +package smithy diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/v4signer_adapter.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/v4signer_adapter.go new file mode 100644 index 00000000..0c5a2d40 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/v4signer_adapter.go @@ -0,0 +1,53 @@ +package smithy + +import ( + "context" + "fmt" + + v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/smithy-go" + "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/logging" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// V4SignerAdapter adapts v4.HTTPSigner to smithy http.Signer. +type V4SignerAdapter struct { + Signer v4.HTTPSigner + Logger logging.Logger + LogSigning bool +} + +var _ (smithyhttp.Signer) = (*V4SignerAdapter)(nil) + +// SignRequest signs the request with the provided identity. 
+func (v *V4SignerAdapter) SignRequest(ctx context.Context, r *smithyhttp.Request, identity auth.Identity, props smithy.Properties) error { + ca, ok := identity.(*CredentialsAdapter) + if !ok { + return fmt.Errorf("unexpected identity type: %T", identity) + } + + name, ok := smithyhttp.GetSigV4SigningName(&props) + if !ok { + return fmt.Errorf("sigv4 signing name is required") + } + + region, ok := smithyhttp.GetSigV4SigningRegion(&props) + if !ok { + return fmt.Errorf("sigv4 signing region is required") + } + + hash := v4.GetPayloadHash(ctx) + err := v.Signer.SignHTTP(ctx, ca.Credentials, r.Request, hash, name, region, sdk.NowTime(), func(o *v4.SignerOptions) { + o.DisableURIPathEscaping, _ = smithyhttp.GetDisableDoubleEncoding(&props) + + o.Logger = v.Logger + o.LogSigning = v.LogSigning + }) + if err != nil { + return fmt.Errorf("sign http: %w", err) + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md index e1137950..dc87ec41 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md @@ -1,3 +1,96 @@ +# v1.2.10 (2024-01-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.9 (2023-12-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.8 (2023-12-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.7 (2023-11-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.6 (2023-11-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.5 (2023-11-28.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.4 (2023-11-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.3 (2023-11-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.2 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.1 (2023-11-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.43 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.42 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.41 (2023-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.40 (2023-08-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.39 (2023-08-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.38 (2023-08-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.37 (2023-07-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.36 (2023-07-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.35 (2023-07-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.34 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.33 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.32 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.1.31 (2023-03-21) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/endpoints.go new file mode 100644 index 00000000..e7835f85 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/endpoints.go @@ -0,0 +1,57 @@ +package configsources + +import ( + "context" +) + +// ServiceBaseEndpointProvider is needed to search for all providers +// that provide a configured service endpoint +type ServiceBaseEndpointProvider interface { + GetServiceBaseEndpoint(ctx context.Context, sdkID string) (string, bool, error) +} + +// IgnoreConfiguredEndpointsProvider is needed to search for all providers +// that provide a flag to disable configured endpoints. +// +// Currently duplicated from github.com/aws/aws-sdk-go-v2/config because +// service packages cannot import github.com/aws/aws-sdk-go-v2/config +// due to result import cycle error. +type IgnoreConfiguredEndpointsProvider interface { + GetIgnoreConfiguredEndpoints(ctx context.Context) (bool, bool, error) +} + +// GetIgnoreConfiguredEndpoints is used in knowing when to disable configured +// endpoints feature. +// +// Currently duplicated from github.com/aws/aws-sdk-go-v2/config because +// service packages cannot import github.com/aws/aws-sdk-go-v2/config +// due to result import cycle error. 
+func GetIgnoreConfiguredEndpoints(ctx context.Context, configs []interface{}) (value bool, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(IgnoreConfiguredEndpointsProvider); ok { + value, found, err = p.GetIgnoreConfiguredEndpoints(ctx) + if err != nil || found { + break + } + } + } + return +} + +// ResolveServiceBaseEndpoint is used to retrieve service endpoints from configured sources +// while allowing for configured endpoints to be disabled +func ResolveServiceBaseEndpoint(ctx context.Context, sdkID string, configs []interface{}) (value string, found bool, err error) { + if val, found, _ := GetIgnoreConfiguredEndpoints(ctx, configs); found && val { + return "", false, nil + } + + for _, cs := range configs { + if p, ok := cs.(ServiceBaseEndpointProvider); ok { + value, found, err = p.GetServiceBaseEndpoint(context.Background(), sdkID) + if err != nil || found { + break + } + } + } + return +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go index 47e3ce35..41ee0bfb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go @@ -3,4 +3,4 @@ package configsources // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.1.31" +const goModuleVersion = "1.2.10" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/context/context.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/context/context.go new file mode 100644 index 00000000..15bf1047 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/context/context.go @@ -0,0 +1,39 @@ +package context + +import ( + "context" + + "github.com/aws/smithy-go/middleware" +) + +type s3BackendKey struct{} +type checksumInputAlgorithmKey struct{} + +const ( + // S3BackendS3Express identifies the S3Express backend + S3BackendS3Express = "S3Express" +) + +// SetS3Backend stores the resolved endpoint backend within the request +// context, which is required for a variety of custom S3 behaviors. +func SetS3Backend(ctx context.Context, typ string) context.Context { + return middleware.WithStackValue(ctx, s3BackendKey{}, typ) +} + +// GetS3Backend retrieves the stored endpoint backend within the context. +func GetS3Backend(ctx context.Context) string { + v, _ := middleware.GetStackValue(ctx, s3BackendKey{}).(string) + return v +} + +// SetChecksumInputAlgorithm sets the request checksum algorithm on the +// context. +func SetChecksumInputAlgorithm(ctx context.Context, value string) context.Context { + return middleware.WithStackValue(ctx, checksumInputAlgorithmKey{}, value) +} + +// GetChecksumInputAlgorithm returns the checksum algorithm from the context. +func GetChecksumInputAlgorithm(ctx context.Context) string { + v, _ := middleware.GetStackValue(ctx, checksumInputAlgorithmKey{}).(string) + return v +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/arn.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/arn.go new file mode 100644 index 00000000..e6223dd3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/arn.go @@ -0,0 +1,94 @@ +package awsrulesfn + +import ( + "strings" +) + +// ARN provides AWS ARN components broken out into a data structure. 
+type ARN struct {
+	Partition  string
+	Service    string
+	Region     string
+	AccountId  string
+	ResourceId OptionalStringSlice
+}
+
+const (
+	arnDelimiters      = ":"
+	resourceDelimiters = "/:"
+	arnSections        = 6
+	arnPrefix          = "arn:"
+
+	// zero-indexed
+	sectionPartition = 1
+	sectionService   = 2
+	sectionRegion    = 3
+	sectionAccountID = 4
+	sectionResource  = 5
+)
+
+// ParseARN returns an [ARN] value parsed from the input string provided. If
+// the ARN cannot be parsed nil will be returned, and error added to
+// [ErrorCollector].
+func ParseARN(input string) *ARN {
+	if !strings.HasPrefix(input, arnPrefix) {
+		return nil
+	}
+
+	sections := strings.SplitN(input, arnDelimiters, arnSections)
+	if numSections := len(sections); numSections != arnSections {
+		return nil
+	}
+
+	if sections[sectionPartition] == "" {
+		return nil
+	}
+	if sections[sectionService] == "" {
+		return nil
+	}
+	if sections[sectionResource] == "" {
+		return nil
+	}
+
+	return &ARN{
+		Partition:  sections[sectionPartition],
+		Service:    sections[sectionService],
+		Region:     sections[sectionRegion],
+		AccountId:  sections[sectionAccountID],
+		ResourceId: splitResource(sections[sectionResource]),
+	}
+}
+
+// splitResource splits the resource components by the ARN resource delimiters.
+func splitResource(v string) []string {
+	var parts []string
+	var offset int
+
+	for offset <= len(v) {
+		idx := strings.IndexAny(v[offset:], "/:")
+		if idx < 0 {
+			parts = append(parts, v[offset:])
+			break
+		}
+		parts = append(parts, v[offset:idx+offset])
+		offset += idx + 1
+	}
+
+	return parts
+}
+
+// OptionalStringSlice provides a helper to safely get the index of a string
+// slice that may be out of bounds. Returns pointer to string if index is
+// valid. Otherwise returns nil.
+type OptionalStringSlice []string
+
+// Get returns a string pointer of the string at index i if the index is valid.
+// Otherwise returns nil.
+func (s OptionalStringSlice) Get(i int) *string {
+	if i < 0 || i >= len(s) {
+		return nil
+	}
+
+	v := s[i]
+	return &v
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/doc.go
new file mode 100644
index 00000000..d5a36585
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/doc.go
@@ -0,0 +1,3 @@
+// Package awsrulesfn provides AWS focused endpoint rule functions for
+// evaluating endpoint resolution rules.
+package awsrulesfn
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/generate.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/generate.go
new file mode 100644
index 00000000..df72da97
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/generate.go
@@ -0,0 +1,7 @@
+//go:build codegen
+// +build codegen
+
+package awsrulesfn
+
+//go:generate go run -tags codegen ./internal/partition/codegen.go -model partitions.json -output partitions.go
+//go:generate gofmt -w -s .
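`awsrulesfn` is an internal package and cannot be imported from outside the SDK module, so the standalone sketch below instead mirrors the acceptance rules of `ParseARN` above (the `arn:` prefix, exactly six `:`-separated sections, and non-empty partition, service, and resource sections). The sample ARN is illustrative only:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	const input = "arn:aws:s3:::my-bucket/logs/2024"

	if !strings.HasPrefix(input, "arn:") {
		fmt.Println("not an ARN")
		return
	}
	// Six sections: "arn", partition, service, region, account, resource.
	sections := strings.SplitN(input, ":", 6)
	if len(sections) != 6 || sections[1] == "" || sections[2] == "" || sections[5] == "" {
		fmt.Println("not an ARN")
		return
	}
	fmt.Println("partition:", sections[1]) // aws
	fmt.Println("service:  ", sections[2]) // s3
	fmt.Println("region:   ", sections[3]) // "" (S3 ARNs carry no region)
	fmt.Println("resource: ", sections[5]) // my-bucket/logs/2024
}
```

In the vendored code the resource section is further split on `/` and `:` by `splitResource`, and `OptionalStringSlice.Get` exposes the parts with bounds checking, returning nil rather than panicking for an out-of-range index.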
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/host.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/host.go new file mode 100644 index 00000000..637e5fc1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/host.go @@ -0,0 +1,51 @@ +package awsrulesfn + +import ( + "net" + "strings" + + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// IsVirtualHostableS3Bucket returns if the input is a DNS compatible bucket +// name and can be used with Amazon S3 virtual hosted style addressing. Similar +// to [rulesfn.IsValidHostLabel] with the added restriction that the length of label +// must be [3:63] characters long, all lowercase, and not formatted as an IP +// address. +func IsVirtualHostableS3Bucket(input string, allowSubDomains bool) bool { + // input should not be formatted as an IP address + // NOTE: this will technically trip up on IPv6 hosts with zone IDs, but + // validation further down will catch that anyway (it's guaranteed to have + // unfriendly characters % and : if that's the case) + if net.ParseIP(input) != nil { + return false + } + + var labels []string + if allowSubDomains { + labels = strings.Split(input, ".") + } else { + labels = []string{input} + } + + for _, label := range labels { + // validate special length constraints + if l := len(label); l < 3 || l > 63 { + return false + } + + // Validate no capital letters + for _, r := range label { + if r >= 'A' && r <= 'Z' { + return false + } + } + + // Validate valid host label + if !smithyhttp.ValidHostLabel(label) { + return false + } + } + + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go new file mode 100644 index 00000000..ba603275 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go @@ -0,0 +1,75 @@ +package awsrulesfn + +import "regexp" + +// Partition provides the metadata describing an AWS partition. +type Partition struct { + ID string `json:"id"` + Regions map[string]RegionOverrides `json:"regions"` + RegionRegex string `json:"regionRegex"` + DefaultConfig PartitionConfig `json:"outputs"` +} + +// PartitionConfig provides the endpoint metadata for an AWS region or partition. 
+type PartitionConfig struct { + Name string `json:"name"` + DnsSuffix string `json:"dnsSuffix"` + DualStackDnsSuffix string `json:"dualStackDnsSuffix"` + SupportsFIPS bool `json:"supportsFIPS"` + SupportsDualStack bool `json:"supportsDualStack"` +} + +type RegionOverrides struct { + Name *string `json:"name"` + DnsSuffix *string `json:"dnsSuffix"` + DualStackDnsSuffix *string `json:"dualStackDnsSuffix"` + SupportsFIPS *bool `json:"supportsFIPS"` + SupportsDualStack *bool `json:"supportsDualStack"` +} + +const defaultPartition = "aws" + +func getPartition(partitions []Partition, region string) *PartitionConfig { + for _, partition := range partitions { + if v, ok := partition.Regions[region]; ok { + p := mergeOverrides(partition.DefaultConfig, v) + return &p + } + } + + for _, partition := range partitions { + regionRegex := regexp.MustCompile(partition.RegionRegex) + if regionRegex.MatchString(region) { + v := partition.DefaultConfig + return &v + } + } + + for _, partition := range partitions { + if partition.ID == defaultPartition { + v := partition.DefaultConfig + return &v + } + } + + return nil +} + +func mergeOverrides(into PartitionConfig, from RegionOverrides) PartitionConfig { + if from.Name != nil { + into.Name = *from.Name + } + if from.DnsSuffix != nil { + into.DnsSuffix = *from.DnsSuffix + } + if from.DualStackDnsSuffix != nil { + into.DualStackDnsSuffix = *from.DualStackDnsSuffix + } + if from.SupportsFIPS != nil { + into.SupportsFIPS = *from.SupportsFIPS + } + if from.SupportsDualStack != nil { + into.SupportsDualStack = *from.SupportsDualStack + } + return into +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go new file mode 100644 index 00000000..849beffd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go @@ -0,0 +1,381 @@ +// Code generated by endpoint/awsrulesfn/internal/partition. DO NOT EDIT. + +package awsrulesfn + +// GetPartition returns an AWS [Partition] for the region provided. If the +// partition cannot be determined nil will be returned. 
+func GetPartition(region string) *PartitionConfig { + return getPartition(partitions, region) +} + +var partitions = []Partition{ + { + ID: "aws", + RegionRegex: "^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$", + DefaultConfig: PartitionConfig{ + Name: "aws", + DnsSuffix: "amazonaws.com", + DualStackDnsSuffix: "api.aws", + SupportsFIPS: true, + SupportsDualStack: true, + }, + Regions: map[string]RegionOverrides{ + "af-south-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "ap-east-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "ap-northeast-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "ap-northeast-2": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "ap-northeast-3": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "ap-south-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "ap-south-2": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "ap-southeast-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "ap-southeast-2": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "ap-southeast-3": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "ap-southeast-4": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "aws-global": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "ca-central-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "eu-central-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "eu-central-2": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "eu-north-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "eu-south-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "eu-south-2": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "eu-west-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "eu-west-2": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "eu-west-3": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "il-central-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "me-central-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "me-south-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "sa-east-1": { + Name: nil, + 
DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-east-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-east-2": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-west-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-west-2": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + }, + }, + { + ID: "aws-cn", + RegionRegex: "^cn\\-\\w+\\-\\d+$", + DefaultConfig: PartitionConfig{ + Name: "aws-cn", + DnsSuffix: "amazonaws.com.cn", + DualStackDnsSuffix: "api.amazonwebservices.com.cn", + SupportsFIPS: true, + SupportsDualStack: true, + }, + Regions: map[string]RegionOverrides{ + "aws-cn-global": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "cn-north-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "cn-northwest-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + }, + }, + { + ID: "aws-us-gov", + RegionRegex: "^us\\-gov\\-\\w+\\-\\d+$", + DefaultConfig: PartitionConfig{ + Name: "aws-us-gov", + DnsSuffix: "amazonaws.com", + DualStackDnsSuffix: "api.aws", + SupportsFIPS: true, + SupportsDualStack: true, + }, + Regions: map[string]RegionOverrides{ + "aws-us-gov-global": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-gov-east-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-gov-west-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + }, + }, + { + ID: "aws-iso", + RegionRegex: "^us\\-iso\\-\\w+\\-\\d+$", + DefaultConfig: PartitionConfig{ + Name: "aws-iso", + DnsSuffix: "c2s.ic.gov", + DualStackDnsSuffix: "c2s.ic.gov", + SupportsFIPS: true, + SupportsDualStack: false, + }, + Regions: map[string]RegionOverrides{ + "aws-iso-global": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-iso-east-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-iso-west-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + }, + }, + { + ID: "aws-iso-b", + RegionRegex: "^us\\-isob\\-\\w+\\-\\d+$", + DefaultConfig: PartitionConfig{ + Name: "aws-iso-b", + DnsSuffix: "sc2s.sgov.gov", + DualStackDnsSuffix: "sc2s.sgov.gov", + SupportsFIPS: true, + SupportsDualStack: false, + }, + Regions: map[string]RegionOverrides{ + "aws-iso-b-global": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-isob-east-1": { + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + }, + }, + { + ID: "aws-iso-e", + RegionRegex: "^eu\\-isoe\\-\\w+\\-\\d+$", + DefaultConfig: PartitionConfig{ + Name: "aws-iso-e", + DnsSuffix: "cloud.adc-e.uk", + DualStackDnsSuffix: "cloud.adc-e.uk", + SupportsFIPS: true, + SupportsDualStack: false, + }, + Regions: map[string]RegionOverrides{}, + }, + { + ID: "aws-iso-f", + 
RegionRegex: "^us\\-isof\\-\\w+\\-\\d+$", + DefaultConfig: PartitionConfig{ + Name: "aws-iso-f", + DnsSuffix: "csp.hci.ic.gov", + DualStackDnsSuffix: "csp.hci.ic.gov", + SupportsFIPS: true, + SupportsDualStack: false, + }, + Regions: map[string]RegionOverrides{}, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json new file mode 100644 index 00000000..f376f690 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json @@ -0,0 +1,216 @@ +{ + "partitions" : [ { + "id" : "aws", + "outputs" : { + "dnsSuffix" : "amazonaws.com", + "dualStackDnsSuffix" : "api.aws", + "implicitGlobalRegion" : "us-east-1", + "name" : "aws", + "supportsDualStack" : true, + "supportsFIPS" : true + }, + "regionRegex" : "^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$", + "regions" : { + "af-south-1" : { + "description" : "Africa (Cape Town)" + }, + "ap-east-1" : { + "description" : "Asia Pacific (Hong Kong)" + }, + "ap-northeast-1" : { + "description" : "Asia Pacific (Tokyo)" + }, + "ap-northeast-2" : { + "description" : "Asia Pacific (Seoul)" + }, + "ap-northeast-3" : { + "description" : "Asia Pacific (Osaka)" + }, + "ap-south-1" : { + "description" : "Asia Pacific (Mumbai)" + }, + "ap-south-2" : { + "description" : "Asia Pacific (Hyderabad)" + }, + "ap-southeast-1" : { + "description" : "Asia Pacific (Singapore)" + }, + "ap-southeast-2" : { + "description" : "Asia Pacific (Sydney)" + }, + "ap-southeast-3" : { + "description" : "Asia Pacific (Jakarta)" + }, + "ap-southeast-4" : { + "description" : "Asia Pacific (Melbourne)" + }, + "aws-global" : { + "description" : "AWS Standard global region" + }, + "ca-central-1" : { + "description" : "Canada (Central)" + }, + "ca-west-1" : { + "description" : "Canada West (Calgary)" + }, + "eu-central-1" : { + "description" : "Europe (Frankfurt)" + }, + "eu-central-2" : { + "description" : "Europe (Zurich)" + }, + "eu-north-1" : { + "description" : "Europe (Stockholm)" + }, + "eu-south-1" : { + "description" : "Europe (Milan)" + }, + "eu-south-2" : { + "description" : "Europe (Spain)" + }, + "eu-west-1" : { + "description" : "Europe (Ireland)" + }, + "eu-west-2" : { + "description" : "Europe (London)" + }, + "eu-west-3" : { + "description" : "Europe (Paris)" + }, + "il-central-1" : { + "description" : "Israel (Tel Aviv)" + }, + "me-central-1" : { + "description" : "Middle East (UAE)" + }, + "me-south-1" : { + "description" : "Middle East (Bahrain)" + }, + "sa-east-1" : { + "description" : "South America (Sao Paulo)" + }, + "us-east-1" : { + "description" : "US East (N. Virginia)" + }, + "us-east-2" : { + "description" : "US East (Ohio)" + }, + "us-west-1" : { + "description" : "US West (N. 
California)" + }, + "us-west-2" : { + "description" : "US West (Oregon)" + } + } + }, { + "id" : "aws-cn", + "outputs" : { + "dnsSuffix" : "amazonaws.com.cn", + "dualStackDnsSuffix" : "api.amazonwebservices.com.cn", + "implicitGlobalRegion" : "cn-northwest-1", + "name" : "aws-cn", + "supportsDualStack" : true, + "supportsFIPS" : true + }, + "regionRegex" : "^cn\\-\\w+\\-\\d+$", + "regions" : { + "aws-cn-global" : { + "description" : "AWS China global region" + }, + "cn-north-1" : { + "description" : "China (Beijing)" + }, + "cn-northwest-1" : { + "description" : "China (Ningxia)" + } + } + }, { + "id" : "aws-us-gov", + "outputs" : { + "dnsSuffix" : "amazonaws.com", + "dualStackDnsSuffix" : "api.aws", + "implicitGlobalRegion" : "us-gov-west-1", + "name" : "aws-us-gov", + "supportsDualStack" : true, + "supportsFIPS" : true + }, + "regionRegex" : "^us\\-gov\\-\\w+\\-\\d+$", + "regions" : { + "aws-us-gov-global" : { + "description" : "AWS GovCloud (US) global region" + }, + "us-gov-east-1" : { + "description" : "AWS GovCloud (US-East)" + }, + "us-gov-west-1" : { + "description" : "AWS GovCloud (US-West)" + } + } + }, { + "id" : "aws-iso", + "outputs" : { + "dnsSuffix" : "c2s.ic.gov", + "dualStackDnsSuffix" : "c2s.ic.gov", + "implicitGlobalRegion" : "us-iso-east-1", + "name" : "aws-iso", + "supportsDualStack" : false, + "supportsFIPS" : true + }, + "regionRegex" : "^us\\-iso\\-\\w+\\-\\d+$", + "regions" : { + "aws-iso-global" : { + "description" : "AWS ISO (US) global region" + }, + "us-iso-east-1" : { + "description" : "US ISO East" + }, + "us-iso-west-1" : { + "description" : "US ISO WEST" + } + } + }, { + "id" : "aws-iso-b", + "outputs" : { + "dnsSuffix" : "sc2s.sgov.gov", + "dualStackDnsSuffix" : "sc2s.sgov.gov", + "implicitGlobalRegion" : "us-isob-east-1", + "name" : "aws-iso-b", + "supportsDualStack" : false, + "supportsFIPS" : true + }, + "regionRegex" : "^us\\-isob\\-\\w+\\-\\d+$", + "regions" : { + "aws-iso-b-global" : { + "description" : "AWS ISOB (US) global region" + }, + "us-isob-east-1" : { + "description" : "US ISOB East (Ohio)" + } + } + }, { + "id" : "aws-iso-e", + "outputs" : { + "dnsSuffix" : "cloud.adc-e.uk", + "dualStackDnsSuffix" : "cloud.adc-e.uk", + "implicitGlobalRegion" : "eu-isoe-west-1", + "name" : "aws-iso-e", + "supportsDualStack" : false, + "supportsFIPS" : true + }, + "regionRegex" : "^eu\\-isoe\\-\\w+\\-\\d+$", + "regions" : { } + }, { + "id" : "aws-iso-f", + "outputs" : { + "dnsSuffix" : "csp.hci.ic.gov", + "dualStackDnsSuffix" : "csp.hci.ic.gov", + "implicitGlobalRegion" : "us-isof-south-1", + "name" : "aws-iso-f", + "supportsDualStack" : false, + "supportsFIPS" : true + }, + "regionRegex" : "^us\\-isof\\-\\w+\\-\\d+$", + "regions" : { } + } ], + "version" : "1.1" +} \ No newline at end of file diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/endpoints.go new file mode 100644 index 00000000..67950ca3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/endpoints.go @@ -0,0 +1,201 @@ +package endpoints + +import ( + "fmt" + "regexp" + "strings" + + "github.com/aws/aws-sdk-go-v2/aws" +) + +const ( + defaultProtocol = "https" + defaultSigner = "v4" +) + +var ( + protocolPriority = []string{"https", "http"} + signerPriority = []string{"v4"} +) + +// Options provide configuration needed to direct how endpoints are resolved. 
+type Options struct { + // Disable usage of HTTPS (TLS / SSL) + DisableHTTPS bool +} + +// Partitions is a slice of partition +type Partitions []Partition + +// ResolveEndpoint resolves a service endpoint for the given region and options. +func (ps Partitions) ResolveEndpoint(region string, opts Options) (aws.Endpoint, error) { + if len(ps) == 0 { + return aws.Endpoint{}, fmt.Errorf("no partitions found") + } + + for i := 0; i < len(ps); i++ { + if !ps[i].canResolveEndpoint(region) { + continue + } + + return ps[i].ResolveEndpoint(region, opts) + } + + // fallback to first partition format to use when resolving the endpoint. + return ps[0].ResolveEndpoint(region, opts) +} + +// Partition is an AWS partition description for a service and its' region endpoints. +type Partition struct { + ID string + RegionRegex *regexp.Regexp + PartitionEndpoint string + IsRegionalized bool + Defaults Endpoint + Endpoints Endpoints +} + +func (p Partition) canResolveEndpoint(region string) bool { + _, ok := p.Endpoints[region] + return ok || p.RegionRegex.MatchString(region) +} + +// ResolveEndpoint resolves and service endpoint for the given region and options. +func (p Partition) ResolveEndpoint(region string, options Options) (resolved aws.Endpoint, err error) { + if len(region) == 0 && len(p.PartitionEndpoint) != 0 { + region = p.PartitionEndpoint + } + + e, _ := p.endpointForRegion(region) + + return e.resolve(p.ID, region, p.Defaults, options), nil +} + +func (p Partition) endpointForRegion(region string) (Endpoint, bool) { + if e, ok := p.Endpoints[region]; ok { + return e, true + } + + if !p.IsRegionalized { + return p.Endpoints[p.PartitionEndpoint], region == p.PartitionEndpoint + } + + // Unable to find any matching endpoint, return + // blank that will be used for generic endpoint creation. + return Endpoint{}, false +} + +// Endpoints is a map of service config regions to endpoints +type Endpoints map[string]Endpoint + +// CredentialScope is the credential scope of a region and service +type CredentialScope struct { + Region string + Service string +} + +// Endpoint is a service endpoint description +type Endpoint struct { + // True if the endpoint cannot be resolved for this partition/region/service + Unresolveable aws.Ternary + + Hostname string + Protocols []string + + CredentialScope CredentialScope + + SignatureVersions []string `json:"signatureVersions"` +} + +func (e Endpoint) resolve(partition, region string, def Endpoint, options Options) aws.Endpoint { + var merged Endpoint + merged.mergeIn(def) + merged.mergeIn(e) + e = merged + + var u string + if e.Unresolveable != aws.TrueTernary { + // Only attempt to resolve the endpoint if it can be resolved. 
+ hostname := strings.Replace(e.Hostname, "{region}", region, 1) + + scheme := getEndpointScheme(e.Protocols, options.DisableHTTPS) + u = scheme + "://" + hostname + } + + signingRegion := e.CredentialScope.Region + if len(signingRegion) == 0 { + signingRegion = region + } + signingName := e.CredentialScope.Service + + return aws.Endpoint{ + URL: u, + PartitionID: partition, + SigningRegion: signingRegion, + SigningName: signingName, + SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), + } +} + +func (e *Endpoint) mergeIn(other Endpoint) { + if other.Unresolveable != aws.UnknownTernary { + e.Unresolveable = other.Unresolveable + } + if len(other.Hostname) > 0 { + e.Hostname = other.Hostname + } + if len(other.Protocols) > 0 { + e.Protocols = other.Protocols + } + if len(other.CredentialScope.Region) > 0 { + e.CredentialScope.Region = other.CredentialScope.Region + } + if len(other.CredentialScope.Service) > 0 { + e.CredentialScope.Service = other.CredentialScope.Service + } + if len(other.SignatureVersions) > 0 { + e.SignatureVersions = other.SignatureVersions + } +} + +func getEndpointScheme(protocols []string, disableHTTPS bool) string { + if disableHTTPS { + return "http" + } + + return getByPriority(protocols, protocolPriority, defaultProtocol) +} + +func getByPriority(s []string, p []string, def string) string { + if len(s) == 0 { + return def + } + + for i := 0; i < len(p); i++ { + for j := 0; j < len(s); j++ { + if s[j] == p[i] { + return s[j] + } + } + } + + return s[0] +} + +// MapFIPSRegion extracts the intrinsic AWS region from one that may have an +// embedded FIPS microformat. +func MapFIPSRegion(region string) string { + const fipsInfix = "-fips-" + const fipsPrefix = "fips-" + const fipsSuffix = "-fips" + + if strings.Contains(region, fipsInfix) || + strings.Contains(region, fipsPrefix) || + strings.Contains(region, fipsSuffix) { + region = strings.ReplaceAll(region, fipsInfix, "-") + region = strings.ReplaceAll(region, fipsPrefix, "") + region = strings.ReplaceAll(region, fipsSuffix, "") + } + + return region +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md index 8bed2112..e0265474 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md @@ -1,3 +1,96 @@ +# v2.5.10 (2024-01-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.5.9 (2023-12-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.5.8 (2023-12-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.5.7 (2023-11-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.5.6 (2023-11-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.5.5 (2023-11-28.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.5.4 (2023-11-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.5.3 (2023-11-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.5.2 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.5.1 (2023-11-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.5.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support 
policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.37 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.36 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.35 (2023-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.34 (2023-08-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.33 (2023-08-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.32 (2023-08-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.31 (2023-07-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.30 (2023-07-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.29 (2023-07-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.28 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.27 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.26 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + # v2.4.25 (2023-03-21) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go index 0ebdc4a4..bec2c6a1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go @@ -3,4 +3,4 @@ package endpoints // goModuleVersion is the tagged release for this module -const goModuleVersion = "2.4.25" +const goModuleVersion = "2.5.10" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md index ab404bb2..8aa94972 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md @@ -1,3 +1,97 @@ +# v1.2.10 (2024-01-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.9 (2023-12-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.8 (2023-12-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.7 (2023-11-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.6 (2023-11-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.5 (2023-11-28.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.4 (2023-11-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.3 (2023-11-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.2 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.1 (2023-11-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.6 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.5 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.4 (2023-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.3 (2023-08-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.2 (2023-08-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.1 (2023-08-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.0 (2023-07-31) + +* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supercede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.28 (2023-07-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.27 (2023-07-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.26 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.25 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.0.24 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.0.23 (2023-03-21) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go index ff822ea0..2a5888c1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go @@ -3,4 +3,4 @@ package v4a // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.0.23" +const goModuleVersion = "1.2.10" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/middleware.go index 55d5e8ab..64b8b4e3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/middleware.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/middleware.go @@ -3,13 +3,13 @@ package v4a import ( "context" "fmt" - "net/http" - "time" - awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" + "net/http" + "time" ) // HTTPSigner is SigV4a HTTP signer implementation @@ -71,10 +71,23 @@ func (s *SignHTTPRequestMiddleware) HandleFinalize( return out, metadata, &SigningError{Err: fmt.Errorf("failed to retrieve credentials: %w", err)} } - err = s.signer.SignHTTP(ctx, credentials, req.Request, payloadHash, signingName, []string{signingRegion}, time.Now().UTC(), func(o *SignerOptions) { - o.Logger = middleware.GetLogger(ctx) - o.LogSigning = s.logSigning - }) + signerOptions := []func(o *SignerOptions){ + func(o *SignerOptions) { + o.Logger = middleware.GetLogger(ctx) + o.LogSigning = s.logSigning + }, + } + + // existing DisableURIPathEscaping is equivalent in purpose + // to authentication scheme 
property DisableDoubleEncoding + disableDoubleEncoding, overridden := internalauth.GetDisableDoubleEncoding(ctx) + if overridden { + signerOptions = append(signerOptions, func(o *SignerOptions) { + o.DisableURIPathEscaping = disableDoubleEncoding + }) + } + + err = s.signer.SignHTTP(ctx, credentials, req.Request, payloadHash, signingName, []string{signingRegion}, time.Now().UTC(), signerOptions...) if err != nil { return out, metadata, &SigningError{Err: fmt.Errorf("failed to sign http request, %w", err)} } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/smithy.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/smithy.go new file mode 100644 index 00000000..516d459d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/smithy.go @@ -0,0 +1,86 @@ +package v4a + +import ( + "context" + "fmt" + "time" + + v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/smithy-go" + "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/logging" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// CredentialsAdapter adapts v4a.Credentials to smithy auth.Identity. +type CredentialsAdapter struct { + Credentials Credentials +} + +var _ auth.Identity = (*CredentialsAdapter)(nil) + +// Expiration returns the time of expiration for the credentials. +func (v *CredentialsAdapter) Expiration() time.Time { + return v.Credentials.Expires +} + +// CredentialsProviderAdapter adapts v4a.CredentialsProvider to +// auth.IdentityResolver. +type CredentialsProviderAdapter struct { + Provider CredentialsProvider +} + +var _ (auth.IdentityResolver) = (*CredentialsProviderAdapter)(nil) + +// GetIdentity retrieves v4a credentials using the underlying provider. +func (v *CredentialsProviderAdapter) GetIdentity(ctx context.Context, _ smithy.Properties) ( + auth.Identity, error, +) { + creds, err := v.Provider.RetrievePrivateKey(ctx) + if err != nil { + return nil, fmt.Errorf("get credentials: %w", err) + } + + return &CredentialsAdapter{Credentials: creds}, nil +} + +// SignerAdapter adapts v4a.HTTPSigner to smithy http.Signer. +type SignerAdapter struct { + Signer HTTPSigner + Logger logging.Logger + LogSigning bool +} + +var _ (smithyhttp.Signer) = (*SignerAdapter)(nil) + +// SignRequest signs the request with the provided identity. 
+func (v *SignerAdapter) SignRequest(ctx context.Context, r *smithyhttp.Request, identity auth.Identity, props smithy.Properties) error { + ca, ok := identity.(*CredentialsAdapter) + if !ok { + return fmt.Errorf("unexpected identity type: %T", identity) + } + + name, ok := smithyhttp.GetSigV4SigningName(&props) + if !ok { + return fmt.Errorf("sigv4a signing name is required") + } + + regions, ok := smithyhttp.GetSigV4ASigningRegions(&props) + if !ok { + return fmt.Errorf("sigv4a signing region is required") + } + + hash := v4.GetPayloadHash(ctx) + err := v.Signer.SignHTTP(ctx, ca.Credentials, r.Request, hash, name, regions, sdk.NowTime(), func(o *SignerOptions) { + o.DisableURIPathEscaping, _ = smithyhttp.GetDisableDoubleEncoding(&props) + + o.Logger = v.Logger + o.LogSigning = v.LogSigning + }) + if err != nil { + return fmt.Errorf("sign http: %w", err) + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md index b3998b28..c3525fd2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md @@ -1,3 +1,39 @@ +# v1.10.4 (2023-12-07) + +* No change notes available for this release. + +# v1.10.3 (2023-11-30) + +* No change notes available for this release. + +# v1.10.2 (2023-11-29) + +* No change notes available for this release. + +# v1.10.1 (2023-11-15) + +* No change notes available for this release. + +# v1.10.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). + +# v1.9.15 (2023-10-06) + +* No change notes available for this release. + +# v1.9.14 (2023-08-18) + +* No change notes available for this release. + +# v1.9.13 (2023-08-07) + +* No change notes available for this release. + +# v1.9.12 (2023-07-31) + +* No change notes available for this release. + # v1.9.11 (2022-12-02) * No change notes available for this release. 
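Context for the new v4a/smithy.go above: it bridges the SDK-internal v4a credential types to smithy-go's generic auth interfaces through a pair of adapters, and SignRequest type-asserts the identity back to *CredentialsAdapter before signing. Because those packages are internal and not importable, the following is a self-contained sketch of the same pattern using hypothetical stand-in types; only the interface shapes are taken from the diff.

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// Stand-ins for v4a.Credentials and v4a.CredentialsProvider; the real
// types live in the SDK-internal v4a package.
type Credentials struct{ Expires time.Time }

type CredentialsProvider interface {
	RetrievePrivateKey(ctx context.Context) (Credentials, error)
}

// Identity mirrors smithy-go's auth.Identity, which only exposes expiry.
type Identity interface{ Expiration() time.Time }

// CredentialsAdapter wraps concrete credentials so they satisfy the
// generic Identity interface, as smithy.go does above.
type CredentialsAdapter struct{ Credentials Credentials }

func (a *CredentialsAdapter) Expiration() time.Time { return a.Credentials.Expires }

// CredentialsProviderAdapter resolves concrete credentials and hands
// them back behind the generic interface.
type CredentialsProviderAdapter struct{ Provider CredentialsProvider }

func (a *CredentialsProviderAdapter) GetIdentity(ctx context.Context) (Identity, error) {
	creds, err := a.Provider.RetrievePrivateKey(ctx)
	if err != nil {
		return nil, fmt.Errorf("get credentials: %w", err)
	}
	return &CredentialsAdapter{Credentials: creds}, nil
}

// staticProvider is a hypothetical provider used only for this demo.
type staticProvider struct{ creds Credentials }

func (p staticProvider) RetrievePrivateKey(context.Context) (Credentials, error) {
	return p.creds, nil
}

func main() {
	resolver := &CredentialsProviderAdapter{Provider: staticProvider{
		creds: Credentials{Expires: time.Now().Add(time.Hour)},
	}}
	id, err := resolver.GetIdentity(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println("identity expires:", id.Expiration())
}
```

The indirection lets the generic auth stack shuttle any identity type between middleware steps, while the signer recovers the concrete credentials it needs via the type assertion shown in SignRequest.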
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go index f49fa921..cc638400 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go @@ -3,4 +3,4 @@ package acceptencoding // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.9.11" +const goModuleVersion = "1.10.4" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md index 5c2fb5ae..8f974036 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md @@ -1,3 +1,96 @@ +# v1.2.10 (2024-01-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.9 (2023-12-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.8 (2023-12-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.7 (2023-11-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.6 (2023-11-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.5 (2023-11-28.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.4 (2023-11-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.3 (2023-11-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.2 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.1 (2023-11-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.38 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.37 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.36 (2023-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.35 (2023-08-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.34 (2023-08-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.33 (2023-08-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.32 (2023-07-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.31 (2023-07-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.30 (2023-07-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.29 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.28 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.27 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.1.26 (2023-03-21) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go index 29c440e3..a88534d2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go @@ -3,4 +3,4 @@ package checksum // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.1.26" +const goModuleVersion = "1.2.10" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_add.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_add.go index 3e17d221..1b727acb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_add.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_add.go @@ -2,7 +2,6 @@ package checksum import ( "github.com/aws/smithy-go/middleware" - smithyhttp "github.com/aws/smithy-go/transport/http" ) // InputMiddlewareOptions provides the options for the request @@ -81,28 +80,25 @@ func AddInputMiddleware(stack *middleware.Stack, options InputMiddlewareOptions) stack.Build.Remove("ContentChecksum") - // Create the compute checksum middleware that will be added as both a - // build and finalize handler. inputChecksum := &computeInputPayloadChecksum{ RequireChecksum: options.RequireChecksum, EnableTrailingChecksum: options.EnableTrailingChecksum, EnableComputePayloadHash: options.EnableComputeSHA256PayloadHash, EnableDecodedContentLengthHeader: options.EnableDecodedContentLengthHeader, } - - // Insert header checksum after ComputeContentLength middleware, must also - // be before the computePayloadHash middleware handlers. - err = stack.Build.Insert(inputChecksum, - (*smithyhttp.ComputeContentLength)(nil).ID(), - middleware.After) - if err != nil { + if err := stack.Finalize.Insert(inputChecksum, "ResolveEndpointV2", middleware.After); err != nil { return err } // If trailing checksum is not supported no need for finalize handler to be added. 
if options.EnableTrailingChecksum { - err = stack.Finalize.Insert(inputChecksum, "Retry", middleware.After) - if err != nil { + trailerMiddleware := &addInputChecksumTrailer{ + EnableTrailingChecksum: inputChecksum.EnableTrailingChecksum, + RequireChecksum: inputChecksum.RequireChecksum, + EnableComputePayloadHash: inputChecksum.EnableComputePayloadHash, + EnableDecodedContentLengthHeader: inputChecksum.EnableDecodedContentLengthHeader, + } + if err := stack.Finalize.Insert(trailerMiddleware, "Retry", middleware.After); err != nil { return err } } @@ -117,7 +113,6 @@ func RemoveInputMiddleware(stack *middleware.Stack) { stack.Initialize.Remove(id) id = (*computeInputPayloadChecksum)(nil).ID() - stack.Build.Remove(id) stack.Finalize.Remove(id) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_compute_input_checksum.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_compute_input_checksum.go index 0b3c4891..7ffca33f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_compute_input_checksum.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_compute_input_checksum.go @@ -9,6 +9,8 @@ import ( "strconv" v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" + presignedurlcust "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) @@ -70,10 +72,11 @@ type computeInputPayloadChecksum struct { // when used with trailing checksums, and aws-chunked content-encoding. EnableDecodedContentLengthHeader bool - buildHandlerRun bool - deferToFinalizeHandler bool + useTrailer bool } +type useTrailer struct{} + // ID provides the middleware's identifier. func (m *computeInputPayloadChecksum) ID() string { return "AWSChecksum:ComputeInputPayloadChecksum" @@ -102,13 +105,11 @@ func (e computeInputHeaderChecksumError) Unwrap() error { return e.Err } // // The build handler must be inserted in the stack before ContentPayloadHash // and after ComputeContentLength. -func (m *computeInputPayloadChecksum) HandleBuild( - ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler, +func (m *computeInputPayloadChecksum) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, ) ( - out middleware.BuildOutput, metadata middleware.Metadata, err error, + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { - m.buildHandlerRun = true - req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, computeInputHeaderChecksumError{ @@ -145,13 +146,13 @@ func (m *computeInputPayloadChecksum) HandleBuild( } algorithm = Algorithm("MD5") } - return next.HandleBuild(ctx, in) + return next.HandleFinalize(ctx, in) } // If the checksum header is already set nothing to do. checksumHeader := AlgorithmHTTPHeader(algorithm) if checksum = req.Header.Get(checksumHeader); checksum != "" { - return next.HandleBuild(ctx, in) + return next.HandleFinalize(ctx, in) } computePayloadHash := m.EnableComputePayloadHash @@ -169,22 +170,19 @@ func (m *computeInputPayloadChecksum) HandleBuild( } // If trailing checksums are supported, the request is HTTPS, and the - // stream is not nil or empty, there is nothing to do in the build stage. - // The checksum will be added to the request as a trailing checksum in the - // finalize handler. 
+ // stream is not nil or empty, instead switch to a trailing checksum. // // Nil and empty streams will always be handled as a request header, // regardless if the operation supports trailing checksums or not. - if req.IsHTTPS() { + if req.IsHTTPS() && !presignedurlcust.GetIsPresigning(ctx) { if stream != nil && streamLength != 0 && m.EnableTrailingChecksum { if m.EnableComputePayloadHash { - // payload hash is set as header in Build middleware handler, - // ContentSHA256Header. + // ContentSHA256Header middleware handles the header ctx = v4.SetPayloadHash(ctx, streamingUnsignedPayloadTrailerPayloadHash) } - - m.deferToFinalizeHandler = true - return next.HandleBuild(ctx, in) + m.useTrailer = true + ctx = middleware.WithStackValue(ctx, useTrailer{}, true) + return next.HandleFinalize(ctx, in) } // If trailing checksums are not enabled but protocol is still HTTPS @@ -225,7 +223,7 @@ func (m *computeInputPayloadChecksum) HandleBuild( ctx = v4.SetPayloadHash(ctx, sha256Checksum) } - return next.HandleBuild(ctx, in) + return next.HandleFinalize(ctx, in) } type computeInputTrailingChecksumError struct { @@ -244,26 +242,31 @@ func (e computeInputTrailingChecksumError) Error() string { } func (e computeInputTrailingChecksumError) Unwrap() error { return e.Err } -// HandleFinalize handles computing the payload's checksum, in the following cases: +// addInputChecksumTrailer // - Is HTTPS, not HTTP // - A checksum was specified via the Input // - Trailing checksums are supported. -// -// The finalize handler must be inserted in the stack before Signing, and after Retry. -func (m *computeInputPayloadChecksum) HandleFinalize( +type addInputChecksumTrailer struct { + EnableTrailingChecksum bool + RequireChecksum bool + EnableComputePayloadHash bool + EnableDecodedContentLengthHeader bool +} + +// ID identifies this middleware. +func (*addInputChecksumTrailer) ID() string { + return "addInputChecksumTrailer" +} + +// HandleFinalize wraps the request body to write the trailing checksum. 
+func (m *addInputChecksumTrailer) HandleFinalize( ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, ) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { - if !m.deferToFinalizeHandler { - if !m.buildHandlerRun { - return out, metadata, computeInputTrailingChecksumError{ - Msg: "build handler was removed without also removing finalize handler", - } - } + if enabled, _ := middleware.GetStackValue(ctx, useTrailer{}).(bool); !enabled { return next.HandleFinalize(ctx, in) } - req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, computeInputTrailingChecksumError{ @@ -374,7 +377,7 @@ func (m *computeInputPayloadChecksum) HandleFinalize( } func getInputAlgorithm(ctx context.Context) (Algorithm, bool, error) { - ctxAlgorithm := getContextInputAlgorithm(ctx) + ctxAlgorithm := internalcontext.GetChecksumInputAlgorithm(ctx) if ctxAlgorithm == "" { return "", false, nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_setup_context.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_setup_context.go index f7295254..3db73afe 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_setup_context.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_setup_context.go @@ -3,6 +3,7 @@ package checksum import ( "context" + internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" "github.com/aws/smithy-go/middleware" ) @@ -35,33 +36,13 @@ func (m *setupInputContext) HandleInitialize( // check is input resource has a checksum algorithm algorithm, ok := m.GetAlgorithm(in.Parameters) if ok && len(algorithm) != 0 { - ctx = setContextInputAlgorithm(ctx, algorithm) + ctx = internalcontext.SetChecksumInputAlgorithm(ctx, algorithm) } } return next.HandleInitialize(ctx, in) } -// inputAlgorithmKey is the key set on context used to identify, retrieves the -// request checksum algorithm if present on the context. -type inputAlgorithmKey struct{} - -// setContextInputAlgorithm sets the request checksum algorithm on the context. -// -// Scoped to stack values. -func setContextInputAlgorithm(ctx context.Context, value string) context.Context { - return middleware.WithStackValue(ctx, inputAlgorithmKey{}, value) -} - -// getContextInputAlgorithm returns the checksum algorithm from the context if -// one was specified. Empty string is returned if one is not specified. -// -// Scoped to stack values. -func getContextInputAlgorithm(ctx context.Context) (v string) { - v, _ = middleware.GetStackValue(ctx, inputAlgorithmKey{}).(string) - return v -} - type setupOutputContext struct { // GetValidationMode is a function to get the checksum validation // mode of the output payload from the input parameters. 
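The checksum refactor above replaces the buildHandlerRun/deferToFinalizeHandler struct fields with a useTrailer flag carried in stack-scoped context (middleware.WithStackValue / middleware.GetStackValue), and moves the input-algorithm key into the shared internalcontext package. Both rely on Go's unexported-key context pattern; here is a minimal sketch with the standard library (smithy's stack values behave the same way, except invokeOperation clears them at the start of each operation):

```go
package main

import (
	"context"
	"fmt"
)

// useTrailer mirrors the unexported key type added in the middleware:
// an empty struct type makes a collision-free context key.
type useTrailer struct{}

// setUseTrailer records that the checksum should be sent as a trailer.
func setUseTrailer(ctx context.Context) context.Context {
	return context.WithValue(ctx, useTrailer{}, true)
}

// getUseTrailer reports whether an earlier step set the flag; the zero
// value (false) is returned when the key is absent.
func getUseTrailer(ctx context.Context) bool {
	v, _ := ctx.Value(useTrailer{}).(bool)
	return v
}

func main() {
	ctx := context.Background()
	fmt.Println(getUseTrailer(ctx)) // false: nothing decided yet

	// One middleware decides; a later one in the same stack reads it.
	ctx = setUseTrailer(ctx)
	fmt.Println(getUseTrailer(ctx)) // true: trailer path selected
}
```

This is why computeInputPayloadChecksum and the new addInputChecksumTrailer no longer need to share mutable struct state: the decision travels with the request context through the finalize chain.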
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md index fc8eb85a..a65890b5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md @@ -1,3 +1,96 @@ +# v1.10.10 (2024-01-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.9 (2023-12-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.8 (2023-12-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.7 (2023-11-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.6 (2023-11-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.5 (2023-11-28.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.4 (2023-11-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.3 (2023-11-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.2 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.1 (2023-11-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.37 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.36 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.35 (2023-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.34 (2023-08-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.33 (2023-08-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.32 (2023-08-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.31 (2023-07-31) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.30 (2023-07-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.29 (2023-07-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.28 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.27 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.26 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.9.25 (2023-03-21) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go index 0aa7a794..073e8866 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go @@ -3,4 +3,4 @@ package presignedurl // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.9.25" +const goModuleVersion = "1.10.10" diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md index 02a44bf5..c4df2176 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md @@ -1,3 +1,97 @@ +# v1.16.10 (2024-01-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.9 (2023-12-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.8 (2023-12-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.7 (2023-11-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.6 (2023-11-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.5 (2023-11-28.2) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.4 (2023-11-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.3 (2023-11-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.2 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.1 (2023-11-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.6 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.5 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.4 (2023-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.3 (2023-08-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.2 (2023-08-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.1 (2023-08-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.0 (2023-07-31) + +* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supercede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide. 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.5 (2023-07-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.4 (2023-07-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.3 (2023-06-13) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.2 (2023-04-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.1 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.14.0 (2023-03-21) * **Feature**: port v1 sdk 100-continue http header customization for s3 PutObject/UploadPart request and enable user config diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/arn_member.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/arn_member.go new file mode 100644 index 00000000..9a3258e1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn/arn_member.go @@ -0,0 +1,32 @@ +package arn + +import "fmt" + +// arnable is implemented by the relevant S3/S3Control +// operations which have members that may need ARN +// processing. +type arnable interface { + SetARNMember(string) error + GetARNMember() (*string, bool) +} + +// GetARNField would be called during middleware execution +// to retrieve a member value that is an ARN in need of +// processing. +func GetARNField(input interface{}) (*string, bool) { + v, ok := input.(arnable) + if !ok { + return nil, false + } + return v.GetARNMember() +} + +// SetARNField would called during middleware exeuction +// to set a member value that required ARN processing. +func SetARNField(input interface{}, v string) error { + params, ok := input.(arnable) + if !ok { + return fmt.Errorf("Params does not contain arn field member") + } + return params.SetARNMember(v) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config/config.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config/config.go index 8926e597..b5d31f5c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config/config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config/config.go @@ -7,6 +7,11 @@ type UseARNRegionProvider interface { GetS3UseARNRegion(ctx context.Context) (value bool, found bool, err error) } +// DisableMultiRegionAccessPointsProvider is an interface for retrieving external configuration value for DisableMultiRegionAccessPoints +type DisableMultiRegionAccessPointsProvider interface { + GetS3DisableMultiRegionAccessPoints(ctx context.Context) (value bool, found bool, err error) +} + // ResolveUseARNRegion extracts the first instance of a UseARNRegion from the config slice. // Additionally returns a boolean to indicate if the value was found in provided configs, and error if one is encountered. func ResolveUseARNRegion(ctx context.Context, configs []interface{}) (value bool, found bool, err error) { @@ -20,3 +25,17 @@ func ResolveUseARNRegion(ctx context.Context, configs []interface{}) (value bool } return } + +// ResolveDisableMultiRegionAccessPoints extracts the first instance of a DisableMultiRegionAccessPoints from the config slice. +// Additionally returns a boolean to indicate if the value was found in provided configs, and error if one is encountered. 
+func ResolveDisableMultiRegionAccessPoints(ctx context.Context, configs []interface{}) (value bool, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(DisableMultiRegionAccessPointsProvider); ok { + value, found, err = p.GetS3DisableMultiRegionAccessPoints(ctx) + if err != nil || found { + break + } + } + } + return +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go index 90cc9b21..986affe1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go @@ -3,4 +3,4 @@ package s3shared // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.14.0" +const goModuleVersion = "1.16.10" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md index 6b5a46f2..89a5a0d5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md @@ -1,3 +1,191 @@ +# v1.48.1 (2024-01-24) + +* No change notes available for this release. + +# v1.48.0 (2024-01-05) + +* **Feature**: Support smithy sigv4a trait for codegen. + +# v1.47.8 (2024-01-04) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.47.7 (2023-12-20) + +* No change notes available for this release. + +# v1.47.6 (2023-12-18) + +* No change notes available for this release. + +# v1.47.5 (2023-12-08) + +* **Bug Fix**: Add non-vhostable buckets to request path when using legacy V1 endpoint resolver. +* **Bug Fix**: Improve uniqueness of default S3Express sesssion credentials cache keying to prevent collision in multi-credential scenarios. +* **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein. + +# v1.47.4 (2023-12-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.47.3 (2023-12-06) + +* **Bug Fix**: Restore pre-refactor auth behavior where all operations could technically be performed anonymously. + +# v1.47.2 (2023-12-01) + +* **Bug Fix**: Correct wrapping of errors in authentication workflow. +* **Bug Fix**: Correctly recognize cache-wrapped instances of AnonymousCredentials at client construction. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.47.1 (2023-11-30) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.47.0 (2023-11-29) + +* **Feature**: Expose Options() accessor on service clients. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.46.0 (2023-11-28.2) + +* **Feature**: Add S3Express support. +* **Feature**: Adds support for S3 Express One Zone. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.45.1 (2023-11-28) + +* **Bug Fix**: Respect setting RetryMaxAttempts in functional options at client construction. + +# v1.45.0 (2023-11-27) + +* **Feature**: Adding new params - Key and Prefix, to S3 API operations for supporting S3 Access Grants. Note - These updates will not change any of the existing S3 API functionality. + +# v1.44.0 (2023-11-21) + +* **Feature**: Add support for automatic date based partitioning in S3 Server Access Logs. +* **Bug Fix**: Don't send MaxKeys/MaxUploads=0 when unspecified in ListObjectVersions and ListMultipartUploads paginators. 
+ +# v1.43.1 (2023-11-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.43.0 (2023-11-17) + +* **Feature**: **BREAKING CHANGE** Correct nullability of a large number of S3 structure fields. See https://github.com/aws/aws-sdk-go-v2/issues/2162. +* **Feature**: Removes all default 0 values for numbers and false values for booleans + +# v1.42.2 (2023-11-15) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.42.1 (2023-11-09) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.42.0 (2023-11-01) + +* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.41.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.40.2 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.40.1 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.40.0 (2023-09-26) + +* **Feature**: This release adds a new field COMPLETED to the ReplicationStatus Enum. You can now use this field to validate the replication status of S3 objects using the AWS SDK. + +# v1.39.0 (2023-09-20) + +* **Feature**: Fix an issue where the SDK can fail to unmarshall response due to NumberFormatException + +# v1.38.5 (2023-08-21) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.38.4 (2023-08-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.38.3 (2023-08-17) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.38.2 (2023-08-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.38.1 (2023-08-01) + +* No change notes available for this release. + +# v1.38.0 (2023-07-31) + +* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supercede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.37.1 (2023-07-28) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.37.0 (2023-07-13) + +* **Feature**: S3 Inventory now supports Object Access Control List and Object Owner as available object metadata fields in inventory reports. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.36.0 (2023-06-28) + +* **Feature**: The S3 LISTObjects, ListObjectsV2 and ListObjectVersions API now supports a new optional header x-amz-optional-object-attributes. If header contains RestoreStatus as the value, then S3 will include Glacier restore status i.e. isRestoreInProgress and RestoreExpiryDate in List response. + +# v1.35.0 (2023-06-16) + +* **Feature**: This release adds SDK support for request-payer request header and request-charged response header in the "GetBucketAccelerateConfiguration", "ListMultipartUploads", "ListObjects", "ListObjectsV2" and "ListObjectVersions" S3 APIs. 
+ +# v1.34.1 (2023-06-15) + +* No change notes available for this release. + +# v1.34.0 (2023-06-13) + +* **Feature**: Integrate double encryption feature to SDKs. +* **Bug Fix**: Fix HeadObject to return types.Nound when an object does not exist. Fixes [2084](https://github.com/aws/aws-sdk-go-v2/issues/2084) +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.33.1 (2023-05-04) + +* **Documentation**: Documentation updates for Amazon S3 + +# v1.33.0 (2023-04-24) + +* **Feature**: added custom paginators for listMultipartUploads and ListObjectVersions +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.32.0 (2023-04-19) + +* **Feature**: Provides support for "Snow" Storage class. + +# v1.31.3 (2023-04-10) + +* No change notes available for this release. + +# v1.31.2 (2023-04-07) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.31.1 (2023-03-31) + +* **Documentation**: Documentation updates for Amazon S3 + # v1.31.0 (2023-03-21) * **Feature**: port v1 sdk 100-continue http header customization for s3 PutObject/UploadPart request and enable user config diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go index e462917a..db35814d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go @@ -11,6 +11,8 @@ import ( "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" + internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" "github.com/aws/aws-sdk-go-v2/internal/v4a" acceptencodingcust "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding" @@ -32,8 +34,8 @@ import ( const ServiceID = "S3" const ServiceAPIVersion = "2006-03-01" -// Client provides the API client to make operations call for Amazon Simple Storage -// Service. +// Client provides the API client to make operations call for Amazon Simple +// Storage Service. type Client struct { options Options } @@ -54,172 +56,62 @@ func New(options Options, optFns ...func(*Options)) *Client { resolveHTTPSignerV4(&options) - resolveDefaultEndpointConfiguration(&options) + resolveEndpointResolverV2(&options) resolveHTTPSignerV4a(&options) + resolveAuthSchemeResolver(&options) + for _, fn := range optFns { fn(&options) } - resolveCredentialProvider(&options) - - client := &Client{ - options: options, - } - - return client -} - -type Options struct { - // Set of options to modify how an operation is invoked. These apply to all - // operations invoked for this client. Use functional options on operation call to - // modify this list for per operation behavior. - APIOptions []func(*middleware.Stack) error - - // Configures the events that will be sent to the configured logger. - ClientLogMode aws.ClientLogMode - - // The threshold ContentLength in bytes for HTTP PUT request to receive {Expect: - // 100-continue} header. Setting to -1 will disable adding the Expect header to - // requests; setting to 0 will set the threshold to default 2MB - ContinueHeaderThresholdBytes int64 - - // The credentials object to use when signing requests. 
- Credentials aws.CredentialsProvider - - // The configuration DefaultsMode that the SDK should use when constructing the - // clients initial default settings. - DefaultsMode aws.DefaultsMode - - // Allows you to disable S3 Multi-Region access points feature. - DisableMultiRegionAccessPoints bool - - // The endpoint options to be used when attempting to resolve an endpoint. - EndpointOptions EndpointResolverOptions - - // The service endpoint resolver. - EndpointResolver EndpointResolver - - // Signature Version 4 (SigV4) Signer - HTTPSignerV4 HTTPSignerV4 - - // The logger writer interface to write logging messages to. - Logger logging.Logger - - // The region to send requests to. (Required) - Region string - - // RetryMaxAttempts specifies the maximum number attempts an API client will call - // an operation that fails with a retryable error. A value of 0 is ignored, and - // will not be used to configure the API client created default retryer, or modify - // per operation call's retry max attempts. When creating a new API Clients this - // member will only be used if the Retryer Options member is nil. This value will - // be ignored if Retryer is not nil. If specified in an operation call's functional - // options with a value that is different than the constructed client's Options, - // the Client's Retryer will be wrapped to use the operation's specific - // RetryMaxAttempts value. - RetryMaxAttempts int - - // RetryMode specifies the retry mode the API client will be created with, if - // Retryer option is not also specified. When creating a new API Clients this - // member will only be used if the Retryer Options member is nil. This value will - // be ignored if Retryer is not nil. Currently does not support per operation call - // overrides, may in the future. - RetryMode aws.RetryMode - - // Retryer guides how HTTP requests should be retried in case of recoverable - // failures. When nil the API client will use a default retryer. The kind of - // default retry created by the API client can be changed with the RetryMode - // option. - Retryer aws.Retryer + finalizeRetryMaxAttempts(&options) - // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set - // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig. You - // should not populate this structure programmatically, or rely on the values here - // within your applications. - RuntimeEnvironment aws.RuntimeEnvironment + ignoreAnonymousAuth(&options) - // Allows you to enable arn region support for the service. - UseARNRegion bool + resolveExpressCredentials(&options) - // Allows you to enable S3 Accelerate feature. All operations compatible with S3 - // Accelerate will use the accelerate endpoint for requests. Requests not - // compatible will fall back to normal S3 requests. The bucket must be enabled for - // accelerate to be used with S3 client with accelerate enabled. If the bucket is - // not enabled for accelerate an error will be returned. The bucket name must be - // DNS compatible to work with accelerate. - UseAccelerate bool + finalizeServiceEndpointAuthResolver(&options) - // Allows you to enable dual-stack endpoint support for the service. - // - // Deprecated: Set dual-stack by setting UseDualStackEndpoint on - // EndpointResolverOptions. When EndpointResolverOptions' UseDualStackEndpoint - // field is set it overrides this field value. 
- UseDualstack bool + resolveAuthSchemes(&options) - // Allows you to enable the client to use path-style addressing, i.e., - // https://s3.amazonaws.com/BUCKET/KEY. By default, the S3 client will use virtual - // hosted bucket addressing when possible(https://BUCKET.s3.amazonaws.com/KEY). - UsePathStyle bool - - // Signature Version 4a (SigV4a) Signer - httpSignerV4a httpSignerV4a - - // The initial DefaultsMode used when the client options were constructed. If the - // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved - // value was at that point in time. Currently does not support per operation call - // overrides, may in the future. - resolvedDefaultsMode aws.DefaultsMode - - // The HTTP client to invoke API calls with. Defaults to client's default HTTP - // implementation if nil. - HTTPClient HTTPClient -} - -// WithAPIOptions returns a functional option for setting the Client's APIOptions -// option. -func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { - return func(o *Options) { - o.APIOptions = append(o.APIOptions, optFns...) + client := &Client{ + options: options, } -} -// WithEndpointResolver returns a functional option for setting the Client's -// EndpointResolver option. -func WithEndpointResolver(v EndpointResolver) func(*Options) { - return func(o *Options) { - o.EndpointResolver = v - } -} + finalizeExpressCredentials(&options, client) -type HTTPClient interface { - Do(*http.Request) (*http.Response, error) + return client } -// Copy creates a clone where the APIOptions list is deep copied. -func (o Options) Copy() Options { - to := o - to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) - copy(to.APIOptions, o.APIOptions) - - return to +// Options returns a copy of the client configuration. +// +// Callers SHOULD NOT perform mutations on any inner structures within client +// config. Config overrides should instead be made on a per-operation basis through +// functional options. 
+func (c *Client) Options() Options { + return c.options.Copy() } + func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) { ctx = middleware.ClearStackValues(ctx) stack := middleware.NewStack(opID, smithyhttp.NewStackRequest) options := c.options.Copy() + for _, fn := range optFns { fn(&options) } setSafeEventStreamClientLogMode(&options, opID) - finalizeRetryMaxAttemptOptions(&options, *c) + finalizeOperationRetryMaxAttempts(&options, *c) finalizeClientEndpointResolverOptions(&options) - resolveCredentialProvider(&options) + finalizeOperationExpressCredentials(&options, *c) + + finalizeOperationEndpointAuthResolver(&options) for _, fn := range stackFns { if err := fn(stack, options); err != nil { @@ -245,8 +137,99 @@ func (c *Client) invokeOperation(ctx context.Context, opID string, params interf return result, metadata, err } +type operationInputKey struct{} + +func setOperationInput(ctx context.Context, input interface{}) context.Context { + return middleware.WithStackValue(ctx, operationInputKey{}, input) +} + +func getOperationInput(ctx context.Context) interface{} { + return middleware.GetStackValue(ctx, operationInputKey{}) +} + +type setOperationInputMiddleware struct { +} + +func (*setOperationInputMiddleware) ID() string { + return "setOperationInput" +} + +func (m *setOperationInputMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + ctx = setOperationInput(ctx, in.Parameters) + return next.HandleSerialize(ctx, in) +} + +func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error { + if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil { + return fmt.Errorf("add ResolveAuthScheme: %w", err) + } + if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil { + return fmt.Errorf("add GetIdentity: %v", err) + } + if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil { + return fmt.Errorf("add ResolveEndpointV2: %v", err) + } + if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", middleware.After); err != nil { + return fmt.Errorf("add Signing: %w", err) + } + return nil +} +func resolveAuthSchemeResolver(options *Options) { + if options.AuthSchemeResolver == nil { + options.AuthSchemeResolver = &defaultAuthSchemeResolver{} + } +} + +func resolveAuthSchemes(options *Options) { + if options.AuthSchemes == nil { + options.AuthSchemes = []smithyhttp.AuthScheme{ + internalauth.NewHTTPAuthScheme("aws.auth#sigv4", &internalauthsmithy.V4SignerAdapter{ + Signer: options.HTTPSignerV4, + Logger: options.Logger, + LogSigning: options.ClientLogMode.IsSigning(), + }), + internalauth.NewHTTPAuthScheme("com.amazonaws.s3#sigv4express", &s3cust.ExpressSigner{ + Signer: options.HTTPSignerV4, + Logger: options.Logger, + LogSigning: options.ClientLogMode.IsSigning(), + }), + internalauth.NewHTTPAuthScheme("aws.auth#sigv4a", &v4a.SignerAdapter{ + Signer: options.httpSignerV4a, + Logger: options.Logger, + LogSigning: options.ClientLogMode.IsSigning(), + }), + } + } +} + type noSmithyDocumentSerde = 
smithydocument.NoSerde +type legacyEndpointContextSetter struct { + LegacyResolver EndpointResolver +} + +func (*legacyEndpointContextSetter) ID() string { + return "legacyEndpointContextSetter" +} + +func (m *legacyEndpointContextSetter) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + if m.LegacyResolver != nil { + ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, true) + } + + return next.HandleInitialize(ctx, in) + +} +func addlegacyEndpointContextSetter(stack *middleware.Stack, o Options) error { + return stack.Initialize.Add(&legacyEndpointContextSetter{ + LegacyResolver: o.EndpointResolver, + }, middleware.Before) +} + func resolveDefaultLogger(o *Options) { if o.Logger != nil { return @@ -284,14 +267,18 @@ func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { APIOptions: cfg.APIOptions, Logger: cfg.Logger, ClientLogMode: cfg.ClientLogMode, + AppID: cfg.AppID, } resolveAWSRetryerProvider(cfg, &opts) resolveAWSRetryMaxAttempts(cfg, &opts) resolveAWSRetryMode(cfg, &opts) resolveAWSEndpointResolver(cfg, &opts) resolveUseARNRegion(cfg, &opts) + resolveDisableMultiRegionAccessPoints(cfg, &opts) + resolveDisableExpressAuth(cfg, &opts) resolveUseDualStackEndpoint(cfg, &opts) resolveUseFIPSEndpoint(cfg, &opts) + resolveBaseEndpoint(cfg, &opts) return New(opts, optFns...) } @@ -383,7 +370,15 @@ func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) { o.RetryMaxAttempts = cfg.RetryMaxAttempts } -func finalizeRetryMaxAttemptOptions(o *Options, client Client) { +func finalizeRetryMaxAttempts(o *Options) { + if o.RetryMaxAttempts == 0 { + return + } + + o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts) +} + +func finalizeOperationRetryMaxAttempts(o *Options, client Client) { if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts { return } @@ -395,20 +390,19 @@ func resolveAWSEndpointResolver(cfg aws.Config, o *Options) { if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil { return } - o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions, NewDefaultEndpointResolver()) + o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions) } -func addClientUserAgent(stack *middleware.Stack) error { - return awsmiddleware.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "s3", goModuleVersion)(stack) -} +func addClientUserAgent(stack *middleware.Stack, options Options) error { + if err := awsmiddleware.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "s3", goModuleVersion)(stack); err != nil { + return err + } -func addHTTPSignerV4Middleware(stack *middleware.Stack, o Options) error { - mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{ - CredentialsProvider: o.Credentials, - Signer: o.HTTPSignerV4, - LogSigning: o.ClientLogMode.IsSigning(), - }) - return stack.Finalize.Add(mw, middleware.After) + if len(options.AppID) > 0 { + return awsmiddleware.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID)(stack) + } + + return nil } type HTTPSignerV4 interface { @@ -453,6 +447,21 @@ func resolveUseARNRegion(cfg aws.Config, o *Options) error { return nil } +// resolves DisableMultiRegionAccessPoints S3 configuration +func resolveDisableMultiRegionAccessPoints(cfg aws.Config, o *Options) error { + if len(cfg.ConfigSources) == 0 { + return nil + } + value, found, err := 
s3sharedconfig.ResolveDisableMultiRegionAccessPoints(context.Background(), cfg.ConfigSources) + if err != nil { + return err + } + if found { + o.DisableMultiRegionAccessPoints = value + } + return nil +} + // resolves dual-stack endpoint configuration func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error { if len(cfg.ConfigSources) == 0 { @@ -483,33 +492,6 @@ func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { return nil } -func resolveCredentialProvider(o *Options) { - if o.Credentials == nil { - return - } - - if _, ok := o.Credentials.(v4a.CredentialsProvider); ok { - return - } - - if aws.IsCredentialsProvider(o.Credentials, (*aws.AnonymousCredentials)(nil)) { - return - } - - o.Credentials = &v4a.SymmetricCredentialAdaptor{SymmetricProvider: o.Credentials} -} - -func swapWithCustomHTTPSignerMiddleware(stack *middleware.Stack, o Options) error { - mw := s3cust.NewSignHTTPRequestMiddleware(s3cust.SignHTTPRequestMiddlewareOptions{ - CredentialsProvider: o.Credentials, - V4Signer: o.HTTPSignerV4, - V4aSigner: o.httpSignerV4a, - LogSigning: o.ClientLogMode.IsSigning(), - }) - - return s3cust.RegisterSigningMiddleware(stack, mw) -} - type httpSignerV4a interface { SignHTTP(ctx context.Context, credentials v4a.Credentials, r *http.Request, payloadHash, service string, regionSet []string, signingTime time.Time, @@ -527,7 +509,6 @@ func newDefaultV4aSigner(o Options) *v4a.Signer { return v4a.NewSigner(func(so *v4a.SignerOptions) { so.Logger = o.Logger so.LogSigning = o.ClientLogMode.IsSigning() - so.DisableURIPathEscaping = true }) } @@ -539,8 +520,8 @@ func add100Continue(stack *middleware.Stack, options Options) error { return s3shared.Add100Continue(stack, options.ContinueHeaderThresholdBytes) } -// ComputedInputChecksumsMetadata provides information about the algorithms used to -// compute the checksum(s) of the input payload. +// ComputedInputChecksumsMetadata provides information about the algorithms used +// to compute the checksum(s) of the input payload. type ComputedInputChecksumsMetadata struct { // ComputedChecksums is a map of algorithm name to checksum value of the computed // input payload's checksums. @@ -560,8 +541,8 @@ func GetComputedInputChecksumsMetadata(m middleware.Metadata) (ComputedInputChec } -// ChecksumValidationMetadata contains metadata such as the checksum algorithm used -// for data integrity validation. +// ChecksumValidationMetadata contains metadata such as the checksum algorithm +// used for data integrity validation. type ChecksumValidationMetadata struct { // AlgorithmsUsed is the set of the checksum algorithms used to validate the // response payload. The response payload must be completely read in order for the @@ -570,10 +551,10 @@ type ChecksumValidationMetadata struct { AlgorithmsUsed []string } -// GetChecksumValidationMetadata returns the set of algorithms that will be used to -// validate the response payload with. The response payload must be completely read -// in order for the checksum validation to be performed. An error is returned by -// the operation output's response io.ReadCloser if the computed checksums are +// GetChecksumValidationMetadata returns the set of algorithms that will be used +// to validate the response payload with. The response payload must be completely +// read in order for the checksum validation to be performed. An error is returned +// by the operation output's response io.ReadCloser if the computed checksums are // invalid. Returns false if no checksum algorithm used metadata was found. 
func GetChecksumValidationMetadata(m middleware.Metadata) (ChecksumValidationMetadata, bool) { values, ok := internalChecksum.GetOutputValidationAlgorithmsUsed(m) @@ -600,8 +581,8 @@ func disableAcceptEncodingGzip(stack *middleware.Stack) error { return acceptencodingcust.AddAcceptEncodingGzip(stack, acceptencodingcust.AddAcceptEncodingGzipOptions{}) } -// ResponseError provides the HTTP centric error type wrapping the underlying error -// with the HTTP response value and the deserialized RequestID. +// ResponseError provides the HTTP centric error type wrapping the underlying +// error with the HTTP response value and the deserialized RequestID. type ResponseError interface { error @@ -611,8 +592,8 @@ type ResponseError interface { var _ ResponseError = (*s3shared.ResponseError)(nil) -// GetHostIDMetadata retrieves the host id from middleware metadata returns host id -// as string along with a boolean indicating presence of hostId on middleware +// GetHostIDMetadata retrieves the host id from middleware metadata returns host +// id as string along with a boolean indicating presence of hostId on middleware // metadata. func GetHostIDMetadata(metadata middleware.Metadata) (string, bool) { return s3shared.GetHostIDMetadata(metadata) @@ -722,29 +703,77 @@ func withNopHTTPClientAPIOption(o *Options) { o.HTTPClient = smithyhttp.NopClient{} } +type presignContextPolyfillMiddleware struct { +} + +func (*presignContextPolyfillMiddleware) ID() string { + return "presignContextPolyfill" +} + +func (m *presignContextPolyfillMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + rscheme := getResolvedAuthScheme(ctx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + schemeID := rscheme.Scheme.SchemeID() + ctx = s3cust.SetSignerVersion(ctx, schemeID) + if schemeID == "aws.auth#sigv4" || schemeID == "com.amazonaws.s3#sigv4express" { + if sn, ok := smithyhttp.GetSigV4SigningName(&rscheme.SignerProperties); ok { + ctx = awsmiddleware.SetSigningName(ctx, sn) + } + if sr, ok := smithyhttp.GetSigV4SigningRegion(&rscheme.SignerProperties); ok { + ctx = awsmiddleware.SetSigningRegion(ctx, sr) + } + } else if schemeID == "aws.auth#sigv4a" { + if sn, ok := smithyhttp.GetSigV4ASigningName(&rscheme.SignerProperties); ok { + ctx = awsmiddleware.SetSigningName(ctx, sn) + } + if sr, ok := smithyhttp.GetSigV4ASigningRegions(&rscheme.SignerProperties); ok { + ctx = awsmiddleware.SetSigningRegion(ctx, sr[0]) + } + } + + return next.HandleFinalize(ctx, in) +} + type presignConverter PresignOptions func (c presignConverter) convertToPresignMiddleware(stack *middleware.Stack, options Options) (err error) { - stack.Finalize.Clear() + if _, ok := stack.Finalize.Get((*acceptencodingcust.DisableGzip)(nil).ID()); ok { + stack.Finalize.Remove((*acceptencodingcust.DisableGzip)(nil).ID()) + } + if _, ok := stack.Finalize.Get((*retry.Attempt)(nil).ID()); ok { + stack.Finalize.Remove((*retry.Attempt)(nil).ID()) + } + if _, ok := stack.Finalize.Get((*retry.MetricsHeader)(nil).ID()); ok { + stack.Finalize.Remove((*retry.MetricsHeader)(nil).ID()) + } stack.Deserialize.Clear() stack.Build.Remove((*awsmiddleware.ClientRequestID)(nil).ID()) stack.Build.Remove("UserAgent") + if err := stack.Finalize.Insert(&presignContextPolyfillMiddleware{}, "Signing", middleware.Before); err != nil { + return err + } + pmw := 
v4.NewPresignHTTPRequestMiddleware(v4.PresignHTTPRequestMiddlewareOptions{ CredentialsProvider: options.Credentials, Presigner: c.Presigner, LogSigning: options.ClientLogMode.IsSigning(), }) - err = stack.Finalize.Add(pmw, middleware.After) - if err != nil { + if _, err := stack.Finalize.Swap("Signing", pmw); err != nil { return err } if err = smithyhttp.AddNoPayloadDefaultContentTypeRemover(stack); err != nil { return err } - // add multi-region access point presigner + // extended s3 presigning signermv := s3cust.NewPresignHTTPRequestMiddleware(s3cust.PresignHTTPRequestMiddlewareOptions{ CredentialsProvider: options.Credentials, + ExpressCredentials: options.ExpressCredentials, V4Presigner: c.Presigner, V4aPresigner: c.presignerV4a, LogSigning: options.ClientLogMode.IsSigning(), @@ -778,3 +807,32 @@ func addRequestResponseLogging(stack *middleware.Stack, o Options) error { LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(), }, middleware.After) } + +type disableHTTPSMiddleware struct { + DisableHTTPS bool +} + +func (*disableHTTPSMiddleware) ID() string { + return "disableHTTPS" +} + +func (m *disableHTTPSMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.DisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) { + req.URL.Scheme = "http" + } + + return next.HandleFinalize(ctx, in) +} + +func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error { + return stack.Finalize.Insert(&disableHTTPSMiddleware{ + DisableHTTPS: o.EndpointOptions.DisableHTTPS, + }, "ResolveEndpointV2", middleware.After) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go index 042e848a..d1e7dcea 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go @@ -4,6 +4,7 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" @@ -12,39 +13,45 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This action aborts a multipart upload. After a multipart upload is aborted, no -// additional parts can be uploaded using that upload ID. The storage consumed by -// any previously uploaded parts will be freed. However, if any part uploads are +// This operation aborts a multipart upload. After a multipart upload is aborted, +// no additional parts can be uploaded using that upload ID. The storage consumed +// by any previously uploaded parts will be freed. However, if any part uploads are // currently in progress, those part uploads might or might not succeed. As a // result, it might be necessary to abort a given multipart upload multiple times // in order to completely free all storage consumed by all parts. To verify that -// all parts have been removed, so you don't get charged for the part storage, you -// should call the ListParts -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) action and -// ensure that the parts list is empty. 
For information about permissions required -// to use the multipart upload, see Multipart Upload and Permissions -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). The -// following operations are related to AbortMultipartUpload: +// all parts have been removed and prevent getting charged for the part storage, +// you should call the ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// API operation and ensure that the parts list is empty. Directory buckets - For +// directory buckets, you must make requests for this API operation to the Zonal +// endpoint. These endpoints support virtual-hosted-style requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style +// requests are not supported. For more information, see Regional and Zonal +// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. Permissions +// - General purpose bucket permissions - For information about permissions +// required to use the multipart upload, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon S3 User Guide. +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// . // -// * -// CreateMultipartUpload -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// -// * -// UploadPart -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// -// * -// CompleteMultipartUpload -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) -// -// * -// ListParts -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// -// * -// ListMultipartUploads -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . 
The following operations are +// related to AbortMultipartUpload : +// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) func (c *Client) AbortMultipartUpload(ctx context.Context, params *AbortMultipartUploadInput, optFns ...func(*Options)) (*AbortMultipartUploadOutput, error) { if params == nil { params = &AbortMultipartUploadInput{} @@ -62,23 +69,31 @@ func (c *Client) AbortMultipartUpload(ctx context.Context, params *AbortMultipar type AbortMultipartUploadInput struct { - // The bucket name to which the upload was taking place. When using this action - // with an access point, you must direct requests to the access point hostname. The - // access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, - // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts - // hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the - // Amazon S3 User Guide. + // The bucket name to which the upload was taking place. Directory buckets - When + // you use this operation with a directory bucket, you must use + // virtual-hosted-style requests in the format + // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + // supported. Directory bucket names must be unique in the chosen Availability + // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket + // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. 
For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + // use this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts access point ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // This member is required. Bucket *string @@ -93,26 +108,34 @@ type AbortMultipartUploadInput struct { // This member is required. UploadId *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer noSmithyDocumentSerde } +func (in *AbortMultipartUploadInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.Key = in.Key + +} + type AbortMultipartUploadOutput struct { // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. 
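A minimal usage sketch of the AbortMultipartUpload operation documented above, assuming hypothetical bucket, key, and upload-ID values (config.LoadDefaultConfig and s3.NewFromConfig are the standard aws-sdk-go-v2 entry points):

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Abort the upload; storage consumed by already-uploaded parts is freed.
	// All identifiers here are placeholders.
	_, err = client.AbortMultipartUpload(context.TODO(), &s3.AbortMultipartUploadInput{
		Bucket:   aws.String("example-bucket"),
		Key:      aws.String("example-key"),
		UploadId: aws.String("example-upload-id"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// As the doc comment recommends, verify with ListParts: a NoSuchUpload
	// error or an empty parts list means no part storage remains, while a
	// non-empty list indicates part uploads were still in flight.
	parts, err := client.ListParts(context.TODO(), &s3.ListPartsInput{
		Bucket:   aws.String("example-bucket"),
		Key:      aws.String("example-key"),
		UploadId: aws.String("example-upload-id"),
	})
	if err == nil && len(parts.Parts) > 0 {
		log.Println("part uploads still in progress; retry the abort")
	}
}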
@@ -122,6 +145,9 @@ type AbortMultipartUploadOutput struct { } func (c *Client) addOperationAbortMultipartUploadMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpAbortMultipartUpload{}, middleware.After) if err != nil { return err @@ -130,6 +156,13 @@ func (c *Client) addOperationAbortMultipartUploadMiddlewares(stack *middleware.S if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "AbortMultipartUpload"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -148,16 +181,13 @@ func (c *Client) addOperationAbortMultipartUploadMiddlewares(stack *middleware.S if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -166,7 +196,10 @@ func (c *Client) addOperationAbortMultipartUploadMiddlewares(stack *middleware.S if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpAbortMultipartUploadValidationMiddleware(stack); err != nil { @@ -178,6 +211,9 @@ func (c *Client) addOperationAbortMultipartUploadMiddlewares(stack *middleware.S if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addAbortMultipartUploadUpdateEndpoint(stack, options); err != nil { return err } @@ -193,14 +229,26 @@ func (c *Client) addOperationAbortMultipartUploadMiddlewares(stack *middleware.S if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *AbortMultipartUploadInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opAbortMultipartUpload(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "AbortMultipartUpload", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go index 95ff6496..b9f094f1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go @@ -4,6 +4,7 @@ package s3 import ( 
"context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" @@ -13,91 +14,87 @@ import ( ) // Completes a multipart upload by assembling previously uploaded parts. You first -// initiate the multipart upload and then upload all parts using the UploadPart -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) operation. -// After successfully uploading all relevant parts of an upload, you call this -// action to complete the upload. Upon receiving this request, Amazon S3 -// concatenates all the parts in ascending order by part number to create a new -// object. In the Complete Multipart Upload request, you must provide the parts -// list. You must ensure that the parts list is complete. This action concatenates -// the parts that you provide in the list. For each part in the list, you must -// provide the part number and the ETag value, returned after that part was -// uploaded. Processing of a Complete Multipart Upload request could take several -// minutes to complete. After Amazon S3 begins processing the request, it sends an -// HTTP response header that specifies a 200 OK response. While processing is in -// progress, Amazon S3 periodically sends white space characters to keep the -// connection from timing out. Because a request could fail after the initial 200 -// OK response has been sent, it is important that you check the response body to -// determine whether the request succeeded. Note that if CompleteMultipartUpload -// fails, applications should be prepared to retry the failed requests. For more -// information, see Amazon S3 Error Best Practices -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html). You -// cannot use Content-Type: application/x-www-form-urlencoded with Complete -// Multipart Upload requests. Also, if you do not provide a Content-Type header, -// CompleteMultipartUpload returns a 200 OK response. For more information about -// multipart uploads, see Uploading Objects Using Multipart Upload -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). For -// information about permissions required to use the multipart upload API, see -// Multipart Upload and Permissions -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). -// CompleteMultipartUpload has the following special errors: +// initiate the multipart upload and then upload all parts using the UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// operation or the UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) +// operation. After successfully uploading all relevant parts of an upload, you +// call this CompleteMultipartUpload operation to complete the upload. Upon +// receiving this request, Amazon S3 concatenates all the parts in ascending order +// by part number to create a new object. In the CompleteMultipartUpload request, +// you must provide the parts list and ensure that the parts list is complete. The +// CompleteMultipartUpload API operation concatenates the parts that you provide in +// the list. For each part in the list, you must provide the PartNumber value and +// the ETag value that are returned after that part was uploaded. The processing +// of a CompleteMultipartUpload request could take several minutes to finalize. 
+// After Amazon S3 begins processing the request, it sends an HTTP response header +// that specifies a 200 OK response. While processing is in progress, Amazon S3 +// periodically sends white space characters to keep the connection from timing +// out. A request could fail after the initial 200 OK response has been sent. This +// means that a 200 OK response can contain either a success or an error. The +// error response might be embedded in the 200 OK response. If you call this API +// operation directly, make sure to design your application to parse the contents +// of the response and handle it appropriately. If you use Amazon Web Services +// SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply +// error handling per your configuration settings (including automatically retrying +// the request as appropriate). If the condition persists, the SDKs throw an +// exception (or, for the SDKs that don't use exceptions, they return an error). +// Note that if CompleteMultipartUpload fails, applications should be prepared to +// retry the failed requests. For more information, see Amazon S3 Error Best +// Practices (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html) +// . You can't use Content-Type: application/x-www-form-urlencoded for the +// CompleteMultipartUpload requests. Also, if you don't provide a Content-Type +// header, CompleteMultipartUpload can still return a 200 OK response. For more +// information about multipart uploads, see Uploading Objects Using Multipart +// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) +// in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must +// make requests for this API operation to the Zonal endpoint. These endpoints +// support virtual-hosted-style requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style +// requests are not supported. For more information, see Regional and Zonal +// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. Permissions +// - General purpose bucket permissions - For information about permissions +// required to use the multipart upload API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon S3 User Guide. +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// . 
// -// * Error code: -// EntityTooSmall +// Special errors +// - Error Code: EntityTooSmall +// - Description: Your proposed upload is smaller than the minimum allowed +// object size. Each part must be at least 5 MB in size, except the last part. +// - HTTP Status Code: 400 Bad Request +// - Error Code: InvalidPart +// - Description: One or more of the specified parts could not be found. The +// part might not have been uploaded, or the specified ETag might not have matched +// the uploaded part's ETag. +// - HTTP Status Code: 400 Bad Request +// - Error Code: InvalidPartOrder +// - Description: The list of parts was not in ascending order. The parts list +// must be specified in order by part number. +// - HTTP Status Code: 400 Bad Request +// - Error Code: NoSuchUpload +// - Description: The specified multipart upload does not exist. The upload ID +// might be invalid, or the multipart upload might have been aborted or completed. +// - HTTP Status Code: 404 Not Found // -// * Description: Your proposed upload is smaller than the minimum -// allowed object size. Each part must be at least 5 MB in size, except the last -// part. -// -// * 400 Bad Request -// -// * Error code: InvalidPart -// -// * Description: One or more -// of the specified parts could not be found. The part might not have been -// uploaded, or the specified entity tag might not have matched the part's entity -// tag. -// -// * 400 Bad Request -// -// * Error code: InvalidPartOrder -// -// * Description: The list -// of parts was not in ascending order. The parts list must be specified in order -// by part number. -// -// * 400 Bad Request -// -// * Error code: NoSuchUpload -// -// * Description: -// The specified multipart upload does not exist. The upload ID might be invalid, -// or the multipart upload might have been aborted or completed. -// -// * 404 Not -// Found -// -// The following operations are related to CompleteMultipartUpload: -// -// * -// CreateMultipartUpload -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// -// * -// UploadPart -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// -// * -// AbortMultipartUpload -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) -// -// * -// ListParts -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// -// * -// ListMultipartUploads -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . 
The following operations are +// related to CompleteMultipartUpload : +// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) func (c *Client) CompleteMultipartUpload(ctx context.Context, params *CompleteMultipartUploadInput, optFns ...func(*Options)) (*CompleteMultipartUploadOutput, error) { if params == nil { params = &CompleteMultipartUploadInput{} @@ -115,23 +112,31 @@ func (c *Client) CompleteMultipartUpload(ctx context.Context, params *CompleteMu type CompleteMultipartUploadInput struct { - // Name of the bucket to which the multipart upload was initiated. When using this - // action with an access point, you must direct requests to the access point - // hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, - // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts - // hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the - // Amazon S3 User Guide. + // Name of the bucket to which the multipart upload was initiated. Directory + // buckets - When you use this operation with a directory bucket, you must use + // virtual-hosted-style requests in the format + // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + // supported. Directory bucket names must be unique in the chosen Availability + // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket + // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. 
For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + // use this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts access point ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // This member is required. Bucket *string @@ -149,130 +154,125 @@ type CompleteMultipartUploadInput struct { // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see - // Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumCRC32 *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the // base64-encoded, 32-bit CRC32C checksum of the object. For more information, see - // Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumCRC32C *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the // base64-encoded, 160-bit SHA-1 digest of the object. For more information, see - // Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumSHA1 *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see - // Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumSHA256 *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). 
ExpectedBucketOwner *string // The container for the multipart upload request information. MultipartUpload *types.CompletedMultipartUpload // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer // The server-side encryption (SSE) algorithm used to encrypt the object. This - // parameter is needed only when the object was created using a checksum algorithm. - // For more information, see Protecting data using SSE-C keys - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. + // parameter is required only when the object was created using a checksum + // algorithm or if your bucket policy requires the use of SSE-C. For more + // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html#ssec-require-condition-key) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. SSECustomerAlgorithm *string // The server-side encryption (SSE) customer managed key. This parameter is needed // only when the object was created using a checksum algorithm. For more - // information, see Protecting data using SSE-C keys - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. + // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. SSECustomerKey *string // The MD5 server-side encryption (SSE) customer managed key. This parameter is // needed only when the object was created using a checksum algorithm. For more - // information, see Protecting data using SSE-C keys - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. + // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. SSECustomerKeyMD5 *string noSmithyDocumentSerde } +func (in *CompleteMultipartUploadInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.Key = in.Key + +} + type CompleteMultipartUploadOutput struct { // The name of the bucket that contains the newly created object. Does not return - // the access point ARN or access point alias if used. 
When using this action with - // an access point, you must direct requests to the access point hostname. The - // access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, - // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts - // hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the - // Amazon S3 User Guide. + // the access point ARN or access point alias if used. Access points are not + // supported by directory buckets. Bucket *string // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Amazon Web Services KMS (SSE-KMS). - BucketKeyEnabled bool + // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality + // is not supported for directory buckets. + BucketKeyEnabled *bool // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32 *string // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. 
For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32C *string // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use the API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA1 *string // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA256 *string @@ -282,13 +282,13 @@ type CompleteMultipartUploadOutput struct { // data. If the entity tag is not an MD5 digest of the object data, it will contain // one or more nonhexadecimal characters and/or will consist of less than 32 or // more than 32 hexadecimal digits. For more information about how the entity tag - // is calculated, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // is calculated, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ETag *string - // If the object expiration is configured, this will contain the expiration date - // (expiry-date) and rule ID (rule-id). The value of rule-id is URL-encoded. + // If the object expiration is configured, this will contain the expiration date ( + // expiry-date ) and rule ID ( rule-id ). The value of rule-id is URL-encoded. + // This functionality is not supported for directory buckets. Expiration *string // The object key of the newly created object. 
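To make the completion flow above concrete, a sketch that assembles an object from previously collected part ETags; it assumes the pointer-typed input members this SDK release introduces (PartNumber as *int32 — adjust if your version differs) and placeholder identifiers throughout:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Parts must be listed in ascending PartNumber order with the ETags
	// returned by UploadPart, or the service responds with InvalidPartOrder
	// or InvalidPart (see the special errors above). A 200 OK may still
	// embed an error in the body; the SDK detects and retries that case.
	out, err := client.CompleteMultipartUpload(context.TODO(), &s3.CompleteMultipartUploadInput{
		Bucket:   aws.String("example-bucket"),
		Key:      aws.String("example-key"),
		UploadId: aws.String("example-upload-id"),
		MultipartUpload: &types.CompletedMultipartUpload{
			Parts: []types.CompletedPart{
				{ETag: aws.String(`"etag-of-part-1"`), PartNumber: aws.Int32(1)},
				{ETag: aws.String(`"etag-of-part-2"`), PartNumber: aws.Int32(2)},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("assembled object at:", aws.ToString(out.Location))
}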
@@ -298,22 +298,21 @@ type CompleteMultipartUploadOutput struct { Location *string // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged - // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed key that was used for the - // object. + // If present, indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. This functionality + // is not supported for directory buckets. SSEKMSKeyId *string - // If you specified server-side encryption either with an Amazon S3-managed - // encryption key or an Amazon Web Services KMS key in your initiate multipart - // upload request, the response includes this header. It confirms the encryption - // algorithm that Amazon S3 used to encrypt the object. + // The server-side encryption algorithm used when storing this object in Amazon S3 + // (for example, AES256 , aws:kms ). For directory buckets, only server-side + // encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. ServerSideEncryption types.ServerSideEncryption - // Version ID of the newly created object, in case the bucket has versioning turned - // on. + // Version ID of the newly created object, in case the bucket has versioning + // turned on. This functionality is not supported for directory buckets. VersionId *string // Metadata pertaining to the operation's result. @@ -323,6 +322,9 @@ type CompleteMultipartUploadOutput struct { } func (c *Client) addOperationCompleteMultipartUploadMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpCompleteMultipartUpload{}, middleware.After) if err != nil { return err @@ -331,6 +333,13 @@ func (c *Client) addOperationCompleteMultipartUploadMiddlewares(stack *middlewar if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "CompleteMultipartUpload"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -349,16 +358,13 @@ func (c *Client) addOperationCompleteMultipartUploadMiddlewares(stack *middlewar if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -367,7 +373,10 @@ func (c *Client) addOperationCompleteMultipartUploadMiddlewares(stack *middlewar if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = 
addOpCompleteMultipartUploadValidationMiddleware(stack); err != nil { @@ -379,6 +388,9 @@ func (c *Client) addOperationCompleteMultipartUploadMiddlewares(stack *middlewar if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addCompleteMultipartUploadUpdateEndpoint(stack, options); err != nil { return err } @@ -397,14 +409,26 @@ func (c *Client) addOperationCompleteMultipartUploadMiddlewares(stack *middlewar if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *CompleteMultipartUploadInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opCompleteMultipartUpload(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "CompleteMultipartUpload", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go index 47790077..deb21cee 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go @@ -4,11 +4,13 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" "time" ) @@ -18,146 +20,92 @@ import ( // up to 5 GB in size in a single atomic action using this API. However, to copy an // object greater than 5 GB, you must use the multipart upload Upload Part - Copy // (UploadPartCopy) API. For more information, see Copy Object Using the REST -// Multipart Upload API -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html). -// All copy requests must be authenticated. Additionally, you must have read access -// to the source object and write access to the destination bucket. For more -// information, see REST Authentication -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). Both -// the Region that you want to copy the object from and the Region that you want to -// copy the object to must be enabled for your account. A copy request might return -// an error when Amazon S3 receives the copy request or while Amazon S3 is copying -// the files. If the error occurs before the copy action starts, you receive a -// standard Amazon S3 error. If the error occurs during the copy operation, the -// error response is embedded in the 200 OK response. This means that a 200 OK -// response can contain either a success or an error. Design your application to -// parse the contents of the response and handle it appropriately. If the copy is -// successful, you receive a response with information about the copied object. If -// the request is an HTTP 1.1 request, the response is chunk encoded. 
If it were -// not, it would not contain the content-length, and you would need to read the -// entire body. The copy request charge is based on the storage class and Region -// that you specify for the destination object. For pricing information, see Amazon -// S3 pricing (http://aws.amazon.com/s3/pricing/). Amazon S3 transfer acceleration -// does not support cross-Region copies. If you request a cross-Region copy using a -// transfer acceleration endpoint, you get a 400 Bad Request error. For more -// information, see Transfer Acceleration -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html). -// Metadata When copying an object, you can preserve all metadata (default) or -// specify new metadata. However, the ACL is not preserved and is set to private -// for the user making the request. To override the default ACL setting, specify a -// new ACL when generating a copy request. For more information, see Using ACLs -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). To -// specify whether you want the object metadata copied from the source object or -// replaced with metadata provided in the request, you can optionally add the -// x-amz-metadata-directive header. When you grant permissions, you can use the -// s3:x-amz-metadata-directive condition key to enforce certain metadata behavior -// when objects are uploaded. For more information, see Specifying Conditions in a -// Policy -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html) in -// the Amazon S3 User Guide. For a complete list of Amazon S3-specific condition -// keys, see Actions, Resources, and Condition Keys for Amazon S3 -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html). -// x-amz-copy-source-if Headers To only copy an object under certain conditions, -// such as whether the Etag matches or whether the object was modified before or -// after a specified date, use the following request parameters: +// Multipart Upload API (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html) +// . You can copy individual objects between general purpose buckets, between +// directory buckets, and between general purpose buckets and directory buckets. +// Directory buckets - For directory buckets, you must make requests for this API +// operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style +// requests are not supported. For more information, see Regional and Zonal +// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. Both the Region that you want to copy the object +// from and the Region that you want to copy the object to must be enabled for your +// account. Amazon S3 transfer acceleration does not support cross-Region copies. +// If you request a cross-Region copy using a transfer acceleration endpoint, you +// get a 400 Bad Request error. For more information, see Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) +// . Authentication and authorization All CopyObject requests must be +// authenticated and signed by using IAM credentials (access key ID and secret +// access key for the IAM identities). All headers with the x-amz- prefix, +// including x-amz-copy-source , must be signed. 
For more information, see REST +// Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) +// . Directory buckets - You must use the IAM credentials to authenticate and +// authorize your access to the CopyObject API operation, instead of using the +// temporary security credentials through the CreateSession API operation. Amazon +// Web Services CLI or SDKs handles authentication and authorization on your +// behalf. Permissions You must have read access to the source object and write +// access to the destination bucket. +// - General purpose bucket permissions - You must have permissions in an IAM +// policy based on the source and destination bucket types in a CopyObject +// operation. +// - If the source object is in a general purpose bucket, you must have +// s3:GetObject permission to read the source object that is being copied. +// - If the destination bucket is a general purpose bucket, you must have +// s3:PutObject permission to write the object copy to the destination bucket. +// - Directory bucket permissions - You must have permissions in a bucket policy +// or an IAM identity-based policy based on the source and destination bucket types +// in a CopyObject operation. +// - If the source object that you want to copy is in a directory bucket, you +// must have the s3express:CreateSession permission in the Action element of a +// policy to read the object. By default, the session is in the ReadWrite mode. +// If you want to restrict the access, you can explicitly set the +// s3express:SessionMode condition key to ReadOnly on the copy source bucket. +// - If the copy destination is a directory bucket, you must have the +// s3express:CreateSession permission in the Action element of a policy to write +// the object to the destination. The s3express:SessionMode condition key can't +// be set to ReadOnly on the copy destination bucket. For example policies, see +// Example bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) +// and Amazon Web Services Identity and Access Management (IAM) identity-based +// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) +// in the Amazon S3 User Guide. // -// * +// Response and special errors When the request is an HTTP 1.1 request, the +// response is chunk encoded. When the request is not an HTTP 1.1 request, the +// response would not contain the Content-Length . You always need to read the +// entire response body to check if the copy succeeds. +// - If the copy is successful, you receive a response with information about +// the copied object. +// - A copy request might return an error when Amazon S3 receives the copy +// request or while Amazon S3 is copying the files. A 200 OK response can contain +// either a success or an error. +// - If the error occurs before the copy action starts, you receive a standard +// Amazon S3 error. +// - If the error occurs during the copy operation, the error response is +// embedded in the 200 OK response. For example, in a cross-region copy, you may +// encounter throttling and receive a 200 OK response. For more information, see +// Resolve the Error 200 response when copying objects to Amazon S3 . The 200 OK +// status code means the copy was accepted, but it doesn't mean the copy is +// complete.
Another example is when you disconnect from Amazon S3 before the copy +// is complete, Amazon S3 might cancel the copy and you may receive a 200 OK +// response. You must stay connected to Amazon S3 until the entire response is +// successfully received and processed. If you call this API operation directly, +// make sure to design your application to parse the content of the response and +// handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this +// condition. The SDKs detect the embedded error and apply error handling per your +// configuration settings (including automatically retrying the request as +// appropriate). If the condition persists, the SDKs throw an exception (or, for +// the SDKs that don't use exceptions, they return an error). // -// * x-amz-copy-source-if-none-match -// -// * -// x-amz-copy-source-if-unmodified-since -// -// * x-amz-copy-source-if-modified-since -// -// If -// both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since -// headers are present in the request and evaluate as follows, Amazon S3 returns -// 200 OK and copies the data: -// -// * x-amz-copy-source-if-match condition evaluates to -// true -// -// * x-amz-copy-source-if-unmodified-since condition evaluates to false -// -// If -// both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since -// headers are present in the request and evaluate as follows, Amazon S3 returns -// the 412 Precondition Failed response code: -// -// * x-amz-copy-source-if-none-match -// condition evaluates to false -// -// * x-amz-copy-source-if-modified-since condition -// evaluates to true -// -// All headers with the x-amz- prefix, including -// x-amz-copy-source, must be signed. Server-side encryption When you perform a -// CopyObject operation, you can optionally use the appropriate encryption-related -// headers to encrypt the object using server-side encryption with Amazon Web -// Services managed encryption keys (SSE-S3 or SSE-KMS) or a customer-provided -// encryption key. With server-side encryption, Amazon S3 encrypts your data as it -// writes it to disks in its data centers and decrypts the data when you access it. -// For more information about server-side encryption, see Using Server-Side -// Encryption -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html). If -// a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. -// For more information, see Amazon S3 Bucket Keys -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) in the Amazon -// S3 User Guide. Access Control List (ACL)-Specific Request Headers When copying -// an object, you can optionally use headers to grant ACL-based permissions. By -// default, all objects are private. Only the owner has full access control. When -// adding a new object, you can grant permissions to individual Amazon Web Services -// accounts or to predefined groups defined by Amazon S3. These permissions are -// then added to the ACL on the object. For more information, see Access Control -// List (ACL) Overview -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) and Managing -// ACLs Using the REST API -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html). If -// the bucket that you're copying objects to uses the bucket owner enforced setting -// for S3 Object Ownership, ACLs are disabled and no longer affect permissions. 
-// Buckets that use this setting only accept PUT requests that don't specify an ACL -// or PUT requests that specify bucket owner full control ACLs, such as the -// bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed -// in the XML format. For more information, see Controlling ownership of objects -// and disabling ACLs -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) -// in the Amazon S3 User Guide. If your bucket uses the bucket owner enforced -// setting for Object Ownership, all objects written to the bucket by any account -// will be owned by the bucket owner. Checksums When copying an object, if it has a -// checksum, that checksum will be copied to the new object by default. When you -// copy the object over, you may optionally specify a different checksum algorithm -// to use with the x-amz-checksum-algorithm header. Storage Class Options You can -// use the CopyObject action to change the storage class of an object that is -// already stored in Amazon S3 using the StorageClass parameter. For more -// information, see Storage Classes -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) in -// the Amazon S3 User Guide. Versioning By default, x-amz-copy-source identifies -// the current version of an object to copy. If the current version is a delete -// marker, Amazon S3 behaves as if the object was deleted. To copy a different -// version, use the versionId subresource. If you enable versioning on the target -// bucket, Amazon S3 generates a unique version ID for the object being copied. -// This version ID is different from the version ID of the source object. Amazon S3 -// returns the version ID of the copied object in the x-amz-version-id response -// header in the response. If you do not enable versioning or suspend it on the -// target bucket, the version ID that Amazon S3 generates is always null. If the -// source object's storage class is GLACIER, you must restore a copy of this object -// before you can use it as a source object for the copy operation. For more -// information, see RestoreObject -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). The -// following operations are related to CopyObject: -// -// * PutObject -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// -// * -// GetObject -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// -// For more -// information, see Copying Objects -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html). +// Charge The copy request charge is based on the storage class and Region that +// you specify for the destination object. The request can also result in a data +// retrieval charge for the source if the source storage class bills for data +// retrieval. For pricing information, see Amazon S3 pricing (http://aws.amazon.com/s3/pricing/) +// . HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . 
The following operations are +// related to CopyObject : +// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) func (c *Client) CopyObject(ctx context.Context, params *CopyObjectInput, optFns ...func(*Options)) (*CopyObjectOutput, error) { if params == nil { params = &CopyObjectInput{} @@ -175,59 +123,81 @@ func (c *Client) CopyObject(ctx context.Context, params *CopyObjectInput, optFns type CopyObjectInput struct { - // The name of the destination bucket. When using this action with an access point, + // The name of the destination bucket. Directory buckets - When you use this + // operation with a directory bucket, you must use virtual-hosted-style requests in + // the format Bucket_name.s3express-az_id.region.amazonaws.com . Path-style + // requests are not supported. Directory bucket names must be unique in the chosen + // Availability Zone. Bucket names must follow the format + // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 + // ). For information about bucket naming restrictions, see Directory bucket + // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, // you must direct requests to the access point hostname. The access point hostname // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. // When using this action with an access point through the Amazon Web Services // SDKs, you provide the access point ARN in place of the bucket name. For more - // information about access point ARNs, see Using access points - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, - // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts - // hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the - // Amazon S3 User Guide. + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + // use this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts access point ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // This member is required. 
Bucket *string - // Specifies the source object for the copy operation. You specify the value in one - // of two formats, depending on whether you want to access the source object - // through an access point - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html): - // - // * - // For objects not accessed through an access point, specify the name of the source - // bucket and the key of the source object, separated by a slash (/). For example, - // to copy the object reports/january.pdf from the bucket awsexamplebucket, use - // awsexamplebucket/reports/january.pdf. The value must be URL-encoded. - // - // * For - // objects accessed through access points, specify the Amazon Resource Name (ARN) - // of the object as accessed through the access point, in the format - // arn:aws:s3:::accesspoint//object/. For example, to copy the object - // reports/january.pdf through access point my-access-point owned by account - // 123456789012 in Region us-west-2, use the URL encoding of - // arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf. - // The value must be URL encoded. Amazon S3 supports copy operations using access - // points only when the source and destination buckets are in the same Amazon Web - // Services Region. Alternatively, for objects accessed through Amazon S3 on - // Outposts, specify the ARN of the object as accessed in the format - // arn:aws:s3-outposts:::outpost//object/. For example, to copy the object - // reports/january.pdf through outpost my-outpost owned by account 123456789012 in - // Region us-west-2, use the URL encoding of - // arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf. - // The value must be URL-encoded. - // - // To copy a specific version of an object, append - // ?versionId= to the value (for example, - // awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893). - // If you don't specify a version ID, Amazon S3 copies the latest version of the - // source object. + // Specifies the source object for the copy operation. The source object can be up + // to 5 GB. If the source object is an object that was uploaded by using a + // multipart upload, the object copy will be a single part object after the source + // object is copied to the destination bucket. You specify the value of the copy + // source in one of two formats, depending on whether you want to access the source + // object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html) + // : + // - For objects not accessed through an access point, specify the name of the + // source bucket and the key of the source object, separated by a slash (/). For + // example, to copy the object reports/january.pdf from the general purpose + // bucket awsexamplebucket , use awsexamplebucket/reports/january.pdf . The value + // must be URL-encoded. To copy the object reports/january.pdf from the directory + // bucket awsexamplebucket--use1-az5--x-s3 , use + // awsexamplebucket--use1-az5--x-s3/reports/january.pdf . The value must be + // URL-encoded. + // - For objects accessed through access points, specify the Amazon Resource + // Name (ARN) of the object as accessed through the access point, in the format + // arn:aws:s3:::accesspoint//object/ . 
For example, to copy the object + // reports/january.pdf through access point my-access-point owned by account + // 123456789012 in Region us-west-2 , use the URL encoding of + // arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf + // . The value must be URL encoded. + // - Amazon S3 supports copy operations using Access points only when the source + // and destination buckets are in the same Amazon Web Services Region. + // - Access points are not supported by directory buckets. Alternatively, for + // objects accessed through Amazon S3 on Outposts, specify the ARN of the object as + // accessed in the format arn:aws:s3-outposts:::outpost//object/ . For example, + // to copy the object reports/january.pdf through outpost my-outpost owned by + // account 123456789012 in Region us-west-2 , use the URL encoding of + // arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf + // . The value must be URL-encoded. + // If your source bucket versioning is enabled, the x-amz-copy-source header by + // default identifies the current version of an object to copy. If the current + // version is a delete marker, Amazon S3 behaves as if the object was deleted. To + // copy a different version, use the versionId query parameter. Specifically, + // append ?versionId= to the value (for example, + // awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893 + // ). If you don't specify a version ID, Amazon S3 copies the latest version of the + // source object. If you enable versioning on the destination bucket, Amazon S3 + // generates a unique version ID for the copied object. This version ID is + // different from the version ID of the source object. Amazon S3 returns the + // version ID of the copied object in the x-amz-version-id response header in the + // response. If you do not enable versioning or suspend it on the destination + // bucket, the version ID that Amazon S3 generates in the x-amz-version-id + // response header is always null. Directory buckets - S3 Versioning isn't enabled + // and supported for directory buckets. // // This member is required. CopySource *string @@ -237,222 +207,426 @@ type CopyObjectInput struct { // This member is required. Key *string - // The canned ACL to apply to the object. This action is not supported by Amazon S3 - // on Outposts. + // The canned access control list (ACL) to apply to the object. When you copy an + // object, the ACL metadata is not preserved and is set to private by default. + // Only the owner has full access control. To override the default ACL setting, + // specify a new ACL when you generate a copy request. For more information, see + // Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) + // . If the destination bucket that you're copying objects to uses the bucket owner + // enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect + // permissions. Buckets that use this setting only accept PUT requests that don't + // specify an ACL or PUT requests that specify bucket owner full control ACLs, + // such as the bucket-owner-full-control canned ACL or an equivalent form of this + // ACL expressed in the XML format. For more information, see Controlling + // ownership of objects and disabling ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) + // in the Amazon S3 User Guide. 
+ // - If your destination bucket uses the bucket owner enforced setting for + // Object Ownership, all objects written to the bucket by any account will be owned + // by the bucket owner. + // - This functionality is not supported for directory buckets. + // - This functionality is not supported for Amazon S3 on Outposts. ACL types.ObjectCannedACL // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption - // with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true - // causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS. - // Specifying this header with a COPY action doesn’t affect bucket-level settings - // for S3 Bucket Key. - BucketKeyEnabled bool - - // Specifies caching behavior along the request/reply chain. + // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). + // If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. + // Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object + // encryption with SSE-KMS. Specifying this header with a COPY action doesn’t + // affect bucket-level settings for S3 Bucket Key. For more information, see + // Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) + // in the Amazon S3 User Guide. This functionality is not supported when the + // destination bucket is a directory bucket. + BucketKeyEnabled *bool + + // Specifies the caching behavior along the request/reply chain. CacheControl *string - // Indicates the algorithm you want Amazon S3 to use to create the checksum for the - // object. For more information, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. + // Indicates the algorithm that you want Amazon S3 to use to create the checksum + // for the object. For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. When you copy an object, if the source object has a + // checksum, that checksum value will be copied to the new object by default. If + // the CopyObject request does not include this x-amz-checksum-algorithm header, + // the checksum algorithm will be copied from the source object to the destination + // object (if it's present on the source object). You can optionally specify a + // different checksum algorithm to use with the x-amz-checksum-algorithm header. + // Unrecognized or unsupported values will respond with the HTTP status code 400 + // Bad Request . For directory buckets, when you use Amazon Web Services SDKs, + // CRC32 is the default checksum algorithm that's used for performance. ChecksumAlgorithm types.ChecksumAlgorithm - // Specifies presentational information for the object. + // Specifies presentational information for the object. Indicates whether an + // object should be displayed in a web browser or downloaded as a file. It allows + // specifying the desired filename for the downloaded file. ContentDisposition *string // Specifies what content encodings have been applied to the object and thus what // decoding mechanisms must be applied to obtain the media-type referenced by the - // Content-Type header field. + // Content-Type header field. For directory buckets, only the aws-chunked value is + // supported in this header field. ContentEncoding *string // The language the content is in. 
ContentLanguage *string - // A standard MIME type describing the format of the object data. + // A standard MIME type that describes the format of the object data. ContentType *string - // Copies the object if its entity tag (ETag) matches the specified tag. + // Copies the object if its entity tag (ETag) matches the specified tag. If both + // the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since + // headers are present in the request and evaluate as follows, Amazon S3 returns + // 200 OK and copies the data: + // - x-amz-copy-source-if-match condition evaluates to true + // - x-amz-copy-source-if-unmodified-since condition evaluates to false CopySourceIfMatch *string - // Copies the object if it has been modified since the specified time. + // Copies the object if it has been modified since the specified time. If both the + // x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since headers + // are present in the request and evaluate as follows, Amazon S3 returns the 412 + // Precondition Failed response code: + // - x-amz-copy-source-if-none-match condition evaluates to false + // - x-amz-copy-source-if-modified-since condition evaluates to true CopySourceIfModifiedSince *time.Time - // Copies the object if its entity tag (ETag) is different than the specified ETag. + // Copies the object if its entity tag (ETag) is different than the specified + // ETag. If both the x-amz-copy-source-if-none-match and + // x-amz-copy-source-if-modified-since headers are present in the request and + // evaluate as follows, Amazon S3 returns the 412 Precondition Failed response + // code: + // - x-amz-copy-source-if-none-match condition evaluates to false + // - x-amz-copy-source-if-modified-since condition evaluates to true CopySourceIfNoneMatch *string - // Copies the object if it hasn't been modified since the specified time. + // Copies the object if it hasn't been modified since the specified time. If both + // the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since + // headers are present in the request and evaluate as follows, Amazon S3 returns + // 200 OK and copies the data: + // - x-amz-copy-source-if-match condition evaluates to true + // - x-amz-copy-source-if-unmodified-since condition evaluates to false CopySourceIfUnmodifiedSince *time.Time // Specifies the algorithm to use when decrypting the source object (for example, - // AES256). + // AES256 ). If the source object for the copy is stored in Amazon S3 using SSE-C, + // you must provide the necessary encryption information in your request so that + // Amazon S3 can decrypt the object for copying. This functionality is not + // supported when the source object is in a directory bucket. CopySourceSSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt - // the source object. The encryption key provided in this header must be one that - // was used when the source object was created. + // the source object. The encryption key provided in this header must be the same + // one that was used when the source object was created. If the source object for + // the copy is stored in Amazon S3 using SSE-C, you must provide the necessary + // encryption information in your request so that Amazon S3 can decrypt the object + // for copying. This functionality is not supported when the source object is in a + // directory bucket. CopySourceSSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. 
// Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. + // encryption key was transmitted without error. If the source object for the copy + // is stored in Amazon S3 using SSE-C, you must provide the necessary encryption + // information in your request so that Amazon S3 can decrypt the object for + // copying. This functionality is not supported when the source object is in a + // directory bucket. CopySourceSSECustomerKeyMD5 *string - // The account ID of the expected destination bucket owner. If the destination - // bucket is owned by a different account, the request fails with the HTTP status - // code 403 Forbidden (access denied). + // The account ID of the expected destination bucket owner. If the account ID that + // you provide does not match the actual owner of the destination bucket, the + // request fails with the HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string - // The account ID of the expected source bucket owner. If the source bucket is - // owned by a different account, the request fails with the HTTP status code 403 - // Forbidden (access denied). + // The account ID of the expected source bucket owner. If the account ID that you + // provide does not match the actual owner of the source bucket, the request fails + // with the HTTP status code 403 Forbidden (access denied). ExpectedSourceBucketOwner *string // The date and time at which the object is no longer cacheable. Expires *time.Time - // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. This - // action is not supported by Amazon S3 on Outposts. + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + // - This functionality is not supported for directory buckets. + // - This functionality is not supported for Amazon S3 on Outposts. GrantFullControl *string - // Allows grantee to read the object data and its metadata. This action is not - // supported by Amazon S3 on Outposts. + // Allows grantee to read the object data and its metadata. + // - This functionality is not supported for directory buckets. + // - This functionality is not supported for Amazon S3 on Outposts. GrantRead *string - // Allows grantee to read the object ACL. This action is not supported by Amazon S3 - // on Outposts. + // Allows grantee to read the object ACL. + // - This functionality is not supported for directory buckets. + // - This functionality is not supported for Amazon S3 on Outposts. GrantReadACP *string - // Allows grantee to write the ACL for the applicable object. This action is not - // supported by Amazon S3 on Outposts. + // Allows grantee to write the ACL for the applicable object. + // - This functionality is not supported for directory buckets. + // - This functionality is not supported for Amazon S3 on Outposts. GrantWriteACP *string // A map of metadata to store with the object in S3. Metadata map[string]string - // Specifies whether the metadata is copied from the source object or replaced with - // metadata provided in the request. + // Specifies whether the metadata is copied from the source object or replaced + // with metadata that's provided in the request. When copying an object, you can + // preserve all metadata (the default) or specify new metadata. If this header + // isn’t specified, COPY is the default behavior. 
General purpose bucket - For + // general purpose buckets, when you grant permissions, you can use the + // s3:x-amz-metadata-directive condition key to enforce certain metadata behavior + // when objects are uploaded. For more information, see Amazon S3 condition key + // examples (https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html) + // in the Amazon S3 User Guide. x-amz-website-redirect-location is unique to each + // object and is not copied when using the x-amz-metadata-directive header. To + // copy the value, you must specify x-amz-website-redirect-location in the request + // header. MetadataDirective types.MetadataDirective - // Specifies whether you want to apply a legal hold to the copied object. + // Specifies whether you want to apply a legal hold to the object copy. This + // functionality is not supported for directory buckets. ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus - // The Object Lock mode that you want to apply to the copied object. + // The Object Lock mode that you want to apply to the object copy. This + // functionality is not supported for directory buckets. ObjectLockMode types.ObjectLockMode - // The date and time when you want the copied object's Object Lock to expire. + // The date and time when you want the Object Lock of the object copy to expire. + // This functionality is not supported for directory buckets. ObjectLockRetainUntilDate *time.Time // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer - // Specifies the algorithm to use to when encrypting the object (for example, - // AES256). + // Specifies the algorithm to use when encrypting the object (for example, AES256 + // ). When you perform a CopyObject operation, if you want to use a different type + // of encryption setting for the target object, you can specify appropriate + // encryption-related headers to encrypt the target object with an Amazon S3 + // managed key, a KMS key, or a customer-provided key. If the encryption setting in + // your request is different from the default encryption configuration of the + // destination bucket, the encryption setting in your request takes precedence. + // This functionality is not supported when the destination bucket is a directory + // bucket. SSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use in // encrypting data. This value is used to store the object and then it is - // discarded; Amazon S3 does not store the encryption key. The key must be + // discarded. Amazon S3 does not store the encryption key. 
The key must be // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. + // x-amz-server-side-encryption-customer-algorithm header. This functionality is + // not supported when the destination bucket is a directory bucket. SSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. + // encryption key was transmitted without error. This functionality is not + // supported when the destination bucket is a directory bucket. SSECustomerKeyMD5 *string // Specifies the Amazon Web Services KMS Encryption Context to use for object // encryption. The value of this header is a base64-encoded UTF-8 string holding - // JSON with the encryption context key-value pairs. + // JSON with the encryption context key-value pairs. This value must be explicitly + // added to specify encryption context for CopyObject requests. This functionality + // is not supported when the destination bucket is a directory bucket. SSEKMSEncryptionContext *string - // Specifies the Amazon Web Services KMS key ID to use for object encryption. All - // GET and PUT requests for an object protected by Amazon Web Services KMS will - // fail if not made via SSL or using SigV4. For information about configuring using - // any of the officially supported Amazon Web Services SDKs and Amazon Web Services - // CLI, see Specifying the Signature Version in Request Authentication - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) - // in the Amazon S3 User Guide. + // Specifies the KMS ID (Key ID, Key ARN, or Key Alias) to use for object + // encryption. All GET and PUT requests for an object protected by KMS will fail if + // they're not made via SSL or using SigV4. For information about configuring any + // of the officially supported Amazon Web Services SDKs and Amazon Web Services + // CLI, see Specifying the Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) + // in the Amazon S3 User Guide. This functionality is not supported when the + // destination bucket is a directory bucket. SSEKMSKeyId *string // The server-side encryption algorithm used when storing this object in Amazon S3 - // (for example, AES256, aws:kms). + // (for example, AES256 , aws:kms , aws:kms:dsse ). Unrecognized or unsupported + // values won’t write a destination object and will receive a 400 Bad Request + // response. Amazon S3 automatically encrypts all new objects that are copied to an + // S3 bucket. When copying an object, if you don't specify encryption information + // in your copy request, the encryption setting of the target object is set to the + // default encryption configuration of the destination bucket. By default, all + // buckets have a base level of encryption configuration that uses server-side + // encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a + // default encryption configuration that uses server-side encryption with Key + // Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with + // Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with + // customer-provided encryption keys (SSE-C), Amazon S3 uses the corresponding KMS + // key, or a customer-provided key to encrypt the target object copy. 
When you + // perform a CopyObject operation, if you want to use a different type of + // encryption setting for the target object, you can specify appropriate + // encryption-related headers to encrypt the target object with an Amazon S3 + // managed key, a KMS key, or a customer-provided key. If the encryption setting in + // your request is different from the default encryption configuration of the + // destination bucket, the encryption setting in your request takes precedence. + // With server-side encryption, Amazon S3 encrypts your data as it writes your data + // to disks in its data centers and decrypts the data when you access it. For more + // information about server-side encryption, see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) + // in the Amazon S3 User Guide. For directory buckets, only server-side encryption + // with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. ServerSideEncryption types.ServerSideEncryption - // By default, Amazon S3 uses the STANDARD Storage Class to store newly created - // objects. The STANDARD storage class provides high durability and high - // availability. Depending on performance needs, you can specify a different - // Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For - // more information, see Storage Classes - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) in - // the Amazon S3 User Guide. + // If the x-amz-storage-class header is not used, the copied object will be stored + // in the STANDARD Storage Class by default. The STANDARD storage class provides + // high durability and high availability. Depending on performance needs, you can + // specify a different Storage Class. + // - Directory buckets - For directory buckets, only the S3 Express One Zone + // storage class is supported to store newly created objects. Unsupported storage + // class values won't write a destination object and will respond with the HTTP + // status code 400 Bad Request . + // - Amazon S3 on Outposts - S3 on Outposts only uses the OUTPOSTS Storage Class. + // You can use the CopyObject action to change the storage class of an object that + // is already stored in Amazon S3 by using the x-amz-storage-class header. For + // more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) + // in the Amazon S3 User Guide. Before using an object as a source object for the + // copy operation, you must restore a copy of it if it meets any of the following + // conditions: + // - The storage class of the source object is GLACIER or DEEP_ARCHIVE . + // - The storage class of the source object is INTELLIGENT_TIERING and its S3 + // Intelligent-Tiering access tier (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intelligent-tiering-overview.html#intel-tiering-tier-definition) + // is Archive Access or Deep Archive Access . + // For more information, see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) + // and Copying Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html) + // in the Amazon S3 User Guide. StorageClass types.StorageClass - // The tag-set for the object destination object this value must be used in - // conjunction with the TaggingDirective. The tag-set must be encoded as URL Query - // parameters. + // The tag-set for the object copy in the destination bucket.
This value must be + // used in conjunction with the x-amz-tagging-directive if you choose REPLACE for + // the x-amz-tagging-directive . If you choose COPY for the x-amz-tagging-directive + // , you don't need to set the x-amz-tagging header, because the tag-set will be + // copied from the source object directly. The tag-set must be encoded as URL Query + // parameters. The default value is the empty value. Directory buckets - For + // directory buckets in a CopyObject operation, only the empty tag-set is + // supported. Any requests that attempt to write non-empty tags into directory + // buckets will receive a 501 Not Implemented status code. When the destination + // bucket is a directory bucket, you will receive a 501 Not Implemented response + // in any of the following situations: + // - When you attempt to COPY the tag-set from an S3 source object that has + // non-empty tags. + // - When you attempt to REPLACE the tag-set of a source object and set a + // non-empty value to x-amz-tagging . + // - When you don't set the x-amz-tagging-directive header and the source object + // has non-empty tags. This is because the default value of + // x-amz-tagging-directive is COPY . + // Because only the empty tag-set is supported for directory buckets in a + // CopyObject operation, the following situations are allowed: + // - When you attempt to COPY the tag-set from a directory bucket source object + // that has no tags to a general purpose bucket. It copies an empty tag-set to the + // destination object. + // - When you attempt to REPLACE the tag-set of a directory bucket source object + // and set the x-amz-tagging value of the directory bucket destination object to + // empty. + // - When you attempt to REPLACE the tag-set of a general purpose bucket source + // object that has non-empty tags and set the x-amz-tagging value of the + // directory bucket destination object to empty. + // - When you attempt to REPLACE the tag-set of a directory bucket source object + // and don't set the x-amz-tagging value of the directory bucket destination + // object. This is because the default value of x-amz-tagging is the empty value. Tagging *string - // Specifies whether the object tag-set are copied from the source object or - // replaced with tag-set provided in the request. + // Specifies whether the object tag-set is copied from the source object or + // replaced with the tag-set that's provided in the request. The default value is + // COPY . Directory buckets - For directory buckets in a CopyObject operation, + // only the empty tag-set is supported. Any requests that attempt to write + // non-empty tags into directory buckets will receive a 501 Not Implemented status + // code. When the destination bucket is a directory bucket, you will receive a 501 + // Not Implemented response in any of the following situations: + // - When you attempt to COPY the tag-set from an S3 source object that has + // non-empty tags. + // - When you attempt to REPLACE the tag-set of a source object and set a + // non-empty value to x-amz-tagging . + // - When you don't set the x-amz-tagging-directive header and the source object + // has non-empty tags. This is because the default value of + // x-amz-tagging-directive is COPY . + // Because only the empty tag-set is supported for directory buckets in a + // CopyObject operation, the following situations are allowed: + // - When you attempt to COPY the tag-set from a directory bucket source object + // that has no tags to a general purpose bucket. 
It copies an empty tag-set to the + // destination object. + // - When you attempt to REPLACE the tag-set of a directory bucket source object + // and set the x-amz-tagging value of the directory bucket destination object to + // empty. + // - When you attempt to REPLACE the tag-set of a general purpose bucket source + // object that has non-empty tags and set the x-amz-tagging value of the + // directory bucket destination object to empty. + // - When you attempt to REPLACE the tag-set of a directory bucket source object + // and don't set the x-amz-tagging value of the directory bucket destination + // object. This is because the default value of x-amz-tagging is the empty value. TaggingDirective types.TaggingDirective - // If the bucket is configured as a website, redirects requests for this object to - // another object in the same bucket or to an external URL. Amazon S3 stores the - // value of this header in the object metadata. + // If the destination bucket is configured as a website, redirects requests for + // this object copy to another object in the same bucket or to an external URL. + // Amazon S3 stores the value of this header in the object metadata. This value is + // unique to each object and is not copied when using the x-amz-metadata-directive + // header. Instead, you may opt to provide this header in combination with the + // x-amz-metadata-directive header. This functionality is not supported for + // directory buckets. WebsiteRedirectLocation *string noSmithyDocumentSerde } +func (in *CopyObjectInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.DisableS3ExpressSessionAuth = ptr.Bool(true) +} + type CopyObjectOutput struct { // Indicates whether the copied object uses an S3 Bucket Key for server-side - // encryption with Amazon Web Services KMS (SSE-KMS). - BucketKeyEnabled bool + // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality + // is not supported for directory buckets. + BucketKeyEnabled *bool // Container for all response elements. CopyObjectResult *types.CopyObjectResult - // Version of the copied object in the destination bucket. + // Version ID of the source object that was copied. This functionality is not + // supported when the source object is in a directory bucket. CopySourceVersionId *string - // If the object expiration is configured, the response includes this header. + // If the object expiration is configured, the response includes this header. This + // functionality is not supported for directory buckets. Expiration *string // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm used. + // If server-side encryption with a customer-provided encryption key was + // requested, the response will include this header to confirm the encryption + // algorithm that's used. This functionality is not supported for directory + // buckets. SSECustomerAlgorithm *string - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round-trip message integrity - // verification of the customer-provided encryption key. 
+ // If server-side encryption with a customer-provided encryption key was + // requested, the response will include this header to provide the round-trip + // message integrity verification of the customer-provided encryption key. This + // functionality is not supported for directory buckets. SSECustomerKeyMD5 *string - // If present, specifies the Amazon Web Services KMS Encryption Context to use for + // If present, indicates the Amazon Web Services KMS Encryption Context to use for // object encryption. The value of this header is a base64-encoded UTF-8 string - // holding JSON with the encryption context key-value pairs. + // holding JSON with the encryption context key-value pairs. This functionality is + // not supported for directory buckets. SSEKMSEncryptionContext *string - // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed key that was used for the - // object. + // If present, indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. This functionality + // is not supported for directory buckets. SSEKMSKeyId *string - // The server-side encryption algorithm used when storing this object in Amazon S3 - // (for example, AES256, aws:kms). + // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). For directory buckets, only + // server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is + // supported. ServerSideEncryption types.ServerSideEncryption - // Version ID of the newly created copy. + // Version ID of the newly created copy. This functionality is not supported for + // directory buckets. VersionId *string // Metadata pertaining to the operation's result. 
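Beyond the documentation, the visible API change in CopyObjectOutput above is the move to pointer-typed fields such as BucketKeyEnabled *bool. A minimal sketch of a copy between general purpose buckets against the updated SDK follows; the bucket and key names are hypothetical, and CopySource must be URL-encoded:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// CopySource takes the form "source-bucket/source-key", URL-encoded.
	out, err := client.CopyObject(context.Background(), &s3.CopyObjectInput{
		Bucket:     aws.String("destination-bucket"),                   // hypothetical
		Key:        aws.String("reports/january-copy.pdf"),             // hypothetical
		CopySource: aws.String("awsexamplebucket/reports/january.pdf"), // hypothetical
	})
	if err != nil {
		// Per the "Response and special errors" notes above, the SDK also
		// surfaces an error embedded in a 200 OK response here.
		log.Fatal(err)
	}

	if out.CopyObjectResult != nil {
		fmt.Println("copied, etag:", aws.ToString(out.CopyObjectResult.ETag))
	}
	// Pointer-typed after this update; nil when no S3 Bucket Key was used.
	fmt.Println("bucket key enabled:", aws.ToBool(out.BucketKeyEnabled))
}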
@@ -462,6 +636,9 @@ type CopyObjectOutput struct {
 }
 
 func (c *Client) addOperationCopyObjectMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+		return err
+	}
 	err = stack.Serialize.Add(&awsRestxml_serializeOpCopyObject{}, middleware.After)
 	if err != nil {
 		return err
@@ -470,6 +647,13 @@ func (c *Client) addOperationCopyObjectMiddlewares(stack *middleware.Stack, opti
 	if err != nil {
 		return err
 	}
+	if err := addProtocolFinalizerMiddlewares(stack, options, "CopyObject"); err != nil {
+		return fmt.Errorf("add protocol finalizers: %v", err)
+	}
+
+	if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+		return err
+	}
 	if err = addSetLoggerMiddleware(stack, options); err != nil {
 		return err
 	}
@@ -488,16 +672,13 @@ func (c *Client) addOperationCopyObjectMiddlewares(stack *middleware.Stack, opti
 	if err = addRetryMiddlewares(stack, options); err != nil {
 		return err
 	}
-	if err = addHTTPSignerV4Middleware(stack, options); err != nil {
-		return err
-	}
 	if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
 		return err
 	}
 	if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
 		return err
 	}
-	if err = addClientUserAgent(stack); err != nil {
+	if err = addClientUserAgent(stack, options); err != nil {
 		return err
 	}
 	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
@@ -506,7 +687,10 @@ func (c *Client) addOperationCopyObjectMiddlewares(stack *middleware.Stack, opti
 	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
 		return err
 	}
-	if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+	if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addPutBucketContextMiddleware(stack); err != nil {
 		return err
 	}
 	if err = addOpCopyObjectValidationMiddleware(stack); err != nil {
@@ -518,6 +702,9 @@ func (c *Client) addOperationCopyObjectMiddlewares(stack *middleware.Stack, opti
 	if err = addMetadataRetrieverMiddleware(stack); err != nil {
 		return err
 	}
+	if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+		return err
+	}
 	if err = addCopyObjectUpdateEndpoint(stack, options); err != nil {
 		return err
 	}
@@ -536,20 +723,33 @@ func (c *Client) addOperationCopyObjectMiddlewares(stack *middleware.Stack, opti
 	if err = addRequestResponseLogging(stack, options); err != nil {
 		return err
 	}
+	if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+		return err
+	}
 	return nil
 }
 
+func (v *CopyObjectInput) bucket() (string, bool) {
+	if v.Bucket == nil {
+		return "", false
+	}
+	return *v.Bucket, true
+}
+
 func newServiceMetadataMiddleware_opCopyObject(region string) *awsmiddleware.RegisterServiceMetadata {
 	return &awsmiddleware.RegisterServiceMetadata{
 		Region:        region,
 		ServiceID:     ServiceID,
-		SigningName:   "s3",
 		OperationName: "CopyObject",
 	}
 }
 
-// getCopyObjectBucketMember returns a pointer to string denoting a provided bucket
-// member valueand a boolean indicating if the input has a modeled bucket name,
+// getCopyObjectBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
 func getCopyObjectBucketMember(input interface{}) (*string, bool) {
 	in := input.(*CopyObjectInput)
 	if in.Bucket == nil {
diff --git
a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go index 27322a2c..6357444b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go @@ -4,133 +4,93 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Creates a new S3 bucket. To create a bucket, you must register with Amazon S3 -// and have a valid Amazon Web Services Access Key ID to authenticate requests. +// This action creates an Amazon S3 bucket. To create an Amazon S3 on Outposts +// bucket, see CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html) +// . Creates a new S3 bucket. To create a bucket, you must set up Amazon S3 and +// have a valid Amazon Web Services Access Key ID to authenticate requests. // Anonymous requests are never allowed to create buckets. By creating the bucket, -// you become the bucket owner. Not every string is an acceptable bucket name. For -// information about bucket naming restrictions, see Bucket naming rules -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html). -// If you want to create an Amazon S3 on Outposts bucket, see Create Bucket -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html). -// By default, the bucket is created in the US East (N. Virginia) Region. You can -// optionally specify a Region in the request body. You might choose a Region to -// optimize latency, minimize costs, or address regulatory requirements. For -// example, if you reside in Europe, you will probably find it advantageous to -// create buckets in the Europe (Ireland) Region. For more information, see -// Accessing a bucket -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro). -// If you send your create bucket request to the s3.amazonaws.com endpoint, the -// request goes to the us-east-1 Region. Accordingly, the signature calculations in -// Signature Version 4 must use us-east-1 as the Region, even if the location -// constraint in the request specifies another Region where the bucket is to be -// created. If you create a bucket in a Region other than US East (N. Virginia), -// your application must be able to handle 307 redirect. For more information, see -// Virtual hosting of buckets -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html). Access -// control lists (ACLs) When creating a bucket using this operation, you can -// optionally configure the bucket ACL to specify the accounts or groups that -// should be granted specific permissions on the bucket. If your CreateBucket -// request sets bucket owner enforced for S3 Object Ownership and specifies a -// bucket ACL that provides access to an external Amazon Web Services account, your -// request fails with a 400 error and returns the -// InvalidBucketAclWithObjectOwnership error code. For more information, see -// Controlling object ownership -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) -// in the Amazon S3 User Guide. 
There are two ways to grant the appropriate
-// permissions using the request headers.
+// you become the bucket owner. There are two types of buckets: general purpose
+// buckets and directory buckets. For more information about these bucket types,
+// see Creating, configuring, and working with Amazon S3 buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html)
+// in the Amazon S3 User Guide.
+// - General purpose buckets - If you send your CreateBucket request to the
+// s3.amazonaws.com global endpoint, the request goes to the us-east-1 Region. So
+// the signature calculations in Signature Version 4 must use us-east-1 as the
+// Region, even if the location constraint in the request specifies another Region
+// where the bucket is to be created. If you create a bucket in a Region other than
+// US East (N. Virginia), your application must be able to handle 307 redirect. For
+// more information, see Virtual hosting of buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html)
+// in the Amazon S3 User Guide.
+// - Directory buckets - For directory buckets, you must make requests for this
+// API operation to the Regional endpoint. These endpoints support path-style
+// requests in the format
+// https://s3express-control.region_code.amazonaws.com/bucket-name .
+// Virtual-hosted-style requests aren't supported. For more information, see
+// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
+// in the Amazon S3 User Guide.
 //
-// * Specify a canned ACL using the
-// x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, known as
-// canned ACLs. Each canned ACL has a predefined set of grantees and permissions.
-// For more information, see Canned ACL
-// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
+// Permissions
+// - General purpose bucket permissions - In addition to the s3:CreateBucket
+// permission, the following permissions are required in a policy when your
+// CreateBucket request includes specific headers:
+// - Access control lists (ACLs) - In your CreateBucket request, if you specify
+// an access control list (ACL) and set it to public-read , public-read-write ,
+// authenticated-read , or if you explicitly specify any other custom ACLs, both
+// s3:CreateBucket and s3:PutBucketAcl permissions are required. In your
+// CreateBucket request, if you set the ACL to private , or if you don't specify
+// any ACLs, only the s3:CreateBucket permission is required.
+// - Object Lock - In your CreateBucket request, if you set
+// x-amz-bucket-object-lock-enabled to true, the
+// s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning permissions are
+// required.
+// - S3 Object Ownership - If your CreateBucket request includes the
+// x-amz-object-ownership header, then the s3:PutBucketOwnershipControls
+// permission is required. If your CreateBucket request sets BucketOwnerEnforced
+// for Amazon S3 Object Ownership and specifies a bucket ACL that provides access
+// to an external Amazon Web Services account, your request fails with a 400
+// error and returns the InvalidBucketAclWithObjectOwnership error code. For more
+// information, see Setting Object Ownership on an existing bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-ownership-existing-bucket.html)
+// in the Amazon S3 User Guide.
+// - S3 Block Public Access - If your specific use case requires granting public +// access to your S3 resources, you can disable Block Public Access. Specifically, +// you can create a new bucket with Block Public Access enabled, then separately +// call the DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +// API. To use this operation, you must have the s3:PutBucketPublicAccessBlock +// permission. For more information about S3 Block Public Access, see Blocking +// public access to your Amazon S3 storage (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html) +// in the Amazon S3 User Guide. +// - Directory bucket permissions - You must have the s3express:CreateBucket +// permission in an IAM identity-based policy instead of a bucket policy. +// Cross-account access to this API operation isn't supported. This operation can +// only be performed by the Amazon Web Services account that owns the resource. For +// more information about directory bucket policies and permissions, see Amazon +// Web Services Identity and Access Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) +// in the Amazon S3 User Guide. The permissions for ACLs, Object Lock, S3 Object +// Ownership, and S3 Block Public Access are not supported for directory buckets. +// For directory buckets, all Block Public Access settings are enabled at the +// bucket level and S3 Object Ownership is set to Bucket owner enforced (ACLs +// disabled). These settings can't be modified. For more information about +// permissions for creating and working with directory buckets, see Directory +// buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html) +// in the Amazon S3 User Guide. For more information about supported S3 features +// for directory buckets, see Features of S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-one-zone.html#s3-express-features) +// in the Amazon S3 User Guide. // -// * -// Specify access permissions explicitly using the x-amz-grant-read, -// x-amz-grant-write, x-amz-grant-read-acp, x-amz-grant-write-acp, and -// x-amz-grant-full-control headers. These headers map to the set of permissions -// Amazon S3 supports in an ACL. For more information, see Access control list -// (ACL) overview -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html). You -// specify each grantee as a type=value pair, where the type is one of the -// following: -// -// * id – if the value specified is the canonical user ID of an Amazon -// Web Services account -// -// * uri – if you are granting permissions to a predefined -// group -// -// * emailAddress – if the value specified is the email address of an Amazon -// Web Services account Using email addresses to specify a grantee is only -// supported in the following Amazon Web Services Regions: -// -// * US East (N. -// Virginia) -// -// * US West (N. California) -// -// * US West (Oregon) -// -// * Asia Pacific -// (Singapore) -// -// * Asia Pacific (Sydney) -// -// * Asia Pacific (Tokyo) -// -// * Europe -// (Ireland) -// -// * South America (São Paulo) -// -// For a list of all the Amazon S3 -// supported Regions and endpoints, see Regions and Endpoints -// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the -// Amazon Web Services General Reference. 
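[editor note: illustrative only, not part of the vendored diff] The directory-bucket wording above pairs the new bucket_base_name--az_id--x-s3 naming with the s3express-control Regional endpoint. A hedged sketch of creating one with this vendored SDK, assuming the S3 Express types shipped in this update (types.LocationInfo, types.BucketInfo, and the related enums; verify the exact names against the vendored types package). The helper name, bucket name, and AZ ID are placeholders, and the s3express:CreateBucket permission described above is assumed to be in place:

    package s3examples

    import (
        "context"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/service/s3"
        "github.com/aws/aws-sdk-go-v2/service/s3/types"
    )

    // createDirectoryBucket creates an S3 Express One Zone (directory) bucket.
    func createDirectoryBucket(ctx context.Context, client *s3.Client) error {
        _, err := client.CreateBucket(ctx, &s3.CreateBucketInput{
            // Directory bucket names embed the Availability Zone ID.
            Bucket: aws.String("doc-example-bucket--usw2-az1--x-s3"), // placeholder
            CreateBucketConfiguration: &types.CreateBucketConfiguration{
                Location: &types.LocationInfo{
                    Name: aws.String("usw2-az1"), // placeholder AZ ID
                    Type: types.LocationTypeAvailabilityZone,
                },
                Bucket: &types.BucketInfo{
                    DataRedundancy: types.DataRedundancySingleAvailabilityZone,
                    Type:           types.BucketTypeDirectory,
                },
            },
        })
        return err
    }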
-// -// For example, the following -// x-amz-grant-read header grants the Amazon Web Services accounts identified by -// account IDs permissions to read object data and its metadata: x-amz-grant-read: -// id="11112222333", id="444455556666" -// -// You can use either a canned ACL or specify -// access permissions explicitly. You cannot do both. Permissions In addition to -// s3:CreateBucket, the following permissions are required when your CreateBucket -// includes specific headers: -// -// * ACLs - If your CreateBucket request specifies ACL -// permissions and the ACL is public-read, public-read-write, authenticated-read, -// or if you specify access permissions explicitly through any other ACL, both -// s3:CreateBucket and s3:PutBucketAcl permissions are needed. If the ACL the -// CreateBucket request is private or doesn't specify any ACLs, only -// s3:CreateBucket permission is needed. -// -// * Object Lock - If -// ObjectLockEnabledForBucket is set to true in your CreateBucket request, -// s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning permissions are -// required. -// -// * S3 Object Ownership - If your CreateBucket request includes the the -// x-amz-object-ownership header, s3:PutBucketOwnershipControls permission is -// required. -// -// The following operations are related to CreateBucket: -// -// * PutObject -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// -// * -// DeleteBucket -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region.amazonaws.com . The following operations are related to +// CreateBucket : +// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) func (c *Client) CreateBucket(ctx context.Context, params *CreateBucketInput, optFns ...func(*Options)) (*CreateBucketOutput, error) { if params == nil { params = &CreateBucketInput{} @@ -148,37 +108,52 @@ func (c *Client) CreateBucket(ctx context.Context, params *CreateBucketInput, op type CreateBucketInput struct { - // The name of the bucket to create. + // The name of the bucket to create. General purpose buckets - For information + // about bucket naming restrictions, see Bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html) + // in the Amazon S3 User Guide. Directory buckets - When you use this operation + // with a directory bucket, you must use path-style requests in the format + // https://s3express-control.region_code.amazonaws.com/bucket-name . + // Virtual-hosted-style requests aren't supported. Directory bucket names must be + // unique in the chosen Availability Zone. Bucket names must also follow the format + // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 + // ). For information about bucket naming restrictions, see Directory bucket + // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide // // This member is required. Bucket *string - // The canned ACL to apply to the bucket. + // The canned ACL to apply to the bucket. This functionality is not supported for + // directory buckets. ACL types.BucketCannedACL // The configuration information for the bucket. 
CreateBucketConfiguration *types.CreateBucketConfiguration // Allows grantee the read, write, read ACP, and write ACP permissions on the - // bucket. + // bucket. This functionality is not supported for directory buckets. GrantFullControl *string - // Allows grantee to list the objects in the bucket. + // Allows grantee to list the objects in the bucket. This functionality is not + // supported for directory buckets. GrantRead *string - // Allows grantee to read the bucket ACL. + // Allows grantee to read the bucket ACL. This functionality is not supported for + // directory buckets. GrantReadACP *string // Allows grantee to create new objects in the bucket. For the bucket and object // owners of existing objects, also allows deletions and overwrites of those - // objects. + // objects. This functionality is not supported for directory buckets. GrantWrite *string - // Allows grantee to write the ACL for the applicable bucket. + // Allows grantee to write the ACL for the applicable bucket. This functionality + // is not supported for directory buckets. GrantWriteACP *string // Specifies whether you want S3 Object Lock to be enabled for the new bucket. - ObjectLockEnabledForBucket bool + // This functionality is not supported for directory buckets. + ObjectLockEnabledForBucket *bool // The container element for object ownership for a bucket's ownership controls. // BucketOwnerPreferred - Objects uploaded to the bucket change ownership to the @@ -188,14 +163,27 @@ type CreateBucketInput struct { // BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer // affect permissions. The bucket owner automatically owns and has full control // over every object in the bucket. The bucket only accepts PUT requests that don't - // specify an ACL or bucket owner full control ACLs, such as the - // bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed - // in the XML format. + // specify an ACL or specify bucket owner full control ACLs (such as the predefined + // bucket-owner-full-control canned ACL or a custom ACL in XML format that grants + // the same permissions). By default, ObjectOwnership is set to BucketOwnerEnforced + // and ACLs are disabled. We recommend keeping ACLs disabled, except in uncommon + // use cases where you must control access for each object individually. For more + // information about S3 Object Ownership, see Controlling ownership of objects and + // disabling ACLs for your bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. Directory buckets use the bucket owner enforced setting for S3 Object + // Ownership. ObjectOwnership types.ObjectOwnership noSmithyDocumentSerde } +func (in *CreateBucketInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) + p.DisableAccessPoints = ptr.Bool(true) +} + type CreateBucketOutput struct { // A forward slash followed by the name of the bucket. 
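[editor note: illustrative only, not part of the vendored diff] Alongside the doc changes, this hunk turns ObjectLockEnabledForBucket into a *bool, so existing callers must wrap the literal with aws.Bool. A minimal general-purpose sketch under that assumption; the helper name, bucket name, and Region constraint are placeholders:

    package s3examples

    import (
        "context"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/service/s3"
        "github.com/aws/aws-sdk-go-v2/service/s3/types"
    )

    // createLockedBucket creates a general purpose bucket with Object Lock on
    // and ACLs disabled (the BucketOwnerEnforced default described above).
    func createLockedBucket(ctx context.Context, client *s3.Client) error {
        _, err := client.CreateBucket(ctx, &s3.CreateBucketInput{
            Bucket: aws.String("doc-example-bucket"), // placeholder
            CreateBucketConfiguration: &types.CreateBucketConfiguration{
                // Required for any Region other than us-east-1.
                LocationConstraint: types.BucketLocationConstraintEuWest1,
            },
            ObjectLockEnabledForBucket: aws.Bool(true), // was a plain bool before this update
            ObjectOwnership:            types.ObjectOwnershipBucketOwnerEnforced,
        })
        return err
    }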
@@ -208,6 +196,9 @@ type CreateBucketOutput struct { } func (c *Client) addOperationCreateBucketMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpCreateBucket{}, middleware.After) if err != nil { return err @@ -216,6 +207,13 @@ func (c *Client) addOperationCreateBucketMiddlewares(stack *middleware.Stack, op if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "CreateBucket"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -234,16 +232,13 @@ func (c *Client) addOperationCreateBucketMiddlewares(stack *middleware.Stack, op if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -252,7 +247,10 @@ func (c *Client) addOperationCreateBucketMiddlewares(stack *middleware.Stack, op if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpCreateBucketValidationMiddleware(stack); err != nil { @@ -264,6 +262,9 @@ func (c *Client) addOperationCreateBucketMiddlewares(stack *middleware.Stack, op if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addCreateBucketUpdateEndpoint(stack, options); err != nil { return err } @@ -279,14 +280,26 @@ func (c *Client) addOperationCreateBucketMiddlewares(stack *middleware.Stack, op if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *CreateBucketInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opCreateBucket(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "CreateBucket", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go index 825feebd..4f24e11c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go @@ -4,6 +4,7 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" 
"github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" @@ -16,223 +17,135 @@ import ( // This action initiates a multipart upload and returns an upload ID. This upload // ID is used to associate all of the parts in the specific multipart upload. You // specify this upload ID in each of your subsequent upload part requests (see -// UploadPart -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)). You also -// include this upload ID in the final request to either complete or abort the -// multipart upload request. For more information about multipart uploads, see -// Multipart Upload Overview -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html). If you have -// configured a lifecycle rule to abort incomplete multipart uploads, the upload -// must complete within the number of days specified in the bucket lifecycle +// UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// ). You also include this upload ID in the final request to either complete or +// abort the multipart upload request. For more information about multipart +// uploads, see Multipart Upload Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) +// in the Amazon S3 User Guide. After you initiate a multipart upload and upload +// one or more parts, to stop being charged for storing the uploaded parts, you +// must either complete or abort the multipart upload. Amazon S3 frees up the space +// used to store the parts and stops charging you for storing them only after you +// either complete or abort a multipart upload. If you have configured a lifecycle +// rule to abort incomplete multipart uploads, the created multipart upload must be +// completed within the number of days specified in the bucket lifecycle // configuration. Otherwise, the incomplete multipart upload becomes eligible for // an abort action and Amazon S3 aborts the multipart upload. For more information, -// see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Policy -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). -// For information about the permissions required to use the multipart upload API, -// see Multipart Upload and Permissions -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). For -// request signing, multipart upload is just a series of regular requests. You -// initiate a multipart upload, send one or more requests to upload parts, and then -// complete the multipart upload process. You sign each request individually. There -// is nothing special about signing multipart upload requests. For more information -// about signing, see Authenticating Requests (Amazon Web Services Signature -// Version 4) -// (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html). -// After you initiate a multipart upload and upload one or more parts, to stop -// being charged for storing the uploaded parts, you must either complete or abort -// the multipart upload. Amazon S3 frees up the space used to store the parts and -// stop charging you for storing them only after you either complete or abort a -// multipart upload. You can optionally request server-side encryption. For -// server-side encryption, Amazon S3 encrypts your data as it writes it to disks in -// its data centers and decrypts it when you access it. 
You can provide your own -// encryption key, or use Amazon Web Services KMS keys or Amazon S3-managed -// encryption keys. If you choose to provide your own encryption key, the request -// headers you provide in UploadPart -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) and -// UploadPartCopy -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) -// requests must match the headers you used in the request to initiate the upload -// by using CreateMultipartUpload. To perform a multipart upload with encryption -// using an Amazon Web Services KMS key, the requester must have permission to the -// kms:Decrypt and kms:GenerateDataKey* actions on the key. These permissions are -// required because Amazon S3 must decrypt and read data from the encrypted file -// parts before it completes the multipart upload. For more information, see -// Multipart upload API and permissions -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) -// in the Amazon S3 User Guide. If your Identity and Access Management (IAM) user -// or role is in the same Amazon Web Services account as the KMS key, then you must -// have these permissions on the key policy. If your IAM user or role belongs to a -// different account than the key, then you must have the permissions on both the -// key policy and your IAM user or role. For more information, see Protecting Data -// Using Server-Side Encryption -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html). -// Access Permissions When copying an object, you can optionally specify the -// accounts or groups that should be granted specific permissions on the new -// object. There are two ways to grant the permissions using the request -// headers: -// -// * Specify a canned ACL with the x-amz-acl request header. For more -// information, see Canned ACL -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). -// -// * -// Specify access permissions explicitly with the x-amz-grant-read, -// x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control -// headers. These parameters map to the set of permissions that Amazon S3 supports -// in an ACL. For more information, see Access Control List (ACL) Overview -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). -// -// You can -// use either a canned ACL or specify access permissions explicitly. You cannot do -// both. Server-Side- Encryption-Specific Request Headers You can optionally tell -// Amazon S3 to encrypt data at rest using server-side encryption. Server-side -// encryption is for data encryption at rest. Amazon S3 encrypts your data as it -// writes it to disks in its data centers and decrypts it when you access it. The -// option you use depends on whether you want to use Amazon Web Services managed -// encryption keys or provide your own encryption key. -// -// * Use encryption keys -// managed by Amazon S3 or customer managed key stored in Amazon Web Services Key -// Management Service (Amazon Web Services KMS) – If you want Amazon Web Services -// to manage the keys used to encrypt data, specify the following headers in the -// request. 
-// -// * x-amz-server-side-encryption -// -// * -// x-amz-server-side-encryption-aws-kms-key-id -// -// * -// x-amz-server-side-encryption-context -// -// If you specify -// x-amz-server-side-encryption:aws:kms, but don't provide -// x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web -// Services managed key in Amazon Web Services KMS to protect the data. All GET and -// PUT requests for an object protected by Amazon Web Services KMS fail if you -// don't make them with SSL or by using SigV4. For more information about -// server-side encryption with KMS key (SSE-KMS), see Protecting Data Using -// Server-Side Encryption with KMS keys -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). -// -// * -// Use customer-provided encryption keys – If you want to manage your own -// encryption keys, provide all the following headers in the request. -// -// * -// x-amz-server-side-encryption-customer-algorithm -// -// * -// x-amz-server-side-encryption-customer-key -// -// * -// x-amz-server-side-encryption-customer-key-MD5 -// -// For more information about -// server-side encryption with KMS keys (SSE-KMS), see Protecting Data Using -// Server-Side Encryption with KMS keys -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). -// -// Access-Control-List -// (ACL)-Specific Request Headers You also can use the following access -// control–related headers with this operation. By default, all objects are -// private. Only the owner has full access control. When adding a new object, you -// can grant permissions to individual Amazon Web Services accounts or to -// predefined groups defined by Amazon S3. These permissions are then added to the -// access control list (ACL) on the object. For more information, see Using ACLs -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). With -// this operation, you can grant access permissions using one of the following two -// methods: -// -// * Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of -// predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of -// grantees and permissions. For more information, see Canned ACL -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). -// -// * -// Specify access permissions explicitly — To explicitly grant access permissions -// to specific Amazon Web Services accounts or groups, use the following headers. -// Each header maps to specific permissions that Amazon S3 supports in an ACL. For -// more information, see Access Control List (ACL) Overview -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). In the -// header, you specify a list of grantees who get the specific permission. To grant -// permissions explicitly, use: -// -// * x-amz-grant-read -// -// * x-amz-grant-write -// -// * -// x-amz-grant-read-acp -// -// * x-amz-grant-write-acp -// -// * x-amz-grant-full-control -// -// You -// specify each grantee as a type=value pair, where the type is one of the -// following: -// -// * id – if the value specified is the canonical user ID of an Amazon -// Web Services account -// -// * uri – if you are granting permissions to a predefined -// group -// -// * emailAddress – if the value specified is the email address of an Amazon -// Web Services account Using email addresses to specify a grantee is only -// supported in the following Amazon Web Services Regions: -// -// * US East (N. -// Virginia) -// -// * US West (N. 
California) -// -// * US West (Oregon) -// -// * Asia Pacific -// (Singapore) -// -// * Asia Pacific (Sydney) -// -// * Asia Pacific (Tokyo) -// -// * Europe -// (Ireland) -// -// * South America (São Paulo) -// -// For a list of all the Amazon S3 -// supported Regions and endpoints, see Regions and Endpoints -// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the -// Amazon Web Services General Reference. -// -// For example, the following -// x-amz-grant-read header grants the Amazon Web Services accounts identified by -// account IDs permissions to read object data and its metadata: x-amz-grant-read: -// id="11112222333", id="444455556666" -// -// The following operations are related to -// CreateMultipartUpload: -// -// * UploadPart -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// -// * -// CompleteMultipartUpload -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) -// -// * -// AbortMultipartUpload -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) -// -// * -// ListParts -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// -// * -// ListMultipartUploads -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) +// . +// - Directory buckets - S3 Lifecycle is not supported by directory buckets. +// - Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support +// virtual-hosted-style requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . +// Path-style requests are not supported. For more information, see Regional and +// Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// Request signing For request signing, multipart upload is just a series of +// regular requests. You initiate a multipart upload, send one or more requests to +// upload parts, and then complete the multipart upload process. You sign each +// request individually. There is nothing special about signing multipart upload +// requests. For more information about signing, see Authenticating Requests +// (Amazon Web Services Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) +// in the Amazon S3 User Guide. Permissions +// - General purpose bucket permissions - For information about the permissions +// required to use the multipart upload API, see Multipart upload and permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon S3 User Guide. To perform a multipart upload with encryption by +// using an Amazon Web Services KMS key, the requester must have permission to the +// kms:Decrypt and kms:GenerateDataKey* actions on the key. These permissions are +// required because Amazon S3 must decrypt and read data from the encrypted file +// parts before it completes the multipart upload. 
For more information, see +// Multipart upload API and permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) +// and Protecting data using server-side encryption with Amazon Web Services KMS (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) +// in the Amazon S3 User Guide. +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// . +// +// Encryption +// - General purpose buckets - Server-side encryption is for data encryption at +// rest. Amazon S3 encrypts your data as it writes it to disks in its data centers +// and decrypts it when you access it. Amazon S3 automatically encrypts all new +// objects that are uploaded to an S3 bucket. When doing a multipart upload, if you +// don't specify encryption information in your request, the encryption setting of +// the uploaded parts is set to the default encryption configuration of the +// destination bucket. By default, all buckets have a base level of encryption +// configuration that uses server-side encryption with Amazon S3 managed keys +// (SSE-S3). If the destination bucket has a default encryption configuration that +// uses server-side encryption with an Key Management Service (KMS) key (SSE-KMS), +// or a customer-provided encryption key (SSE-C), Amazon S3 uses the corresponding +// KMS key, or a customer-provided key to encrypt the uploaded parts. When you +// perform a CreateMultipartUpload operation, if you want to use a different type +// of encryption setting for the uploaded parts, you can request that Amazon S3 +// encrypts the object with a different encryption key (such as an Amazon S3 +// managed key, a KMS key, or a customer-provided key). When the encryption setting +// in your request is different from the default encryption configuration of the +// destination bucket, the encryption setting in your request takes precedence. If +// you choose to provide your own encryption key, the request headers you provide +// in UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// and UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) +// requests must match the headers you used in the CreateMultipartUpload request. +// - Use KMS keys (SSE-KMS) that include the Amazon Web Services managed key ( +// aws/s3 ) and KMS customer managed keys stored in Key Management Service (KMS) +// – If you want Amazon Web Services to manage the keys used to encrypt data, +// specify the following headers in the request. 
+// - x-amz-server-side-encryption
+// - x-amz-server-side-encryption-aws-kms-key-id
+// - x-amz-server-side-encryption-context
+// - If you specify x-amz-server-side-encryption:aws:kms , but don't provide
+// x-amz-server-side-encryption-aws-kms-key-id , Amazon S3 uses the Amazon Web
+// Services managed key ( aws/s3 key) in KMS to protect the data.
+// - To perform a multipart upload with encryption by using an Amazon Web
+// Services KMS key, the requester must have permission to the kms:Decrypt and
+// kms:GenerateDataKey* actions on the key. These permissions are required
+// because Amazon S3 must decrypt and read data from the encrypted file parts
+// before it completes the multipart upload. For more information, see Multipart
+// upload API and permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions)
+// and Protecting data using server-side encryption with Amazon Web Services KMS (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html)
+// in the Amazon S3 User Guide.
+// - If your Identity and Access Management (IAM) user or role is in the same
+// Amazon Web Services account as the KMS key, then you must have these permissions
+// on the key policy. If your IAM user or role is in a different account from the
+// key, then you must have the permissions on both the key policy and your IAM user
+// or role.
+// - All GET and PUT requests for an object protected by KMS fail if you don't
+// make them by using Secure Sockets Layer (SSL), Transport Layer Security (TLS),
+// or Signature Version 4. For information about configuring any of the officially
+// supported Amazon Web Services SDKs and Amazon Web Services CLI, see
+// Specifying the Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version)
+// in the Amazon S3 User Guide. For more information about server-side
+// encryption with KMS keys (SSE-KMS), see Protecting Data Using Server-Side
+// Encryption with KMS keys (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html)
+// in the Amazon S3 User Guide.
+// - Use customer-provided encryption keys (SSE-C) – If you want to manage your
+// own encryption keys, provide all the following headers in the request.
+// - x-amz-server-side-encryption-customer-algorithm
+// - x-amz-server-side-encryption-customer-key
+// - x-amz-server-side-encryption-customer-key-MD5 For more information about
+// server-side encryption with customer-provided encryption keys (SSE-C), see
+// Protecting data using server-side encryption with customer-provided encryption
+// keys (SSE-C) (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html)
+// in the Amazon S3 User Guide.
+// - Directory buckets - For directory buckets, only server-side encryption with
+// Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported.
+//
+// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
+// Bucket_name.s3express-az_id.region.amazonaws.com .
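[editor note: illustrative only, not part of the vendored diff] The SSE-KMS header set listed above maps one-to-one to fields on CreateMultipartUploadInput. A hedged sketch; the helper name, bucket, key, and KMS key ARN are placeholders (for directory buckets only SSE-S3/AES256 applies, as the doc comment notes):

    package s3examples

    import (
        "context"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/service/s3"
        "github.com/aws/aws-sdk-go-v2/service/s3/types"
    )

    // startKMSEncryptedUpload initiates a multipart upload whose parts are
    // encrypted under a customer managed KMS key.
    func startKMSEncryptedUpload(ctx context.Context, client *s3.Client) (string, error) {
        out, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{
            Bucket: aws.String("doc-example-bucket"), // placeholder
            Key:    aws.String("large-object"),       // placeholder
            // x-amz-server-side-encryption: aws:kms
            ServerSideEncryption: types.ServerSideEncryptionAwsKms,
            // x-amz-server-side-encryption-aws-kms-key-id; omit to fall back to aws/s3.
            SSEKMSKeyId: aws.String("arn:aws:kms:us-west-2:111122223333:key/EXAMPLE"), // placeholder
            // *bool after this update.
            BucketKeyEnabled: aws.Bool(true),
        })
        if err != nil {
            return "", err
        }
        // The returned upload ID ties together every subsequent UploadPart call.
        return aws.ToString(out.UploadId), nil
    }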
The following operations are +// related to CreateMultipartUpload : +// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) func (c *Client) CreateMultipartUpload(ctx context.Context, params *CreateMultipartUploadInput, optFns ...func(*Options)) (*CreateMultipartUploadOutput, error) { if params == nil { params = &CreateMultipartUploadInput{} @@ -250,23 +163,31 @@ func (c *Client) CreateMultipartUpload(ctx context.Context, params *CreateMultip type CreateMultipartUploadInput struct { - // The name of the bucket to which to initiate the upload When using this action - // with an access point, you must direct requests to the access point hostname. The - // access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, - // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts - // hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the - // Amazon S3 User Guide. + // The name of the bucket where the multipart upload is initiated and where the + // object is uploaded. Directory buckets - When you use this operation with a + // directory bucket, you must use virtual-hosted-style requests in the format + // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + // supported. Directory bucket names must be unique in the chosen Availability + // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket + // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. 
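[editor note: illustrative only, not part of the vendored diff] As the Bucket doc above says, an access point ARN is accepted in place of the bucket name. A minimal sketch, with the helper name, ARN, and key as placeholders:

    package s3examples

    import (
        "context"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/service/s3"
    )

    // startUploadViaAccessPoint initiates a multipart upload through an access point.
    func startUploadViaAccessPoint(ctx context.Context, client *s3.Client) error {
        _, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{
            // The access point ARN stands in for the bucket name.
            Bucket: aws.String("arn:aws:s3:us-west-2:111122223333:accesspoint/my-access-point"), // placeholder
            Key:    aws.String("large-object"), // placeholder
        })
        return err
    }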
For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + // use this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts access point ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // This member is required. Bucket *string @@ -276,23 +197,33 @@ type CreateMultipartUploadInput struct { // This member is required. Key *string - // The canned ACL to apply to the object. This action is not supported by Amazon S3 - // on Outposts. + // The canned ACL to apply to the object. Amazon S3 supports a set of predefined + // ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and + // permissions. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) + // in the Amazon S3 User Guide. By default, all objects are private. Only the owner + // has full access control. When uploading an object, you can grant access + // permissions to individual Amazon Web Services accounts or to predefined groups + // defined by Amazon S3. These permissions are then added to the access control + // list (ACL) on the new object. For more information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) + // . One way to grant the permissions using the request headers is to specify a + // canned ACL with the x-amz-acl request header. + // - This functionality is not supported for directory buckets. + // - This functionality is not supported for Amazon S3 on Outposts. ACL types.ObjectCannedACL // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption - // with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true - // causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS. - // Specifying this header with an object action doesn’t affect bucket-level - // settings for S3 Bucket Key. - BucketKeyEnabled bool + // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). + // Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object + // encryption with SSE-KMS. Specifying this header with an object action doesn’t + // affect bucket-level settings for S3 Bucket Key. This functionality is not + // supported for directory buckets. + BucketKeyEnabled *bool // Specifies caching behavior along the request/reply chain. CacheControl *string - // Indicates the algorithm you want Amazon S3 to use to create the checksum for the - // object. For more information, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm that you want Amazon S3 to use to create the checksum + // for the object. 
For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumAlgorithm types.ChecksumAlgorithm @@ -301,155 +232,265 @@ type CreateMultipartUploadInput struct { // Specifies what content encodings have been applied to the object and thus what // decoding mechanisms must be applied to obtain the media-type referenced by the - // Content-Type header field. + // Content-Type header field. For directory buckets, only the aws-chunked value is + // supported in this header field. ContentEncoding *string - // The language the content is in. + // The language that the content is in. ContentLanguage *string // A standard MIME type describing the format of the object data. ContentType *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // The date and time at which the object is no longer cacheable. Expires *time.Time - // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. This - // action is not supported by Amazon S3 on Outposts. + // Specify access permissions explicitly to give the grantee READ, READ_ACP, and + // WRITE_ACP permissions on the object. By default, all objects are private. Only + // the owner has full access control. When uploading an object, you can use this + // header to explicitly grant access permissions to specific Amazon Web Services + // accounts or groups. This header maps to specific permissions that Amazon S3 + // supports in an ACL. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) + // in the Amazon S3 User Guide. You specify each grantee as a type=value pair, + // where the type is one of the following: + // - id – if the value specified is the canonical user ID of an Amazon Web + // Services account + // - uri – if you are granting permissions to a predefined group + // - emailAddress – if the value specified is the email address of an Amazon Web + // Services account Using email addresses to specify a grantee is only supported in + // the following Amazon Web Services Regions: + // - US East (N. Virginia) + // - US West (N. California) + // - US West (Oregon) + // - Asia Pacific (Singapore) + // - Asia Pacific (Sydney) + // - Asia Pacific (Tokyo) + // - Europe (Ireland) + // - South America (São Paulo) For a list of all the Amazon S3 supported Regions + // and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) + // in the Amazon Web Services General Reference. + // For example, the following x-amz-grant-read header grants the Amazon Web + // Services accounts identified by account IDs permissions to read object data and + // its metadata: x-amz-grant-read: id="11112222333", id="444455556666" + // - This functionality is not supported for directory buckets. + // - This functionality is not supported for Amazon S3 on Outposts. GrantFullControl *string - // Allows grantee to read the object data and its metadata. This action is not - // supported by Amazon S3 on Outposts. 
+ // Specify access permissions explicitly to allow grantee to read the object data
+ // and its metadata. By default, all objects are private. Only the owner has full
+ // access control. When uploading an object, you can use this header to explicitly
+ // grant access permissions to specific Amazon Web Services accounts or groups.
+ // This header maps to specific permissions that Amazon S3 supports in an ACL. For
+ // more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
+ // in the Amazon S3 User Guide. You specify each grantee as a type=value pair,
+ // where the type is one of the following:
+ // - id – if the value specified is the canonical user ID of an Amazon Web
+ // Services account
+ // - uri – if you are granting permissions to a predefined group
+ // - emailAddress – if the value specified is the email address of an Amazon Web
+ // Services account Using email addresses to specify a grantee is only supported in
+ // the following Amazon Web Services Regions:
+ // - US East (N. Virginia)
+ // - US West (N. California)
+ // - US West (Oregon)
+ // - Asia Pacific (Singapore)
+ // - Asia Pacific (Sydney)
+ // - Asia Pacific (Tokyo)
+ // - Europe (Ireland)
+ // - South America (São Paulo) For a list of all the Amazon S3 supported Regions
+ // and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+ // in the Amazon Web Services General Reference.
+ // For example, the following x-amz-grant-read header grants the Amazon Web
+ // Services accounts identified by account IDs permissions to read object data and
+ // its metadata: x-amz-grant-read: id="11112222333", id="444455556666"
+ // - This functionality is not supported for directory buckets.
+ // - This functionality is not supported for Amazon S3 on Outposts.
 GrantRead *string
 
- // Allows grantee to read the object ACL. This action is not supported by Amazon S3
- // on Outposts.
+ // Specify access permissions explicitly to allow grantee to read the object ACL.
+ // By default, all objects are private. Only the owner has full access control.
+ // When uploading an object, you can use this header to explicitly grant access
+ // permissions to specific Amazon Web Services accounts or groups. This header maps
+ // to specific permissions that Amazon S3 supports in an ACL. For more information,
+ // see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
+ // in the Amazon S3 User Guide. You specify each grantee as a type=value pair,
+ // where the type is one of the following:
+ // - id – if the value specified is the canonical user ID of an Amazon Web
+ // Services account
+ // - uri – if you are granting permissions to a predefined group
+ // - emailAddress – if the value specified is the email address of an Amazon Web
+ // Services account Using email addresses to specify a grantee is only supported in
+ // the following Amazon Web Services Regions:
+ // - US East (N. Virginia)
+ // - US West (N. California)
+ // - US West (Oregon)
+ // - Asia Pacific (Singapore)
+ // - Asia Pacific (Sydney)
+ // - Asia Pacific (Tokyo)
+ // - Europe (Ireland)
+ // - South America (São Paulo) For a list of all the Amazon S3 supported Regions
+ // and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+ // in the Amazon Web Services General Reference.
+ // For example, the following x-amz-grant-read header grants the Amazon Web
+ // Services accounts identified by account IDs permissions to read object data and
+ // its metadata: x-amz-grant-read: id="11112222333", id="444455556666"
+ // - This functionality is not supported for directory buckets.
+ // - This functionality is not supported for Amazon S3 on Outposts.
GrantReadACP *string

- // Allows grantee to write the ACL for the applicable object. This action is not
- // supported by Amazon S3 on Outposts.
+ // Specify access permissions explicitly to allow grantee to write the ACL for
+ // the applicable object. By default, all objects are private.
+ // Only the owner has full access control. When uploading an object, you can use
+ // this header to explicitly grant access permissions to specific Amazon Web
+ // Services accounts or groups. This header maps to specific permissions that
+ // Amazon S3 supports in an ACL. For more information, see Access Control List
+ // (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
+ // in the Amazon S3 User Guide. You specify each grantee as a type=value pair,
+ // where the type is one of the following:
+ // - id – if the value specified is the canonical user ID of an Amazon Web
+ // Services account
+ // - uri – if you are granting permissions to a predefined group
+ // - emailAddress – if the value specified is the email address of an Amazon Web
+ // Services account Using email addresses to specify a grantee is only supported in
+ // the following Amazon Web Services Regions:
+ // - US East (N. Virginia)
+ // - US West (N. California)
+ // - US West (Oregon)
+ // - Asia Pacific (Singapore)
+ // - Asia Pacific (Sydney)
+ // - Asia Pacific (Tokyo)
+ // - Europe (Ireland)
+ // - South America (São Paulo) For a list of all the Amazon S3 supported Regions
+ // and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+ // in the Amazon Web Services General Reference.
+ // For example, the following x-amz-grant-read header grants the Amazon Web
+ // Services accounts identified by account IDs permissions to read object data and
+ // its metadata: x-amz-grant-read: id="11112222333", id="444455556666"
+ // - This functionality is not supported for directory buckets.
+ // - This functionality is not supported for Amazon S3 on Outposts.
GrantWriteACP *string

// A map of metadata to store with the object in S3.
Metadata map[string]string

- // Specifies whether you want to apply a legal hold to the uploaded object.
+ // Specifies whether you want to apply a legal hold to the uploaded object. This
+ // functionality is not supported for directory buckets.
ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus

// Specifies the Object Lock mode that you want to apply to the uploaded object.
+ // This functionality is not supported for directory buckets.
ObjectLockMode types.ObjectLockMode

- // Specifies the date and time when you want the Object Lock to expire.
+ // Specifies the date and time when you want the Object Lock to expire. This
+ // functionality is not supported for directory buckets.
ObjectLockRetainUntilDate *time.Time

// Confirms that the requester knows that they will be charged for the request.
- // Bucket owners need not specify this parameter in their requests.
For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer - // Specifies the algorithm to use to when encrypting the object (for example, - // AES256). + // Specifies the algorithm to use when encrypting the object (for example, + // AES256). This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use in // encrypting data. This value is used to store the object and then it is // discarded; Amazon S3 does not store the encryption key. The key must be // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. + // x-amz-server-side-encryption-customer-algorithm header. This functionality is + // not supported for directory buckets. SSECustomerKey *string - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. + // Specifies the 128-bit MD5 digest of the customer-provided encryption key + // according to RFC 1321. Amazon S3 uses this header for a message integrity check + // to ensure that the encryption key was transmitted without error. This + // functionality is not supported for directory buckets. SSECustomerKeyMD5 *string // Specifies the Amazon Web Services KMS Encryption Context to use for object // encryption. The value of this header is a base64-encoded UTF-8 string holding - // JSON with the encryption context key-value pairs. + // JSON with the encryption context key-value pairs. This functionality is not + // supported for directory buckets. SSEKMSEncryptionContext *string - // Specifies the ID of the symmetric customer managed key to use for object - // encryption. All GET and PUT requests for an object protected by Amazon Web - // Services KMS will fail if not made via SSL or using SigV4. For information about - // configuring using any of the officially supported Amazon Web Services SDKs and - // Amazon Web Services CLI, see Specifying the Signature Version in Request - // Authentication - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) - // in the Amazon S3 User Guide. + // Specifies the ID (Key ID, Key ARN, or Key Alias) of the symmetric encryption + // customer managed key to use for object encryption. This functionality is not + // supported for directory buckets. SSEKMSKeyId *string - // The server-side encryption algorithm used when storing this object in Amazon S3 - // (for example, AES256, aws:kms). + // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256 , aws:kms ). 
For directory buckets, only server-side + // encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. ServerSideEncryption types.ServerSideEncryption // By default, Amazon S3 uses the STANDARD Storage Class to store newly created // objects. The STANDARD storage class provides high durability and high // availability. Depending on performance needs, you can specify a different - // Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For - // more information, see Storage Classes - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) in - // the Amazon S3 User Guide. + // Storage Class. For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) + // in the Amazon S3 User Guide. + // - For directory buckets, only the S3 Express One Zone storage class is + // supported to store newly created objects. + // - Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. StorageClass types.StorageClass - // The tag-set for the object. The tag-set must be encoded as URL Query parameters. + // The tag-set for the object. The tag-set must be encoded as URL Query + // parameters. This functionality is not supported for directory buckets. Tagging *string // If the bucket is configured as a website, redirects requests for this object to // another object in the same bucket or to an external URL. Amazon S3 stores the - // value of this header in the object metadata. + // value of this header in the object metadata. This functionality is not supported + // for directory buckets. WebsiteRedirectLocation *string noSmithyDocumentSerde } +func (in *CreateMultipartUploadInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.Key = in.Key + +} + type CreateMultipartUploadOutput struct { - // If the bucket has a lifecycle rule configured with an action to abort incomplete - // multipart uploads and the prefix in the lifecycle rule matches the object name - // in the request, the response includes this header. The header indicates when the - // initiated multipart upload becomes eligible for an abort operation. For more - // information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle - // Policy - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). - // The response also includes the x-amz-abort-rule-id header that provides the ID - // of the lifecycle configuration rule that defines this action. + // If the bucket has a lifecycle rule configured with an action to abort + // incomplete multipart uploads and the prefix in the lifecycle rule matches the + // object name in the request, the response includes this header. The header + // indicates when the initiated multipart upload becomes eligible for an abort + // operation. For more information, see Aborting Incomplete Multipart Uploads + // Using a Bucket Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) + // in the Amazon S3 User Guide. The response also includes the x-amz-abort-rule-id + // header that provides the ID of the lifecycle configuration rule that defines the + // abort action. This functionality is not supported for directory buckets. AbortDate *time.Time // This header is returned along with the x-amz-abort-date header. 
It identifies // the applicable lifecycle configuration rule that defines the action to abort - // incomplete multipart uploads. + // incomplete multipart uploads. This functionality is not supported for directory + // buckets. AbortRuleId *string // The name of the bucket to which the multipart upload was initiated. Does not - // return the access point ARN or access point alias if used. When using this - // action with an access point, you must direct requests to the access point - // hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, - // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts - // hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the - // Amazon S3 User Guide. + // return the access point ARN or access point alias if used. Access points are not + // supported by directory buckets. Bucket *string // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Amazon Web Services KMS (SSE-KMS). - BucketKeyEnabled bool + // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality + // is not supported for directory buckets. + BucketKeyEnabled *bool // The algorithm that was used to create a checksum of the object. ChecksumAlgorithm types.ChecksumAlgorithm @@ -458,30 +499,35 @@ type CreateMultipartUploadOutput struct { Key *string // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm used. + // If server-side encryption with a customer-provided encryption key was + // requested, the response will include this header to confirm the encryption + // algorithm that's used. This functionality is not supported for directory + // buckets. SSECustomerAlgorithm *string - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round-trip message integrity - // verification of the customer-provided encryption key. + // If server-side encryption with a customer-provided encryption key was + // requested, the response will include this header to provide the round-trip + // message integrity verification of the customer-provided encryption key. This + // functionality is not supported for directory buckets. 
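+ // As an illustrative sketch only (not generated SDK documentation), the
+ // digest carried in this header is the base64-encoded MD5 of the raw key
+ // bytes (not of their base64 form), which a caller could compute with
+ // crypto/md5 and encoding/base64:
+ //
+ //	sum := md5.Sum(rawKey) // rawKey holds the raw 256-bit key
+ //	keyMD5 := base64.StdEncoding.EncodeToString(sum[:])
+ //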
SSECustomerKeyMD5 *string - // If present, specifies the Amazon Web Services KMS Encryption Context to use for + // If present, indicates the Amazon Web Services KMS Encryption Context to use for // object encryption. The value of this header is a base64-encoded UTF-8 string - // holding JSON with the encryption context key-value pairs. + // holding JSON with the encryption context key-value pairs. This functionality is + // not supported for directory buckets. SSEKMSEncryptionContext *string - // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed key that was used for the - // object. + // If present, indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. This functionality + // is not supported for directory buckets. SSEKMSKeyId *string - // The server-side encryption algorithm used when storing this object in Amazon S3 - // (for example, AES256, aws:kms). + // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256 , aws:kms ). For directory buckets, only server-side + // encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. ServerSideEncryption types.ServerSideEncryption // ID for the initiated multipart upload. @@ -494,6 +540,9 @@ type CreateMultipartUploadOutput struct { } func (c *Client) addOperationCreateMultipartUploadMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpCreateMultipartUpload{}, middleware.After) if err != nil { return err @@ -502,6 +551,13 @@ func (c *Client) addOperationCreateMultipartUploadMiddlewares(stack *middleware. if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "CreateMultipartUpload"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -520,16 +576,13 @@ func (c *Client) addOperationCreateMultipartUploadMiddlewares(stack *middleware. if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -538,7 +591,10 @@ func (c *Client) addOperationCreateMultipartUploadMiddlewares(stack *middleware. if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpCreateMultipartUploadValidationMiddleware(stack); err != nil { @@ -550,6 +606,9 @@ func (c *Client) addOperationCreateMultipartUploadMiddlewares(stack *middleware. 
if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addCreateMultipartUploadUpdateEndpoint(stack, options); err != nil { return err } @@ -565,14 +624,29 @@ func (c *Client) addOperationCreateMultipartUploadMiddlewares(stack *middleware. if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = addSetCreateMPUChecksumAlgorithm(stack); err != nil { + return err + } return nil } +func (v *CreateMultipartUploadInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opCreateMultipartUpload(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "CreateMultipartUpload", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateSession.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateSession.go new file mode 100644 index 00000000..1cb1f151 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateSession.go @@ -0,0 +1,260 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Creates a session that establishes temporary security credentials to support +// fast authentication and authorization for the Zonal endpoint APIs on directory +// buckets. For more information about Zonal endpoint APIs that include the +// Availability Zone in the request endpoint, see S3 Express One Zone APIs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-APIs.html) +// in the Amazon S3 User Guide. To make Zonal endpoint API requests on a directory +// bucket, use the CreateSession API operation. Specifically, you grant +// s3express:CreateSession permission to a bucket in a bucket policy or an IAM +// identity-based policy. Then, you use IAM credentials to make the CreateSession +// API request on the bucket, which returns temporary security credentials that +// include the access key ID, secret access key, session token, and expiration. +// These credentials have associated permissions to access the Zonal endpoint APIs. +// After the session is created, you don’t need to use other policies to grant +// permissions to each Zonal endpoint API individually. Instead, in your Zonal +// endpoint API requests, you sign your requests by applying the temporary security +// credentials of the session to the request headers and following the SigV4 +// protocol for authentication. You also apply the session token to the +// x-amz-s3session-token request header for authorization. Temporary security +// credentials are scoped to the bucket and expire after 5 minutes. After the +// expiration time, any calls that you make with those credentials will fail. 
You +// must use IAM credentials again to make a CreateSession API request that +// generates a new set of temporary credentials for use. Temporary credentials +// cannot be extended or refreshed beyond the original specified interval. If you +// use Amazon Web Services SDKs, SDKs handle the session token refreshes +// automatically to avoid service interruptions when a session expires. We +// recommend that you use the Amazon Web Services SDKs to initiate and manage +// requests to the CreateSession API. For more information, see Performance +// guidelines and design patterns (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-optimizing-performance-guidelines-design-patterns.html#s3-express-optimizing-performance-session-authentication) +// in the Amazon S3 User Guide. +// - You must make requests for this API operation to the Zonal endpoint. These +// endpoints support virtual-hosted-style requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests +// are not supported. For more information, see Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// - CopyObject API operation - Unlike other Zonal endpoint APIs, the CopyObject +// API operation doesn't use the temporary security credentials returned from the +// CreateSession API operation for authentication and authorization. For +// information about authentication and authorization of the CopyObject API +// operation on directory buckets, see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) +// . +// - HeadBucket API operation - Unlike other Zonal endpoint APIs, the HeadBucket +// API operation doesn't use the temporary security credentials returned from the +// CreateSession API operation for authentication and authorization. For +// information about authentication and authorization of the HeadBucket API +// operation on directory buckets, see HeadBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html) +// . +// +// Permissions To obtain temporary security credentials, you must create a bucket +// policy or an IAM identity-based policy that grants s3express:CreateSession +// permission to the bucket. In a policy, you can have the s3express:SessionMode +// condition key to control who can create a ReadWrite or ReadOnly session. For +// more information about ReadWrite or ReadOnly sessions, see +// x-amz-create-session-mode (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html#API_CreateSession_RequestParameters) +// . For example policies, see Example bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) +// and Amazon Web Services Identity and Access Management (IAM) identity-based +// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) +// in the Amazon S3 User Guide. To grant cross-account access to Zonal endpoint +// APIs, the bucket policy should also grant both accounts the +// s3express:CreateSession permission. HTTP Host header syntax Directory buckets - +// The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com . 
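+//
+// As a minimal illustrative usage sketch (the bucket name is a placeholder,
+// error handling is elided, and client is assumed to be an *s3.Client):
+//
+//	out, err := client.CreateSession(ctx, &s3.CreateSessionInput{
+//		Bucket: aws.String("DOC-EXAMPLE-BUCKET--usw2-az2--x-s3"),
+//	})
+//	if err != nil {
+//		// handle the error
+//	}
+//	creds := out.Credentials // temporary credentials that expire after 5 minutes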
+func (c *Client) CreateSession(ctx context.Context, params *CreateSessionInput, optFns ...func(*Options)) (*CreateSessionOutput, error) {
+    if params == nil {
+        params = &CreateSessionInput{}
+    }
+
+    result, metadata, err := c.invokeOperation(ctx, "CreateSession", params, optFns, c.addOperationCreateSessionMiddlewares)
+    if err != nil {
+        return nil, err
+    }
+
+    out := result.(*CreateSessionOutput)
+    out.ResultMetadata = metadata
+    return out, nil
+}
+
+type CreateSessionInput struct {
+
+    // The name of the bucket that you create a session for.
+    //
+    // This member is required.
+    Bucket *string
+
+    // Specifies the mode of the session that will be created, either ReadWrite or
+    // ReadOnly . By default, a ReadWrite session is created. A ReadWrite session is
+    // capable of executing all the Zonal endpoint APIs on a directory bucket. A
+    // ReadOnly session is constrained to execute the following Zonal endpoint APIs:
+    // GetObject , HeadObject , ListObjectsV2 , GetObjectAttributes , ListParts , and
+    // ListMultipartUploads .
+    SessionMode types.SessionMode
+
+    noSmithyDocumentSerde
+}
+
+func (in *CreateSessionInput) bindEndpointParams(p *EndpointParameters) {
+    p.Bucket = in.Bucket
+    p.DisableS3ExpressSessionAuth = ptr.Bool(true)
+}
+
+type CreateSessionOutput struct {
+
+    // The established temporary security credentials for the created session.
+    //
+    // This member is required.
+    Credentials *types.SessionCredentials
+
+    // Metadata pertaining to the operation's result.
+    ResultMetadata middleware.Metadata
+
+    noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationCreateSessionMiddlewares(stack *middleware.Stack, options Options) (err error) {
+    if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+        return err
+    }
+    err = stack.Serialize.Add(&awsRestxml_serializeOpCreateSession{}, middleware.After)
+    if err != nil {
+        return err
+    }
+    err = stack.Deserialize.Add(&awsRestxml_deserializeOpCreateSession{}, middleware.After)
+    if err != nil {
+        return err
+    }
+    if err := addProtocolFinalizerMiddlewares(stack, options, "CreateSession"); err != nil {
+        return fmt.Errorf("add protocol finalizers: %v", err)
+    }
+
+    if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+        return err
+    }
+    if err = addSetLoggerMiddleware(stack, options); err != nil {
+        return err
+    }
+    if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+        return err
+    }
+    if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+        return err
+    }
+    if err = addResolveEndpointMiddleware(stack, options); err != nil {
+        return err
+    }
+    if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+        return err
+    }
+    if err = addRetryMiddlewares(stack, options); err != nil {
+        return err
+    }
+    if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+        return err
+    }
+    if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+        return err
+    }
+    if err = addClientUserAgent(stack, options); err != nil {
+        return err
+    }
+    if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+        return err
+    }
+    if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+        return err
+    }
+    if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+        return err
+    }
+    if err = addPutBucketContextMiddleware(stack); err != nil {
+        return err
+    }
+    if err = addOpCreateSessionValidationMiddleware(stack); err != nil {
+        return err
+    }
+    if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateSession(options.Region), middleware.Before); err != nil {
+        return err
+    }
+    if err = addMetadataRetrieverMiddleware(stack); err != nil {
+        return err
+    }
+    if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+        return err
+    }
+    if err = addCreateSessionUpdateEndpoint(stack, options); err != nil {
+        return err
+    }
+    if err = addResponseErrorMiddleware(stack); err != nil {
+        return err
+    }
+    if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
+        return err
+    }
+    if err = disableAcceptEncodingGzip(stack); err != nil {
+        return err
+    }
+    if err = addRequestResponseLogging(stack, options); err != nil {
+        return err
+    }
+    if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+        return err
+    }
+    if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+        return err
+    }
+    return nil
+}
+
+func (v *CreateSessionInput) bucket() (string, bool) {
+    if v.Bucket == nil {
+        return "", false
+    }
+    return *v.Bucket, true
+}
+
+func newServiceMetadataMiddleware_opCreateSession(region string) *awsmiddleware.RegisterServiceMetadata {
+    return &awsmiddleware.RegisterServiceMetadata{
+        Region:        region,
+        ServiceID:     ServiceID,
+        OperationName: "CreateSession",
+    }
+}
+
+// getCreateSessionBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
+func getCreateSessionBucketMember(input interface{}) (*string, bool) {
+    in := input.(*CreateSessionInput)
+    if in.Bucket == nil {
+        return nil, false
+    }
+    return in.Bucket, true
+}
+func addCreateSessionUpdateEndpoint(stack *middleware.Stack, options Options) error {
+    return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
+        Accessor: s3cust.UpdateEndpointParameterAccessor{
+            GetBucketFromInput: getCreateSessionBucketMember,
+        },
+        UsePathStyle:                   options.UsePathStyle,
+        UseAccelerate:                  options.UseAccelerate,
+        SupportsAccelerate:             true,
+        TargetS3ObjectLambda:           false,
+        EndpointResolver:               options.EndpointResolver,
+        EndpointResolverOptions:        options.EndpointOptions,
+        UseARNRegion:                   options.UseARNRegion,
+        DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
+    })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go
index 6bfb43c2..34645bb9 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucket.go
@@ -4,23 +4,44 @@ package s3

 import (
 	"context"
+	"fmt"
 	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
 	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
 	s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
 	"github.com/aws/smithy-go/middleware"
+	"github.com/aws/smithy-go/ptr"
 	smithyhttp "github.com/aws/smithy-go/transport/http"
 )

 // Deletes the S3 bucket. All objects (including all object versions and delete
 // markers) in the bucket must be deleted before the bucket itself can be deleted.
-// Related Resources
+// - Directory buckets - If multipart uploads in a directory bucket are in
+// progress, you can't delete the bucket until all the in-progress multipart
+// uploads are aborted or completed.
+// - Directory buckets - For directory buckets, you must make requests for this
+// API operation to the Regional endpoint.
These endpoints support path-style +// requests in the format +// https://s3express-control.region_code.amazonaws.com/bucket-name . +// Virtual-hosted-style requests aren't supported. For more information, see +// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. // -// * CreateBucket -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// Permissions +// - General purpose bucket permissions - You must have the s3:DeleteBucket +// permission on the specified bucket in a policy. +// - Directory bucket permissions - You must have the s3express:DeleteBucket +// permission in an IAM identity-based policy instead of a bucket policy. +// Cross-account access to this API operation isn't supported. This operation can +// only be performed by the Amazon Web Services account that owns the resource. For +// more information about directory bucket policies and permissions, see Amazon +// Web Services Identity and Access Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) +// in the Amazon S3 User Guide. // -// * -// DeleteObject -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region.amazonaws.com . The following operations are related to +// DeleteBucket : +// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) func (c *Client) DeleteBucket(ctx context.Context, params *DeleteBucketInput, optFns ...func(*Options)) (*DeleteBucketOutput, error) { if params == nil { params = &DeleteBucketInput{} @@ -38,19 +59,34 @@ func (c *Client) DeleteBucket(ctx context.Context, params *DeleteBucketInput, op type DeleteBucketInput struct { - // Specifies the bucket being deleted. + // Specifies the bucket being deleted. Directory buckets - When you use this + // operation with a directory bucket, you must use path-style requests in the + // format https://s3express-control.region_code.amazonaws.com/bucket-name . + // Virtual-hosted-style requests aren't supported. Directory bucket names must be + // unique in the chosen Availability Zone. Bucket names must also follow the format + // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 + // ). For information about bucket naming restrictions, see Directory bucket + // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide // // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). For directory buckets, this header + // is not supported in this API operation. If you specify this header, the request + // fails with the HTTP status code 501 Not Implemented . 
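+ // As a minimal illustrative sketch (the bucket name and account ID below are
+ // placeholders, and client is assumed to be an *s3.Client), the expected
+ // owner can be pinned like this:
+ //
+ //	_, err := client.DeleteBucket(ctx, &s3.DeleteBucketInput{
+ //		Bucket:              aws.String("DOC-EXAMPLE-BUCKET"),
+ //		ExpectedBucketOwner: aws.String("111122223333"),
+ //	})
+ //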
ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *DeleteBucketInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type DeleteBucketOutput struct { // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata @@ -59,6 +95,9 @@ type DeleteBucketOutput struct { } func (c *Client) addOperationDeleteBucketMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucket{}, middleware.After) if err != nil { return err @@ -67,6 +106,13 @@ func (c *Client) addOperationDeleteBucketMiddlewares(stack *middleware.Stack, op if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucket"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -85,16 +131,13 @@ func (c *Client) addOperationDeleteBucketMiddlewares(stack *middleware.Stack, op if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -103,7 +146,10 @@ func (c *Client) addOperationDeleteBucketMiddlewares(stack *middleware.Stack, op if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpDeleteBucketValidationMiddleware(stack); err != nil { @@ -115,6 +161,9 @@ func (c *Client) addOperationDeleteBucketMiddlewares(stack *middleware.Stack, op if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addDeleteBucketUpdateEndpoint(stack, options); err != nil { return err } @@ -130,14 +179,26 @@ func (c *Client) addOperationDeleteBucketMiddlewares(stack *middleware.Stack, op if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *DeleteBucketInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opDeleteBucket(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "DeleteBucket", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go index e016d976..55d8fc1c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketAnalyticsConfiguration.go @@ -4,38 +4,29 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes an analytics configuration for the bucket (specified by the analytics -// configuration ID). To use this operation, you must have permissions to perform -// the s3:PutAnalyticsConfiguration action. The bucket owner has this permission by +// This operation is not supported by directory buckets. Deletes an analytics +// configuration for the bucket (specified by the analytics configuration ID). To +// use this operation, you must have permissions to perform the +// s3:PutAnalyticsConfiguration action. The bucket owner has this permission by // default. The bucket owner can grant this permission to others. For more // information about permissions, see Permissions Related to Bucket Subresource -// Operations -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). -// For information about the Amazon S3 analytics feature, see Amazon S3 Analytics – -// Storage Class Analysis -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). -// The following operations are related to DeleteBucketAnalyticsConfiguration: -// -// * -// GetBucketAnalyticsConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) -// -// * -// ListBucketAnalyticsConfigurations -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) -// -// * -// PutBucketAnalyticsConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) +// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// . For information about the Amazon S3 analytics feature, see Amazon S3 +// Analytics – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) +// . 
The following operations are related to DeleteBucketAnalyticsConfiguration : +// - GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) +// - ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) +// - PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) func (c *Client) DeleteBucketAnalyticsConfiguration(ctx context.Context, params *DeleteBucketAnalyticsConfigurationInput, optFns ...func(*Options)) (*DeleteBucketAnalyticsConfigurationOutput, error) { if params == nil { params = &DeleteBucketAnalyticsConfigurationInput{} @@ -63,14 +54,19 @@ type DeleteBucketAnalyticsConfigurationInput struct { // This member is required. Id *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *DeleteBucketAnalyticsConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type DeleteBucketAnalyticsConfigurationOutput struct { // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata @@ -79,6 +75,9 @@ type DeleteBucketAnalyticsConfigurationOutput struct { } func (c *Client) addOperationDeleteBucketAnalyticsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketAnalyticsConfiguration{}, middleware.After) if err != nil { return err @@ -87,6 +86,13 @@ func (c *Client) addOperationDeleteBucketAnalyticsConfigurationMiddlewares(stack if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketAnalyticsConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -105,16 +111,13 @@ func (c *Client) addOperationDeleteBucketAnalyticsConfigurationMiddlewares(stack if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -123,7 +126,10 @@ func (c *Client) addOperationDeleteBucketAnalyticsConfigurationMiddlewares(stack if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = 
addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpDeleteBucketAnalyticsConfigurationValidationMiddleware(stack); err != nil { @@ -135,6 +141,9 @@ func (c *Client) addOperationDeleteBucketAnalyticsConfigurationMiddlewares(stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addDeleteBucketAnalyticsConfigurationUpdateEndpoint(stack, options); err != nil { return err } @@ -150,14 +159,26 @@ func (c *Client) addOperationDeleteBucketAnalyticsConfigurationMiddlewares(stack if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *DeleteBucketAnalyticsConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opDeleteBucketAnalyticsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "DeleteBucketAnalyticsConfiguration", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go index 79045abe..9a544922 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketCors.go @@ -4,26 +4,23 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes the cors configuration information set for the bucket. To use this -// operation, you must have permission to perform the s3:PutBucketCORS action. The -// bucket owner has this permission by default and can grant this permission to -// others. For information about cors, see Enabling Cross-Origin Resource Sharing -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon S3 -// User Guide. Related Resources: -// -// * PutBucketCors -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) -// -// * -// RESTOPTIONSobject -// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html) +// This operation is not supported by directory buckets. Deletes the cors +// configuration information set for the bucket. To use this operation, you must +// have permission to perform the s3:PutBucketCORS action. The bucket owner has +// this permission by default and can grant this permission to others. For +// information about cors , see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) +// in the Amazon S3 User Guide. 
Related Resources +// - PutBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) +// - RESTOPTIONSobject (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html) func (c *Client) DeleteBucketCors(ctx context.Context, params *DeleteBucketCorsInput, optFns ...func(*Options)) (*DeleteBucketCorsOutput, error) { if params == nil { params = &DeleteBucketCorsInput{} @@ -46,14 +43,19 @@ type DeleteBucketCorsInput struct { // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *DeleteBucketCorsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type DeleteBucketCorsOutput struct { // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata @@ -62,6 +64,9 @@ type DeleteBucketCorsOutput struct { } func (c *Client) addOperationDeleteBucketCorsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketCors{}, middleware.After) if err != nil { return err @@ -70,6 +75,13 @@ func (c *Client) addOperationDeleteBucketCorsMiddlewares(stack *middleware.Stack if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketCors"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -88,16 +100,13 @@ func (c *Client) addOperationDeleteBucketCorsMiddlewares(stack *middleware.Stack if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -106,7 +115,10 @@ func (c *Client) addOperationDeleteBucketCorsMiddlewares(stack *middleware.Stack if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpDeleteBucketCorsValidationMiddleware(stack); err != nil { @@ -118,6 +130,9 @@ func (c *Client) addOperationDeleteBucketCorsMiddlewares(stack *middleware.Stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addDeleteBucketCorsUpdateEndpoint(stack, options); err != nil { return err } @@ 
-133,14 +148,26 @@ func (c *Client) addOperationDeleteBucketCorsMiddlewares(stack *middleware.Stack if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *DeleteBucketCorsInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opDeleteBucketCors(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "DeleteBucketCors", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go index 9c3201f6..0ef6f93e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketEncryption.go @@ -4,33 +4,29 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This implementation of the DELETE action removes default encryption from the -// bucket. For information about the Amazon S3 default encryption feature, see -// Amazon S3 Default Bucket Encryption -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) in the -// Amazon S3 User Guide. To use this operation, you must have permissions to +// This operation is not supported by directory buckets. This implementation of +// the DELETE action resets the default encryption for the bucket as server-side +// encryption with Amazon S3 managed keys (SSE-S3). For information about the +// bucket default encryption feature, see Amazon S3 Bucket Default Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) +// in the Amazon S3 User Guide. To use this operation, you must have permissions to // perform the s3:PutEncryptionConfiguration action. The bucket owner has this // permission by default. The bucket owner can grant this permission to others. For // more information about permissions, see Permissions Related to Bucket -// Subresource Operations -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to your Amazon S3 Resources -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. Related Resources -// -// * PutBucketEncryption -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html) -// -// * -// GetBucketEncryption -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html) +// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. 
The following operations are related to +// DeleteBucketEncryption : +// - PutBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html) +// - GetBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html) func (c *Client) DeleteBucketEncryption(ctx context.Context, params *DeleteBucketEncryptionInput, optFns ...func(*Options)) (*DeleteBucketEncryptionOutput, error) { if params == nil { params = &DeleteBucketEncryptionInput{} @@ -54,14 +50,19 @@ type DeleteBucketEncryptionInput struct { // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *DeleteBucketEncryptionInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type DeleteBucketEncryptionOutput struct { // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata @@ -70,6 +71,9 @@ type DeleteBucketEncryptionOutput struct { } func (c *Client) addOperationDeleteBucketEncryptionMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketEncryption{}, middleware.After) if err != nil { return err @@ -78,6 +82,13 @@ func (c *Client) addOperationDeleteBucketEncryptionMiddlewares(stack *middleware if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketEncryption"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -96,16 +107,13 @@ func (c *Client) addOperationDeleteBucketEncryptionMiddlewares(stack *middleware if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -114,7 +122,10 @@ func (c *Client) addOperationDeleteBucketEncryptionMiddlewares(stack *middleware if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpDeleteBucketEncryptionValidationMiddleware(stack); err != nil { @@ -126,6 +137,9 @@ func (c *Client) addOperationDeleteBucketEncryptionMiddlewares(stack *middleware if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = 
awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addDeleteBucketEncryptionUpdateEndpoint(stack, options); err != nil { return err } @@ -141,14 +155,26 @@ func (c *Client) addOperationDeleteBucketEncryptionMiddlewares(stack *middleware if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *DeleteBucketEncryptionInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opDeleteBucketEncryption(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "DeleteBucketEncryption", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go index 139dd78a..eeaefc5f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketIntelligentTieringConfiguration.go @@ -4,15 +4,18 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes the S3 Intelligent-Tiering configuration from the specified bucket. The -// S3 Intelligent-Tiering storage class is designed to optimize storage costs by +// This operation is not supported by directory buckets. Deletes the S3 +// Intelligent-Tiering configuration from the specified bucket. The S3 +// Intelligent-Tiering storage class is designed to optimize storage costs by // automatically moving data to the most cost-effective storage access tier, // without performance impact or operational overhead. S3 Intelligent-Tiering // delivers automatic cost savings in three low latency and high throughput access @@ -24,21 +27,11 @@ import ( // monitored and not eligible for auto-tiering. Smaller objects can be stored, but // they are always charged at the Frequent Access tier rates in the S3 // Intelligent-Tiering storage class. For more information, see Storage class for -// automatically optimizing frequently and infrequently accessed objects -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). 
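
The hunks above and below repeat one caller-visible pattern per operation: the per-operation signing middleware is replaced by protocol finalizers, and ExpectedBucketOwner mismatches are documented as failing with 403 Forbidden. A minimal caller-side sketch of one such operation, assuming only the public aws-sdk-go-v2 surface shown in this diff (the bucket name and account ID are placeholders):

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()

	// Standard v2 entry point: region and credentials from shared config.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// ExpectedBucketOwner is optional; per the updated doc comments, a
	// mismatch with the actual bucket owner fails with 403 Forbidden.
	_, err = client.DeleteBucketEncryption(ctx, &s3.DeleteBucketEncryptionInput{
		Bucket:              aws.String("my-example-bucket"), // placeholder bucket name
		ExpectedBucketOwner: aws.String("111122223333"),      // placeholder account ID
	})
	if err != nil {
		log.Fatal(err)
	}
}
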
-// Operations related to DeleteBucketIntelligentTieringConfiguration include: -// -// * -// GetBucketIntelligentTieringConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) -// -// * -// PutBucketIntelligentTieringConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) -// -// * -// ListBucketIntelligentTieringConfigurations -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +// automatically optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) +// . Operations related to DeleteBucketIntelligentTieringConfiguration include: +// - GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) +// - PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) +// - ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) func (c *Client) DeleteBucketIntelligentTieringConfiguration(ctx context.Context, params *DeleteBucketIntelligentTieringConfigurationInput, optFns ...func(*Options)) (*DeleteBucketIntelligentTieringConfigurationOutput, error) { if params == nil { params = &DeleteBucketIntelligentTieringConfigurationInput{} @@ -70,6 +63,11 @@ type DeleteBucketIntelligentTieringConfigurationInput struct { noSmithyDocumentSerde } +func (in *DeleteBucketIntelligentTieringConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type DeleteBucketIntelligentTieringConfigurationOutput struct { // Metadata pertaining to the operation's result. 
ResultMetadata middleware.Metadata @@ -78,6 +76,9 @@ type DeleteBucketIntelligentTieringConfigurationOutput struct { } func (c *Client) addOperationDeleteBucketIntelligentTieringConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketIntelligentTieringConfiguration{}, middleware.After) if err != nil { return err @@ -86,6 +87,13 @@ func (c *Client) addOperationDeleteBucketIntelligentTieringConfigurationMiddlewa if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketIntelligentTieringConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -104,16 +112,13 @@ func (c *Client) addOperationDeleteBucketIntelligentTieringConfigurationMiddlewa if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -122,7 +127,10 @@ func (c *Client) addOperationDeleteBucketIntelligentTieringConfigurationMiddlewa if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpDeleteBucketIntelligentTieringConfigurationValidationMiddleware(stack); err != nil { @@ -134,6 +142,9 @@ func (c *Client) addOperationDeleteBucketIntelligentTieringConfigurationMiddlewa if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addDeleteBucketIntelligentTieringConfigurationUpdateEndpoint(stack, options); err != nil { return err } @@ -149,14 +160,26 @@ func (c *Client) addOperationDeleteBucketIntelligentTieringConfigurationMiddlewa if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *DeleteBucketIntelligentTieringConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opDeleteBucketIntelligentTieringConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "DeleteBucketIntelligentTieringConfiguration", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go index 32fe81f1..28a93e0d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketInventoryConfiguration.go @@ -4,37 +4,27 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes an inventory configuration (identified by the inventory ID) from the -// bucket. To use this operation, you must have permissions to perform the -// s3:PutInventoryConfiguration action. The bucket owner has this permission by -// default. The bucket owner can grant this permission to others. For more -// information about permissions, see Permissions Related to Bucket Subresource -// Operations -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). -// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html). -// Operations related to DeleteBucketInventoryConfiguration include: -// -// * -// GetBucketInventoryConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) -// -// * -// PutBucketInventoryConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) -// -// * -// ListBucketInventoryConfigurations -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) +// This operation is not supported by directory buckets. Deletes an inventory +// configuration (identified by the inventory ID) from the bucket. To use this +// operation, you must have permissions to perform the s3:PutInventoryConfiguration +// action. The bucket owner has this permission by default. The bucket owner can +// grant this permission to others. For more information about permissions, see +// Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// . For information about the Amazon S3 inventory feature, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) +// . 
Operations related to DeleteBucketInventoryConfiguration include: +// - GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) +// - PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) +// - ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) func (c *Client) DeleteBucketInventoryConfiguration(ctx context.Context, params *DeleteBucketInventoryConfigurationInput, optFns ...func(*Options)) (*DeleteBucketInventoryConfigurationOutput, error) { if params == nil { params = &DeleteBucketInventoryConfigurationInput{} @@ -62,14 +52,19 @@ type DeleteBucketInventoryConfigurationInput struct { // This member is required. Id *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *DeleteBucketInventoryConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type DeleteBucketInventoryConfigurationOutput struct { // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata @@ -78,6 +73,9 @@ type DeleteBucketInventoryConfigurationOutput struct { } func (c *Client) addOperationDeleteBucketInventoryConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketInventoryConfiguration{}, middleware.After) if err != nil { return err @@ -86,6 +84,13 @@ func (c *Client) addOperationDeleteBucketInventoryConfigurationMiddlewares(stack if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketInventoryConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -104,16 +109,13 @@ func (c *Client) addOperationDeleteBucketInventoryConfigurationMiddlewares(stack if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -122,7 +124,10 @@ func (c *Client) addOperationDeleteBucketInventoryConfigurationMiddlewares(stack if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); 
err != nil { return err } if err = addOpDeleteBucketInventoryConfigurationValidationMiddleware(stack); err != nil { @@ -134,6 +139,9 @@ func (c *Client) addOperationDeleteBucketInventoryConfigurationMiddlewares(stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addDeleteBucketInventoryConfigurationUpdateEndpoint(stack, options); err != nil { return err } @@ -149,14 +157,26 @@ func (c *Client) addOperationDeleteBucketInventoryConfigurationMiddlewares(stack if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *DeleteBucketInventoryConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opDeleteBucketInventoryConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "DeleteBucketInventoryConfiguration", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go index c110bfb4..d7b8eb58 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketLifecycle.go @@ -4,32 +4,29 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes the lifecycle configuration from the specified bucket. Amazon S3 removes -// all the lifecycle configuration rules in the lifecycle subresource associated -// with the bucket. Your objects never expire, and Amazon S3 no longer -// automatically deletes any objects on the basis of rules contained in the deleted -// lifecycle configuration. To use this operation, you must have permission to -// perform the s3:PutLifecycleConfiguration action. By default, the bucket owner -// has this permission and the bucket owner can grant this permission to others. -// There is usually some time lag before lifecycle configuration deletion is fully +// This operation is not supported by directory buckets. Deletes the lifecycle +// configuration from the specified bucket. Amazon S3 removes all the lifecycle +// configuration rules in the lifecycle subresource associated with the bucket. +// Your objects never expire, and Amazon S3 no longer automatically deletes any +// objects on the basis of rules contained in the deleted lifecycle configuration. +// To use this operation, you must have permission to perform the +// s3:PutLifecycleConfiguration action. By default, the bucket owner has this +// permission and the bucket owner can grant this permission to others. There is +// usually some time lag before lifecycle configuration deletion is fully // propagated to all the Amazon S3 systems. 
For more information about the object -// expiration, see Elements to Describe Lifecycle Actions -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions). -// Related actions include: -// -// * PutBucketLifecycleConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) -// -// * -// GetBucketLifecycleConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) +// expiration, see Elements to Describe Lifecycle Actions (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions) +// . Related actions include: +// - PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) +// - GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) func (c *Client) DeleteBucketLifecycle(ctx context.Context, params *DeleteBucketLifecycleInput, optFns ...func(*Options)) (*DeleteBucketLifecycleOutput, error) { if params == nil { params = &DeleteBucketLifecycleInput{} @@ -52,14 +49,19 @@ type DeleteBucketLifecycleInput struct { // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *DeleteBucketLifecycleInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type DeleteBucketLifecycleOutput struct { // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata @@ -68,6 +70,9 @@ type DeleteBucketLifecycleOutput struct { } func (c *Client) addOperationDeleteBucketLifecycleMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketLifecycle{}, middleware.After) if err != nil { return err @@ -76,6 +81,13 @@ func (c *Client) addOperationDeleteBucketLifecycleMiddlewares(stack *middleware. if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketLifecycle"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -94,16 +106,13 @@ func (c *Client) addOperationDeleteBucketLifecycleMiddlewares(stack *middleware. 
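
Every operation in this patch also gains a bindEndpointParams method (feeding Bucket and UseS3ExpressControlEndpoint into endpoint resolution) and a nil-safe bucket() accessor. A reduced, stand-alone restatement of those two helpers, with the SDK types stubbed out so the sketch compiles on its own:

package main

import "fmt"

// EndpointParameters mirrors, in reduced form, the struct that the generated
// bindEndpointParams methods populate; only the two fields set in this diff
// are shown here.
type EndpointParameters struct {
	Bucket                      *string
	UseS3ExpressControlEndpoint *bool
}

// DeleteBucketLifecycleInput stands in for the generated input type.
type DeleteBucketLifecycleInput struct{ Bucket *string }

// bucket is the nil-safe accessor added per operation: endpoint resolution
// can ask for the bucket without dereferencing a possibly-nil pointer.
func (v *DeleteBucketLifecycleInput) bucket() (string, bool) {
	if v.Bucket == nil {
		return "", false
	}
	return *v.Bucket, true
}

func main() {
	in := &DeleteBucketLifecycleInput{}
	if name, ok := in.bucket(); ok {
		fmt.Println("route by bucket:", name)
	} else {
		fmt.Println("no bucket set; the validation middleware rejects the call")
	}
}
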
if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -112,7 +121,10 @@ func (c *Client) addOperationDeleteBucketLifecycleMiddlewares(stack *middleware. if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpDeleteBucketLifecycleValidationMiddleware(stack); err != nil { @@ -124,6 +136,9 @@ func (c *Client) addOperationDeleteBucketLifecycleMiddlewares(stack *middleware. if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addDeleteBucketLifecycleUpdateEndpoint(stack, options); err != nil { return err } @@ -139,14 +154,26 @@ func (c *Client) addOperationDeleteBucketLifecycleMiddlewares(stack *middleware. if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *DeleteBucketLifecycleInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opDeleteBucketLifecycle(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "DeleteBucketLifecycle", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go index cc08f04b..a6675a88 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketMetricsConfiguration.go @@ -4,43 +4,31 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes a metrics configuration for the Amazon CloudWatch request metrics -// (specified by the metrics configuration ID) from the bucket. Note that this -// doesn't include the daily storage metrics. To use this operation, you must have -// permissions to perform the s3:PutMetricsConfiguration action. The bucket owner -// has this permission by default. The bucket owner can grant this permission to -// others. 
For more information about permissions, see Permissions Related to -// Bucket Subresource Operations -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). -// For information about CloudWatch request metrics for Amazon S3, see Monitoring -// Metrics with Amazon CloudWatch -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). -// The following operations are related to DeleteBucketMetricsConfiguration: -// -// * -// GetBucketMetricsConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) -// -// * -// PutBucketMetricsConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) -// -// * -// ListBucketMetricsConfigurations -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) -// -// * -// Monitoring Metrics with Amazon CloudWatch -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) +// This operation is not supported by directory buckets. Deletes a metrics +// configuration for the Amazon CloudWatch request metrics (specified by the +// metrics configuration ID) from the bucket. Note that this doesn't include the +// daily storage metrics. To use this operation, you must have permissions to +// perform the s3:PutMetricsConfiguration action. The bucket owner has this +// permission by default. The bucket owner can grant this permission to others. For +// more information about permissions, see Permissions Related to Bucket +// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// . For information about CloudWatch request metrics for Amazon S3, see +// Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) +// . The following operations are related to DeleteBucketMetricsConfiguration : +// - GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) +// - PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) +// - ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) +// - Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) func (c *Client) DeleteBucketMetricsConfiguration(ctx context.Context, params *DeleteBucketMetricsConfigurationInput, optFns ...func(*Options)) (*DeleteBucketMetricsConfigurationOutput, error) { if params == nil { params = &DeleteBucketMetricsConfigurationInput{} @@ -63,19 +51,25 @@ type DeleteBucketMetricsConfigurationInput struct { // This member is required. Bucket *string - // The ID used to identify the metrics configuration. + // The ID used to identify the metrics configuration. The ID has a 64 character + // limit and can only contain letters, numbers, periods, dashes, and underscores. // // This member is required. Id *string - // The account ID of the expected bucket owner. 
If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *DeleteBucketMetricsConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type DeleteBucketMetricsConfigurationOutput struct { // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata @@ -84,6 +78,9 @@ type DeleteBucketMetricsConfigurationOutput struct { } func (c *Client) addOperationDeleteBucketMetricsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketMetricsConfiguration{}, middleware.After) if err != nil { return err @@ -92,6 +89,13 @@ func (c *Client) addOperationDeleteBucketMetricsConfigurationMiddlewares(stack * if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketMetricsConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -110,16 +114,13 @@ func (c *Client) addOperationDeleteBucketMetricsConfigurationMiddlewares(stack * if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -128,7 +129,10 @@ func (c *Client) addOperationDeleteBucketMetricsConfigurationMiddlewares(stack * if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpDeleteBucketMetricsConfigurationValidationMiddleware(stack); err != nil { @@ -140,6 +144,9 @@ func (c *Client) addOperationDeleteBucketMetricsConfigurationMiddlewares(stack * if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addDeleteBucketMetricsConfigurationUpdateEndpoint(stack, options); err != nil { return err } @@ -155,14 +162,26 @@ func (c *Client) addOperationDeleteBucketMetricsConfigurationMiddlewares(stack * if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *DeleteBucketMetricsConfigurationInput) 
bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opDeleteBucketMetricsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "DeleteBucketMetricsConfiguration", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go index 6186db5e..1b53e08e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketOwnershipControls.go @@ -4,25 +4,23 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Removes OwnershipControls for an Amazon S3 bucket. To use this operation, you -// must have the s3:PutBucketOwnershipControls permission. For more information -// about Amazon S3 permissions, see Specifying Permissions in a Policy -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). -// For information about Amazon S3 Object Ownership, see Using Object Ownership -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html). -// The following operations are related to DeleteBucketOwnershipControls: -// -// * -// GetBucketOwnershipControls -// -// * PutBucketOwnershipControls +// This operation is not supported by directory buckets. Removes OwnershipControls +// for an Amazon S3 bucket. To use this operation, you must have the +// s3:PutBucketOwnershipControls permission. For more information about Amazon S3 +// permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// . For information about Amazon S3 Object Ownership, see Using Object Ownership (https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html) +// . The following operations are related to DeleteBucketOwnershipControls : +// - GetBucketOwnershipControls +// - PutBucketOwnershipControls func (c *Client) DeleteBucketOwnershipControls(ctx context.Context, params *DeleteBucketOwnershipControlsInput, optFns ...func(*Options)) (*DeleteBucketOwnershipControlsOutput, error) { if params == nil { params = &DeleteBucketOwnershipControlsInput{} @@ -45,14 +43,19 @@ type DeleteBucketOwnershipControlsInput struct { // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *DeleteBucketOwnershipControlsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type DeleteBucketOwnershipControlsOutput struct { // Metadata pertaining to the operation's result. 
ResultMetadata middleware.Metadata @@ -61,6 +64,9 @@ type DeleteBucketOwnershipControlsOutput struct { } func (c *Client) addOperationDeleteBucketOwnershipControlsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketOwnershipControls{}, middleware.After) if err != nil { return err @@ -69,6 +75,13 @@ func (c *Client) addOperationDeleteBucketOwnershipControlsMiddlewares(stack *mid if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketOwnershipControls"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -87,16 +100,13 @@ func (c *Client) addOperationDeleteBucketOwnershipControlsMiddlewares(stack *mid if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -105,7 +115,10 @@ func (c *Client) addOperationDeleteBucketOwnershipControlsMiddlewares(stack *mid if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpDeleteBucketOwnershipControlsValidationMiddleware(stack); err != nil { @@ -117,6 +130,9 @@ func (c *Client) addOperationDeleteBucketOwnershipControlsMiddlewares(stack *mid if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addDeleteBucketOwnershipControlsUpdateEndpoint(stack, options); err != nil { return err } @@ -132,14 +148,26 @@ func (c *Client) addOperationDeleteBucketOwnershipControlsMiddlewares(stack *mid if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *DeleteBucketOwnershipControlsInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opDeleteBucketOwnershipControls(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "DeleteBucketOwnershipControls", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go index 618d9bed..0d8e77ff 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketPolicy.go @@ -4,35 +4,53 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This implementation of the DELETE action uses the policy subresource to delete -// the policy of a specified bucket. If you are using an identity other than the -// root user of the Amazon Web Services account that owns the bucket, the calling -// identity must have the DeleteBucketPolicy permissions on the specified bucket -// and belong to the bucket owner's account to use this operation. If you don't -// have DeleteBucketPolicy permissions, Amazon S3 returns a 403 Access Denied -// error. If you have the correct permissions, but you're not using an identity -// that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not -// Allowed error. As a security precaution, the root user of the Amazon Web -// Services account that owns a bucket can always use this operation, even if the -// policy explicitly denies the root user the ability to perform this action. For -// more information about bucket policies, see Using Bucket Policies and -// UserPolicies -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). The -// following operations are related to DeleteBucketPolicy +// Deletes the policy of a specified bucket. Directory buckets - For directory +// buckets, you must make requests for this API operation to the Regional endpoint. +// These endpoints support path-style requests in the format +// https://s3express-control.region_code.amazonaws.com/bucket-name . +// Virtual-hosted-style requests aren't supported. For more information, see +// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. Permissions If you are using an identity other than +// the root user of the Amazon Web Services account that owns the bucket, the +// calling identity must both have the DeleteBucketPolicy permissions on the +// specified bucket and belong to the bucket owner's account in order to use this +// operation. If you don't have DeleteBucketPolicy permissions, Amazon S3 returns +// a 403 Access Denied error. If you have the correct permissions, but you're not +// using an identity that belongs to the bucket owner's account, Amazon S3 returns +// a 405 Method Not Allowed error. To ensure that bucket owners don't +// inadvertently lock themselves out of their own buckets, the root principal in a +// bucket owner's Amazon Web Services account can perform the GetBucketPolicy , +// PutBucketPolicy , and DeleteBucketPolicy API actions, even if their bucket +// policy explicitly denies the root principal's access. Bucket owner root +// principals can only be blocked from performing these API actions by VPC endpoint +// policies and Amazon Web Services Organizations policies. +// - General purpose bucket permissions - The s3:DeleteBucketPolicy permission is +// required in a policy. For more information about general purpose buckets bucket +// policies, see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) +// in the Amazon S3 User Guide. 
+// - Directory bucket permissions - To grant access to this API operation, you +// must have the s3express:DeleteBucketPolicy permission in an IAM identity-based +// policy instead of a bucket policy. Cross-account access to this API operation +// isn't supported. This operation can only be performed by the Amazon Web Services +// account that owns the resource. For more information about directory bucket +// policies and permissions, see Amazon Web Services Identity and Access +// Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) +// in the Amazon S3 User Guide. // -// * CreateBucket -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// -// * -// DeleteObject -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// s3express-control.region.amazonaws.com . The following operations are related to +// DeleteBucketPolicy +// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) func (c *Client) DeleteBucketPolicy(ctx context.Context, params *DeleteBucketPolicyInput, optFns ...func(*Options)) (*DeleteBucketPolicyOutput, error) { if params == nil { params = &DeleteBucketPolicyInput{} @@ -50,19 +68,34 @@ func (c *Client) DeleteBucketPolicy(ctx context.Context, params *DeleteBucketPol type DeleteBucketPolicyInput struct { - // The bucket name. + // The bucket name. Directory buckets - When you use this operation with a + // directory bucket, you must use path-style requests in the format + // https://s3express-control.region_code.amazonaws.com/bucket-name . + // Virtual-hosted-style requests aren't supported. Directory bucket names must be + // unique in the chosen Availability Zone. Bucket names must also follow the format + // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 + // ). For information about bucket naming restrictions, see Directory bucket + // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide // // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). For directory buckets, this header + // is not supported in this API operation. If you specify this header, the request + // fails with the HTTP status code 501 Not Implemented . ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *DeleteBucketPolicyInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type DeleteBucketPolicyOutput struct { // Metadata pertaining to the operation's result. 
ResultMetadata middleware.Metadata @@ -71,6 +104,9 @@ type DeleteBucketPolicyOutput struct { } func (c *Client) addOperationDeleteBucketPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketPolicy{}, middleware.After) if err != nil { return err @@ -79,6 +115,13 @@ func (c *Client) addOperationDeleteBucketPolicyMiddlewares(stack *middleware.Sta if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketPolicy"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -97,16 +140,13 @@ func (c *Client) addOperationDeleteBucketPolicyMiddlewares(stack *middleware.Sta if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -115,7 +155,10 @@ func (c *Client) addOperationDeleteBucketPolicyMiddlewares(stack *middleware.Sta if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpDeleteBucketPolicyValidationMiddleware(stack); err != nil { @@ -127,6 +170,9 @@ func (c *Client) addOperationDeleteBucketPolicyMiddlewares(stack *middleware.Sta if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addDeleteBucketPolicyUpdateEndpoint(stack, options); err != nil { return err } @@ -142,14 +188,26 @@ func (c *Client) addOperationDeleteBucketPolicyMiddlewares(stack *middleware.Sta if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *DeleteBucketPolicyInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opDeleteBucketPolicy(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "DeleteBucketPolicy", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go index ad2d772d..7ac11a7b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketReplication.go @@ -4,33 +4,27 @@ package s3 
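
The addOperation*Middlewares functions being rewired here assemble the same smithy-go middleware.Stack that user code can hook. A sketch of how a caller observes these generated stacks from the outside, assuming the documented APIOptions mechanism and smithy-go middleware constructors (the middleware ID and log line are illustrative only):

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/smithy-go/middleware"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// APIOptions functions run against the per-operation stack after the
	// generated addOperation*Middlewares functions have populated it.
	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error {
			return stack.Initialize.Add(
				middleware.InitializeMiddlewareFunc("logOperation", func(
					ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
				) (middleware.InitializeOutput, middleware.Metadata, error) {
					log.Println("S3 operation starting")
					return next.HandleInitialize(ctx, in)
				}),
				middleware.Before,
			)
		})
	})
	_ = client
}
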
import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes the replication configuration from the bucket. To use this operation, -// you must have permissions to perform the s3:PutReplicationConfiguration action. -// The bucket owner has these permissions by default and can grant it to others. -// For more information about permissions, see Permissions Related to Bucket -// Subresource Operations -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). -// It can take a while for the deletion of a replication configuration to fully -// propagate. For information about replication configuration, see Replication -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) in the Amazon -// S3 User Guide. The following operations are related to -// DeleteBucketReplication: -// -// * PutBucketReplication -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html) -// -// * -// GetBucketReplication -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html) +// This operation is not supported by directory buckets. Deletes the replication +// configuration from the bucket. To use this operation, you must have permissions +// to perform the s3:PutReplicationConfiguration action. The bucket owner has +// these permissions by default and can grant it to others. For more information +// about permissions, see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// . It can take a while for the deletion of a replication configuration to fully +// propagate. For information about replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) +// in the Amazon S3 User Guide. The following operations are related to +// DeleteBucketReplication : +// - PutBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html) +// - GetBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html) func (c *Client) DeleteBucketReplication(ctx context.Context, params *DeleteBucketReplicationInput, optFns ...func(*Options)) (*DeleteBucketReplicationOutput, error) { if params == nil { params = &DeleteBucketReplicationInput{} @@ -53,14 +47,19 @@ type DeleteBucketReplicationInput struct { // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). 
ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *DeleteBucketReplicationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type DeleteBucketReplicationOutput struct { // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata @@ -69,6 +68,9 @@ type DeleteBucketReplicationOutput struct { } func (c *Client) addOperationDeleteBucketReplicationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketReplication{}, middleware.After) if err != nil { return err @@ -77,6 +79,13 @@ func (c *Client) addOperationDeleteBucketReplicationMiddlewares(stack *middlewar if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketReplication"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -95,16 +104,13 @@ func (c *Client) addOperationDeleteBucketReplicationMiddlewares(stack *middlewar if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -113,7 +119,10 @@ func (c *Client) addOperationDeleteBucketReplicationMiddlewares(stack *middlewar if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpDeleteBucketReplicationValidationMiddleware(stack); err != nil { @@ -125,6 +134,9 @@ func (c *Client) addOperationDeleteBucketReplicationMiddlewares(stack *middlewar if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addDeleteBucketReplicationUpdateEndpoint(stack, options); err != nil { return err } @@ -140,14 +152,26 @@ func (c *Client) addOperationDeleteBucketReplicationMiddlewares(stack *middlewar if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *DeleteBucketReplicationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opDeleteBucketReplication(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "DeleteBucketReplication", } } diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go index 063f0bc5..5a75a1f4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketTagging.go @@ -4,24 +4,22 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Deletes the tags from the bucket. To use this operation, you must have -// permission to perform the s3:PutBucketTagging action. By default, the bucket -// owner has this permission and can grant this permission to others. The following -// operations are related to DeleteBucketTagging: -// -// * GetBucketTagging -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) -// -// * -// PutBucketTagging -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html) +// This operation is not supported by directory buckets. Deletes the tags from the +// bucket. To use this operation, you must have permission to perform the +// s3:PutBucketTagging action. By default, the bucket owner has this permission and +// can grant this permission to others. The following operations are related to +// DeleteBucketTagging : +// - GetBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) +// - PutBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html) func (c *Client) DeleteBucketTagging(ctx context.Context, params *DeleteBucketTaggingInput, optFns ...func(*Options)) (*DeleteBucketTaggingOutput, error) { if params == nil { params = &DeleteBucketTaggingInput{} @@ -44,14 +42,19 @@ type DeleteBucketTaggingInput struct { // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *DeleteBucketTaggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type DeleteBucketTaggingOutput struct { // Metadata pertaining to the operation's result. 
ResultMetadata middleware.Metadata @@ -60,6 +63,9 @@ type DeleteBucketTaggingOutput struct { } func (c *Client) addOperationDeleteBucketTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketTagging{}, middleware.After) if err != nil { return err @@ -68,6 +74,13 @@ func (c *Client) addOperationDeleteBucketTaggingMiddlewares(stack *middleware.St if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketTagging"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -86,16 +99,13 @@ func (c *Client) addOperationDeleteBucketTaggingMiddlewares(stack *middleware.St if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -104,7 +114,10 @@ func (c *Client) addOperationDeleteBucketTaggingMiddlewares(stack *middleware.St if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpDeleteBucketTaggingValidationMiddleware(stack); err != nil { @@ -116,6 +129,9 @@ func (c *Client) addOperationDeleteBucketTaggingMiddlewares(stack *middleware.St if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addDeleteBucketTaggingUpdateEndpoint(stack, options); err != nil { return err } @@ -131,14 +147,26 @@ func (c *Client) addOperationDeleteBucketTaggingMiddlewares(stack *middleware.St if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *DeleteBucketTaggingInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opDeleteBucketTagging(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "DeleteBucketTagging", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go index 7eb72b86..dbe84fbb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteBucketWebsite.go @@ -4,33 +4,29 @@ package s3 import ( 
"context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This action removes the website configuration for a bucket. Amazon S3 returns a -// 200 OK response upon successfully deleting a website configuration on the -// specified bucket. You will get a 200 OK response if the website configuration -// you are trying to delete does not exist on the bucket. Amazon S3 returns a 404 -// response if the bucket specified in the request does not exist. This DELETE -// action requires the S3:DeleteBucketWebsite permission. By default, only the -// bucket owner can delete the website configuration attached to a bucket. However, -// bucket owners can grant other users permission to delete the website -// configuration by writing a bucket policy granting them the -// S3:DeleteBucketWebsite permission. For more information about hosting websites, -// see Hosting Websites on Amazon S3 -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). The -// following operations are related to DeleteBucketWebsite: -// -// * GetBucketWebsite -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketWebsite.html) -// -// * -// PutBucketWebsite -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html) +// This operation is not supported by directory buckets. This action removes the +// website configuration for a bucket. Amazon S3 returns a 200 OK response upon +// successfully deleting a website configuration on the specified bucket. You will +// get a 200 OK response if the website configuration you are trying to delete +// does not exist on the bucket. Amazon S3 returns a 404 response if the bucket +// specified in the request does not exist. This DELETE action requires the +// S3:DeleteBucketWebsite permission. By default, only the bucket owner can delete +// the website configuration attached to a bucket. However, bucket owners can grant +// other users permission to delete the website configuration by writing a bucket +// policy granting them the S3:DeleteBucketWebsite permission. For more +// information about hosting websites, see Hosting Websites on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) +// . The following operations are related to DeleteBucketWebsite : +// - GetBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketWebsite.html) +// - PutBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html) func (c *Client) DeleteBucketWebsite(ctx context.Context, params *DeleteBucketWebsiteInput, optFns ...func(*Options)) (*DeleteBucketWebsiteOutput, error) { if params == nil { params = &DeleteBucketWebsiteInput{} @@ -53,14 +49,19 @@ type DeleteBucketWebsiteInput struct { // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). 
ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *DeleteBucketWebsiteInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type DeleteBucketWebsiteOutput struct { // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata @@ -69,6 +70,9 @@ type DeleteBucketWebsiteOutput struct { } func (c *Client) addOperationDeleteBucketWebsiteMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteBucketWebsite{}, middleware.After) if err != nil { return err @@ -77,6 +81,13 @@ func (c *Client) addOperationDeleteBucketWebsiteMiddlewares(stack *middleware.St if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBucketWebsite"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -95,16 +106,13 @@ func (c *Client) addOperationDeleteBucketWebsiteMiddlewares(stack *middleware.St if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -113,7 +121,10 @@ func (c *Client) addOperationDeleteBucketWebsiteMiddlewares(stack *middleware.St if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpDeleteBucketWebsiteValidationMiddleware(stack); err != nil { @@ -125,6 +136,9 @@ func (c *Client) addOperationDeleteBucketWebsiteMiddlewares(stack *middleware.St if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addDeleteBucketWebsiteUpdateEndpoint(stack, options); err != nil { return err } @@ -140,14 +154,26 @@ func (c *Client) addOperationDeleteBucketWebsiteMiddlewares(stack *middleware.St if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *DeleteBucketWebsiteInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opDeleteBucketWebsite(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "DeleteBucketWebsite", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go index 4b4f1212..cd00e599 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go @@ -4,6 +4,7 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" @@ -12,30 +13,70 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Removes the null version (if there is one) of an object and inserts a delete -// marker, which becomes the latest version of the object. If there isn't a null -// version, Amazon S3 does not remove any objects but will still respond that the -// command was successful. To remove a specific version, you must be the bucket -// owner and you must use the version Id subresource. Using this subresource -// permanently deletes the version. If the object deleted is a delete marker, -// Amazon S3 sets the response header, x-amz-delete-marker, to true. If the object -// you want to delete is in a bucket where the bucket versioning configuration is -// MFA Delete enabled, you must include the x-amz-mfa request header in the DELETE -// versionId request. Requests that include x-amz-mfa must use HTTPS. For more -// information about MFA Delete, see Using MFA Delete -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html). To see -// sample requests that use versioning, see Sample Request -// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete). -// You can delete objects by explicitly calling DELETE Object or configure its -// lifecycle (PutBucketLifecycle -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html)) -// to enable Amazon S3 to remove them for you. If you want to block users or +// Removes an object from a bucket. The behavior depends on the bucket's +// versioning state: +// +// - If versioning is enabled, the operation removes the null version (if there +// is one) of an object and inserts a delete marker, which becomes the latest +// version of the object. If there isn't a null version, Amazon S3 does not remove +// any objects but will still respond that the command was successful. +// +// - If versioning is suspended or not enabled, the operation permanently +// deletes the object. +// +// - Directory buckets - S3 Versioning isn't enabled and supported for directory +// buckets. For this API operation, only the null value of the version ID is +// supported by directory buckets. You can only specify null to the versionId +// query parameter in the request. +// +// - Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support +// virtual-hosted-style requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . +// Path-style requests are not supported. For more information, see Regional and +// Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// To remove a specific version, you must use the versionId query parameter. Using +// this query parameter permanently deletes the version. If the object deleted is a +// delete marker, Amazon S3 sets the response header x-amz-delete-marker to true. 
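The delete-marker behavior this doc comment describes is easiest to see from calling code. Below is a minimal sketch, not part of this patch, assuming the standard aws-sdk-go-v2 `config` and `aws` helper packages; the bucket and key names are hypothetical placeholders:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// A simple DELETE without a VersionId: on a versioning-enabled bucket
	// this inserts a delete marker rather than removing any data.
	out, err := client.DeleteObject(ctx, &s3.DeleteObjectInput{
		Bucket: aws.String("example-bucket"), // hypothetical
		Key:    aws.String("example-key"),    // hypothetical
	})
	if err != nil {
		log.Fatal(err)
	}
	// DeleteMarker becomes a *bool in this patch's pointer migration,
	// hence the aws.To* dereference helpers.
	log.Printf("delete marker: %v, version: %q",
		aws.ToBool(out.DeleteMarker), aws.ToString(out.VersionId))
}
```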
+// If the object you want to delete is in a bucket where the bucket versioning +// configuration is MFA Delete enabled, you must include the x-amz-mfa request +// header in the DELETE versionId request. Requests that include x-amz-mfa must +// use HTTPS. For more information about MFA Delete, see Using MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html) +// in the Amazon S3 User Guide. To see sample requests that use versioning, see +// Sample Request (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete) +// . Directory buckets - MFA delete is not supported by directory buckets. You can +// delete objects by explicitly calling DELETE Object or calling ( +// PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html) +// ) to enable Amazon S3 to remove them for you. If you want to block users or // accounts from removing or deleting objects from your bucket, you must deny them -// the s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration -// actions. The following action is related to DeleteObject: +// the s3:DeleteObject , s3:DeleteObjectVersion , and s3:PutLifeCycleConfiguration +// actions. Directory buckets - S3 Lifecycle is not supported by directory buckets. +// Permissions +// - General purpose bucket permissions - The following permissions are required +// in your policies when your DeleteObjects request includes specific headers. +// - s3:DeleteObject - To delete an object from a bucket, you must always have +// the s3:DeleteObject permission. +// - s3:DeleteObjectVersion - To delete a specific version of an object from a +// versioning-enabled bucket, you must have the s3:DeleteObjectVersion permission. +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. The Amazon Web Services CLI or SDKs create a session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// . // -// * PutObject -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . The following action is +// related to DeleteObject : +// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) func (c *Client) DeleteObject(ctx context.Context, params *DeleteObjectInput, optFns ...func(*Options)) (*DeleteObjectOutput, error) { if params == nil { params = &DeleteObjectInput{} @@ -53,23 +94,31 @@ func (c *Client) DeleteObject(ctx context.Context, params *DeleteObjectInput, op type DeleteObjectInput struct { - // The bucket name of the bucket containing the object.
When using this action with - // an access point, you must direct requests to the access point hostname. The - // access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, - // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts - // hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the - // Amazon S3 User Guide. + // The bucket name of the bucket containing the object. Directory buckets - When + // you use this operation with a directory bucket, you must use + // virtual-hosted-style requests in the format + // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + // supported. Directory bucket names must be unique in the chosen Availability + // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket + // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + // use this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts access point ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // This member is required. Bucket *string @@ -81,45 +130,60 @@ type DeleteObjectInput struct { // Indicates whether S3 Object Lock should bypass Governance-mode restrictions to // process this operation. 
To use this header, you must have the - // s3:BypassGovernanceRetention permission. - BypassGovernanceRetention bool + // s3:BypassGovernanceRetention permission. This functionality is not supported for + // directory buckets. + BypassGovernanceRetention *bool - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string - // The concatenation of the authentication device's serial number, a space, and the - // value that is displayed on your authentication device. Required to permanently - // delete a versioned object if versioning is configured with MFA delete enabled. + // The concatenation of the authentication device's serial number, a space, and + // the value that is displayed on your authentication device. Required to + // permanently delete a versioned object if versioning is configured with MFA + // delete enabled. This functionality is not supported for directory buckets. MFA *string // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer - // VersionId used to reference a specific version of the object. + // Version ID used to reference a specific version of the object. For directory + // buckets in this API operation, only the null value of the version ID is + // supported. VersionId *string noSmithyDocumentSerde } +func (in *DeleteObjectInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.Key = in.Key + +} + type DeleteObjectOutput struct { - // Specifies whether the versioned object that was permanently deleted was (true) - // or was not (false) a delete marker. - DeleteMarker bool + // Indicates whether the specified object version that was permanently deleted was + // (true) or was not (false) a delete marker before deletion. In a simple DELETE, + // this header indicates whether (true) or not (false) the current version of the + // object is a delete marker. This functionality is not supported for directory + // buckets. + DeleteMarker *bool // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Returns the version ID of the delete marker created as a result of the DELETE - // operation. + // operation. 
This functionality is not supported for directory buckets. VersionId *string // Metadata pertaining to the operation's result. @@ -129,6 +193,9 @@ type DeleteObjectOutput struct { } func (c *Client) addOperationDeleteObjectMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteObject{}, middleware.After) if err != nil { return err @@ -137,6 +204,13 @@ func (c *Client) addOperationDeleteObjectMiddlewares(stack *middleware.Stack, op if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteObject"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -155,16 +229,13 @@ func (c *Client) addOperationDeleteObjectMiddlewares(stack *middleware.Stack, op if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -173,7 +244,10 @@ func (c *Client) addOperationDeleteObjectMiddlewares(stack *middleware.Stack, op if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpDeleteObjectValidationMiddleware(stack); err != nil { @@ -185,6 +259,9 @@ func (c *Client) addOperationDeleteObjectMiddlewares(stack *middleware.Stack, op if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addDeleteObjectUpdateEndpoint(stack, options); err != nil { return err } @@ -200,14 +277,26 @@ func (c *Client) addOperationDeleteObjectMiddlewares(stack *middleware.Stack, op if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *DeleteObjectInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opDeleteObject(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "DeleteObject", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go index 0d1bb739..961b9459 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjectTagging.go @@ 
-4,6 +4,7 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" @@ -11,21 +12,16 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Removes the entire tag set from the specified object. For more information about -// managing object tags, see Object Tagging -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). To use -// this operation, you must have permission to perform the s3:DeleteObjectTagging -// action. To delete tags of a specific object version, add the versionId query -// parameter in the request. You will need permission for the +// This operation is not supported by directory buckets. Removes the entire tag +// set from the specified object. For more information about managing object tags, +// see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html) +// . To use this operation, you must have permission to perform the +// s3:DeleteObjectTagging action. To delete tags of a specific object version, add +// the versionId query parameter in the request. You will need permission for the // s3:DeleteObjectVersionTagging action. The following operations are related to -// DeleteBucketMetricsConfiguration: -// -// * PutObjectTagging -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html) -// -// * -// GetObjectTagging -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) +// DeleteObjectTagging : +// - PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html) +// - GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) func (c *Client) DeleteObjectTagging(ctx context.Context, params *DeleteObjectTaggingInput, optFns ...func(*Options)) (*DeleteObjectTaggingOutput, error) { if params == nil { params = &DeleteObjectTaggingInput{} @@ -43,23 +39,23 @@ func (c *Client) DeleteObjectTagging(ctx context.Context, params *DeleteObjectTa type DeleteObjectTaggingInput struct { - // The bucket name containing the objects from which to remove the tags. When using - // this action with an access point, you must direct requests to the access point - // hostname. The access point hostname takes the form + // The bucket name containing the objects from which to remove the tags. Access + // points - When you use this action with an access point, you must provide the + // alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the + // access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, - // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts - // hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. 
When using - // this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the - // Amazon S3 User Guide. + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. S3 on Outposts - When you use this action with + // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. + // The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + // use this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts access point ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // This member is required. Bucket *string @@ -69,9 +65,9 @@ type DeleteObjectTaggingInput struct { // This member is required. Key *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // The versionId of the object that the tag-set will be removed from. @@ -80,6 +76,11 @@ type DeleteObjectTaggingInput struct { noSmithyDocumentSerde } +func (in *DeleteObjectTaggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + +} + type DeleteObjectTaggingOutput struct { // The versionId of the object the tag-set was removed from. 
@@ -92,6 +93,9 @@ type DeleteObjectTaggingOutput struct { } func (c *Client) addOperationDeleteObjectTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteObjectTagging{}, middleware.After) if err != nil { return err @@ -100,6 +104,13 @@ func (c *Client) addOperationDeleteObjectTaggingMiddlewares(stack *middleware.St if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteObjectTagging"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -118,16 +129,13 @@ func (c *Client) addOperationDeleteObjectTaggingMiddlewares(stack *middleware.St if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -136,7 +144,10 @@ func (c *Client) addOperationDeleteObjectTaggingMiddlewares(stack *middleware.St if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpDeleteObjectTaggingValidationMiddleware(stack); err != nil { @@ -148,6 +159,9 @@ func (c *Client) addOperationDeleteObjectTaggingMiddlewares(stack *middleware.St if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addDeleteObjectTaggingUpdateEndpoint(stack, options); err != nil { return err } @@ -163,14 +177,26 @@ func (c *Client) addOperationDeleteObjectTaggingMiddlewares(stack *middleware.St if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *DeleteObjectTaggingInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opDeleteObjectTagging(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "DeleteObjectTagging", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go index 50078da5..1d1fa432 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go @@ -4,6 +4,7 @@ package s3 import ( "context" + "fmt" awsmiddleware 
"github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" @@ -13,51 +14,77 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This action enables you to delete multiple objects from a bucket using a single -// HTTP request. If you know the object keys that you want to delete, then this -// action provides a suitable alternative to sending individual delete requests, -// reducing per-request overhead. The request contains a list of up to 1000 keys -// that you want to delete. In the XML, you provide the object key names, and -// optionally, version IDs if you want to delete a specific version of the object -// from a versioning-enabled bucket. For each key, Amazon S3 performs a delete -// action and returns the result of that delete, success, or failure, in the -// response. Note that if the object specified in the request is not found, Amazon -// S3 returns the result as deleted. The action supports two modes for the -// response: verbose and quiet. By default, the action uses verbose mode in which -// the response includes the result of deletion of each key in your request. In -// quiet mode the response includes only keys where the delete action encountered -// an error. For a successful deletion, the action does not return any information -// about the delete in the response body. When performing this action on an MFA -// Delete enabled bucket, that attempts to delete any versioned objects, you must -// include an MFA token. If you do not provide one, the entire request will fail, -// even if there are non-versioned objects you are trying to delete. If you provide -// an invalid token, whether there are versioned keys in the request or not, the -// entire Multi-Object Delete request will fail. For information about MFA Delete, -// see MFA Delete -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete). -// Finally, the Content-MD5 header is required for all Multi-Object Delete -// requests. Amazon S3 uses the header value to ensure that your request body has -// not been altered in transit. The following operations are related to -// DeleteObjects: +// This operation enables you to delete multiple objects from a bucket using a +// single HTTP request. If you know the object keys that you want to delete, then +// this operation provides a suitable alternative to sending individual delete +// requests, reducing per-request overhead. The request can contain a list of up to +// 1000 keys that you want to delete. In the XML, you provide the object key names, +// and optionally, version IDs if you want to delete a specific version of the +// object from a versioning-enabled bucket. For each key, Amazon S3 performs a +// delete operation and returns the result of that delete, success or failure, in +// the response. Note that if the object specified in the request is not found, +// Amazon S3 returns the result as deleted. +// - Directory buckets - S3 Versioning isn't enabled and supported for directory +// buckets. +// - Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support +// virtual-hosted-style requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . +// Path-style requests are not supported. 
For more information, see Regional and +// Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. // -// * CreateMultipartUpload -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// The operation supports two modes for the response: verbose and quiet. By +// default, the operation uses verbose mode in which the response includes the +// result of deletion of each key in your request. In quiet mode the response +// includes only keys where the delete operation encountered an error. For a +// successful deletion in quiet mode, the operation does not return any +// information about the delete in the response body. When performing this action +// on an MFA Delete enabled bucket, where the request attempts to delete any versioned objects, +// you must include an MFA token. If you do not provide one, the entire request +// will fail, even if there are non-versioned objects you are trying to delete. If +// you provide an invalid token, whether there are versioned keys in the request or +// not, the entire Multi-Object Delete request will fail. For information about MFA +// Delete, see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete) +// in the Amazon S3 User Guide. Directory buckets - MFA delete is not supported by +// directory buckets. Permissions +// - General purpose bucket permissions - The following permissions are required +// in your policies when your DeleteObjects request includes specific headers. +// - s3:DeleteObject - To delete an object from a bucket, you must always specify +// the s3:DeleteObject permission. +// - s3:DeleteObjectVersion - To delete a specific version of an object from a +// versioning-enabled bucket, you must specify the s3:DeleteObjectVersion +// permission. +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. The Amazon Web Services CLI or SDKs create a session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// . // -// * -// UploadPart -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// Content-MD5 request header +// - General purpose bucket - The Content-MD5 request header is required for all +// Multi-Object Delete requests. Amazon S3 uses the header value to ensure that +// your request body has not been altered in transit. +// - Directory bucket - The Content-MD5 request header or an additional checksum +// request header (including x-amz-checksum-crc32 , x-amz-checksum-crc32c , +// x-amz-checksum-sha1 , or x-amz-checksum-sha256 ) is required for all +// Multi-Object Delete requests.
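On the checksum requirement just described: setting ChecksumAlgorithm on the input lets the SDK compute and attach the corresponding x-amz-checksum-* value instead of a hand-supplied Content-MD5. A sketch under the same assumptions as the earlier examples (with types imported from github.com/aws/aws-sdk-go-v2/service/s3/types; Quiet being a *bool matches the pointer migration visible in this patch):

```go
// deleteBatch issues one Multi-Object Delete for up to 1000 keys. Quiet mode
// trims the response down to failures only, as described above.
func deleteBatch(ctx context.Context, client *s3.Client, bucket string, keys []string) (*s3.DeleteObjectsOutput, error) {
	ids := make([]types.ObjectIdentifier, 0, len(keys))
	for _, k := range keys {
		ids = append(ids, types.ObjectIdentifier{Key: aws.String(k)})
	}
	return client.DeleteObjects(ctx, &s3.DeleteObjectsInput{
		Bucket:            aws.String(bucket),
		Delete:            &types.Delete{Objects: ids, Quiet: aws.Bool(true)},
		ChecksumAlgorithm: types.ChecksumAlgorithmCrc32,
	})
}
```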
// -// * -// CompleteMultipartUpload -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) -// -// * -// ListParts -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// -// * -// AbortMultipartUpload -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are +// related to DeleteObjects : +// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) func (c *Client) DeleteObjects(ctx context.Context, params *DeleteObjectsInput, optFns ...func(*Options)) (*DeleteObjectsOutput, error) { if params == nil { params = &DeleteObjectsInput{} @@ -75,23 +102,31 @@ func (c *Client) DeleteObjects(ctx context.Context, params *DeleteObjectsInput, type DeleteObjectsInput struct { - // The bucket name containing the objects to delete. When using this action with an - // access point, you must direct requests to the access point hostname. The access - // point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, - // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts - // hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the - // Amazon S3 User Guide. + // The bucket name containing the objects to delete. Directory buckets - When you + // use this operation with a directory bucket, you must use virtual-hosted-style + // requests in the format Bucket_name.s3express-az_id.region.amazonaws.com . + // Path-style requests are not supported. Directory bucket names must be unique in + // the chosen Availability Zone. Bucket names must follow the format + // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 + // ). For information about bucket naming restrictions, see Directory bucket + // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. 
The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + // use this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts access point ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // This member is required. Bucket *string @@ -103,43 +138,67 @@ type DeleteObjectsInput struct { // Specifies whether you want to delete this object even if it has a // Governance-type Object Lock in place. To use this header, you must have the - // s3:BypassGovernanceRetention permission. - BypassGovernanceRetention bool + // s3:BypassGovernanceRetention permission. This functionality is not supported for + // directory buckets. + BypassGovernanceRetention *bool - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not using - // the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the - // HTTP status code 400 Bad Request. For more information, see Checking object - // integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. This checksum algorithm must - // be the same for all parts and it match the checksum value supplied in the - // CreateMultipartUpload request. + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 + // fails the request with the HTTP status code 400 Bad Request . For the + // x-amz-checksum-algorithm header, replace algorithm with the supported + // algorithm from the following list: + // - CRC32 + // - CRC32C + // - SHA1 + // - SHA256 + // For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If the individual checksum value you provide + // through x-amz-checksum-algorithm doesn't match the checksum algorithm you set + // through x-amz-sdk-checksum-algorithm , Amazon S3 ignores any provided + // ChecksumAlgorithm parameter and uses the checksum algorithm that matches the + // provided value in x-amz-checksum-algorithm . 
If you provide an individual + // checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter. ChecksumAlgorithm types.ChecksumAlgorithm - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string - // The concatenation of the authentication device's serial number, a space, and the - // value that is displayed on your authentication device. Required to permanently - // delete a versioned object if versioning is configured with MFA delete enabled. + // The concatenation of the authentication device's serial number, a space, and + // the value that is displayed on your authentication device. Required to + // permanently delete a versioned object if versioning is configured with MFA + // delete enabled. When performing the DeleteObjects operation on an MFA delete + // enabled bucket, which attempts to delete the specified versioned objects, you + // must include an MFA token. If you don't provide an MFA token, the entire request + // will fail, even if there are non-versioned objects that you are trying to + // delete. If you provide an invalid token, whether there are versioned object keys + // in the request or not, the entire Multi-Object Delete request will fail. For + // information about MFA Delete, see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. MFA *string // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer noSmithyDocumentSerde } +func (in *DeleteObjectsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + +} + type DeleteObjectsOutput struct { // Container element for a successful delete. It identifies the object that was @@ -151,7 +210,7 @@ type DeleteObjectsOutput struct { Errors []types.Error // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. 
@@ -161,6 +220,9 @@ type DeleteObjectsOutput struct { } func (c *Client) addOperationDeleteObjectsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpDeleteObjects{}, middleware.After) if err != nil { return err @@ -169,6 +231,13 @@ func (c *Client) addOperationDeleteObjectsMiddlewares(stack *middleware.Stack, o if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteObjects"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -187,16 +256,13 @@ func (c *Client) addOperationDeleteObjectsMiddlewares(stack *middleware.Stack, o if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -205,7 +271,10 @@ func (c *Client) addOperationDeleteObjectsMiddlewares(stack *middleware.Stack, o if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpDeleteObjectsValidationMiddleware(stack); err != nil { @@ -217,6 +286,9 @@ func (c *Client) addOperationDeleteObjectsMiddlewares(stack *middleware.Stack, o if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addDeleteObjectsInputChecksumMiddlewares(stack, options); err != nil { return err } @@ -235,20 +307,35 @@ func (c *Client) addOperationDeleteObjectsMiddlewares(stack *middleware.Stack, o if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } return nil } +func (v *DeleteObjectsInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opDeleteObjects(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "DeleteObjects", } } -// getDeleteObjectsRequestAlgorithmMember gets the request checksum algorithm value -// provided as input. +// getDeleteObjectsRequestAlgorithmMember gets the request checksum algorithm +// value provided as input. 
func getDeleteObjectsRequestAlgorithmMember(input interface{}) (string, bool) { in := input.(*DeleteObjectsInput) if len(in.ChecksumAlgorithm) == 0 { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go index 3defd538..488d2a79 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeletePublicAccessBlock.go @@ -4,37 +4,26 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use this -// operation, you must have the s3:PutBucketPublicAccessBlock permission. For more +// This operation is not supported by directory buckets. Removes the +// PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, +// you must have the s3:PutBucketPublicAccessBlock permission. For more // information about permissions, see Permissions Related to Bucket Subresource -// Operations -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). -// The following operations are related to DeletePublicAccessBlock: -// -// * Using Amazon -// S3 Block Public Access -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) -// -// * -// GetPublicAccessBlock -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) -// -// * -// PutPublicAccessBlock -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) -// -// * -// GetBucketPolicyStatus -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html) +// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// . The following operations are related to DeletePublicAccessBlock : +// - Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// - GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// - PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) +// - GetBucketPolicyStatus (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html) func (c *Client) DeletePublicAccessBlock(ctx context.Context, params *DeletePublicAccessBlockInput, optFns ...func(*Options)) (*DeletePublicAccessBlockOutput, error) { if params == nil { params = &DeletePublicAccessBlockInput{} @@ -57,14 +46,19 @@ type DeletePublicAccessBlockInput struct { // This member is required. Bucket *string - // The account ID of the expected bucket owner. 
If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *DeletePublicAccessBlockInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type DeletePublicAccessBlockOutput struct { // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata @@ -73,6 +67,9 @@ type DeletePublicAccessBlockOutput struct { } func (c *Client) addOperationDeletePublicAccessBlockMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpDeletePublicAccessBlock{}, middleware.After) if err != nil { return err @@ -81,6 +78,13 @@ func (c *Client) addOperationDeletePublicAccessBlockMiddlewares(stack *middlewar if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeletePublicAccessBlock"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -99,16 +103,13 @@ func (c *Client) addOperationDeletePublicAccessBlockMiddlewares(stack *middlewar if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -117,7 +118,10 @@ func (c *Client) addOperationDeletePublicAccessBlockMiddlewares(stack *middlewar if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpDeletePublicAccessBlockValidationMiddleware(stack); err != nil { @@ -129,6 +133,9 @@ func (c *Client) addOperationDeletePublicAccessBlockMiddlewares(stack *middlewar if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addDeletePublicAccessBlockUpdateEndpoint(stack, options); err != nil { return err } @@ -144,14 +151,26 @@ func (c *Client) addOperationDeletePublicAccessBlockMiddlewares(stack *middlewar if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *DeletePublicAccessBlockInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return 
*v.Bucket, true +} + func newServiceMetadataMiddleware_opDeletePublicAccessBlock(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "DeletePublicAccessBlock", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go index d1690f3e..9a90a88a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go @@ -4,38 +4,36 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This implementation of the GET action uses the accelerate subresource to return -// the Transfer Acceleration state of a bucket, which is either Enabled or -// Suspended. Amazon S3 Transfer Acceleration is a bucket-level feature that -// enables you to perform faster data transfers to and from Amazon S3. To use this -// operation, you must have permission to perform the s3:GetAccelerateConfiguration -// action. The bucket owner has this permission by default. The bucket owner can -// grant this permission to others. For more information about permissions, see -// Permissions Related to Bucket Subresource Operations -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to your Amazon S3 Resources -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// This operation is not supported by directory buckets. This implementation of +// the GET action uses the accelerate subresource to return the Transfer +// Acceleration state of a bucket, which is either Enabled or Suspended . Amazon S3 +// Transfer Acceleration is a bucket-level feature that enables you to perform +// faster data transfers to and from Amazon S3. To use this operation, you must +// have permission to perform the s3:GetAccelerateConfiguration action. The bucket +// owner has this permission by default. The bucket owner can grant this permission +// to others. For more information about permissions, see Permissions Related to +// Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) // in the Amazon S3 User Guide. You set the Transfer Acceleration state of an // existing bucket to Enabled or Suspended by using the -// PutBucketAccelerateConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html) +// PutBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html) // operation. A GET accelerate request does not return a state value for a bucket // that has no transfer acceleration state. 
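Assuming the client, ctx, and imports from the DeleteObjects sketch earlier, reading the acceleration state this operation returns could look like the following (the bucket name is a placeholder; an empty Status corresponds to the never-configured case described above):

out, err := client.GetBucketAccelerateConfiguration(ctx,
	&s3.GetBucketAccelerateConfigurationInput{
		Bucket: aws.String("example-bucket"),
	})
if err != nil {
	log.Fatal(err)
}
if out.Status == types.BucketAccelerateStatusEnabled {
	log.Println("transfer acceleration is enabled")
}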
A bucket has no Transfer Acceleration // state if a state has never been set on the bucket. For more information about -// transfer acceleration, see Transfer Acceleration -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) in -// the Amazon S3 User Guide. Related Resources -// -// * PutBucketAccelerateConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html) +// transfer acceleration, see Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) +// in the Amazon S3 User Guide. The following operations are related to +// GetBucketAccelerateConfiguration : +// - PutBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html) func (c *Client) GetBucketAccelerateConfiguration(ctx context.Context, params *GetBucketAccelerateConfigurationInput, optFns ...func(*Options)) (*GetBucketAccelerateConfigurationOutput, error) { if params == nil { params = &GetBucketAccelerateConfigurationInput{} @@ -58,16 +56,35 @@ type GetBucketAccelerateConfigurationInput struct { // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. + RequestPayer types.RequestPayer + noSmithyDocumentSerde } +func (in *GetBucketAccelerateConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type GetBucketAccelerateConfigurationOutput struct { + // If present, indicates that the requester was successfully charged for the + // request. This functionality is not supported for directory buckets. + RequestCharged types.RequestCharged + // The accelerate configuration of the bucket. 
Status types.BucketAccelerateStatus @@ -78,6 +95,9 @@ type GetBucketAccelerateConfigurationOutput struct { } func (c *Client) addOperationGetBucketAccelerateConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketAccelerateConfiguration{}, middleware.After) if err != nil { return err @@ -86,6 +106,13 @@ func (c *Client) addOperationGetBucketAccelerateConfigurationMiddlewares(stack * if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketAccelerateConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -104,16 +131,13 @@ func (c *Client) addOperationGetBucketAccelerateConfigurationMiddlewares(stack * if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -122,7 +146,10 @@ func (c *Client) addOperationGetBucketAccelerateConfigurationMiddlewares(stack * if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpGetBucketAccelerateConfigurationValidationMiddleware(stack); err != nil { @@ -134,6 +161,9 @@ func (c *Client) addOperationGetBucketAccelerateConfigurationMiddlewares(stack * if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addGetBucketAccelerateConfigurationUpdateEndpoint(stack, options); err != nil { return err } @@ -149,14 +179,26 @@ func (c *Client) addOperationGetBucketAccelerateConfigurationMiddlewares(stack * if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *GetBucketAccelerateConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opGetBucketAccelerateConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "GetBucketAccelerateConfiguration", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go index 61076288..36747fc9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAcl.go @@ -4,28 +4,36 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This implementation of the GET action uses the acl subresource to return the -// access control list (ACL) of a bucket. To use GET to return the ACL of the -// bucket, you must have READ_ACP access to the bucket. If READ_ACP permission is -// granted to the anonymous user, you can return the ACL of the bucket without -// using an authorization header. If your bucket uses the bucket owner enforced -// setting for S3 Object Ownership, requests to read ACLs are still supported and -// return the bucket-owner-full-control ACL with the owner being the account that -// created the bucket. For more information, see Controlling object ownership and -// disabling ACLs -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) -// in the Amazon S3 User Guide. Related Resources -// -// * ListObjects -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) +// This operation is not supported by directory buckets. This implementation of +// the GET action uses the acl subresource to return the access control list (ACL) +// of a bucket. To use GET to return the ACL of the bucket, you must have the +// READ_ACP access to the bucket. If READ_ACP permission is granted to the +// anonymous user, you can return the ACL of the bucket without using an +// authorization header. When you use this API operation with an access point, +// provide the alias of the access point in place of the bucket name. When you use +// this API operation with an Object Lambda access point, provide the alias of the +// Object Lambda access point in place of the bucket name. If the Object Lambda +// access point alias in a request is not valid, the error code +// InvalidAccessPointAliasError is returned. For more information about +// InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) +// . If your bucket uses the bucket owner enforced setting for S3 Object Ownership, +// requests to read ACLs are still supported and return the +// bucket-owner-full-control ACL with the owner being the account that created the +// bucket. For more information, see Controlling object ownership and disabling +// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon S3 User Guide. The following operations are related to +// GetBucketAcl : +// - ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) func (c *Client) GetBucketAcl(ctx context.Context, params *GetBucketAclInput, optFns ...func(*Options)) (*GetBucketAclOutput, error) { if params == nil { params = &GetBucketAclInput{} @@ -43,19 +51,31 @@ func (c *Client) GetBucketAcl(ctx context.Context, params *GetBucketAclInput, op type GetBucketAclInput struct { - // Specifies the S3 bucket whose ACL is being requested. + // Specifies the S3 bucket whose ACL is being requested. When you use this API + // operation with an access point, provide the alias of the access point in place + // of the bucket name. 
When you use this API operation with an Object Lambda access + // point, provide the alias of the Object Lambda access point in place of the + // bucket name. If the Object Lambda access point alias in a request is not valid, + // the error code InvalidAccessPointAliasError is returned. For more information + // about InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) + // . // // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *GetBucketAclInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type GetBucketAclOutput struct { // A list of grants. @@ -71,6 +91,9 @@ type GetBucketAclOutput struct { } func (c *Client) addOperationGetBucketAclMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketAcl{}, middleware.After) if err != nil { return err @@ -79,6 +102,13 @@ func (c *Client) addOperationGetBucketAclMiddlewares(stack *middleware.Stack, op if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketAcl"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -97,16 +127,13 @@ func (c *Client) addOperationGetBucketAclMiddlewares(stack *middleware.Stack, op if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -115,7 +142,10 @@ func (c *Client) addOperationGetBucketAclMiddlewares(stack *middleware.Stack, op if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpGetBucketAclValidationMiddleware(stack); err != nil { @@ -127,6 +157,9 @@ func (c *Client) addOperationGetBucketAclMiddlewares(stack *middleware.Stack, op if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addGetBucketAclUpdateEndpoint(stack, options); err != nil { return err } @@ -142,14 +175,26 @@ func (c *Client) addOperationGetBucketAclMiddlewares(stack 
*middleware.Stack, op if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *GetBucketAclInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opGetBucketAcl(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "GetBucketAcl", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go index bf2c3be6..0f7922ae 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAnalyticsConfiguration.go @@ -4,39 +4,31 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This implementation of the GET action returns an analytics configuration -// (identified by the analytics configuration ID) from the bucket. To use this -// operation, you must have permissions to perform the s3:GetAnalyticsConfiguration -// action. The bucket owner has this permission by default. The bucket owner can -// grant this permission to others. For more information about permissions, see -// Permissions Related to Bucket Subresource Operations -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// This operation is not supported by directory buckets. This implementation of +// the GET action returns an analytics configuration (identified by the analytics +// configuration ID) from the bucket. To use this operation, you must have +// permissions to perform the s3:GetAnalyticsConfiguration action. The bucket +// owner has this permission by default. The bucket owner can grant this permission +// to others. For more information about permissions, see Permissions Related to +// Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) // in the Amazon S3 User Guide. For information about Amazon S3 analytics feature, -// see Amazon S3 Analytics – Storage Class Analysis -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) -// in the Amazon S3 User Guide. 
Related Resources -// -// * -// DeleteBucketAnalyticsConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) -// -// * -// ListBucketAnalyticsConfigurations -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) -// -// * -// PutBucketAnalyticsConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) +// see Amazon S3 Analytics – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) +// in the Amazon S3 User Guide. The following operations are related to +// GetBucketAnalyticsConfiguration : +// - DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) +// - ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) +// - PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) func (c *Client) GetBucketAnalyticsConfiguration(ctx context.Context, params *GetBucketAnalyticsConfigurationInput, optFns ...func(*Options)) (*GetBucketAnalyticsConfigurationOutput, error) { if params == nil { params = &GetBucketAnalyticsConfigurationInput{} @@ -64,14 +56,19 @@ type GetBucketAnalyticsConfigurationInput struct { // This member is required. Id *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *GetBucketAnalyticsConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type GetBucketAnalyticsConfigurationOutput struct { // The configuration and any analyses for the analytics filter. 
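A hedged usage sketch for this operation (same client/ctx setup as in the first example; the bucket and configuration ID are placeholders):

out, err := client.GetBucketAnalyticsConfiguration(ctx,
	&s3.GetBucketAnalyticsConfigurationInput{
		Bucket: aws.String("example-bucket"),
		Id:     aws.String("example-analytics-id"), // the required configuration ID
	})
if err != nil {
	log.Fatal(err)
}
if cfg := out.AnalyticsConfiguration; cfg != nil {
	log.Printf("found analytics configuration %s", aws.ToString(cfg.Id))
}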
@@ -84,6 +81,9 @@ type GetBucketAnalyticsConfigurationOutput struct { } func (c *Client) addOperationGetBucketAnalyticsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketAnalyticsConfiguration{}, middleware.After) if err != nil { return err @@ -92,6 +92,13 @@ func (c *Client) addOperationGetBucketAnalyticsConfigurationMiddlewares(stack *m if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketAnalyticsConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -110,16 +117,13 @@ func (c *Client) addOperationGetBucketAnalyticsConfigurationMiddlewares(stack *m if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -128,7 +132,10 @@ func (c *Client) addOperationGetBucketAnalyticsConfigurationMiddlewares(stack *m if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpGetBucketAnalyticsConfigurationValidationMiddleware(stack); err != nil { @@ -140,6 +147,9 @@ func (c *Client) addOperationGetBucketAnalyticsConfigurationMiddlewares(stack *m if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addGetBucketAnalyticsConfigurationUpdateEndpoint(stack, options); err != nil { return err } @@ -155,14 +165,26 @@ func (c *Client) addOperationGetBucketAnalyticsConfigurationMiddlewares(stack *m if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *GetBucketAnalyticsConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opGetBucketAnalyticsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "GetBucketAnalyticsConfiguration", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go index 0ed61273..33c25aa1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketCors.go @@ -4,28 +4,31 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns the Cross-Origin Resource Sharing (CORS) configuration information set -// for the bucket. To use this operation, you must have permission to perform the -// s3:GetBucketCORS action. By default, the bucket owner has this permission and -// can grant it to others. For more information about CORS, see Enabling -// Cross-Origin Resource Sharing -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html). The following -// operations are related to GetBucketCors: -// -// * PutBucketCors -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) -// -// * -// DeleteBucketCors -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html) +// This operation is not supported by directory buckets. Returns the Cross-Origin +// Resource Sharing (CORS) configuration information set for the bucket. To use +// this operation, you must have permission to perform the s3:GetBucketCORS +// action. By default, the bucket owner has this permission and can grant it to +// others. When you use this API operation with an access point, provide the alias +// of the access point in place of the bucket name. When you use this API operation +// with an Object Lambda access point, provide the alias of the Object Lambda +// access point in place of the bucket name. If the Object Lambda access point +// alias in a request is not valid, the error code InvalidAccessPointAliasError is +// returned. For more information about InvalidAccessPointAliasError , see List of +// Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) +// . For more information about CORS, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) +// . The following operations are related to GetBucketCors : +// - PutBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) +// - DeleteBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html) func (c *Client) GetBucketCors(ctx context.Context, params *GetBucketCorsInput, optFns ...func(*Options)) (*GetBucketCorsOutput, error) { if params == nil { params = &GetBucketCorsInput{} @@ -43,19 +46,31 @@ func (c *Client) GetBucketCors(ctx context.Context, params *GetBucketCorsInput, type GetBucketCorsInput struct { - // The bucket name for which to get the cors configuration. + // The bucket name for which to get the cors configuration. When you use this API + // operation with an access point, provide the alias of the access point in place + // of the bucket name. When you use this API operation with an Object Lambda access + // point, provide the alias of the Object Lambda access point in place of the + // bucket name. If the Object Lambda access point alias in a request is not valid, + // the error code InvalidAccessPointAliasError is returned. For more information + // about InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) + // . 
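The access point alias guidance above applies to the request below as well; a minimal sketch assuming the client/ctx setup from the first example (the bucket name is a placeholder):

out, err := client.GetBucketCors(ctx, &s3.GetBucketCorsInput{
	Bucket: aws.String("example-bucket"),
})
if err != nil {
	log.Fatal(err)
}
for _, rule := range out.CORSRules {
	log.Printf("origins %v allow methods %v", rule.AllowedOrigins, rule.AllowedMethods)
}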
// // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *GetBucketCorsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type GetBucketCorsOutput struct { // A set of origins and methods (cross-origin access that you want to allow). You @@ -69,6 +84,9 @@ type GetBucketCorsOutput struct { } func (c *Client) addOperationGetBucketCorsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketCors{}, middleware.After) if err != nil { return err @@ -77,6 +95,13 @@ func (c *Client) addOperationGetBucketCorsMiddlewares(stack *middleware.Stack, o if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketCors"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -95,16 +120,13 @@ func (c *Client) addOperationGetBucketCorsMiddlewares(stack *middleware.Stack, o if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -113,7 +135,10 @@ func (c *Client) addOperationGetBucketCorsMiddlewares(stack *middleware.Stack, o if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpGetBucketCorsValidationMiddleware(stack); err != nil { @@ -125,6 +150,9 @@ func (c *Client) addOperationGetBucketCorsMiddlewares(stack *middleware.Stack, o if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addGetBucketCorsUpdateEndpoint(stack, options); err != nil { return err } @@ -140,14 +168,26 @@ func (c *Client) addOperationGetBucketCorsMiddlewares(stack *middleware.Stack, o if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *GetBucketCorsInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + 
return *v.Bucket, true +} + func newServiceMetadataMiddleware_opGetBucketCors(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "GetBucketCors", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go index 7fa92fc5..c8be5dd0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketEncryption.go @@ -4,37 +4,30 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns the default encryption configuration for an Amazon S3 bucket. If the -// bucket does not have a default encryption configuration, GetBucketEncryption -// returns ServerSideEncryptionConfigurationNotFoundError. For information about -// the Amazon S3 default encryption feature, see Amazon S3 Default Bucket -// Encryption -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html). To use -// this operation, you must have permission to perform the -// s3:GetEncryptionConfiguration action. The bucket owner has this permission by -// default. The bucket owner can grant this permission to others. For more -// information about permissions, see Permissions Related to Bucket Subresource -// Operations -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). -// The following operations are related to GetBucketEncryption: -// -// * -// PutBucketEncryption -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html) -// -// * -// DeleteBucketEncryption -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html) +// This operation is not supported by directory buckets. Returns the default +// encryption configuration for an Amazon S3 bucket. By default, all buckets have a +// default encryption configuration that uses server-side encryption with Amazon S3 +// managed keys (SSE-S3). For information about the bucket default encryption +// feature, see Amazon S3 Bucket Default Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) +// in the Amazon S3 User Guide. To use this operation, you must have permission to +// perform the s3:GetEncryptionConfiguration action. The bucket owner has this +// permission by default. The bucket owner can grant this permission to others. For +// more information about permissions, see Permissions Related to Bucket +// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// . 
The following operations are related to GetBucketEncryption : +// - PutBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html) +// - DeleteBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html) func (c *Client) GetBucketEncryption(ctx context.Context, params *GetBucketEncryptionInput, optFns ...func(*Options)) (*GetBucketEncryptionOutput, error) { if params == nil { params = &GetBucketEncryptionInput{} @@ -58,14 +51,19 @@ type GetBucketEncryptionInput struct { // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *GetBucketEncryptionInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type GetBucketEncryptionOutput struct { // Specifies the default server-side-encryption configuration. @@ -78,6 +76,9 @@ type GetBucketEncryptionOutput struct { } func (c *Client) addOperationGetBucketEncryptionMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketEncryption{}, middleware.After) if err != nil { return err @@ -86,6 +87,13 @@ func (c *Client) addOperationGetBucketEncryptionMiddlewares(stack *middleware.St if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketEncryption"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -104,16 +112,13 @@ func (c *Client) addOperationGetBucketEncryptionMiddlewares(stack *middleware.St if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -122,7 +127,10 @@ func (c *Client) addOperationGetBucketEncryptionMiddlewares(stack *middleware.St if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpGetBucketEncryptionValidationMiddleware(stack); err != nil { @@ -134,6 +142,9 @@ func (c *Client) addOperationGetBucketEncryptionMiddlewares(stack *middleware.St if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if 
err = addGetBucketEncryptionUpdateEndpoint(stack, options); err != nil { return err } @@ -149,14 +160,26 @@ func (c *Client) addOperationGetBucketEncryptionMiddlewares(stack *middleware.St if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *GetBucketEncryptionInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opGetBucketEncryption(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "GetBucketEncryption", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go index 70bbb9df..a3531f98 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketIntelligentTieringConfiguration.go @@ -4,15 +4,18 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Gets the S3 Intelligent-Tiering configuration from the specified bucket. The S3 +// This operation is not supported by directory buckets. Gets the S3 +// Intelligent-Tiering configuration from the specified bucket. The S3 // Intelligent-Tiering storage class is designed to optimize storage costs by // automatically moving data to the most cost-effective storage access tier, // without performance impact or operational overhead. S3 Intelligent-Tiering @@ -25,21 +28,11 @@ import ( // monitored and not eligible for auto-tiering. Smaller objects can be stored, but // they are always charged at the Frequent Access tier rates in the S3 // Intelligent-Tiering storage class. For more information, see Storage class for -// automatically optimizing frequently and infrequently accessed objects -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). -// Operations related to GetBucketIntelligentTieringConfiguration include: -// -// * -// DeleteBucketIntelligentTieringConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) -// -// * -// PutBucketIntelligentTieringConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) -// -// * -// ListBucketIntelligentTieringConfigurations -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +// automatically optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) +// . 
Operations related to GetBucketIntelligentTieringConfiguration include: +// - DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) +// - PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) +// - ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) func (c *Client) GetBucketIntelligentTieringConfiguration(ctx context.Context, params *GetBucketIntelligentTieringConfigurationInput, optFns ...func(*Options)) (*GetBucketIntelligentTieringConfigurationOutput, error) { if params == nil { params = &GetBucketIntelligentTieringConfigurationInput{} @@ -71,6 +64,11 @@ type GetBucketIntelligentTieringConfigurationInput struct { noSmithyDocumentSerde } +func (in *GetBucketIntelligentTieringConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type GetBucketIntelligentTieringConfigurationOutput struct { // Container for S3 Intelligent-Tiering configuration. @@ -83,6 +81,9 @@ type GetBucketIntelligentTieringConfigurationOutput struct { } func (c *Client) addOperationGetBucketIntelligentTieringConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketIntelligentTieringConfiguration{}, middleware.After) if err != nil { return err @@ -91,6 +92,13 @@ func (c *Client) addOperationGetBucketIntelligentTieringConfigurationMiddlewares if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketIntelligentTieringConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -109,16 +117,13 @@ func (c *Client) addOperationGetBucketIntelligentTieringConfigurationMiddlewares if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -127,7 +132,10 @@ func (c *Client) addOperationGetBucketIntelligentTieringConfigurationMiddlewares if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpGetBucketIntelligentTieringConfigurationValidationMiddleware(stack); err != nil { @@ -139,6 +147,9 @@ func (c *Client) addOperationGetBucketIntelligentTieringConfigurationMiddlewares if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + 
return err + } if err = addGetBucketIntelligentTieringConfigurationUpdateEndpoint(stack, options); err != nil { return err } @@ -154,14 +165,26 @@ func (c *Client) addOperationGetBucketIntelligentTieringConfigurationMiddlewares if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *GetBucketIntelligentTieringConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opGetBucketIntelligentTieringConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "GetBucketIntelligentTieringConfiguration", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go index f35a4606..3fe6f986 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketInventoryConfiguration.go @@ -4,37 +4,28 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns an inventory configuration (identified by the inventory configuration -// ID) from the bucket. To use this operation, you must have permissions to perform -// the s3:GetInventoryConfiguration action. The bucket owner has this permission by +// This operation is not supported by directory buckets. Returns an inventory +// configuration (identified by the inventory configuration ID) from the bucket. To +// use this operation, you must have permissions to perform the +// s3:GetInventoryConfiguration action. The bucket owner has this permission by // default and can grant this permission to others. For more information about -// permissions, see Permissions Related to Bucket Subresource Operations -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). -// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html). 
The -// following operations are related to GetBucketInventoryConfiguration: -// -// * -// DeleteBucketInventoryConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) -// -// * -// ListBucketInventoryConfigurations -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) -// -// * -// PutBucketInventoryConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) +// permissions, see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// . For information about the Amazon S3 inventory feature, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) +// . The following operations are related to GetBucketInventoryConfiguration : +// - DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) +// - ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) +// - PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) func (c *Client) GetBucketInventoryConfiguration(ctx context.Context, params *GetBucketInventoryConfigurationInput, optFns ...func(*Options)) (*GetBucketInventoryConfigurationOutput, error) { if params == nil { params = &GetBucketInventoryConfigurationInput{} @@ -62,14 +53,19 @@ type GetBucketInventoryConfigurationInput struct { // This member is required. Id *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *GetBucketInventoryConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type GetBucketInventoryConfigurationOutput struct { // Specifies the inventory configuration. 
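For orientation alongside this regenerated operation, here is a minimal, illustrative sketch (not part of the vendored diff) of calling GetBucketInventoryConfiguration through the aws-sdk-go-v2 client; the bucket name and inventory configuration ID below are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()

	// Region and credentials are resolved from the environment/shared config.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// "my-bucket" and "my-inventory-id" are placeholder values.
	out, err := client.GetBucketInventoryConfiguration(ctx, &s3.GetBucketInventoryConfigurationInput{
		Bucket: aws.String("my-bucket"),
		Id:     aws.String("my-inventory-id"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("inventory configuration: %+v\n", out.InventoryConfiguration)
}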
@@ -82,6 +78,9 @@ type GetBucketInventoryConfigurationOutput struct { } func (c *Client) addOperationGetBucketInventoryConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketInventoryConfiguration{}, middleware.After) if err != nil { return err @@ -90,6 +89,13 @@ func (c *Client) addOperationGetBucketInventoryConfigurationMiddlewares(stack *m if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketInventoryConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -108,16 +114,13 @@ func (c *Client) addOperationGetBucketInventoryConfigurationMiddlewares(stack *m if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -126,7 +129,10 @@ func (c *Client) addOperationGetBucketInventoryConfigurationMiddlewares(stack *m if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpGetBucketInventoryConfigurationValidationMiddleware(stack); err != nil { @@ -138,6 +144,9 @@ func (c *Client) addOperationGetBucketInventoryConfigurationMiddlewares(stack *m if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addGetBucketInventoryConfigurationUpdateEndpoint(stack, options); err != nil { return err } @@ -153,14 +162,26 @@ func (c *Client) addOperationGetBucketInventoryConfigurationMiddlewares(stack *m if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *GetBucketInventoryConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opGetBucketInventoryConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "GetBucketInventoryConfiguration", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go index 5d72d2eb..4cc9eff8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLifecycleConfiguration.go @@ -4,59 +4,41 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Bucket lifecycle configuration now supports specifying a lifecycle rule using an -// object key name prefix, one or more object tags, or a combination of both. -// Accordingly, this section describes the latest API. The response describes the -// new filter element that you can use to specify a filter to select a subset of -// objects to which the rule applies. If you are using a previous version of the -// lifecycle configuration, it still works. For the earlier action, see -// GetBucketLifecycle -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html). -// Returns the lifecycle configuration information set on the bucket. For -// information about lifecycle configuration, see Object Lifecycle Management -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). To -// use this operation, you must have permission to perform the +// This operation is not supported by directory buckets. Bucket lifecycle +// configuration now supports specifying a lifecycle rule using an object key name +// prefix, one or more object tags, or a combination of both. Accordingly, this +// section describes the latest API. The response describes the new filter element +// that you can use to specify a filter to select a subset of objects to which the +// rule applies. If you are using a previous version of the lifecycle +// configuration, it still works. For the earlier action, see GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html) +// . Returns the lifecycle configuration information set on the bucket. For +// information about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) +// . To use this operation, you must have permission to perform the // s3:GetLifecycleConfiguration action. The bucket owner has this permission, by // default. The bucket owner can grant this permission to others. For more // information about permissions, see Permissions Related to Bucket Subresource -// Operations -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). -// GetBucketLifecycleConfiguration has the following special error: +// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// . GetBucketLifecycleConfiguration has the following special error: +// - Error code: NoSuchLifecycleConfiguration +// - Description: The lifecycle configuration does not exist. 
+// - HTTP Status Code: 404 Not Found +// - SOAP Fault Code Prefix: Client // -// * Error code: -// NoSuchLifecycleConfiguration -// -// * Description: The lifecycle configuration does -// not exist. -// -// * HTTP Status Code: 404 Not Found -// -// * SOAP Fault Code Prefix: -// Client -// -// The following operations are related to -// GetBucketLifecycleConfiguration: -// -// * GetBucketLifecycle -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html) -// -// * -// PutBucketLifecycle -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html) -// -// * -// DeleteBucketLifecycle -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) +// The following operations are related to GetBucketLifecycleConfiguration : +// - GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html) +// - PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html) +// - DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) func (c *Client) GetBucketLifecycleConfiguration(ctx context.Context, params *GetBucketLifecycleConfigurationInput, optFns ...func(*Options)) (*GetBucketLifecycleConfigurationOutput, error) { if params == nil { params = &GetBucketLifecycleConfigurationInput{} @@ -79,14 +61,19 @@ type GetBucketLifecycleConfigurationInput struct { // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *GetBucketLifecycleConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type GetBucketLifecycleConfigurationOutput struct { // Container for a lifecycle rule. 
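Because the NoSuchLifecycleConfiguration special error above is easy to mishandle, a hedged usage sketch may help: the error code can be matched through smithy-go's generic APIError interface. The package and helper names here are illustrative, and client is assumed to be a configured *s3.Client:

package s3sketch

import (
	"context"
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/smithy-go"
)

// printLifecycleRules treats the documented 404 NoSuchLifecycleConfiguration
// error as "no lifecycle configuration set" rather than a hard failure.
func printLifecycleRules(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketLifecycleConfiguration(ctx, &s3.GetBucketLifecycleConfigurationInput{
		Bucket: aws.String(bucket), // placeholder bucket name
	})
	if err != nil {
		var apiErr smithy.APIError
		if errors.As(err, &apiErr) && apiErr.ErrorCode() == "NoSuchLifecycleConfiguration" {
			fmt.Println("bucket has no lifecycle configuration")
			return nil
		}
		return err
	}
	for _, rule := range out.Rules {
		fmt.Println(aws.ToString(rule.ID), rule.Status)
	}
	return nil
}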
@@ -99,6 +86,9 @@ type GetBucketLifecycleConfigurationOutput struct { } func (c *Client) addOperationGetBucketLifecycleConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketLifecycleConfiguration{}, middleware.After) if err != nil { return err @@ -107,6 +97,13 @@ func (c *Client) addOperationGetBucketLifecycleConfigurationMiddlewares(stack *m if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketLifecycleConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -125,16 +122,13 @@ func (c *Client) addOperationGetBucketLifecycleConfigurationMiddlewares(stack *m if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -143,7 +137,10 @@ func (c *Client) addOperationGetBucketLifecycleConfigurationMiddlewares(stack *m if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpGetBucketLifecycleConfigurationValidationMiddleware(stack); err != nil { @@ -155,6 +152,9 @@ func (c *Client) addOperationGetBucketLifecycleConfigurationMiddlewares(stack *m if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addGetBucketLifecycleConfigurationUpdateEndpoint(stack, options); err != nil { return err } @@ -170,14 +170,26 @@ func (c *Client) addOperationGetBucketLifecycleConfigurationMiddlewares(stack *m if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *GetBucketLifecycleConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opGetBucketLifecycleConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "GetBucketLifecycleConfiguration", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go index fb8ff30f..e94875fe 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLocation.go @@ -15,25 +15,28 @@ import ( smithyxml "github.com/aws/smithy-go/encoding/xml" smithyio "github.com/aws/smithy-go/io" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" "io" ) -// Returns the Region the bucket resides in. You set the bucket's Region using the -// LocationConstraint request parameter in a CreateBucket request. For more -// information, see CreateBucket -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html). To use -// this implementation of the operation, you must be the bucket owner. To use this -// API against an access point, provide the alias of the access point in place of -// the bucket name. The following operations are related to GetBucketLocation: -// -// * -// GetObject -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// -// * -// CreateBucket -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// This operation is not supported by directory buckets. Returns the Region the +// bucket resides in. You set the bucket's Region using the LocationConstraint +// request parameter in a CreateBucket request. For more information, see +// CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// . When you use this API operation with an access point, provide the alias of the +// access point in place of the bucket name. When you use this API operation with +// an Object Lambda access point, provide the alias of the Object Lambda access +// point in place of the bucket name. If the Object Lambda access point alias in a +// request is not valid, the error code InvalidAccessPointAliasError is returned. +// For more information about InvalidAccessPointAliasError , see List of Error +// Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) +// . We recommend that you use HeadBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html) +// to return the Region that a bucket resides in. For backward compatibility, +// Amazon S3 continues to support GetBucketLocation. The following operations are +// related to GetBucketLocation : +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) func (c *Client) GetBucketLocation(ctx context.Context, params *GetBucketLocationInput, optFns ...func(*Options)) (*GetBucketLocationOutput, error) { if params == nil { params = &GetBucketLocationInput{} @@ -51,25 +54,36 @@ func (c *Client) GetBucketLocation(ctx context.Context, params *GetBucketLocatio type GetBucketLocationInput struct { - // The name of the bucket for which to get the location. + // The name of the bucket for which to get the location. When you use this API + // operation with an access point, provide the alias of the access point in place + // of the bucket name. When you use this API operation with an Object Lambda access + // point, provide the alias of the Object Lambda access point in place of the + // bucket name. If the Object Lambda access point alias in a request is not valid, + // the error code InvalidAccessPointAliasError is returned. For more information + // about InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) + // . // // This member is required. 
Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *GetBucketLocationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type GetBucketLocationOutput struct { // Specifies the Region where the bucket resides. For a list of all the Amazon S3 - // supported location constraints by Region, see Regions and Endpoints - // (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region). Buckets in - // Region us-east-1 have a LocationConstraint of null. + // supported location constraints by Region, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) + // . Buckets in Region us-east-1 have a LocationConstraint of null . LocationConstraint types.BucketLocationConstraint // Metadata pertaining to the operation's result. @@ -79,6 +93,9 @@ type GetBucketLocationOutput struct { } func (c *Client) addOperationGetBucketLocationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketLocation{}, middleware.After) if err != nil { return err @@ -87,6 +104,13 @@ func (c *Client) addOperationGetBucketLocationMiddlewares(stack *middleware.Stac if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketLocation"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -105,16 +129,13 @@ func (c *Client) addOperationGetBucketLocationMiddlewares(stack *middleware.Stac if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -126,7 +147,10 @@ func (c *Client) addOperationGetBucketLocationMiddlewares(stack *middleware.Stac if err = swapDeserializerHelper(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpGetBucketLocationValidationMiddleware(stack); err != nil { @@ -138,6 +162,9 @@ func (c *Client) addOperationGetBucketLocationMiddlewares(stack *middleware.Stac if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addGetBucketLocationUpdateEndpoint(stack, options); err 
!= nil { return err } @@ -153,6 +180,12 @@ func (c *Client) addOperationGetBucketLocationMiddlewares(stack *middleware.Stac if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } @@ -212,18 +245,24 @@ func swapDeserializerHelper(stack *middleware.Stack) error { return nil } +func (v *GetBucketLocationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opGetBucketLocation(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "GetBucketLocation", } } -// getGetBucketLocationBucketMember returns a pointer to string denoting a provided -// bucket member valueand a boolean indicating if the input has a modeled bucket -// name, +// getGetBucketLocationBucketMember returns a pointer to string denoting a +// provided bucket member value and a boolean indicating if the input has a modeled +// bucket name. func getGetBucketLocationBucketMember(input interface{}) (*string, bool) { in := input.(*GetBucketLocationInput) if in.Bucket == nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go index ca115886..5bbca7e4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketLogging.go @@ -4,24 +4,21 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns the logging status of a bucket and the permissions users have to view -// and modify that status. To use GET, you must be the bucket owner. The following -// operations are related to GetBucketLogging: -// -// * CreateBucket -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// -// * -// PutBucketLogging -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLogging.html) +// This operation is not supported by directory buckets. Returns the logging +// status of a bucket and the permissions users have to view and modify that +// status. The following operations are related to GetBucketLogging : +// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// - PutBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLogging.html) func (c *Client) GetBucketLogging(ctx context.Context, params *GetBucketLoggingInput, optFns ...func(*Options)) (*GetBucketLoggingOutput, error) { if params == nil { params = &GetBucketLoggingInput{} @@ -44,20 +41,24 @@ type GetBucketLoggingInput struct { // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. 
If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *GetBucketLoggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type GetBucketLoggingOutput struct { - // Describes where logs are stored and the prefix that Amazon S3 assigns to all log - // object keys for a bucket. For more information, see PUT Bucket logging - // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) in - // the Amazon S3 API Reference. + // Describes where logs are stored and the prefix that Amazon S3 assigns to all + // log object keys for a bucket. For more information, see PUT Bucket logging (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) + // in the Amazon S3 API Reference. LoggingEnabled *types.LoggingEnabled // Metadata pertaining to the operation's result. @@ -67,6 +68,9 @@ type GetBucketLoggingOutput struct { } func (c *Client) addOperationGetBucketLoggingMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketLogging{}, middleware.After) if err != nil { return err @@ -75,6 +79,13 @@ func (c *Client) addOperationGetBucketLoggingMiddlewares(stack *middleware.Stack if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketLogging"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -93,16 +104,13 @@ func (c *Client) addOperationGetBucketLoggingMiddlewares(stack *middleware.Stack if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -111,7 +119,10 @@ func (c *Client) addOperationGetBucketLoggingMiddlewares(stack *middleware.Stack if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpGetBucketLoggingValidationMiddleware(stack); err != nil { @@ -123,6 +134,9 @@ func (c *Client) addOperationGetBucketLoggingMiddlewares(stack *middleware.Stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addGetBucketLoggingUpdateEndpoint(stack, options); err != nil { return err } @@ -138,14 +152,26 @@ func (c *Client) addOperationGetBucketLoggingMiddlewares(stack *middleware.Stack if err = addRequestResponseLogging(stack, options); err != nil { return err } + 
if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *GetBucketLoggingInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opGetBucketLogging(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "GetBucketLogging", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go index 22cf389c..913b9a04 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketMetricsConfiguration.go @@ -4,44 +4,31 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Gets a metrics configuration (specified by the metrics configuration ID) from -// the bucket. Note that this doesn't include the daily storage metrics. To use -// this operation, you must have permissions to perform the -// s3:GetMetricsConfiguration action. The bucket owner has this permission by -// default. The bucket owner can grant this permission to others. For more -// information about permissions, see Permissions Related to Bucket Subresource -// Operations -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). -// For information about CloudWatch request metrics for Amazon S3, see Monitoring -// Metrics with Amazon CloudWatch -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). -// The following operations are related to GetBucketMetricsConfiguration: -// -// * -// PutBucketMetricsConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) -// -// * -// DeleteBucketMetricsConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) -// -// * -// ListBucketMetricsConfigurations -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) -// -// * -// Monitoring Metrics with Amazon CloudWatch -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) +// This operation is not supported by directory buckets. Gets a metrics +// configuration (specified by the metrics configuration ID) from the bucket. Note +// that this doesn't include the daily storage metrics. To use this operation, you +// must have permissions to perform the s3:GetMetricsConfiguration action. The +// bucket owner has this permission by default. The bucket owner can grant this +// permission to others. 
For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// . For information about CloudWatch request metrics for Amazon S3, see +// Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) +// . The following operations are related to GetBucketMetricsConfiguration : +// - PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) +// - DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) +// - ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) +// - Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) func (c *Client) GetBucketMetricsConfiguration(ctx context.Context, params *GetBucketMetricsConfigurationInput, optFns ...func(*Options)) (*GetBucketMetricsConfigurationOutput, error) { if params == nil { params = &GetBucketMetricsConfigurationInput{} @@ -64,19 +51,25 @@ type GetBucketMetricsConfigurationInput struct { // This member is required. Bucket *string - // The ID used to identify the metrics configuration. + // The ID used to identify the metrics configuration. The ID has a 64 character + // limit and can only contain letters, numbers, periods, dashes, and underscores. // // This member is required. Id *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *GetBucketMetricsConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type GetBucketMetricsConfigurationOutput struct { // Specifies the metrics configuration. 
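As a brief illustrative sketch of the input shape described above (the helper name and its parameters are assumptions, not SDK code), fetching one request-metrics configuration by ID:

package s3sketch

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// getRequestMetrics returns the CloudWatch request-metrics configuration
// identified by id. Per the documented constraint, the ID has a 64-character
// limit and may contain only letters, numbers, periods, dashes, and underscores.
func getRequestMetrics(ctx context.Context, client *s3.Client, bucket, id string) (*types.MetricsConfiguration, error) {
	out, err := client.GetBucketMetricsConfiguration(ctx, &s3.GetBucketMetricsConfigurationInput{
		Bucket: aws.String(bucket),
		Id:     aws.String(id),
	})
	if err != nil {
		return nil, err
	}
	return out.MetricsConfiguration, nil
}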
@@ -89,6 +82,9 @@ type GetBucketMetricsConfigurationOutput struct { } func (c *Client) addOperationGetBucketMetricsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketMetricsConfiguration{}, middleware.After) if err != nil { return err @@ -97,6 +93,13 @@ func (c *Client) addOperationGetBucketMetricsConfigurationMiddlewares(stack *mid if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketMetricsConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -115,16 +118,13 @@ func (c *Client) addOperationGetBucketMetricsConfigurationMiddlewares(stack *mid if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -133,7 +133,10 @@ func (c *Client) addOperationGetBucketMetricsConfigurationMiddlewares(stack *mid if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpGetBucketMetricsConfigurationValidationMiddleware(stack); err != nil { @@ -145,6 +148,9 @@ func (c *Client) addOperationGetBucketMetricsConfigurationMiddlewares(stack *mid if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addGetBucketMetricsConfigurationUpdateEndpoint(stack, options); err != nil { return err } @@ -160,14 +166,26 @@ func (c *Client) addOperationGetBucketMetricsConfigurationMiddlewares(stack *mid if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *GetBucketMetricsConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opGetBucketMetricsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "GetBucketMetricsConfiguration", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go index cbf103a7..67a35d97 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketNotificationConfiguration.go @@ -4,29 +4,34 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns the notification configuration of a bucket. If notifications are not -// enabled on the bucket, the action returns an empty NotificationConfiguration -// element. By default, you must be the bucket owner to read the notification -// configuration of a bucket. However, the bucket owner can use a bucket policy to -// grant permission to other users to read this configuration with the -// s3:GetBucketNotification permission. For more information about setting and -// reading the notification configuration on a bucket, see Setting Up Notification -// of Bucket Events -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). For -// more information about bucket policies, see Using Bucket Policies -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). The -// following action is related to GetBucketNotification: -// -// * PutBucketNotification -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotification.html) +// This operation is not supported by directory buckets. Returns the notification +// configuration of a bucket. If notifications are not enabled on the bucket, the +// action returns an empty NotificationConfiguration element. By default, you must +// be the bucket owner to read the notification configuration of a bucket. However, +// the bucket owner can use a bucket policy to grant permission to other users to +// read this configuration with the s3:GetBucketNotification permission. When you +// use this API operation with an access point, provide the alias of the access +// point in place of the bucket name. When you use this API operation with an +// Object Lambda access point, provide the alias of the Object Lambda access point +// in place of the bucket name. If the Object Lambda access point alias in a +// request is not valid, the error code InvalidAccessPointAliasError is returned. +// For more information about InvalidAccessPointAliasError , see List of Error +// Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) +// . For more information about setting and reading the notification configuration +// on a bucket, see Setting Up Notification of Bucket Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) +// . For more information about bucket policies, see Using Bucket Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) +// . 
The following action is related to GetBucketNotification : +// - PutBucketNotification (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotification.html) func (c *Client) GetBucketNotificationConfiguration(ctx context.Context, params *GetBucketNotificationConfigurationInput, optFns ...func(*Options)) (*GetBucketNotificationConfigurationOutput, error) { if params == nil { params = &GetBucketNotificationConfigurationInput{} @@ -44,21 +49,34 @@ func (c *Client) GetBucketNotificationConfiguration(ctx context.Context, params type GetBucketNotificationConfigurationInput struct { - // The name of the bucket for which to get the notification configuration. + // The name of the bucket for which to get the notification configuration. When + // you use this API operation with an access point, provide the alias of the access + // point in place of the bucket name. When you use this API operation with an + // Object Lambda access point, provide the alias of the Object Lambda access point + // in place of the bucket name. If the Object Lambda access point alias in a + // request is not valid, the error code InvalidAccessPointAliasError is returned. + // For more information about InvalidAccessPointAliasError , see List of Error + // Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) + // . // // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde } -// A container for specifying the notification configuration of the bucket. If this -// element is empty, notifications are turned off for the bucket. +func (in *GetBucketNotificationConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +// A container for specifying the notification configuration of the bucket. If +// this element is empty, notifications are turned off for the bucket. type GetBucketNotificationConfigurationOutput struct { // Enables delivery of events to Amazon EventBridge. @@ -68,12 +86,12 @@ type GetBucketNotificationConfigurationOutput struct { // them. LambdaFunctionConfigurations []types.LambdaFunctionConfiguration - // The Amazon Simple Queue Service queues to publish messages to and the events for - // which to publish messages. + // The Amazon Simple Queue Service queues to publish messages to and the events + // for which to publish messages. QueueConfigurations []types.QueueConfiguration - // The topic to which notifications are sent and the events for which notifications - // are generated. + // The topic to which notifications are sent and the events for which + // notifications are generated. TopicConfigurations []types.TopicConfiguration // Metadata pertaining to the operation's result. 
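A short, assumed-usage sketch of consuming the three target lists on this output type (helper name is illustrative; an empty configuration means notifications are turned off for the bucket):

package s3sketch

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printNotificationTargets lists every SNS, SQS, and Lambda target configured
// on the bucket, along with the events that trigger each one.
func printNotificationTargets(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketNotificationConfiguration(ctx, &s3.GetBucketNotificationConfigurationInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return err
	}
	for _, tc := range out.TopicConfigurations {
		fmt.Println("SNS topic:", aws.ToString(tc.TopicArn), tc.Events)
	}
	for _, qc := range out.QueueConfigurations {
		fmt.Println("SQS queue:", aws.ToString(qc.QueueArn), qc.Events)
	}
	for _, lc := range out.LambdaFunctionConfigurations {
		fmt.Println("Lambda:", aws.ToString(lc.LambdaFunctionArn), lc.Events)
	}
	return nil
}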
@@ -83,6 +101,9 @@ type GetBucketNotificationConfigurationOutput struct { } func (c *Client) addOperationGetBucketNotificationConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketNotificationConfiguration{}, middleware.After) if err != nil { return err @@ -91,6 +112,13 @@ func (c *Client) addOperationGetBucketNotificationConfigurationMiddlewares(stack if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketNotificationConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -109,16 +137,13 @@ func (c *Client) addOperationGetBucketNotificationConfigurationMiddlewares(stack if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -127,7 +152,10 @@ func (c *Client) addOperationGetBucketNotificationConfigurationMiddlewares(stack if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpGetBucketNotificationConfigurationValidationMiddleware(stack); err != nil { @@ -139,6 +167,9 @@ func (c *Client) addOperationGetBucketNotificationConfigurationMiddlewares(stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addGetBucketNotificationConfigurationUpdateEndpoint(stack, options); err != nil { return err } @@ -154,14 +185,26 @@ func (c *Client) addOperationGetBucketNotificationConfigurationMiddlewares(stack if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *GetBucketNotificationConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opGetBucketNotificationConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "GetBucketNotificationConfiguration", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go index 571c9566..dca55854 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketOwnershipControls.go @@ -4,26 +4,24 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Retrieves OwnershipControls for an Amazon S3 bucket. To use this operation, you -// must have the s3:GetBucketOwnershipControls permission. For more information -// about Amazon S3 permissions, see Specifying permissions in a policy -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html). -// For information about Amazon S3 Object Ownership, see Using Object Ownership -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html). -// The following operations are related to GetBucketOwnershipControls: -// -// * -// PutBucketOwnershipControls -// -// * DeleteBucketOwnershipControls +// This operation is not supported by directory buckets. Retrieves +// OwnershipControls for an Amazon S3 bucket. To use this operation, you must have +// the s3:GetBucketOwnershipControls permission. For more information about Amazon +// S3 permissions, see Specifying permissions in a policy (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html) +// . For information about Amazon S3 Object Ownership, see Using Object Ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// . The following operations are related to GetBucketOwnershipControls : +// - PutBucketOwnershipControls +// - DeleteBucketOwnershipControls func (c *Client) GetBucketOwnershipControls(ctx context.Context, params *GetBucketOwnershipControlsInput, optFns ...func(*Options)) (*GetBucketOwnershipControlsOutput, error) { if params == nil { params = &GetBucketOwnershipControlsInput{} @@ -46,14 +44,19 @@ type GetBucketOwnershipControlsInput struct { // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). 
ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *GetBucketOwnershipControlsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type GetBucketOwnershipControlsOutput struct { // The OwnershipControls (BucketOwnerEnforced, BucketOwnerPreferred, or @@ -67,6 +70,9 @@ type GetBucketOwnershipControlsOutput struct { } func (c *Client) addOperationGetBucketOwnershipControlsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketOwnershipControls{}, middleware.After) if err != nil { return err @@ -75,6 +81,13 @@ func (c *Client) addOperationGetBucketOwnershipControlsMiddlewares(stack *middle if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketOwnershipControls"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -93,16 +106,13 @@ func (c *Client) addOperationGetBucketOwnershipControlsMiddlewares(stack *middle if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -111,7 +121,10 @@ func (c *Client) addOperationGetBucketOwnershipControlsMiddlewares(stack *middle if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpGetBucketOwnershipControlsValidationMiddleware(stack); err != nil { @@ -123,6 +136,9 @@ func (c *Client) addOperationGetBucketOwnershipControlsMiddlewares(stack *middle if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addGetBucketOwnershipControlsUpdateEndpoint(stack, options); err != nil { return err } @@ -138,21 +154,33 @@ func (c *Client) addOperationGetBucketOwnershipControlsMiddlewares(stack *middle if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *GetBucketOwnershipControlsInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opGetBucketOwnershipControls(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "GetBucketOwnershipControls", } } -// 
getGetBucketOwnershipControlsBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, +// getGetBucketOwnershipControlsBucketMember returns a pointer to string denoting +// a provided bucket member value and a boolean indicating if the input has a +// modeled bucket name. func getGetBucketOwnershipControlsBucketMember(input interface{}) (*string, bool) { in := input.(*GetBucketOwnershipControlsInput) if in.Bucket == nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go index f16c84cd..ff42b705 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicy.go @@ -4,30 +4,56 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns the policy of a specified bucket. If you are using an identity other -// than the root user of the Amazon Web Services account that owns the bucket, the -// calling identity must have the GetBucketPolicy permissions on the specified -// bucket and belong to the bucket owner's account in order to use this operation. -// If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access -// Denied error. If you have the correct permissions, but you're not using an -// identity that belongs to the bucket owner's account, Amazon S3 returns a 405 -// Method Not Allowed error. As a security precaution, the root user of the Amazon -// Web Services account that owns a bucket can always use this operation, even if -// the policy explicitly denies the root user the ability to perform this action. -// For more information about bucket policies, see Using Bucket Policies and User -// Policies -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). The -// following action is related to GetBucketPolicy: +// Returns the policy of a specified bucket. Directory buckets - For directory +// buckets, you must make requests for this API operation to the Regional endpoint. +// These endpoints support path-style requests in the format +// https://s3express-control.region_code.amazonaws.com/bucket-name . +// Virtual-hosted-style requests aren't supported. For more information, see +// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. Permissions If you are using an identity other than +// the root user of the Amazon Web Services account that owns the bucket, the +// calling identity must both have the GetBucketPolicy permissions on the +// specified bucket and belong to the bucket owner's account in order to use this +// operation. If you don't have GetBucketPolicy permissions, Amazon S3 returns a +// 403 Access Denied error. If you have the correct permissions, but you're not +// using an identity that belongs to the bucket owner's account, Amazon S3 returns +// a 405 Method Not Allowed error. 
To ensure that bucket owners don't +// inadvertently lock themselves out of their own buckets, the root principal in a +// bucket owner's Amazon Web Services account can perform the GetBucketPolicy , +// PutBucketPolicy , and DeleteBucketPolicy API actions, even if their bucket +// policy explicitly denies the root principal's access. Bucket owner root +// principals can only be blocked from performing these API actions by VPC endpoint +// policies and Amazon Web Services Organizations policies. +// - General purpose bucket permissions - The s3:GetBucketPolicy permission is +// required in a policy. For more information about general purpose buckets bucket +// policies, see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) +// in the Amazon S3 User Guide. +// - Directory bucket permissions - To grant access to this API operation, you +// must have the s3express:GetBucketPolicy permission in an IAM identity-based +// policy instead of a bucket policy. Cross-account access to this API operation +// isn't supported. This operation can only be performed by the Amazon Web Services +// account that owns the resource. For more information about directory bucket +// policies and permissions, see Amazon Web Services Identity and Access +// Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) +// in the Amazon S3 User Guide. // -// * GetObject -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// Example bucket policies General purpose buckets example bucket policies - See +// Bucket policy examples (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html) +// in the Amazon S3 User Guide. Directory bucket example bucket policies - See +// Example bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) +// in the Amazon S3 User Guide. HTTP Host header syntax Directory buckets - The +// HTTP Host header syntax is s3express-control.region.amazonaws.com . The +// following action is related to GetBucketPolicy : +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) func (c *Client) GetBucketPolicy(ctx context.Context, params *GetBucketPolicyInput, optFns ...func(*Options)) (*GetBucketPolicyOutput, error) { if params == nil { params = &GetBucketPolicyInput{} @@ -45,19 +71,43 @@ func (c *Client) GetBucketPolicy(ctx context.Context, params *GetBucketPolicyInp type GetBucketPolicyInput struct { - // The bucket name for which to get the bucket policy. + // The bucket name to get the bucket policy for. Directory buckets - When you use + // this operation with a directory bucket, you must use path-style requests in the + // format https://s3express-control.region_code.amazonaws.com/bucket-name . + // Virtual-hosted-style requests aren't supported. Directory bucket names must be + // unique in the chosen Availability Zone. Bucket names must also follow the format + // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 + // ). For information about bucket naming restrictions, see Directory bucket + // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide Access points - When you use this API operation with + // an access point, provide the alias of the access point in place of the bucket + // name. 
Object Lambda access points - When you use this API operation with an + // Object Lambda access point, provide the alias of the Object Lambda access point + // in place of the bucket name. If the Object Lambda access point alias in a + // request is not valid, the error code InvalidAccessPointAliasError is returned. + // For more information about InvalidAccessPointAliasError , see List of Error + // Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) + // . Access points and Object Lambda access points are not supported by directory + // buckets. // // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). For directory buckets, this header + // is not supported in this API operation. If you specify this header, the request + // fails with the HTTP status code 501 Not Implemented . ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *GetBucketPolicyInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type GetBucketPolicyOutput struct { // The bucket policy as a JSON document. @@ -70,6 +120,9 @@ type GetBucketPolicyOutput struct { } func (c *Client) addOperationGetBucketPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketPolicy{}, middleware.After) if err != nil { return err @@ -78,6 +131,13 @@ func (c *Client) addOperationGetBucketPolicyMiddlewares(stack *middleware.Stack, if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketPolicy"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -96,16 +156,13 @@ func (c *Client) addOperationGetBucketPolicyMiddlewares(stack *middleware.Stack, if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -114,7 +171,10 @@ func (c *Client) addOperationGetBucketPolicyMiddlewares(stack *middleware.Stack, if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpGetBucketPolicyValidationMiddleware(stack); err != nil { @@ -126,6 +186,9 @@ func (c *Client) addOperationGetBucketPolicyMiddlewares(stack 
*middleware.Stack, if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addGetBucketPolicyUpdateEndpoint(stack, options); err != nil { return err } @@ -141,14 +204,26 @@ func (c *Client) addOperationGetBucketPolicyMiddlewares(stack *middleware.Stack, if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *GetBucketPolicyInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opGetBucketPolicy(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "GetBucketPolicy", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go index 570f60fa..6acf706f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketPolicyStatus.go @@ -4,39 +4,28 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Retrieves the policy status for an Amazon S3 bucket, indicating whether the -// bucket is public. In order to use this operation, you must have the -// s3:GetBucketPolicyStatus permission. For more information about Amazon S3 -// permissions, see Specifying Permissions in a Policy -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). -// For more information about when Amazon S3 considers a bucket public, see The -// Meaning of "Public" -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). -// The following operations are related to GetBucketPolicyStatus: -// -// * Using Amazon -// S3 Block Public Access -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) -// -// * -// GetPublicAccessBlock -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) -// -// * -// PutPublicAccessBlock -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) -// -// * -// DeletePublicAccessBlock -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +// This operation is not supported by directory buckets. Retrieves the policy +// status for an Amazon S3 bucket, indicating whether the bucket is public. In +// order to use this operation, you must have the s3:GetBucketPolicyStatus +// permission. For more information about Amazon S3 permissions, see Specifying +// Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// . 
For more information about when Amazon S3 considers a bucket public, see The +// Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) +// . The following operations are related to GetBucketPolicyStatus : +// - Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// - GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// - PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) +// - DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) func (c *Client) GetBucketPolicyStatus(ctx context.Context, params *GetBucketPolicyStatusInput, optFns ...func(*Options)) (*GetBucketPolicyStatusOutput, error) { if params == nil { params = &GetBucketPolicyStatusInput{} @@ -59,14 +48,19 @@ type GetBucketPolicyStatusInput struct { // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *GetBucketPolicyStatusInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type GetBucketPolicyStatusOutput struct { // The policy status for the specified bucket. @@ -79,6 +73,9 @@ type GetBucketPolicyStatusOutput struct { } func (c *Client) addOperationGetBucketPolicyStatusMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketPolicyStatus{}, middleware.After) if err != nil { return err @@ -87,6 +84,13 @@ func (c *Client) addOperationGetBucketPolicyStatusMiddlewares(stack *middleware. if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketPolicyStatus"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -105,16 +109,13 @@ func (c *Client) addOperationGetBucketPolicyStatusMiddlewares(stack *middleware. if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -123,7 +124,10 @@ func (c *Client) addOperationGetBucketPolicyStatusMiddlewares(stack *middleware. 
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpGetBucketPolicyStatusValidationMiddleware(stack); err != nil { @@ -135,6 +139,9 @@ func (c *Client) addOperationGetBucketPolicyStatusMiddlewares(stack *middleware. if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addGetBucketPolicyStatusUpdateEndpoint(stack, options); err != nil { return err } @@ -150,14 +157,26 @@ func (c *Client) addOperationGetBucketPolicyStatusMiddlewares(stack *middleware. if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *GetBucketPolicyStatusInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opGetBucketPolicyStatus(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "GetBucketPolicyStatus", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go index 5d7f3115..8db927c1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketReplication.go @@ -4,37 +4,31 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns the replication configuration of a bucket. It can take a while to -// propagate the put or delete a replication configuration to all Amazon S3 -// systems. Therefore, a get request soon after put or delete can return a wrong -// result. For information about replication configuration, see Replication -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) in the Amazon -// S3 User Guide. This action requires permissions for the +// This operation is not supported by directory buckets. Returns the replication +// configuration of a bucket. It can take a while to propagate the put or delete a +// replication configuration to all Amazon S3 systems. Therefore, a get request +// soon after put or delete can return a wrong result. For information about +// replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) +// in the Amazon S3 User Guide. This action requires permissions for the // s3:GetReplicationConfiguration action. For more information about permissions, -// see Using Bucket Policies and User Policies -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). 
If -// you include the Filter element in a replication configuration, you must also -// include the DeleteMarkerReplication and Priority elements. The response also -// returns those elements. For information about GetBucketReplication errors, see -// List of replication-related error codes -// (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList) -// The following operations are related to GetBucketReplication: -// -// * -// PutBucketReplication -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html) -// -// * -// DeleteBucketReplication -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html) +// see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) +// . If you include the Filter element in a replication configuration, you must +// also include the DeleteMarkerReplication and Priority elements. The response +// also returns those elements. For information about GetBucketReplication errors, +// see List of replication-related error codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList) +// The following operations are related to GetBucketReplication : +// - PutBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html) +// - DeleteBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html) func (c *Client) GetBucketReplication(ctx context.Context, params *GetBucketReplicationInput, optFns ...func(*Options)) (*GetBucketReplicationOutput, error) { if params == nil { params = &GetBucketReplicationInput{} @@ -57,14 +51,19 @@ type GetBucketReplicationInput struct { // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *GetBucketReplicationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type GetBucketReplicationOutput struct { // A container for replication rules. You can add up to 1,000 rules. 
The maximum @@ -78,6 +77,9 @@ type GetBucketReplicationOutput struct { } func (c *Client) addOperationGetBucketReplicationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketReplication{}, middleware.After) if err != nil { return err @@ -86,6 +88,13 @@ func (c *Client) addOperationGetBucketReplicationMiddlewares(stack *middleware.S if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketReplication"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -104,16 +113,13 @@ func (c *Client) addOperationGetBucketReplicationMiddlewares(stack *middleware.S if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -122,7 +128,10 @@ func (c *Client) addOperationGetBucketReplicationMiddlewares(stack *middleware.S if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpGetBucketReplicationValidationMiddleware(stack); err != nil { @@ -134,6 +143,9 @@ func (c *Client) addOperationGetBucketReplicationMiddlewares(stack *middleware.S if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addGetBucketReplicationUpdateEndpoint(stack, options); err != nil { return err } @@ -149,14 +161,26 @@ func (c *Client) addOperationGetBucketReplicationMiddlewares(stack *middleware.S if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *GetBucketReplicationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opGetBucketReplication(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "GetBucketReplication", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go index 45f985b9..37c96450 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketRequestPayment.go @@ -4,22 +4,21 @@ package s3 
import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns the request payment configuration of a bucket. To use this version of -// the operation, you must be the bucket owner. For more information, see Requester -// Pays Buckets -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html). The -// following operations are related to GetBucketRequestPayment: -// -// * ListObjects -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) +// This operation is not supported by directory buckets. Returns the request +// payment configuration of a bucket. To use this version of the operation, you +// must be the bucket owner. For more information, see Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html) +// . The following operations are related to GetBucketRequestPayment : +// - ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) func (c *Client) GetBucketRequestPayment(ctx context.Context, params *GetBucketRequestPaymentInput, optFns ...func(*Options)) (*GetBucketRequestPaymentOutput, error) { if params == nil { params = &GetBucketRequestPaymentInput{} @@ -42,14 +41,19 @@ type GetBucketRequestPaymentInput struct { // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *GetBucketRequestPaymentInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type GetBucketRequestPaymentOutput struct { // Specifies who pays for the download and request fees. 
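For orientation, a minimal sketch of reading the request-payment configuration this operation returns; the bucket name is a placeholder and the default config chain is assumed.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	out, err := s3.NewFromConfig(cfg).GetBucketRequestPayment(context.TODO(),
		&s3.GetBucketRequestPaymentInput{Bucket: aws.String("example-bucket")}) // placeholder
	if err != nil {
		log.Fatal(err)
	}
	// Payer is either types.PayerBucketOwner or types.PayerRequester.
	if out.Payer == types.PayerRequester {
		fmt.Println("requester pays for downloads and requests")
	} else {
		fmt.Println("bucket owner pays")
	}
}
```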
@@ -62,6 +66,9 @@ type GetBucketRequestPaymentOutput struct { } func (c *Client) addOperationGetBucketRequestPaymentMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketRequestPayment{}, middleware.After) if err != nil { return err @@ -70,6 +77,13 @@ func (c *Client) addOperationGetBucketRequestPaymentMiddlewares(stack *middlewar if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketRequestPayment"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -88,16 +102,13 @@ func (c *Client) addOperationGetBucketRequestPaymentMiddlewares(stack *middlewar if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -106,7 +117,10 @@ func (c *Client) addOperationGetBucketRequestPaymentMiddlewares(stack *middlewar if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpGetBucketRequestPaymentValidationMiddleware(stack); err != nil { @@ -118,6 +132,9 @@ func (c *Client) addOperationGetBucketRequestPaymentMiddlewares(stack *middlewar if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addGetBucketRequestPaymentUpdateEndpoint(stack, options); err != nil { return err } @@ -133,14 +150,26 @@ func (c *Client) addOperationGetBucketRequestPaymentMiddlewares(stack *middlewar if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *GetBucketRequestPaymentInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opGetBucketRequestPayment(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "GetBucketRequestPayment", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go index 816d1b3e..4c2761be 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketTagging.go @@ -4,33 +4,27 @@ package s3 import ( "context" 
+ "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns the tag set associated with the bucket. To use this operation, you must -// have permission to perform the s3:GetBucketTagging action. By default, the -// bucket owner has this permission and can grant this permission to others. -// GetBucketTagging has the following special error: +// This operation is not supported by directory buckets. Returns the tag set +// associated with the bucket. To use this operation, you must have permission to +// perform the s3:GetBucketTagging action. By default, the bucket owner has this +// permission and can grant this permission to others. GetBucketTagging has the +// following special error: +// - Error code: NoSuchTagSet +// - Description: There is no tag set associated with the bucket. // -// * Error code: NoSuchTagSet -// -// * -// Description: There is no tag set associated with the bucket. -// -// The following -// operations are related to GetBucketTagging: -// -// * PutBucketTagging -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html) -// -// * -// DeleteBucketTagging -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html) +// The following operations are related to GetBucketTagging : +// - PutBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html) +// - DeleteBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html) func (c *Client) GetBucketTagging(ctx context.Context, params *GetBucketTaggingInput, optFns ...func(*Options)) (*GetBucketTaggingOutput, error) { if params == nil { params = &GetBucketTaggingInput{} @@ -53,14 +47,19 @@ type GetBucketTaggingInput struct { // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *GetBucketTaggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type GetBucketTaggingOutput struct { // Contains the tag set. 
@@ -75,6 +74,9 @@ type GetBucketTaggingOutput struct { } func (c *Client) addOperationGetBucketTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketTagging{}, middleware.After) if err != nil { return err @@ -83,6 +85,13 @@ func (c *Client) addOperationGetBucketTaggingMiddlewares(stack *middleware.Stack if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketTagging"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -101,16 +110,13 @@ func (c *Client) addOperationGetBucketTaggingMiddlewares(stack *middleware.Stack if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -119,7 +125,10 @@ func (c *Client) addOperationGetBucketTaggingMiddlewares(stack *middleware.Stack if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpGetBucketTaggingValidationMiddleware(stack); err != nil { @@ -131,6 +140,9 @@ func (c *Client) addOperationGetBucketTaggingMiddlewares(stack *middleware.Stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addGetBucketTaggingUpdateEndpoint(stack, options); err != nil { return err } @@ -146,14 +158,26 @@ func (c *Client) addOperationGetBucketTaggingMiddlewares(stack *middleware.Stack if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *GetBucketTaggingInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opGetBucketTagging(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "GetBucketTagging", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go index 3657bd1c..55cad629 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketVersioning.go @@ -4,31 +4,25 @@ package s3 import ( "context" + "fmt" awsmiddleware 
"github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns the versioning state of a bucket. To retrieve the versioning state of a -// bucket, you must be the bucket owner. This implementation also returns the MFA -// Delete status of the versioning state. If the MFA Delete status is enabled, the -// bucket owner must use an authentication device to change the versioning state of -// the bucket. The following operations are related to GetBucketVersioning: -// -// * -// GetObject -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// -// * -// PutObject -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// -// * -// DeleteObject -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// This operation is not supported by directory buckets. Returns the versioning +// state of a bucket. To retrieve the versioning state of a bucket, you must be the +// bucket owner. This implementation also returns the MFA Delete status of the +// versioning state. If the MFA Delete status is enabled , the bucket owner must +// use an authentication device to change the versioning state of the bucket. The +// following operations are related to GetBucketVersioning : +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) func (c *Client) GetBucketVersioning(ctx context.Context, params *GetBucketVersioningInput, optFns ...func(*Options)) (*GetBucketVersioningOutput, error) { if params == nil { params = &GetBucketVersioningInput{} @@ -51,14 +45,19 @@ type GetBucketVersioningInput struct { // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *GetBucketVersioningInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type GetBucketVersioningOutput struct { // Specifies whether MFA delete is enabled in the bucket versioning configuration. 
@@ -76,6 +75,9 @@ type GetBucketVersioningOutput struct { } func (c *Client) addOperationGetBucketVersioningMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketVersioning{}, middleware.After) if err != nil { return err @@ -84,6 +86,13 @@ func (c *Client) addOperationGetBucketVersioningMiddlewares(stack *middleware.St if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketVersioning"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -102,16 +111,13 @@ func (c *Client) addOperationGetBucketVersioningMiddlewares(stack *middleware.St if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -120,7 +126,10 @@ func (c *Client) addOperationGetBucketVersioningMiddlewares(stack *middleware.St if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpGetBucketVersioningValidationMiddleware(stack); err != nil { @@ -132,6 +141,9 @@ func (c *Client) addOperationGetBucketVersioningMiddlewares(stack *middleware.St if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addGetBucketVersioningUpdateEndpoint(stack, options); err != nil { return err } @@ -147,14 +159,26 @@ func (c *Client) addOperationGetBucketVersioningMiddlewares(stack *middleware.St if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *GetBucketVersioningInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opGetBucketVersioning(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "GetBucketVersioning", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go index aa866b30..f0ebf2b0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketWebsite.go @@ -4,30 +4,27 @@ package s3 import ( "context" + "fmt" awsmiddleware 
"github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns the website configuration for a bucket. To host website on Amazon S3, -// you can configure a bucket as website by adding a website configuration. For -// more information about hosting websites, see Hosting Websites on Amazon S3 -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). This GET -// action requires the S3:GetBucketWebsite permission. By default, only the bucket -// owner can read the bucket website configuration. However, bucket owners can -// allow other users to read the website configuration by writing a bucket policy -// granting them the S3:GetBucketWebsite permission. The following operations are -// related to DeleteBucketWebsite: -// -// * DeleteBucketWebsite -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketWebsite.html) -// -// * -// PutBucketWebsite -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html) +// This operation is not supported by directory buckets. Returns the website +// configuration for a bucket. To host website on Amazon S3, you can configure a +// bucket as website by adding a website configuration. For more information about +// hosting websites, see Hosting Websites on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) +// . This GET action requires the S3:GetBucketWebsite permission. By default, only +// the bucket owner can read the bucket website configuration. However, bucket +// owners can allow other users to read the website configuration by writing a +// bucket policy granting them the S3:GetBucketWebsite permission. The following +// operations are related to GetBucketWebsite : +// - DeleteBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketWebsite.html) +// - PutBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html) func (c *Client) GetBucketWebsite(ctx context.Context, params *GetBucketWebsiteInput, optFns ...func(*Options)) (*GetBucketWebsiteOutput, error) { if params == nil { params = &GetBucketWebsiteInput{} @@ -50,20 +47,25 @@ type GetBucketWebsiteInput struct { // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *GetBucketWebsiteInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type GetBucketWebsiteOutput struct { // The object key name of the website error document to use for 4XX class errors. ErrorDocument *types.ErrorDocument - // The name of the index document for the website (for example index.html). + // The name of the index document for the website (for example index.html ). 
IndexDocument *types.IndexDocument // Specifies the redirect behavior of all requests to a website endpoint of an @@ -80,6 +82,9 @@ type GetBucketWebsiteOutput struct { } func (c *Client) addOperationGetBucketWebsiteMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpGetBucketWebsite{}, middleware.After) if err != nil { return err @@ -88,6 +93,13 @@ func (c *Client) addOperationGetBucketWebsiteMiddlewares(stack *middleware.Stack if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetBucketWebsite"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -106,16 +118,13 @@ func (c *Client) addOperationGetBucketWebsiteMiddlewares(stack *middleware.Stack if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -124,7 +133,10 @@ func (c *Client) addOperationGetBucketWebsiteMiddlewares(stack *middleware.Stack if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpGetBucketWebsiteValidationMiddleware(stack); err != nil { @@ -136,6 +148,9 @@ func (c *Client) addOperationGetBucketWebsiteMiddlewares(stack *middleware.Stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addGetBucketWebsiteUpdateEndpoint(stack, options); err != nil { return err } @@ -151,14 +166,26 @@ func (c *Client) addOperationGetBucketWebsiteMiddlewares(stack *middleware.Stack if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *GetBucketWebsiteInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opGetBucketWebsite(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "GetBucketWebsite", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go index 92c38bed..d2dc15c7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go @@ -4,6 +4,7 @@ package 
s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" @@ -15,131 +16,100 @@ import ( "time" ) -// Retrieves objects from Amazon S3. To use GET, you must have READ access to the -// object. If you grant READ access to the anonymous user, you can return the -// object without using an authorization header. An Amazon S3 bucket has no -// directory hierarchy such as you would find in a typical computer file system. -// You can, however, create a logical hierarchy by using object key names that -// imply a folder structure. For example, instead of naming an object sample.jpg, -// you can name it photos/2006/February/sample.jpg. To get an object from such a -// logical hierarchy, specify the full key name for the object in the GET -// operation. For a virtual hosted-style request example, if you have the object -// photos/2006/February/sample.jpg, specify the resource as -// /photos/2006/February/sample.jpg. For a path-style request example, if you have -// the object photos/2006/February/sample.jpg in the bucket named examplebucket, -// specify the resource as /examplebucket/photos/2006/February/sample.jpg. For more -// information about request types, see HTTP Host Header Bucket Specification -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket). -// For more information about returning the ACL of an object, see GetObjectAcl -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html). If the -// object you are retrieving is stored in the S3 Glacier or S3 Glacier Deep Archive -// storage class, or S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep -// Archive tiers, before you can retrieve the object you must first restore a copy -// using RestoreObject -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). -// Otherwise, this action returns an InvalidObjectStateError error. For information -// about restoring archived objects, see Restoring Archived Objects -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html). -// Encryption request headers, like x-amz-server-side-encryption, should not be -// sent for GET requests if your object uses server-side encryption with KMS keys -// (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys -// (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 -// BadRequest error. If you encrypt an object by using server-side encryption with -// customer-provided encryption keys (SSE-C) when you store the object in Amazon -// S3, then when you GET the object, you must use the following headers: +// Retrieves an object from Amazon S3. In the GetObject request, specify the full +// key name for the object. General purpose buckets - Both the virtual-hosted-style +// requests and the path-style requests are supported. For a virtual hosted-style +// request example, if you have the object photos/2006/February/sample.jpg , +// specify the object key name as /photos/2006/February/sample.jpg . For a +// path-style request example, if you have the object +// photos/2006/February/sample.jpg in the bucket named examplebucket , specify the +// object key name as /examplebucket/photos/2006/February/sample.jpg . 
For more +// information about request types, see HTTP Host Header Bucket Specification (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket) +// in the Amazon S3 User Guide. Directory buckets - Only virtual-hosted-style +// requests are supported. For a virtual hosted-style request example, if you have +// the object photos/2006/February/sample.jpg in the bucket named +// examplebucket--use1-az5--x-s3 , specify the object key name as +// /photos/2006/February/sample.jpg . Also, when you make requests to this API +// operation, your requests are sent to the Zonal endpoint. These endpoints support +// virtual-hosted-style requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style +// requests are not supported. For more information, see Regional and Zonal +// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. Permissions +// - General purpose bucket permissions - You must have the required permissions +// in a policy. To use GetObject , you must have the READ access to the object +// (or version). If you grant READ access to the anonymous user, the GetObject +// operation returns the object without using an authorization header. For more +// information, see Specifying permissions in a policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// in the Amazon S3 User Guide. If you include a versionId in your request +// header, you must have the s3:GetObjectVersion permission to access a specific +// version of an object. The s3:GetObject permission is not required in this +// scenario. If you request the current version of an object without a specific +// versionId in the request header, only the s3:GetObject permission is required. +// The s3:GetObjectVersion permission is not required in this scenario. If the +// object that you request doesn’t exist, the error that Amazon S3 returns depends +// on whether you also have the s3:ListBucket permission. +// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an +// HTTP status code 404 Not Found error. +// - If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP +// status code 403 Access Denied error. +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// . 
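A minimal sketch of what the paragraph above implies for callers: a plain GetObject against a directory bucket, where the client rather than user code is expected to obtain and refresh the CreateSession token. The bucket and key names are placeholders in the documented bucket_base_name--az_id--x-s3 format.

```go
package main

import (
	"context"
	"io"
	"log"
	"os"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Placeholder directory-bucket name; the client resolves the Zonal
	// endpoint and manages the CreateSession token behind the scenes.
	out, err := client.GetObject(context.TODO(), &s3.GetObjectInput{
		Bucket: aws.String("example-bucket--usw2-az2--x-s3"),
		Key:    aws.String("photos/2006/February/sample.jpg"),
	})
	if err != nil {
		log.Fatal(err)
	}
	defer out.Body.Close()
	if _, err := io.Copy(os.Stdout, out.Body); err != nil {
		log.Fatal(err)
	}
}
```

Nothing session-specific appears in user code; the express-session and endpoint-resolution plumbing added elsewhere in this patch is what makes that transparent.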
// -// * -// x-amz-server-side-encryption-customer-algorithm +// Storage classes If the object you are retrieving is stored in the S3 Glacier +// Flexible Retrieval storage class, the S3 Glacier Deep Archive storage class, the +// S3 Intelligent-Tiering Archive Access tier, or the S3 Intelligent-Tiering Deep +// Archive Access tier, before you can retrieve the object you must first restore a +// copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) +// . Otherwise, this operation returns an InvalidObjectState error. For +// information about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) +// in the Amazon S3 User Guide. Directory buckets - For directory buckets, only the +// S3 Express One Zone storage class is supported to store newly created objects. +// Unsupported storage class values won't write a destination object and will +// respond with the HTTP status code 400 Bad Request . Encryption Encryption +// request headers, like x-amz-server-side-encryption , should not be sent for the +// GetObject requests, if your object uses server-side encryption with Amazon S3 +// managed encryption keys (SSE-S3), server-side encryption with Key Management +// Service (KMS) keys (SSE-KMS), or dual-layer server-side encryption with Amazon +// Web Services KMS keys (DSSE-KMS). If you include the header in your GetObject +// requests for the object that uses these types of keys, you’ll get an HTTP 400 +// Bad Request error. Overriding response header values through the request There +// are times when you want to override certain response header values of a +// GetObject response. For example, you might override the Content-Disposition +// response header value through your GetObject request. You can override values +// for a set of response headers. These modified response header values are +// included only in a successful response, that is, when the HTTP status code 200 +// OK is returned. The headers you can override using the following query +// parameters in the request are a subset of the headers that Amazon S3 accepts +// when you create an object. The response headers that you can override for the +// GetObject response are Cache-Control , Content-Disposition , Content-Encoding , +// Content-Language , Content-Type , and Expires . To override values for a set of +// response headers in the GetObject response, you can use the following query +// parameters in the request. +// - response-cache-control +// - response-content-disposition +// - response-content-encoding +// - response-content-language +// - response-content-type +// - response-expires // -// * -// x-amz-server-side-encryption-customer-key -// -// * -// x-amz-server-side-encryption-customer-key-MD5 -// -// For more information about SSE-C, -// see Server-Side Encryption (Using Customer-Provided Encryption Keys) -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). -// Assuming you have the relevant permission to read object tags, the response also -// returns the x-amz-tagging-count header that provides the count of number of tags -// associated with the object. You can use GetObjectTagging -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) to -// retrieve the tag set associated with an object. Permissions You need the -// relevant read object (or version) permission for this operation. 
For more -// information, see Specifying Permissions in a Policy -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). If -// the object you request does not exist, the error Amazon S3 returns depends on -// whether you also have the s3:ListBucket permission. -// -// * If you have the -// s3:ListBucket permission on the bucket, Amazon S3 will return an HTTP status -// code 404 ("no such key") error. -// -// * If you don’t have the s3:ListBucket -// permission, Amazon S3 will return an HTTP status code 403 ("access denied") -// error. -// -// Versioning By default, the GET action returns the current version of an -// object. To return a different version, use the versionId subresource. -// -// * If you -// supply a versionId, you need the s3:GetObjectVersion permission to access a -// specific version of an object. If you request a specific version, you do not -// need to have the s3:GetObject permission. -// -// * If the current version of the -// object is a delete marker, Amazon S3 behaves as if the object was deleted and -// includes x-amz-delete-marker: true in the response. -// -// For more information about -// versioning, see PutBucketVersioning -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html). -// Overriding Response Header Values There are times when you want to override -// certain response header values in a GET response. For example, you might -// override the Content-Disposition response header value in your GET request. You -// can override values for a set of response headers using the following query -// parameters. These response header values are sent only on a successful request, -// that is, when status code 200 OK is returned. The set of headers you can -// override using these parameters is a subset of the headers that Amazon S3 -// accepts when you create an object. The response headers that you can override -// for the GET response are Content-Type, Content-Language, Expires, Cache-Control, -// Content-Disposition, and Content-Encoding. To override these header values in -// the GET response, you use the following request parameters. You must sign the -// request, either using an Authorization header or a presigned URL, when using -// these parameters. They cannot be used with an unsigned (anonymous) request. -// -// * -// response-content-type -// -// * response-content-language -// -// * response-expires -// -// * -// response-cache-control -// -// * response-content-disposition -// -// * -// response-content-encoding -// -// Additional Considerations about Request Headers If -// both of the If-Match and If-Unmodified-Since headers are present in the request -// as follows: If-Match condition evaluates to true, and; If-Unmodified-Since -// condition evaluates to false; then, S3 returns 200 OK and the data requested. If -// both of the If-None-Match and If-Modified-Since headers are present in the -// request as follows: If-None-Match condition evaluates to false, and; -// If-Modified-Since condition evaluates to true; then, S3 returns 304 Not Modified -// response code. For more information about conditional requests, see RFC 7232 -// (https://tools.ietf.org/html/rfc7232). 
The following operations are related to -// GetObject: -// -// * ListBuckets -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) -// -// * -// GetObjectAcl -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) +// When you use these parameters, you must sign the request by using either an +// Authorization header or a presigned URL. These parameters cannot be used with an +// unsigned (anonymous) request. HTTP Host header syntax Directory buckets - The +// HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com . +// The following operations are related to GetObject : +// - ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) +// - GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) func (c *Client) GetObject(ctx context.Context, params *GetObjectInput, optFns ...func(*Options)) (*GetObjectOutput, error) { if params == nil { params = &GetObjectInput{} @@ -157,25 +127,35 @@ func (c *Client) GetObject(ctx context.Context, params *GetObjectInput, optFns . type GetObjectInput struct { - // The bucket name containing the object. When using this action with an access - // point, you must direct requests to the access point hostname. The access point + // The bucket name containing the object. Directory buckets - When you use this + // operation with a directory bucket, you must use virtual-hosted-style requests in + // the format Bucket_name.s3express-az_id.region.amazonaws.com . Path-style + // requests are not supported. Directory bucket names must be unique in the chosen + // Availability Zone. Bucket names must follow the format + // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 + // ). For information about bucket naming restrictions, see Directory bucket + // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Object Lambda access points - When you use this + // action with an Object Lambda access point, you must direct requests to the + // Object Lambda access point hostname. The Object Lambda access point hostname + // takes the form AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com. + // Access points and Object Lambda access points are not supported by directory + // buckets. S3 on Outposts - When you use this action with Amazon S3 on Outposts, + // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts // hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. 
For more information about - // access point ARNs, see Using access points - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When using an Object Lambda access point the - // hostname takes the form - // AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com. When using this - // action with Amazon S3 on Outposts, you must direct requests to the S3 on - // Outposts hostname. The S3 on Outposts hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the - // Amazon S3 User Guide. + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + // use this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts access point ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // This member is required. Bucket *string @@ -188,51 +168,69 @@ type GetObjectInput struct { // To retrieve the checksum, this mode must be enabled. ChecksumMode types.ChecksumMode - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Return the object only if its entity tag (ETag) is the same as the one - // specified; otherwise, return a 412 (precondition failed) error. + // specified in this header; otherwise, return a 412 Precondition Failed error. If + // both of the If-Match and If-Unmodified-Since headers are present in the request + // as follows: If-Match condition evaluates to true , and; If-Unmodified-Since + // condition evaluates to false ; then, S3 returns 200 OK and the data requested. + // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) + // . IfMatch *string // Return the object only if it has been modified since the specified time; - // otherwise, return a 304 (not modified) error. + // otherwise, return a 304 Not Modified error. If both of the If-None-Match and + // If-Modified-Since headers are present in the request as follows: If-None-Match + // condition evaluates to false , and; If-Modified-Since condition evaluates to + // true ; then, S3 returns 304 Not Modified status code. For more information + // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . IfModifiedSince *time.Time // Return the object only if its entity tag (ETag) is different from the one - // specified; otherwise, return a 304 (not modified) error. + // specified in this header; otherwise, return a 304 Not Modified error. 
If both + // of the If-None-Match and If-Modified-Since headers are present in the request + // as follows: If-None-Match condition evaluates to false , and; If-Modified-Since + // condition evaluates to true ; then, S3 returns 304 Not Modified HTTP status + // code. For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) + // . IfNoneMatch *string // Return the object only if it has not been modified since the specified time; - // otherwise, return a 412 (precondition failed) error. + // otherwise, return a 412 Precondition Failed error. If both of the If-Match and + // If-Unmodified-Since headers are present in the request as follows: If-Match + // condition evaluates to true , and; If-Unmodified-Since condition evaluates to + // false ; then, S3 returns 200 OK and the data requested. For more information + // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . IfUnmodifiedSince *time.Time // Part number of the object being read. This is a positive integer between 1 and // 10,000. Effectively performs a 'ranged' GET request for the part specified. // Useful for downloading just a part of an object. - PartNumber int32 + PartNumber *int32 - // Downloads the specified range bytes of an object. For more information about the - // HTTP Range header, see - // https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35 - // (https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35). Amazon S3 - // doesn't support retrieving multiple ranges of data per GET request. + // Downloads the specified byte range of an object. For more information about the + // HTTP Range header, see https://www.rfc-editor.org/rfc/rfc9110.html#name-range (https://www.rfc-editor.org/rfc/rfc9110.html#name-range) + // . Amazon S3 doesn't support retrieving multiple ranges of data per GET request. Range *string // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer // Sets the Cache-Control header of the response. ResponseCacheControl *string - // Sets the Content-Disposition header of the response + // Sets the Content-Disposition header of the response. ResponseContentDisposition *string // Sets the Content-Encoding header of the response. @@ -247,79 +245,121 @@ type GetObjectInput struct { // Sets the Expires header of the response. ResponseExpires *time.Time - // Specifies the algorithm to use to when decrypting the object (for example, - // AES256). + // Specifies the algorithm to use when decrypting the object (for example, AES256 + // ). 
If you encrypt an object by using server-side encryption with + // customer-provided encryption keys (SSE-C) when you store the object in Amazon + // S3, then when you GET the object, you must use the following headers: + // - x-amz-server-side-encryption-customer-algorithm + // - x-amz-server-side-encryption-customer-key + // - x-amz-server-side-encryption-customer-key-MD5 + // For more information about SSE-C, see Server-Side Encryption (Using + // Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. SSECustomerAlgorithm *string - // Specifies the customer-provided encryption key for Amazon S3 used to encrypt the - // data. This value is used to decrypt the object when recovering it and must match - // the one used when storing the data. The key must be appropriate for use with the - // algorithm specified in the x-amz-server-side-encryption-customer-algorithm - // header. + // Specifies the customer-provided encryption key that you originally provided for + // Amazon S3 to encrypt the data before storing it. This value is used to decrypt + // the object when recovering it and must match the one used when storing the data. + // The key must be appropriate for use with the algorithm specified in the + // x-amz-server-side-encryption-customer-algorithm header. If you encrypt an object + // by using server-side encryption with customer-provided encryption keys (SSE-C) + // when you store the object in Amazon S3, then when you GET the object, you must + // use the following headers: + // - x-amz-server-side-encryption-customer-algorithm + // - x-amz-server-side-encryption-customer-key + // - x-amz-server-side-encryption-customer-key-MD5 + // For more information about SSE-C, see Server-Side Encryption (Using + // Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. SSECustomerKey *string - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. + // Specifies the 128-bit MD5 digest of the customer-provided encryption key + // according to RFC 1321. Amazon S3 uses this header for a message integrity check + // to ensure that the encryption key was transmitted without error. If you encrypt + // an object by using server-side encryption with customer-provided encryption keys + // (SSE-C) when you store the object in Amazon S3, then when you GET the object, + // you must use the following headers: + // - x-amz-server-side-encryption-customer-algorithm + // - x-amz-server-side-encryption-customer-key + // - x-amz-server-side-encryption-customer-key-MD5 + // For more information about SSE-C, see Server-Side Encryption (Using + // Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. SSECustomerKeyMD5 *string - // VersionId used to reference a specific version of the object. + // Version ID used to reference a specific version of the object. By default, the + // GetObject operation returns the current version of an object. 
To return a + // different version, use the versionId subresource. + // - If you include a versionId in your request header, you must have the + // s3:GetObjectVersion permission to access a specific version of an object. The + // s3:GetObject permission is not required in this scenario. + // - If you request the current version of an object without a specific versionId + // in the request header, only the s3:GetObject permission is required. The + // s3:GetObjectVersion permission is not required in this scenario. + // - Directory buckets - S3 Versioning isn't enabled and supported for directory + // buckets. For this API operation, only the null value of the version ID is + // supported by directory buckets. You can only specify null to the versionId + // query parameter in the request. + // For more information about versioning, see PutBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html) + // . VersionId *string noSmithyDocumentSerde } +func (in *GetObjectInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.Key = in.Key + +} + type GetObjectOutput struct { - // Indicates that a range of bytes was specified. + // Indicates that a range of bytes was specified in the request. AcceptRanges *string // Object data. Body io.ReadCloser // Indicates whether the object uses an S3 Bucket Key for server-side encryption - // with Amazon Web Services KMS (SSE-KMS). - BucketKeyEnabled bool + // with Key Management Service (KMS) keys (SSE-KMS). This functionality is not + // supported for directory buckets. + BucketKeyEnabled *bool // Specifies caching behavior along the request/reply chain. CacheControl *string // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumCRC32 *string // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumCRC32C *string // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. 
For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumSHA1 *string // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumSHA256 *string // Specifies presentational information for the object. ContentDisposition *string - // Specifies what content encodings have been applied to the object and thus what + // Indicates what content encodings have been applied to the object and thus what // decoding mechanisms must be applied to obtain the media-type referenced by the // Content-Type header field. ContentEncoding *string @@ -328,7 +368,7 @@ type GetObjectOutput struct { ContentLanguage *string // Size of the body in bytes. - ContentLength int64 + ContentLength *int64 // The portion of the object returned in the response. ContentRange *string @@ -336,24 +376,34 @@ type GetObjectOutput struct { // A standard MIME type describing the format of the object data. ContentType *string - // Specifies whether the object retrieved was (true) or was not (false) a Delete + // Indicates whether the object retrieved was (true) or was not (false) a Delete // Marker. If false, this response header does not appear in the response. - DeleteMarker bool + // - If the current version of the object is a delete marker, Amazon S3 behaves + // as if the object was deleted and includes x-amz-delete-marker: true in the + // response. + // - If the specified version in the request is a delete marker, the response + // returns a 405 Method Not Allowed error and the Last-Modified: timestamp + // response header. + DeleteMarker *bool // An entity tag (ETag) is an opaque identifier assigned by a web server to a // specific version of a resource found at a URL. ETag *string - // If the object expiration is configured (see PUT Bucket lifecycle), the response - // includes this header. It includes the expiry-date and rule-id key-value pairs - // providing object expiration information. The value of the rule-id is - // URL-encoded. + // If the object expiration is configured (see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) + // ), the response includes this header. It includes the expiry-date and rule-id + // key-value pairs providing object expiration information. The value of the + // rule-id is URL-encoded. This functionality is not supported for directory + // buckets. Expiration *string // The date and time at which the object is no longer cacheable. Expires *time.Time - // Creation date of the object. + // Date and time when the object was last modified. General purpose buckets - When + // you specify a versionId of the object in your request, if the specified version + // in the request is a delete marker, the response returns a 405 Method Not Allowed + // error and the Last-Modified: timestamp response header. 
LastModified *time.Time // A map of metadata to store with the object in S3. @@ -361,69 +411,88 @@ type GetObjectOutput struct { // Map keys will be normalized to lower-case. Metadata map[string]string - // This is set to the number of metadata entries not returned in x-amz-meta - // headers. This can happen if you create metadata using an API like SOAP that - // supports more flexible metadata than the REST API. For example, using SOAP, you - // can create metadata whose values are not legal HTTP headers. - MissingMeta int32 + // This is set to the number of metadata entries not returned in the headers that + // are prefixed with x-amz-meta- . This can happen if you create metadata using an + // API like SOAP that supports more flexible metadata than the REST API. For + // example, using SOAP, you can create metadata whose values are not legal HTTP + // headers. This functionality is not supported for directory buckets. + MissingMeta *int32 // Indicates whether this object has an active legal hold. This field is only - // returned if you have permission to view an object's legal hold status. + // returned if you have permission to view an object's legal hold status. This + // functionality is not supported for directory buckets. ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus - // The Object Lock mode currently in place for this object. + // The Object Lock mode that's currently in place for this object. This + // functionality is not supported for directory buckets. ObjectLockMode types.ObjectLockMode - // The date and time when this object's Object Lock will expire. + // The date and time when this object's Object Lock will expire. This + // functionality is not supported for directory buckets. ObjectLockRetainUntilDate *time.Time // The count of parts this object has. This value is only returned if you specify // partNumber in your request and the object was uploaded as a multipart upload. - PartsCount int32 + PartsCount *int32 // Amazon S3 can return this if your request involves a bucket that is either a - // source or destination in a replication rule. + // source or destination in a replication rule. This functionality is not supported + // for directory buckets. ReplicationStatus types.ReplicationStatus // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Provides information about object restoration action and expiration time of the - // restored object copy. + // restored object copy. This functionality is not supported for directory buckets. + // Only the S3 Express One Zone storage class is supported by directory buckets to + // store objects. Restore *string - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm used. + // If server-side encryption with a customer-provided encryption key was + // requested, the response will include this header to confirm the encryption + // algorithm that's used. This functionality is not supported for directory + // buckets. SSECustomerAlgorithm *string - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round-trip message integrity - // verification of the customer-provided encryption key. 
+ // If server-side encryption with a customer-provided encryption key was + // requested, the response will include this header to provide the round-trip + // message integrity verification of the customer-provided encryption key. This + // functionality is not supported for directory buckets. SSECustomerKeyMD5 *string - // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed key that was used for the - // object. + // If present, indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. This functionality + // is not supported for directory buckets. SSEKMSKeyId *string - // The server-side encryption algorithm used when storing this object in Amazon S3 - // (for example, AES256, aws:kms). + // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). For directory buckets, only + // server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is + // supported. ServerSideEncryption types.ServerSideEncryption // Provides storage class information of the object. Amazon S3 returns this header - // for all objects except for S3 Standard storage class objects. + // for all objects except for S3 Standard storage class objects. Directory buckets + // - Only the S3 Express One Zone storage class is supported by directory buckets + // to store objects. StorageClass types.StorageClass - // The number of tags, if any, on the object. - TagCount int32 + // The number of tags, if any, on the object, when you have the relevant + // permission to read object tags. You can use GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) + // to retrieve the tag set associated with an object. This functionality is not + // supported for directory buckets. + TagCount *int32 - // Version of the object. + // Version ID of the object. This functionality is not supported for directory + // buckets. VersionId *string // If the bucket is configured as a website, redirects requests for this object to // another object in the same bucket or to an external URL. Amazon S3 stores the - // value of this header in the object metadata. + // value of this header in the object metadata. This functionality is not supported + // for directory buckets. WebsiteRedirectLocation *string // Metadata pertaining to the operation's result. 
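Note that this hunk migrates GetObjectOutput's scalar members (ContentLength, DeleteMarker, MissingMeta, PartsCount, TagCount, BucketKeyEnabled) from value types to pointers. A hedged sketch of how calling code reads them after the change, using the SDK's aws.To* dereference helpers; bucket and key are placeholders and the client is assumed to be configured as in the earlier sketch.

// Assumes the imports from the earlier sketch.
func printObjectMetadata(ctx context.Context, client *s3.Client) error {
	out, err := client.GetObject(ctx, &s3.GetObjectInput{
		Bucket: aws.String("examplebucket"),
		Key:    aws.String("photos/2006/February/sample.jpg"),
	})
	if err != nil {
		return err
	}
	defer out.Body.Close()

	// The aws.To* helpers dereference the pointer members,
	// returning the zero value when a header was absent.
	fmt.Println("size bytes:", aws.ToInt64(out.ContentLength))
	fmt.Println("etag:", aws.ToString(out.ETag))
	fmt.Println("tag count:", aws.ToInt32(out.TagCount))
	fmt.Println("delete marker:", aws.ToBool(out.DeleteMarker))
	return nil
}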
@@ -433,6 +502,9 @@ type GetObjectOutput struct { } func (c *Client) addOperationGetObjectMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpGetObject{}, middleware.After) if err != nil { return err @@ -441,6 +513,13 @@ func (c *Client) addOperationGetObjectMiddlewares(stack *middleware.Stack, optio if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetObject"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -459,22 +538,22 @@ func (c *Client) addOperationGetObjectMiddlewares(stack *middleware.Stack, optio if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpGetObjectValidationMiddleware(stack); err != nil { @@ -486,6 +565,9 @@ func (c *Client) addOperationGetObjectMiddlewares(stack *middleware.Stack, optio if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addGetObjectOutputChecksumMiddlewares(stack, options); err != nil { return err } @@ -504,14 +586,26 @@ func (c *Client) addOperationGetObjectMiddlewares(stack *middleware.Stack, optio if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *GetObjectInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opGetObject(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "GetObject", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go index 709e62ff..3b2a1687 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go @@ -4,6 +4,7 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" @@ -12,35 +13,24 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns the access control 
list (ACL) of an object. To use this operation, you -// must have s3:GetObjectAcl permissions or READ_ACP access to the object. For more -// information, see Mapping of ACL permissions and access policy permissions -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#acl-access-policy-permission-mapping) -// in the Amazon S3 User Guide This action is not supported by Amazon S3 on -// Outposts. Versioning By default, GET returns ACL information about the current -// version of an object. To return ACL information about a different version, use -// the versionId subresource. If your bucket uses the bucket owner enforced setting -// for S3 Object Ownership, requests to read ACLs are still supported and return -// the bucket-owner-full-control ACL with the owner being the account that created -// the bucket. For more information, see Controlling object ownership and -// disabling ACLs -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// This operation is not supported by directory buckets. Returns the access +// control list (ACL) of an object. To use this operation, you must have +// s3:GetObjectAcl permissions or READ_ACP access to the object. For more +// information, see Mapping of ACL permissions and access policy permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#acl-access-policy-permission-mapping) +// in the Amazon S3 User Guide. This functionality is not supported for Amazon S3 on +// Outposts. By default, GET returns ACL information about the current version of +// an object. To return ACL information about a different version, use the +// versionId subresource. If your bucket uses the bucket owner enforced setting for +// S3 Object Ownership, requests to read ACLs are still supported and return the +// bucket-owner-full-control ACL with the owner being the account that created the +// bucket. For more information, see Controlling object ownership and disabling +// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) // in the Amazon S3 User Guide. The following operations are related to -// GetObjectAcl: -// -// * GetObject -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// -// * -// GetObjectAttributes -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) -// -// * -// DeleteObject -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) -// -// * -// PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// GetObjectAcl : +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) func (c *Client) GetObjectAcl(ctx context.Context, params *GetObjectAclInput, optFns ...func(*Options)) (*GetObjectAclOutput, error) { if params == nil { params = &GetObjectAclInput{} @@ -59,13 +49,14 @@ func (c *Client) GetObjectAcl(ctx context.Context, params *GetObjectAclInput, op type GetObjectAclInput struct { // The bucket name that contains the object for which to get the ACL information. 
- // When using this action with an access point, you must direct requests to the + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the // access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // // This member is required. @@ -76,25 +67,34 @@ type GetObjectAclInput struct { // This member is required. Key *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer - // VersionId used to reference a specific version of the object. + // Version ID used to reference a specific version of the object. This + // functionality is not supported for directory buckets. VersionId *string noSmithyDocumentSerde } +func (in *GetObjectAclInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.Key = in.Key + +} + type GetObjectAclOutput struct { // A list of grants. @@ -104,7 +104,7 @@ type GetObjectAclOutput struct { Owner *types.Owner // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. 
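A minimal sketch of calling GetObjectAcl and walking the returned grants, under the same assumptions as the earlier sketches (placeholder bucket and key, client from the default config):

// Assumes the imports from the earlier sketch.
func printObjectACL(ctx context.Context, client *s3.Client) error {
	out, err := client.GetObjectAcl(ctx, &s3.GetObjectAclInput{
		Bucket: aws.String("examplebucket"),
		Key:    aws.String("photos/2006/February/sample.jpg"),
	})
	if err != nil {
		return err
	}
	if out.Owner != nil {
		fmt.Println("owner:", aws.ToString(out.Owner.DisplayName))
	}
	for _, g := range out.Grants {
		// Grantee identifies a user, group, or account; Permission is
		// one of FULL_CONTROL, WRITE, WRITE_ACP, READ, READ_ACP.
		fmt.Printf("grant: %+v -> %s\n", g.Grantee, g.Permission)
	}
	return nil
}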
@@ -114,6 +114,9 @@ type GetObjectAclOutput struct { } func (c *Client) addOperationGetObjectAclMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectAcl{}, middleware.After) if err != nil { return err @@ -122,6 +125,13 @@ func (c *Client) addOperationGetObjectAclMiddlewares(stack *middleware.Stack, op if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetObjectAcl"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -140,16 +150,13 @@ func (c *Client) addOperationGetObjectAclMiddlewares(stack *middleware.Stack, op if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -158,7 +165,10 @@ func (c *Client) addOperationGetObjectAclMiddlewares(stack *middleware.Stack, op if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpGetObjectAclValidationMiddleware(stack); err != nil { @@ -170,6 +180,9 @@ func (c *Client) addOperationGetObjectAclMiddlewares(stack *middleware.Stack, op if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addGetObjectAclUpdateEndpoint(stack, options); err != nil { return err } @@ -185,14 +198,26 @@ func (c *Client) addOperationGetObjectAclMiddlewares(stack *middleware.Stack, op if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *GetObjectAclInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opGetObjectAcl(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "GetObjectAcl", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go index fb1683e7..ed53ae7b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go @@ -4,6 +4,7 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" 
"github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" @@ -14,113 +15,95 @@ import ( ) // Retrieves all the metadata from an object without returning the object itself. -// This action is useful if you're interested only in an object's metadata. To use -// GetObjectAttributes, you must have READ access to the object. -// GetObjectAttributes combines the functionality of GetObjectAcl, -// GetObjectLegalHold, GetObjectLockConfiguration, GetObjectRetention, -// GetObjectTagging, HeadObject, and ListParts. All of the data returned with each -// of those individual calls can be returned with a single call to -// GetObjectAttributes. If you encrypt an object by using server-side encryption +// This operation is useful if you're interested only in an object's metadata. +// GetObjectAttributes combines the functionality of HeadObject and ListParts . All +// of the data returned with each of those individual calls can be returned with a +// single call to GetObjectAttributes . Directory buckets - For directory buckets, +// you must make requests for this API operation to the Zonal endpoint. These +// endpoints support virtual-hosted-style requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style +// requests are not supported. For more information, see Regional and Zonal +// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. Permissions +// - General purpose bucket permissions - To use GetObjectAttributes , you must +// have READ access to the object. The permissions that you need to use this +// operation with depend on whether the bucket is versioned. If the bucket is +// versioned, you need both the s3:GetObjectVersion and +// s3:GetObjectVersionAttributes permissions for this operation. If the bucket is +// not versioned, you need the s3:GetObject and s3:GetObjectAttributes +// permissions. For more information, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// in the Amazon S3 User Guide. If the object that you request does not exist, the +// error Amazon S3 returns depends on whether you also have the s3:ListBucket +// permission. +// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an +// HTTP status code 404 Not Found ("no such key") error. +// - If you don't have the s3:ListBucket permission, Amazon S3 returns an HTTP +// status code 403 Forbidden ("access denied") error. +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. 
For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// . +// +// Encryption Encryption request headers, like x-amz-server-side-encryption , +// should not be sent for HEAD requests if your object uses server-side encryption +// with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side +// encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side +// encryption with Amazon S3 managed encryption keys (SSE-S3). The +// x-amz-server-side-encryption header is used when you PUT an object to S3 and +// want to specify the encryption method. If you include this header in a GET +// request for an object that uses these types of keys, you’ll get an HTTP 400 Bad +// Request error. It's because the encryption method can't be changed when you +// retrieve the object. If you encrypt an object by using server-side encryption // with customer-provided encryption keys (SSE-C) when you store the object in // Amazon S3, then when you retrieve the metadata from the object, you must use the -// following headers: -// -// * x-amz-server-side-encryption-customer-algorithm -// -// * -// x-amz-server-side-encryption-customer-key -// -// * -// x-amz-server-side-encryption-customer-key-MD5 -// -// For more information about SSE-C, -// see Server-Side Encryption (Using Customer-Provided Encryption Keys) -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) -// in the Amazon S3 User Guide. -// -// * Encryption request headers, such as -// x-amz-server-side-encryption, should not be sent for GET requests if your object -// uses server-side encryption with Amazon Web Services KMS keys stored in Amazon -// Web Services Key Management Service (SSE-KMS) or server-side encryption with -// Amazon S3 managed encryption keys (SSE-S3). If your object does use these types -// of keys, you'll get an HTTP 400 Bad Request error. -// -// * The last modified property -// in this case is the creation date of the object. -// -// Consider the following when -// using request headers: -// -// * If both of the If-Match and If-Unmodified-Since -// headers are present in the request as follows, then Amazon S3 returns the HTTP -// status code 200 OK and the data requested: -// -// * If-Match condition evaluates to -// true. -// -// * If-Unmodified-Since condition evaluates to false. -// -// * If both of the -// If-None-Match and If-Modified-Since headers are present in the request as -// follows, then Amazon S3 returns the HTTP status code 304 Not Modified: -// -// * -// If-None-Match condition evaluates to false. +// following headers to provide the encryption key for the server to be able to +// retrieve the object's metadata. The headers are: +// - x-amz-server-side-encryption-customer-algorithm +// - x-amz-server-side-encryption-customer-key +// - x-amz-server-side-encryption-customer-key-MD5 // -// * If-Modified-Since condition -// evaluates to true. +// For more information about SSE-C, see Server-Side Encryption (Using +// Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) +// in the Amazon S3 User Guide. Directory bucket permissions - For directory +// buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) ( +// AES256 ) is supported. Versioning Directory buckets - S3 Versioning isn't +// enabled and supported for directory buckets. 
For this API operation, only the +// null value of the version ID is supported by directory buckets. You can only +// specify null to the versionId query parameter in the request. Conditional +// request headers Consider the following when using request headers: +// - If both of the If-Match and If-Unmodified-Since headers are present in the +// request as follows, then Amazon S3 returns the HTTP status code 200 OK and the +// data requested: +// - If-Match condition evaluates to true . +// - If-Unmodified-Since condition evaluates to false . For more information +// about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) +// . +// - If both of the If-None-Match and If-Modified-Since headers are present in +// the request as follows, then Amazon S3 returns the HTTP status code 304 Not +// Modified : +// - If-None-Match condition evaluates to false . +// - If-Modified-Since condition evaluates to true . For more information about +// conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . // -// For more information about conditional requests, see RFC -// 7232 (https://tools.ietf.org/html/rfc7232). Permissions The permissions that you -// need to use this operation depend on whether the bucket is versioned. If the -// bucket is versioned, you need both the s3:GetObjectVersion and -// s3:GetObjectVersionAttributes permissions for this operation. If the bucket is -// not versioned, you need the s3:GetObject and s3:GetObjectAttributes permissions. -// For more information, see Specifying Permissions in a Policy -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) in -// the Amazon S3 User Guide. If the object that you request does not exist, the -// error Amazon S3 returns depends on whether you also have the s3:ListBucket -// permission. -// -// * If you have the s3:ListBucket permission on the bucket, Amazon S3 -// returns an HTTP status code 404 Not Found ("no such key") error. -// -// * If you don't -// have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 -// Forbidden ("access denied") error. -// -// The following actions are related to -// GetObjectAttributes: -// -// * GetObject -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// -// * -// GetObjectAcl -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) -// -// * -// GetObjectLegalHold -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLegalHold.html) -// -// * -// GetObjectLockConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLockConfiguration.html) -// -// * -// GetObjectRetention -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectRetention.html) -// -// * -// GetObjectTagging -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) -// -// * -// HeadObject -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html) -// -// * -// ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . 
The following actions are +// related to GetObjectAttributes : +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// - GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) +// - GetObjectLegalHold (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLegalHold.html) +// - GetObjectLockConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLockConfiguration.html) +// - GetObjectRetention (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectRetention.html) +// - GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) +// - HeadObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html) +// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) func (c *Client) GetObjectAttributes(ctx context.Context, params *GetObjectAttributesInput, optFns ...func(*Options)) (*GetObjectAttributesOutput, error) { if params == nil { params = &GetObjectAttributesInput{} @@ -138,23 +121,31 @@ func (c *Client) GetObjectAttributes(ctx context.Context, params *GetObjectAttri type GetObjectAttributesInput struct { - // The name of the bucket that contains the object. When using this action with an - // access point, you must direct requests to the access point hostname. The access - // point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, - // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts - // hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the - // Amazon S3 User Guide. + // The name of the bucket that contains the object. Directory buckets - When you + // use this operation with a directory bucket, you must use virtual-hosted-style + // requests in the format Bucket_name.s3express-az_id.region.amazonaws.com . + // Path-style requests are not supported. Directory bucket names must be unique in + // the chosen Availability Zone. Bucket names must follow the format + // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 + // ). For information about bucket naming restrictions, see Directory bucket + // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
+ // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + // use this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts access point ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // This member is required. Bucket *string @@ -164,61 +155,76 @@ type GetObjectAttributesInput struct { // This member is required. Key *string - // An XML header that specifies the fields at the root level that you want returned - // in the response. Fields that you do not specify are not returned. + // Specifies the fields at the root level that you want returned in the response. + // Fields that you do not specify are not returned. // // This member is required. ObjectAttributes []types.ObjectAttributes - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Sets the maximum number of parts to return. - MaxParts int32 + MaxParts *int32 - // Specifies the part after which listing should begin. Only parts with higher part - // numbers will be listed. + // Specifies the part after which listing should begin. Only parts with higher + // part numbers will be listed. PartNumberMarker *string // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer - // Specifies the algorithm to use when encrypting the object (for example, AES256). + // Specifies the algorithm to use when encrypting the object (for example, + // AES256). This functionality is not supported for directory buckets. 
SSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use in // encrypting data. This value is used to store the object and then it is // discarded; Amazon S3 does not store the encryption key. The key must be // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. + // x-amz-server-side-encryption-customer-algorithm header. This functionality is + // not supported for directory buckets. SSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. + // encryption key was transmitted without error. This functionality is not + // supported for directory buckets. SSECustomerKeyMD5 *string - // The version ID used to reference a specific version of the object. + // The version ID used to reference a specific version of the object. S3 + // Versioning isn't enabled and supported for directory buckets. For this API + // operation, only the null value of the version ID is supported by directory + // buckets. You can only specify null to the versionId query parameter in the + // request. VersionId *string noSmithyDocumentSerde } +func (in *GetObjectAttributesInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + +} + type GetObjectAttributesOutput struct { // The checksum or digest of the object. Checksum *types.Checksum - // Specifies whether the object retrieved was (true) or was not (false) a delete - // marker. If false, this response header does not appear in the response. - DeleteMarker bool + // Specifies whether the object retrieved was ( true ) or was not ( false ) a + // delete marker. If false , this response header does not appear in the response. + // This functionality is not supported for directory buckets. + DeleteMarker *bool // An ETag is an opaque identifier assigned by a web server to a specific version // of a resource found at a URL. @@ -231,19 +237,21 @@ type GetObjectAttributesOutput struct { ObjectParts *types.GetObjectAttributesParts // The size of the object in bytes. - ObjectSize int64 + ObjectSize *int64 // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Provides the storage class information of the object. Amazon S3 returns this // header for all objects except for S3 Standard storage class objects. For more - // information, see Storage Classes - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html). + // information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) + // . Directory buckets - Only the S3 Express One Zone storage class is supported by + // directory buckets to store objects. StorageClass types.StorageClass - // The version ID of the object. + // The version ID of the object. This functionality is not supported for directory + // buckets. VersionId *string // Metadata pertaining to the operation's result. 
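As a hedged sketch of how the input fields above fit together (again assuming a configured *s3.Client named client; the bucket, key, and the 32-byte rawKey are placeholders), the caller base64-encodes the SSE-C key and supplies the base64-encoded MD5 digest of the raw key as the integrity check, and notes that MaxParts and ObjectSize are now pointer-typed:

import (
	"context"
	"crypto/md5"
	"encoding/base64"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func describeObject(ctx context.Context, client *s3.Client, rawKey []byte) error {
	// SSE-C: S3 expects the base64-encoded key plus the base64-encoded
	// MD5 digest (RFC 1321) of the raw key as a message integrity check.
	sum := md5.Sum(rawKey)
	out, err := client.GetObjectAttributes(ctx, &s3.GetObjectAttributesInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),    // placeholder
		ObjectAttributes: []types.ObjectAttributes{
			types.ObjectAttributesEtag,
			types.ObjectAttributesObjectSize,
			types.ObjectAttributesStorageClass,
		},
		MaxParts:             aws.Int32(100), // *int32 after this change
		SSECustomerAlgorithm: aws.String("AES256"),
		SSECustomerKey:       aws.String(base64.StdEncoding.EncodeToString(rawKey)),
		SSECustomerKeyMD5:    aws.String(base64.StdEncoding.EncodeToString(sum[:])),
	})
	if err != nil {
		return err
	}
	fmt.Printf("etag=%s size=%d\n", aws.ToString(out.ETag), aws.ToInt64(out.ObjectSize))
	return nil
}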
@@ -253,6 +261,9 @@ type GetObjectAttributesOutput struct { } func (c *Client) addOperationGetObjectAttributesMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectAttributes{}, middleware.After) if err != nil { return err @@ -261,6 +272,13 @@ func (c *Client) addOperationGetObjectAttributesMiddlewares(stack *middleware.St if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetObjectAttributes"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -279,16 +297,13 @@ func (c *Client) addOperationGetObjectAttributesMiddlewares(stack *middleware.St if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -297,7 +312,10 @@ func (c *Client) addOperationGetObjectAttributesMiddlewares(stack *middleware.St if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpGetObjectAttributesValidationMiddleware(stack); err != nil { @@ -309,6 +327,9 @@ func (c *Client) addOperationGetObjectAttributesMiddlewares(stack *middleware.St if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addGetObjectAttributesUpdateEndpoint(stack, options); err != nil { return err } @@ -324,14 +345,26 @@ func (c *Client) addOperationGetObjectAttributesMiddlewares(stack *middleware.St if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *GetObjectAttributesInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opGetObjectAttributes(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "GetObjectAttributes", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go index a2446ac3..3f7af220 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go @@ -4,6 +4,7 @@ package s3 import ( "context" + "fmt" awsmiddleware 
"github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" @@ -12,13 +13,11 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Gets an object's current legal hold status. For more information, see Locking -// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). This -// action is not supported by Amazon S3 on Outposts. The following action is -// related to GetObjectLegalHold: -// -// * GetObjectAttributes -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// This operation is not supported by directory buckets. Gets an object's current +// legal hold status. For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) +// . This functionality is not supported for Amazon S3 on Outposts. The following +// action is related to GetObjectLegalHold : +// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) func (c *Client) GetObjectLegalHold(ctx context.Context, params *GetObjectLegalHoldInput, optFns ...func(*Options)) (*GetObjectLegalHoldOutput, error) { if params == nil { params = &GetObjectLegalHoldInput{} @@ -37,13 +36,14 @@ func (c *Client) GetObjectLegalHold(ctx context.Context, params *GetObjectLegalH type GetObjectLegalHoldInput struct { // The bucket name containing the object whose legal hold status you want to - // retrieve. When using this action with an access point, you must direct requests - // to the access point hostname. The access point hostname takes the form + // retrieve. Access points - When you use this action with an access point, you + // must provide the alias of the access point in place of the bucket name or + // specify the access point ARN. When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // // This member is required. @@ -54,17 +54,19 @@ type GetObjectLegalHoldInput struct { // This member is required. Key *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. 
+ // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer // The version ID of the object whose legal hold status you want to retrieve. @@ -73,6 +75,11 @@ type GetObjectLegalHoldInput struct { noSmithyDocumentSerde } +func (in *GetObjectLegalHoldInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + +} + type GetObjectLegalHoldOutput struct { // The current legal hold status for the specified object. @@ -85,6 +92,9 @@ type GetObjectLegalHoldOutput struct { } func (c *Client) addOperationGetObjectLegalHoldMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectLegalHold{}, middleware.After) if err != nil { return err @@ -93,6 +103,13 @@ func (c *Client) addOperationGetObjectLegalHoldMiddlewares(stack *middleware.Sta if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetObjectLegalHold"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -111,16 +128,13 @@ func (c *Client) addOperationGetObjectLegalHoldMiddlewares(stack *middleware.Sta if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -129,7 +143,10 @@ func (c *Client) addOperationGetObjectLegalHoldMiddlewares(stack *middleware.Sta if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpGetObjectLegalHoldValidationMiddleware(stack); err != nil { @@ -141,6 +158,9 @@ func (c *Client) addOperationGetObjectLegalHoldMiddlewares(stack *middleware.Sta if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addGetObjectLegalHoldUpdateEndpoint(stack, options); err != nil { return err } @@ -156,14 +176,26 @@ func (c *Client) addOperationGetObjectLegalHoldMiddlewares(stack *middleware.Sta if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = 
addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *GetObjectLegalHoldInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opGetObjectLegalHold(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "GetObjectLegalHold", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go index 91793c13..1ba436d5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLockConfiguration.go @@ -4,6 +4,7 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" @@ -12,15 +13,12 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Gets the Object Lock configuration for a bucket. The rule specified in the -// Object Lock configuration will be applied by default to every new object placed -// in the specified bucket. For more information, see Locking Objects -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). The -// following action is related to GetObjectLockConfiguration: -// -// * -// GetObjectAttributes -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// This operation is not supported by directory buckets. Gets the Object Lock +// configuration for a bucket. The rule specified in the Object Lock configuration +// will be applied by default to every new object placed in the specified bucket. +// For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) +// . The following action is related to GetObjectLockConfiguration : +// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) func (c *Client) GetObjectLockConfiguration(ctx context.Context, params *GetObjectLockConfigurationInput, optFns ...func(*Options)) (*GetObjectLockConfigurationOutput, error) { if params == nil { params = &GetObjectLockConfigurationInput{} @@ -38,27 +36,33 @@ func (c *Client) GetObjectLockConfiguration(ctx context.Context, params *GetObje type GetObjectLockConfigurationInput struct { - // The bucket whose Object Lock configuration you want to retrieve. When using this - // action with an access point, you must direct requests to the access point + // The bucket whose Object Lock configuration you want to retrieve. Access points + // - When you use this action with an access point, you must provide the alias of + // the access point in place of the bucket name or specify the access point ARN. + // When using the access point ARN, you must direct requests to the access point // hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. 
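Since this operation returns the bucket's default Object Lock rule described above, a rough caller sketch looks like the following (same assumptions as the earlier sketches; placeholder bucket name):

// Imports assumed as in the earlier sketches: context, fmt, aws, s3.
func lockConfig(ctx context.Context, client *s3.Client) error {
	out, err := client.GetObjectLockConfiguration(ctx, &s3.GetObjectLockConfigurationInput{
		Bucket: aws.String("example-bucket"), // placeholder
	})
	if err != nil {
		return err
	}
	cfg := out.ObjectLockConfiguration
	if cfg != nil && cfg.Rule != nil && cfg.Rule.DefaultRetention != nil {
		// The default retention rule applies to every new object in the bucket.
		fmt.Println("mode:", cfg.Rule.DefaultRetention.Mode,
			"days:", aws.ToInt32(cfg.Rule.DefaultRetention.Days))
	}
	return nil
}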
For more information about - // access point ARNs, see Using access points - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *GetObjectLockConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + +} + type GetObjectLockConfigurationOutput struct { // The specified bucket's Object Lock configuration. @@ -71,6 +75,9 @@ type GetObjectLockConfigurationOutput struct { } func (c *Client) addOperationGetObjectLockConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectLockConfiguration{}, middleware.After) if err != nil { return err @@ -79,6 +86,13 @@ func (c *Client) addOperationGetObjectLockConfigurationMiddlewares(stack *middle if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetObjectLockConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -97,16 +111,13 @@ func (c *Client) addOperationGetObjectLockConfigurationMiddlewares(stack *middle if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -115,7 +126,10 @@ func (c *Client) addOperationGetObjectLockConfigurationMiddlewares(stack *middle if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpGetObjectLockConfigurationValidationMiddleware(stack); err != nil { @@ -127,6 +141,9 @@ func (c *Client) addOperationGetObjectLockConfigurationMiddlewares(stack *middle if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addGetObjectLockConfigurationUpdateEndpoint(stack, options); err != nil { return err } @@ -142,21 +159,33 @@ func (c *Client) addOperationGetObjectLockConfigurationMiddlewares(stack *middle if err = 
addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *GetObjectLockConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opGetObjectLockConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "GetObjectLockConfiguration", } } -// getGetObjectLockConfigurationBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, +// getGetObjectLockConfigurationBucketMember returns a pointer to string denoting +// a provided bucket member value and a boolean indicating if the input has a +// modeled bucket name. func getGetObjectLockConfigurationBucketMember(input interface{}) (*string, bool) { in := input.(*GetObjectLockConfigurationInput) if in.Bucket == nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go index 33fc0489..1dea7d8a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go @@ -4,6 +4,7 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" @@ -12,13 +13,11 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Retrieves an object's retention settings. For more information, see Locking -// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). This -// action is not supported by Amazon S3 on Outposts. The following action is -// related to GetObjectRetention: -// -// * GetObjectAttributes -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// This operation is not supported by directory buckets. Retrieves an object's +// retention settings. For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) +// . This functionality is not supported for Amazon S3 on Outposts. The following +// action is related to GetObjectRetention : +// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) func (c *Client) GetObjectRetention(ctx context.Context, params *GetObjectRetentionInput, optFns ...func(*Options)) (*GetObjectRetentionOutput, error) { if params == nil { params = &GetObjectRetentionInput{} @@ -37,13 +36,14 @@ func (c *Client) GetObjectRetention(ctx context.Context, params *GetObjectRetent type GetObjectRetentionInput struct { // The bucket name containing the object whose retention settings you want to - // retrieve. When using this action with an access point, you must direct requests - // to the access point hostname. The access point hostname takes the form + // retrieve. Access points - When you use this action with an access point, you + // must provide the alias of the access point in place of the bucket name or + // specify the access point ARN.
When using the access point ARN, you must direct + // requests to the access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // // This member is required. @@ -54,17 +54,19 @@ type GetObjectRetentionInput struct { // This member is required. Key *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer // The version ID for the object whose retention settings you want to retrieve. @@ -73,6 +75,11 @@ type GetObjectRetentionInput struct { noSmithyDocumentSerde } +func (in *GetObjectRetentionInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + +} + type GetObjectRetentionOutput struct { // The container element for an object's retention settings. 
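A short caller sketch for reading those retention settings back (same assumptions as the earlier sketches; placeholder bucket/key names):

// Imports assumed as in the earlier sketches: context, fmt, aws, s3.
func retention(ctx context.Context, client *s3.Client) error {
	out, err := client.GetObjectRetention(ctx, &s3.GetObjectRetentionInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),    // placeholder
	})
	if err != nil {
		return err
	}
	if out.Retention != nil && out.Retention.RetainUntilDate != nil {
		// Mode is GOVERNANCE or COMPLIANCE.
		fmt.Println(out.Retention.Mode, "until", out.Retention.RetainUntilDate)
	}
	return nil
}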
@@ -85,6 +92,9 @@ type GetObjectRetentionOutput struct { } func (c *Client) addOperationGetObjectRetentionMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectRetention{}, middleware.After) if err != nil { return err @@ -93,6 +103,13 @@ func (c *Client) addOperationGetObjectRetentionMiddlewares(stack *middleware.Sta if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetObjectRetention"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -111,16 +128,13 @@ func (c *Client) addOperationGetObjectRetentionMiddlewares(stack *middleware.Sta if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -129,7 +143,10 @@ func (c *Client) addOperationGetObjectRetentionMiddlewares(stack *middleware.Sta if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpGetObjectRetentionValidationMiddleware(stack); err != nil { @@ -141,6 +158,9 @@ func (c *Client) addOperationGetObjectRetentionMiddlewares(stack *middleware.Sta if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addGetObjectRetentionUpdateEndpoint(stack, options); err != nil { return err } @@ -156,14 +176,26 @@ func (c *Client) addOperationGetObjectRetentionMiddlewares(stack *middleware.Sta if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *GetObjectRetentionInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opGetObjectRetention(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "GetObjectRetention", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go index cec5210c..c020e9bd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go @@ -4,6 +4,7 @@ package s3 import ( "context" + "fmt" awsmiddleware 
"github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" @@ -12,28 +13,20 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns the tag-set of an object. You send the GET request against the tagging -// subresource associated with the object. To use this operation, you must have -// permission to perform the s3:GetObjectTagging action. By default, the GET action -// returns information about current version of an object. For a versioned bucket, -// you can have multiple versions of an object in your bucket. To retrieve tags of -// any other version, use the versionId query parameter. You also need permission -// for the s3:GetObjectVersionTagging action. By default, the bucket owner has this +// This operation is not supported by directory buckets. Returns the tag-set of an +// object. You send the GET request against the tagging subresource associated with +// the object. To use this operation, you must have permission to perform the +// s3:GetObjectTagging action. By default, the GET action returns information about +// current version of an object. For a versioned bucket, you can have multiple +// versions of an object in your bucket. To retrieve tags of any other version, use +// the versionId query parameter. You also need permission for the +// s3:GetObjectVersionTagging action. By default, the bucket owner has this // permission and can grant this permission to others. For information about the -// Amazon S3 object tagging feature, see Object Tagging -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). The -// following actions are related to GetObjectTagging: -// -// * DeleteObjectTagging -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html) -// -// * -// GetObjectAttributes -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) -// -// * -// PutObjectTagging -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html) +// Amazon S3 object tagging feature, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html) +// . The following actions are related to GetObjectTagging : +// - DeleteObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html) +// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// - PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html) func (c *Client) GetObjectTagging(ctx context.Context, params *GetObjectTaggingInput, optFns ...func(*Options)) (*GetObjectTaggingOutput, error) { if params == nil { params = &GetObjectTaggingInput{} @@ -52,22 +45,22 @@ func (c *Client) GetObjectTagging(ctx context.Context, params *GetObjectTaggingI type GetObjectTaggingInput struct { // The bucket name containing the object for which to get the tagging information. - // When using this action with an access point, you must direct requests to the + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the // access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, - // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts - // hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the - // Amazon S3 User Guide. + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. S3 on Outposts - When you use this action with + // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. + // The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + // use this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts access point ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // This member is required. Bucket *string @@ -77,17 +70,19 @@ type GetObjectTaggingInput struct { // This member is required. Key *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer // The versionId of the object for which to get the tagging information. 
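Because tags are read from the current version unless versionId is given, a sketch of a caller might look like this (same assumptions as the earlier sketches; placeholder names, versionID optional):

// Imports assumed as in the earlier sketches: context, fmt, aws, s3.
func objectTags(ctx context.Context, client *s3.Client, versionID *string) error {
	out, err := client.GetObjectTagging(ctx, &s3.GetObjectTaggingInput{
		Bucket:    aws.String("example-bucket"), // placeholder
		Key:       aws.String("example-key"),    // placeholder
		VersionId: versionID,                    // nil means the current version
	})
	if err != nil {
		return err
	}
	for _, tag := range out.TagSet {
		fmt.Printf("%s=%s\n", aws.ToString(tag.Key), aws.ToString(tag.Value))
	}
	return nil
}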
@@ -96,6 +91,11 @@ type GetObjectTaggingInput struct { noSmithyDocumentSerde } +func (in *GetObjectTaggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + +} + type GetObjectTaggingOutput struct { // Contains the tag set. @@ -113,6 +113,9 @@ type GetObjectTaggingOutput struct { } func (c *Client) addOperationGetObjectTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectTagging{}, middleware.After) if err != nil { return err @@ -121,6 +124,13 @@ func (c *Client) addOperationGetObjectTaggingMiddlewares(stack *middleware.Stack if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetObjectTagging"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -139,16 +149,13 @@ func (c *Client) addOperationGetObjectTaggingMiddlewares(stack *middleware.Stack if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -157,7 +164,10 @@ func (c *Client) addOperationGetObjectTaggingMiddlewares(stack *middleware.Stack if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpGetObjectTaggingValidationMiddleware(stack); err != nil { @@ -169,6 +179,9 @@ func (c *Client) addOperationGetObjectTaggingMiddlewares(stack *middleware.Stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addGetObjectTaggingUpdateEndpoint(stack, options); err != nil { return err } @@ -184,14 +197,26 @@ func (c *Client) addOperationGetObjectTaggingMiddlewares(stack *middleware.Stack if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *GetObjectTaggingInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opGetObjectTagging(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "GetObjectTagging", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go index fa71442c..6689ef97 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go @@ -4,6 +4,7 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" @@ -13,18 +14,14 @@ import ( "io" ) -// Returns torrent files from a bucket. BitTorrent can save you bandwidth when -// you're distributing large files. For more information about BitTorrent, see -// Using BitTorrent with Amazon S3 -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3Torrent.html). You can get -// torrent only for objects that are less than 5 GB in size, and that are not -// encrypted using server-side encryption with a customer-provided encryption key. -// To use GET, you must have READ access to the object. This action is not -// supported by Amazon S3 on Outposts. The following action is related to -// GetObjectTorrent: -// -// * GetObject -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// This operation is not supported by directory buckets. Returns torrent files +// from a bucket. BitTorrent can save you bandwidth when you're distributing large +// files. You can get torrent only for objects that are less than 5 GB in size, and +// that are not encrypted using server-side encryption with a customer-provided +// encryption key. To use GET, you must have READ access to the object. This +// functionality is not supported for Amazon S3 on Outposts. The following action +// is related to GetObjectTorrent : +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) func (c *Client) GetObjectTorrent(ctx context.Context, params *GetObjectTorrentInput, optFns ...func(*Options)) (*GetObjectTorrentOutput, error) { if params == nil { params = &GetObjectTorrentInput{} @@ -52,29 +49,36 @@ type GetObjectTorrentInput struct { // This member is required. Key *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. 
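Since the response body is a Bencoded dictionary delivered as an io.ReadCloser, a caller sketch just streams and closes it (same assumptions as the earlier sketches; placeholder names):

// Imports assumed as in the earlier sketches, plus io.
func saveTorrent(ctx context.Context, client *s3.Client, w io.Writer) error {
	out, err := client.GetObjectTorrent(ctx, &s3.GetObjectTorrentInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),    // placeholder
	})
	if err != nil {
		return err
	}
	defer out.Body.Close() // Body is the Bencoded dictionary; always close it
	_, err = io.Copy(w, out.Body)
	return err
}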
RequestPayer types.RequestPayer noSmithyDocumentSerde } +func (in *GetObjectTorrentInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + +} + type GetObjectTorrentOutput struct { // A Bencoded dictionary as defined by the BitTorrent specification Body io.ReadCloser // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. @@ -84,6 +88,9 @@ type GetObjectTorrentOutput struct { } func (c *Client) addOperationGetObjectTorrentMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectTorrent{}, middleware.After) if err != nil { return err @@ -92,6 +99,13 @@ func (c *Client) addOperationGetObjectTorrentMiddlewares(stack *middleware.Stack if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetObjectTorrent"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -110,22 +124,22 @@ func (c *Client) addOperationGetObjectTorrentMiddlewares(stack *middleware.Stack if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpGetObjectTorrentValidationMiddleware(stack); err != nil { @@ -137,6 +151,9 @@ func (c *Client) addOperationGetObjectTorrentMiddlewares(stack *middleware.Stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addGetObjectTorrentUpdateEndpoint(stack, options); err != nil { return err } @@ -152,14 +169,26 @@ func (c *Client) addOperationGetObjectTorrentMiddlewares(stack *middleware.Stack if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *GetObjectTorrentInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opGetObjectTorrent(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "GetObjectTorrent", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go index eb42c7d2..0ae12e39 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetPublicAccessBlock.go @@ -4,44 +4,32 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. To use -// this operation, you must have the s3:GetBucketPublicAccessBlock permission. For -// more information about Amazon S3 permissions, see Specifying Permissions in a -// Policy -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). -// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an -// object, it checks the PublicAccessBlock configuration for both the bucket (or -// the bucket that contains the object) and the bucket owner's account. If the +// This operation is not supported by directory buckets. Retrieves the +// PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, +// you must have the s3:GetBucketPublicAccessBlock permission. For more +// information about Amazon S3 permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// . When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or +// an object, it checks the PublicAccessBlock configuration for both the bucket +// (or the bucket that contains the object) and the bucket owner's account. If the // PublicAccessBlock settings are different between the bucket and the account, // Amazon S3 uses the most restrictive combination of the bucket-level and // account-level settings. For more information about when Amazon S3 considers a -// bucket or an object public, see The Meaning of "Public" -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). -// The following operations are related to GetPublicAccessBlock: -// -// * Using Amazon S3 -// Block Public Access -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) -// -// * -// PutPublicAccessBlock -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) -// -// * -// GetPublicAccessBlock -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) -// -// * -// DeletePublicAccessBlock -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +// bucket or an object public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) +// . 
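A sketch of reading the effective PublicAccessBlock settings back, under the same assumptions as the earlier sketches (configured client, placeholder bucket name; the flag fields are pointer-typed after this change):

// Imports assumed as in the earlier sketches: context, fmt, aws, s3.
func publicAccessBlock(ctx context.Context, client *s3.Client) error {
	out, err := client.GetPublicAccessBlock(ctx, &s3.GetPublicAccessBlockInput{
		Bucket: aws.String("example-bucket"), // placeholder
	})
	if err != nil {
		return err
	}
	cfg := out.PublicAccessBlockConfiguration
	if cfg != nil {
		fmt.Println("BlockPublicAcls:", aws.ToBool(cfg.BlockPublicAcls),
			"RestrictPublicBuckets:", aws.ToBool(cfg.RestrictPublicBuckets))
	}
	return nil
}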
The following operations are related to GetPublicAccessBlock : +// - Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// - PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) +// - GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// - DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) func (c *Client) GetPublicAccessBlock(ctx context.Context, params *GetPublicAccessBlockInput, optFns ...func(*Options)) (*GetPublicAccessBlockOutput, error) { if params == nil { params = &GetPublicAccessBlockInput{} @@ -65,14 +53,19 @@ type GetPublicAccessBlockInput struct { // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *GetPublicAccessBlockInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type GetPublicAccessBlockOutput struct { // The PublicAccessBlock configuration currently in effect for this Amazon S3 @@ -86,6 +79,9 @@ type GetPublicAccessBlockOutput struct { } func (c *Client) addOperationGetPublicAccessBlockMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpGetPublicAccessBlock{}, middleware.After) if err != nil { return err @@ -94,6 +90,13 @@ func (c *Client) addOperationGetPublicAccessBlockMiddlewares(stack *middleware.S if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetPublicAccessBlock"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -112,16 +115,13 @@ func (c *Client) addOperationGetPublicAccessBlockMiddlewares(stack *middleware.S if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -130,7 +130,10 @@ func (c *Client) addOperationGetPublicAccessBlockMiddlewares(stack *middleware.S if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpGetPublicAccessBlockValidationMiddleware(stack); err 
!= nil { @@ -142,6 +145,9 @@ func (c *Client) addOperationGetPublicAccessBlockMiddlewares(stack *middleware.S if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addGetPublicAccessBlockUpdateEndpoint(stack, options); err != nil { return err } @@ -157,14 +163,26 @@ func (c *Client) addOperationGetPublicAccessBlockMiddlewares(stack *middleware.S if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *GetPublicAccessBlockInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opGetPublicAccessBlock(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "GetPublicAccessBlock", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go index c3d9a16f..8a18ff85 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadBucket.go @@ -17,27 +17,44 @@ import ( "time" ) -// This action is useful to determine if a bucket exists and you have permission to -// access it. The action returns a 200 OK if the bucket exists and you have -// permission to access it. If the bucket does not exist or you do not have -// permission to access it, the HEAD request returns a generic 404 Not Found or 403 -// Forbidden code. A message body is not included, so you cannot determine the -// exception beyond these error codes. To use this operation, you must have -// permissions to perform the s3:ListBucket action. The bucket owner has this -// permission by default and can grant this permission to others. For more -// information about permissions, see Permissions Related to Bucket Subresource -// Operations -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). -// To use this API against an access point, you must provide the alias of the -// access point in place of the bucket name or specify the access point ARN. When -// using the access point ARN, you must direct requests to the access point -// hostname. The access point hostname takes the form -// AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using the -// Amazon Web Services SDKs, you provide the ARN in place of the bucket name. For -// more information see, Using access points -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html). +// You can use this operation to determine if a bucket exists and if you have +// permission to access it. The action returns a 200 OK if the bucket exists and +// you have permission to access it. If the bucket does not exist or you do not +// have permission to access it, the HEAD request returns a generic 400 Bad Request +// , 403 Forbidden or 404 Not Found code. 
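// Editorial aside, not upstream code: because the HEAD response carries no
// body, callers distinguish "missing" from "forbidden" only by error type or
// status code. A sketch, assuming the usual imports plus
// awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"; unwrapping via
// *awshttp.ResponseError is an assumption to verify:
//
//	_, err := client.HeadBucket(ctx, &s3.HeadBucketInput{Bucket: aws.String("my-bucket")})
//	var nf *types.NotFound
//	switch {
//	case err == nil:
//		// bucket exists and is accessible
//	case errors.As(err, &nf):
//		// 404: bucket does not exist (the modeled error the updated waiters match)
//	default:
//		var re *awshttp.ResponseError
//		if errors.As(err, &re) && re.HTTPStatusCode() == 403 {
//			// bucket exists, but access is denied
//		}
//	}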
A message body is not included, so you +// cannot determine the exception beyond these error codes. Directory buckets - You +// must make requests for this API operation to the Zonal endpoint. These endpoints +// support virtual-hosted-style requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests +// are not supported. For more information, see Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. Authentication and authorization All HeadBucket +// requests must be authenticated and signed by using IAM credentials (access key +// ID and secret access key for the IAM identities). All headers with the x-amz- +// prefix, including x-amz-copy-source , must be signed. For more information, see +// REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) +// . Directory bucket - You must use IAM credentials to authenticate and authorize +// your access to the HeadBucket API operation, instead of using the temporary +// security credentials through the CreateSession API operation. Amazon Web +// Services CLI or SDKs handles authentication and authorization on your behalf. +// Permissions +// - General purpose bucket permissions - To use this operation, you must have +// permissions to perform the s3:ListBucket action. The bucket owner has this +// permission by default and can grant this permission to others. For more +// information about permissions, see Managing access permissions to your Amazon +// S3 resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. +// - Directory bucket permissions - You must have the s3express:CreateSession +// permission in the Action element of a policy. By default, the session is in +// the ReadWrite mode. If you want to restrict the access, you can explicitly set +// the s3express:SessionMode condition key to ReadOnly on the bucket. For more +// information about example bucket policies, see Example bucket policies for S3 +// Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) +// and Amazon Web Services Identity and Access Management (IAM) identity-based +// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) +// in the Amazon S3 User Guide. +// +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . func (c *Client) HeadBucket(ctx context.Context, params *HeadBucketInput, optFns ...func(*Options)) (*HeadBucketOutput, error) { if params == nil { params = &HeadBucketInput{} @@ -55,35 +72,73 @@ func (c *Client) HeadBucket(ctx context.Context, params *HeadBucketInput, optFns type HeadBucketInput struct { - // The bucket name. When using this action with an access point, you must direct - // requests to the access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. 
When using this action with Amazon S3 on Outposts, + // The bucket name. Directory buckets - When you use this operation with a + // directory bucket, you must use virtual-hosted-style requests in the format + // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + // supported. Directory bucket names must be unique in the chosen Availability + // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket + // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Object Lambda access points - When you use this API + // operation with an Object Lambda access point, provide the alias of the Object + // Lambda access point in place of the bucket name. If the Object Lambda access + // point alias in a request is not valid, the error code + // InvalidAccessPointAliasError is returned. For more information about + // InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList) + // . Access points and Object Lambda access points are not supported by directory + // buckets. S3 on Outposts - When you use this action with Amazon S3 on Outposts, // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts // hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the - // Amazon S3 User Guide. + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + // use this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts access point ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // This member is required. Bucket *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). 
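// Editorial aside, not upstream code: a sketch of pinning the expected owner
// on a request; the account ID is a placeholder, and a mismatch fails with
// 403 Forbidden (access denied) as described above:
//
//	_, err := client.HeadBucket(ctx, &s3.HeadBucketInput{
//		Bucket:              aws.String("my-bucket"),
//		ExpectedBucketOwner: aws.String("111122223333"),
//	})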
ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *HeadBucketInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + +} + type HeadBucketOutput struct { + + // Indicates whether the bucket name used in the request is an access point alias. + // This functionality is not supported for directory buckets. + AccessPointAlias *bool + + // The name of the location where the bucket will be created. For directory + // buckets, the AZ ID of the Availability Zone where the bucket is created. An + // example AZ ID value is usw2-az2 . This functionality is only supported by + // directory buckets. + BucketLocationName *string + + // The type of location where the bucket is created. This functionality is only + // supported by directory buckets. + BucketLocationType types.LocationType + + // The Region that the bucket is located. This functionality is not supported for + // directory buckets. + BucketRegion *string + // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata @@ -91,6 +146,9 @@ type HeadBucketOutput struct { } func (c *Client) addOperationHeadBucketMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpHeadBucket{}, middleware.After) if err != nil { return err @@ -99,6 +157,13 @@ func (c *Client) addOperationHeadBucketMiddlewares(stack *middleware.Stack, opti if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "HeadBucket"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -117,16 +182,13 @@ func (c *Client) addOperationHeadBucketMiddlewares(stack *middleware.Stack, opti if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -135,7 +197,10 @@ func (c *Client) addOperationHeadBucketMiddlewares(stack *middleware.Stack, opti if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpHeadBucketValidationMiddleware(stack); err != nil { @@ -147,6 +212,9 @@ func (c *Client) addOperationHeadBucketMiddlewares(stack *middleware.Stack, opti if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addHeadBucketUpdateEndpoint(stack, options); err != nil { return err } @@ -162,9 +230,22 @@ func (c *Client) addOperationHeadBucketMiddlewares(stack *middleware.Stack, opti if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); 
err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *HeadBucketInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + // HeadBucketAPIClient is a client that implements the HeadBucket operation. type HeadBucketAPIClient interface { HeadBucket(context.Context, *HeadBucketInput, ...func(*Options)) (*HeadBucketOutput, error) @@ -185,9 +266,9 @@ type BucketExistsWaiterOptions struct { // MinDelay must resolve to a value lesser than or equal to the MaxDelay. MinDelay time.Duration - // MaxDelay is the maximum amount of time to delay between retries. If unset or set - // to zero, BucketExistsWaiter will use default max delay of 120 seconds. Note that - // MaxDelay must resolve to value greater than or equal to the MinDelay. + // MaxDelay is the maximum amount of time to delay between retries. If unset or + // set to zero, BucketExistsWaiter will use default max delay of 120 seconds. Note + // that MaxDelay must resolve to value greater than or equal to the MinDelay. MaxDelay time.Duration // LogWaitAttempts is used to enable logging for waiter retry attempts @@ -338,9 +419,9 @@ type BucketNotExistsWaiterOptions struct { // MinDelay must resolve to a value lesser than or equal to the MaxDelay. MinDelay time.Duration - // MaxDelay is the maximum amount of time to delay between retries. If unset or set - // to zero, BucketNotExistsWaiter will use default max delay of 120 seconds. Note - // that MaxDelay must resolve to value greater than or equal to the MinDelay. + // MaxDelay is the maximum amount of time to delay between retries. If unset or + // set to zero, BucketNotExistsWaiter will use default max delay of 120 seconds. + // Note that MaxDelay must resolve to value greater than or equal to the MinDelay. MaxDelay time.Duration // LogWaitAttempts is used to enable logging for waiter retry attempts @@ -380,9 +461,9 @@ func NewBucketNotExistsWaiter(client HeadBucketAPIClient, optFns ...func(*Bucket } } -// Wait calls the waiter function for BucketNotExists waiter. The maxWaitDur is the -// maximum wait duration the waiter will wait. The maxWaitDur is required and must -// be greater than zero. +// Wait calls the waiter function for BucketNotExists waiter. The maxWaitDur is +// the maximum wait duration the waiter will wait. The maxWaitDur is required and +// must be greater than zero. func (w *BucketNotExistsWaiter) Wait(ctx context.Context, params *HeadBucketInput, maxWaitDur time.Duration, optFns ...func(*BucketNotExistsWaiterOptions)) error { _, err := w.WaitForOutput(ctx, params, maxWaitDur, optFns...) 
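// Editorial aside, not upstream code: a sketch of driving these waiters,
// assuming an initialized *s3.Client and the usual imports; the bucket name
// is a placeholder, and maxWaitDur must be greater than zero, per the
// contract documented above.
//
//	waiter := s3.NewBucketNotExistsWaiter(client, func(o *s3.BucketNotExistsWaiterOptions) {
//		o.MinDelay = 5 * time.Second  // must resolve to <= MaxDelay
//		o.MaxDelay = 60 * time.Second // defaults to 120 seconds when left zero
//	})
//	if err := waiter.Wait(ctx, &s3.HeadBucketInput{Bucket: aws.String("my-bucket")}, 5*time.Minute); err != nil {
//		// the bucket still existed when maxWaitDur elapsed
//	}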
return err @@ -479,13 +560,13 @@ func newServiceMetadataMiddleware_opHeadBucket(region string) *awsmiddleware.Reg return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "HeadBucket", } } -// getHeadBucketBucketMember returns a pointer to string denoting a provided bucket -// member value and a boolean indicating if the input has a modeled bucket name, +// getHeadBucketBucketMember returns a pointer to string denoting a provided +// bucket member value and a boolean indicating if the input has a modeled bucket +// name, func getHeadBucketBucketMember(input interface{}) (*string, bool) { in := input.(*HeadBucketInput) if in.Bucket == nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go index 1e745a7e..5f7b55e5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go @@ -10,7 +10,6 @@ import ( "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" - smithy "github.com/aws/smithy-go" "github.com/aws/smithy-go/middleware" smithytime "github.com/aws/smithy-go/time" smithyhttp "github.com/aws/smithy-go/transport/http" @@ -18,93 +17,89 @@ import ( "time" ) -// The HEAD action retrieves metadata from an object without returning the object -// itself. This action is useful if you're only interested in an object's metadata. -// To use HEAD, you must have READ access to the object. A HEAD request has the -// same options as a GET action on an object. The response is identical to the GET -// response except that there is no response body. Because of this, if the HEAD -// request generates an error, it returns a generic 404 Not Found or 403 Forbidden -// code. It is not possible to retrieve the exact exception beyond these error -// codes. If you encrypt an object by using server-side encryption with -// customer-provided encryption keys (SSE-C) when you store the object in Amazon -// S3, then when you retrieve the metadata from the object, you must use the -// following headers: +// The HEAD operation retrieves metadata from an object without returning the +// object itself. This operation is useful if you're interested only in an object's +// metadata. A HEAD request has the same options as a GET operation on an object. +// The response is identical to the GET response except that there is no response +// body. Because of this, if the HEAD request generates an error, it returns a +// generic code, such as 400 Bad Request , 403 Forbidden , 404 Not Found , 405 +// Method Not Allowed , 412 Precondition Failed , or 304 Not Modified . It's not +// possible to retrieve the exact exception of these error codes. Request headers +// are limited to 8 KB in size. For more information, see Common Request Headers (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html) +// . Directory buckets - For directory buckets, you must make requests for this API +// operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style +// requests are not supported.
For more information, see Regional and Zonal +// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. Permissions +// - General purpose bucket permissions - To use HEAD , you must have the +// s3:GetObject permission. You need the relevant read object (or version) +// permission for this operation. For more information, see Actions, resources, +// and condition keys for Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html) +// in the Amazon S3 User Guide. If the object you request doesn't exist, the error +// that Amazon S3 returns depends on whether you also have the s3:ListBucket +// permission. +// - If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an +// HTTP status code 404 Not Found error. +// - If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP +// status code 403 Forbidden error. +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// . // -// * x-amz-server-side-encryption-customer-algorithm +// Encryption Encryption request headers, like x-amz-server-side-encryption , +// should not be sent for HEAD requests if your object uses server-side encryption +// with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side +// encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side +// encryption with Amazon S3 managed encryption keys (SSE-S3). The +// x-amz-server-side-encryption header is used when you PUT an object to S3 and +// want to specify the encryption method. If you include this header in a HEAD +// request for an object that uses these types of keys, you’ll get an HTTP 400 Bad +// Request error. It's because the encryption method can't be changed when you +// retrieve the object. If you encrypt an object by using server-side encryption +// with customer-provided encryption keys (SSE-C) when you store the object in +// Amazon S3, then when you retrieve the metadata from the object, you must use the +// following headers to provide the encryption key for the server to be able to +// retrieve the object's metadata. The headers are: +// - x-amz-server-side-encryption-customer-algorithm +// - x-amz-server-side-encryption-customer-key +// - x-amz-server-side-encryption-customer-key-MD5 // -// * -// x-amz-server-side-encryption-customer-key +// For more information about SSE-C, see Server-Side Encryption (Using +// Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) +// in the Amazon S3 User Guide. 
Directory bucket permissions - For directory +// buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3) ( +// AES256 ) is supported. Versioning // -// * -// x-amz-server-side-encryption-customer-key-MD5 +// - If the current version of the object is a delete marker, Amazon S3 behaves +// as if the object was deleted and includes x-amz-delete-marker: true in the +// response. // -// For more information about SSE-C, -// see Server-Side Encryption (Using Customer-Provided Encryption Keys) -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). +// - If the specified version is a delete marker, the response returns a 405 +// Method Not Allowed error and the Last-Modified: timestamp response header. // -// * -// Encryption request headers, like x-amz-server-side-encryption, should not be -// sent for GET requests if your object uses server-side encryption with KMS keys -// (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys -// (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 -// BadRequest error. +// - Directory buckets - Delete marker is not supported by directory buckets. // -// * The last modified property in this case is the creation -// date of the object. +// - Directory buckets - S3 Versioning isn't enabled and supported for directory +// buckets. For this API operation, only the null value of the version ID is +// supported by directory buckets. You can only specify null to the versionId +// query parameter in the request. // -// Request headers are limited to 8 KB in size. For more -// information, see Common Request Headers -// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html). -// Consider the following when using request headers: -// -// * Consideration 1 – If both -// of the If-Match and If-Unmodified-Since headers are present in the request as -// follows: -// -// * If-Match condition evaluates to true, and; -// -// * If-Unmodified-Since -// condition evaluates to false; -// -// Then Amazon S3 returns 200 OK and the data -// requested. -// -// * Consideration 2 – If both of the If-None-Match and -// If-Modified-Since headers are present in the request as follows: -// -// * -// If-None-Match condition evaluates to false, and; -// -// * If-Modified-Since condition -// evaluates to true; -// -// Then Amazon S3 returns the 304 Not Modified response -// code. -// -// For more information about conditional requests, see RFC 7232 -// (https://tools.ietf.org/html/rfc7232). Permissions You need the relevant read -// object (or version) permission for this operation. For more information, see -// Specifying Permissions in a Policy -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). If -// the object you request does not exist, the error Amazon S3 returns depends on -// whether you also have the s3:ListBucket permission. -// -// * If you have the -// s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code -// 404 ("no such key") error. -// -// * If you don’t have the s3:ListBucket permission, -// Amazon S3 returns an HTTP status code 403 ("access denied") error. 
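// Editorial aside, not upstream code: a sketch of re-supplying SSE-C key
// material on HEAD, as the comment above requires. The bucket, key, and
// 256-bit secret are placeholders; whether this SDK expects the raw or the
// base64-encoded key in these fields is an assumption to verify (the wire
// headers themselves carry base64):
//
//	sum := md5.Sum(secret) // crypto/md5; integrity check only, per the RFC 1321 note above
//	out, err := client.HeadObject(ctx, &s3.HeadObjectInput{
//		Bucket:               aws.String("my-bucket"),
//		Key:                  aws.String("secret-object"),
//		SSECustomerAlgorithm: aws.String("AES256"),
//		SSECustomerKey:       aws.String(base64.StdEncoding.EncodeToString(secret)),
//		SSECustomerKeyMD5:    aws.String(base64.StdEncoding.EncodeToString(sum[:])),
//	})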
-// -// The -// following actions are related to HeadObject: -// -// * GetObject -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// -// * -// GetObjectAttributes -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . The following actions are +// related to HeadObject : +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) func (c *Client) HeadObject(ctx context.Context, params *HeadObjectInput, optFns ...func(*Options)) (*HeadObjectOutput, error) { if params == nil { params = &HeadObjectInput{} @@ -122,23 +117,31 @@ func (c *Client) HeadObject(ctx context.Context, params *HeadObjectInput, optFns type HeadObjectInput struct { - // The name of the bucket containing the object. When using this action with an - // access point, you must direct requests to the access point hostname. The access - // point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, - // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts - // hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the - // Amazon S3 User Guide. + // The name of the bucket that contains the object. Directory buckets - When you + // use this operation with a directory bucket, you must use virtual-hosted-style + // requests in the format Bucket_name.s3express-az_id.region.amazonaws.com . + // Path-style requests are not supported. Directory bucket names must be unique in + // the chosen Availability Zone. Bucket names must follow the format + // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 + // ). For information about bucket naming restrictions, see Directory bucket + // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. 
For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + // use this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts access point ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // This member is required. Bucket *string @@ -154,118 +157,158 @@ type HeadObjectInput struct { // the kms:Decrypt action for the request to succeed. ChecksumMode types.ChecksumMode - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Return the object only if its entity tag (ETag) is the same as the one - // specified; otherwise, return a 412 (precondition failed) error. + // specified; otherwise, return a 412 (precondition failed) error. If both of the + // If-Match and If-Unmodified-Since headers are present in the request as follows: + // - If-Match condition evaluates to true , and; + // - If-Unmodified-Since condition evaluates to false ; + // Then Amazon S3 returns 200 OK and the data requested. For more information + // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . IfMatch *string // Return the object only if it has been modified since the specified time; - // otherwise, return a 304 (not modified) error. + // otherwise, return a 304 (not modified) error. If both of the If-None-Match and + // If-Modified-Since headers are present in the request as follows: + // - If-None-Match condition evaluates to false , and; + // - If-Modified-Since condition evaluates to true ; + // Then Amazon S3 returns the 304 Not Modified response code. For more information + // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . IfModifiedSince *time.Time // Return the object only if its entity tag (ETag) is different from the one - // specified; otherwise, return a 304 (not modified) error. + // specified; otherwise, return a 304 (not modified) error. If both of the + // If-None-Match and If-Modified-Since headers are present in the request as + // follows: + // - If-None-Match condition evaluates to false , and; + // - If-Modified-Since condition evaluates to true ; + // Then Amazon S3 returns the 304 Not Modified response code. For more information + // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . IfNoneMatch *string // Return the object only if it has not been modified since the specified time; - // otherwise, return a 412 (precondition failed) error. + // otherwise, return a 412 (precondition failed) error. 
If both of the If-Match + // and If-Unmodified-Since headers are present in the request as follows: + // - If-Match condition evaluates to true , and; + // - If-Unmodified-Since condition evaluates to false ; + // Then Amazon S3 returns 200 OK and the data requested. For more information + // about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232) . IfUnmodifiedSince *time.Time // Part number of the object being read. This is a positive integer between 1 and // 10,000. Effectively performs a 'ranged' HEAD request for the part specified. // Useful querying about the size of the part and the number of parts in this // object. - PartNumber int32 + PartNumber *int32 - // Because HeadObject returns only the metadata for an object, this parameter has - // no effect. + // HeadObject returns only the metadata for an object. If the Range is + // satisfiable, only the ContentLength is affected in the response. If the Range + // is not satisfiable, S3 returns a 416 - Requested Range Not Satisfiable error. Range *string // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer - // Specifies the algorithm to use to when encrypting the object (for example, - // AES256). + // Specifies the algorithm to use when encrypting the object (for example, + // AES256). This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use in // encrypting data. This value is used to store the object and then it is // discarded; Amazon S3 does not store the encryption key. The key must be // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. + // x-amz-server-side-encryption-customer-algorithm header. This functionality is + // not supported for directory buckets. SSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. + // encryption key was transmitted without error. This functionality is not + // supported for directory buckets. SSECustomerKeyMD5 *string - // VersionId used to reference a specific version of the object. + // Version ID used to reference a specific version of the object. For directory + // buckets in this API operation, only the null value of the version ID is + // supported. 
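// Editorial aside, not upstream code: two sketches of the request fields
// documented above, assuming the usual imports and an initialized client.
// First, revalidating a cached copy with If-None-Match; S3 answers 304 Not
// Modified, which the SDK surfaces as an error since a HEAD response has no
// body (the exact error shape is an assumption to verify):
//
//	_, err := client.HeadObject(ctx, &s3.HeadObjectInput{
//		Bucket:      aws.String("my-bucket"),
//		Key:         aws.String("report.pdf"),
//		IfNoneMatch: cachedETag, // *string saved from an earlier response
//	})
//
// Second, a ranged HEAD for one part of a multipart object. PartNumber is
// *int32 after this update, so callers now wrap it with aws.Int32:
//
//	out, err := client.HeadObject(ctx, &s3.HeadObjectInput{
//		Bucket:     aws.String("my-bucket"),
//		Key:        aws.String("big-object"),
//		PartNumber: aws.Int32(1), // previously a plain int32
//	})
//	// total parts: aws.ToInt32(out.PartsCount), also a pointer as of this update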
VersionId *string noSmithyDocumentSerde } +func (in *HeadObjectInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.Key = in.Key + +} + type HeadObjectOutput struct { // Indicates that a range of bytes was specified. AcceptRanges *string - // The archive state of the head object. + // The archive state of the head object. This functionality is not supported for + // directory buckets. ArchiveStatus types.ArchiveStatus // Indicates whether the object uses an S3 Bucket Key for server-side encryption - // with Amazon Web Services KMS (SSE-KMS). - BucketKeyEnabled bool + // with Key Management Service (KMS) keys (SSE-KMS). This functionality is not + // supported for directory buckets. + BucketKeyEnabled *bool // Specifies caching behavior along the request/reply chain. CacheControl *string // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32 *string // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32C *string // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use the API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. 
Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA1 *string // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA256 *string // Specifies presentational information for the object. ContentDisposition *string - // Specifies what content encodings have been applied to the object and thus what + // Indicates what content encodings have been applied to the object and thus what // decoding mechanisms must be applied to obtain the media-type referenced by the // Content-Type header field. ContentEncoding *string @@ -274,29 +317,31 @@ type HeadObjectOutput struct { ContentLanguage *string // Size of the body in bytes. - ContentLength int64 + ContentLength *int64 // A standard MIME type describing the format of the object data. ContentType *string // Specifies whether the object retrieved was (true) or was not (false) a Delete - // Marker. If false, this response header does not appear in the response. - DeleteMarker bool + // Marker. If false, this response header does not appear in the response. This + // functionality is not supported for directory buckets. + DeleteMarker *bool // An entity tag (ETag) is an opaque identifier assigned by a web server to a // specific version of a resource found at a URL. ETag *string - // If the object expiration is configured (see PUT Bucket lifecycle), the response - // includes this header. It includes the expiry-date and rule-id key-value pairs - // providing object expiration information. The value of the rule-id is - // URL-encoded. + // If the object expiration is configured (see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) + // ), the response includes this header. It includes the expiry-date and rule-id + // key-value pairs providing object expiration information. The value of the + // rule-id is URL-encoded. This functionality is not supported for directory + // buckets. Expiration *string // The date and time at which the object is no longer cacheable. Expires *time.Time - // Creation date of the object. + // Date and time when the object was last modified. LastModified *time.Time // A map of metadata to store with the object in S3. 
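Several HeadObjectOutput fields switch from value to pointer types in this update (BucketKeyEnabled, ContentLength, DeleteMarker, and, further down, MissingMeta and PartsCount), so absent headers stay distinguishable from zero values. The following editorial sketch, not part of the upstream patch, shows one way a caller reads the refreshed shape; the bucket and key names are placeholders, and the aws.To* helpers return zero values for nil pointers.

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printObjectInfo reads a few HeadObject results using the pointer-typed
// output fields introduced by this dependency update.
func printObjectInfo(ctx context.Context, client *s3.Client, bucket, key string) error {
	out, err := client.HeadObject(ctx, &s3.HeadObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return err
	}
	// ContentLength and DeleteMarker are pointers as of this update.
	fmt.Printf("%d bytes, etag %s, delete marker %v\n",
		aws.ToInt64(out.ContentLength), aws.ToString(out.ETag), aws.ToBool(out.DeleteMarker))
	for k, v := range out.Metadata { // user metadata from x-amz-meta-* headers
		fmt.Printf("meta %s = %s\n", k, v)
	}
	return nil
}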
@@ -307,114 +352,116 @@ type HeadObjectOutput struct { // This is set to the number of metadata entries not returned in x-amz-meta // headers. This can happen if you create metadata using an API like SOAP that // supports more flexible metadata than the REST API. For example, using SOAP, you - // can create metadata whose values are not legal HTTP headers. - MissingMeta int32 - - // Specifies whether a legal hold is in effect for this object. This header is only - // returned if the requester has the s3:GetObjectLegalHold permission. This header - // is not returned if the specified version of this object has never had a legal - // hold applied. For more information about S3 Object Lock, see Object Lock - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). + // can create metadata whose values are not legal HTTP headers. This functionality + // is not supported for directory buckets. + MissingMeta *int32 + + // Specifies whether a legal hold is in effect for this object. This header is + // only returned if the requester has the s3:GetObjectLegalHold permission. This + // header is not returned if the specified version of this object has never had a + // legal hold applied. For more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) + // . This functionality is not supported for directory buckets. ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus // The Object Lock mode, if any, that's in effect for this object. This header is // only returned if the requester has the s3:GetObjectRetention permission. For - // more information about S3 Object Lock, see Object Lock - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). + // more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) + // . This functionality is not supported for directory buckets. ObjectLockMode types.ObjectLockMode // The date and time when the Object Lock retention period expires. This header is - // only returned if the requester has the s3:GetObjectRetention permission. + // only returned if the requester has the s3:GetObjectRetention permission. This + // functionality is not supported for directory buckets. ObjectLockRetainUntilDate *time.Time // The count of parts this object has. This value is only returned if you specify // partNumber in your request and the object was uploaded as a multipart upload. - PartsCount int32 + PartsCount *int32 // Amazon S3 can return this header if your request involves a bucket that is // either a source or a destination in a replication rule. In replication, you have // a source bucket on which you configure replication and destination bucket or - // buckets where Amazon S3 stores object replicas. When you request an object - // (GetObject) or object metadata (HeadObject) from these buckets, Amazon S3 will + // buckets where Amazon S3 stores object replicas. When you request an object ( + // GetObject ) or object metadata ( HeadObject ) from these buckets, Amazon S3 will // return the x-amz-replication-status header in the response as follows: - // - // * If - // requesting an object from the source bucket, Amazon S3 will return the - // x-amz-replication-status header if the object in your request is eligible for - // replication. For example, suppose that in your replication configuration, you - // specify object prefix TaxDocs requesting Amazon S3 to replicate objects with key - // prefix TaxDocs. 
Any objects you upload with this key name prefix, for example - // TaxDocs/document1.pdf, are eligible for replication. For any object request with - // this key name prefix, Amazon S3 will return the x-amz-replication-status header - // with value PENDING, COMPLETED or FAILED indicating object replication status. - // - // * - // If requesting an object from a destination bucket, Amazon S3 will return the - // x-amz-replication-status header with value REPLICA if the object in your request - // is a replica that Amazon S3 created and there is no replica modification - // replication in progress. - // - // * When replicating objects to multiple destination - // buckets, the x-amz-replication-status header acts differently. The header of the - // source object will only return a value of COMPLETED when replication is - // successful to all destinations. The header will remain at value PENDING until - // replication has completed for all destinations. If one or more destinations - // fails replication the header will return FAILED. - // - // For more information, see - // Replication - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). + // - If requesting an object from the source bucket, Amazon S3 will return the + // x-amz-replication-status header if the object in your request is eligible for + // replication. For example, suppose that in your replication configuration, you + // specify object prefix TaxDocs requesting Amazon S3 to replicate objects with + // key prefix TaxDocs . Any objects you upload with this key name prefix, for + // example TaxDocs/document1.pdf , are eligible for replication. For any object + // request with this key name prefix, Amazon S3 will return the + // x-amz-replication-status header with value PENDING, COMPLETED or FAILED + // indicating object replication status. + // - If requesting an object from a destination bucket, Amazon S3 will return + // the x-amz-replication-status header with value REPLICA if the object in your + // request is a replica that Amazon S3 created and there is no replica modification + // replication in progress. + // - When replicating objects to multiple destination buckets, the + // x-amz-replication-status header acts differently. The header of the source + // object will only return a value of COMPLETED when replication is successful to + // all destinations. The header will remain at value PENDING until replication has + // completed for all destinations. If one or more destinations fails replication + // the header will return FAILED. + // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // . This functionality is not supported for directory buckets. ReplicationStatus types.ReplicationStatus // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // If the object is an archived object (an object whose storage class is GLACIER), // the response includes this header if either the archive restoration is in - // progress (see RestoreObject - // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) or an - // archive copy is already restored. If an archive copy is already restored, the - // header value indicates when Amazon S3 is scheduled to delete the object copy. 
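// Editorial aside, not upstream code: a sketch of acting on the replication
// status described above; the types.ReplicationStatus constant names are an
// assumption to verify against the types package:
//
//	switch out.ReplicationStatus {
//	case types.ReplicationStatusComplete:
//		// replicated to every configured destination
//	case types.ReplicationStatusPending:
//		// replication still in progress for at least one destination
//	case types.ReplicationStatusFailed:
//		// one or more destinations failed to replicate
//	case types.ReplicationStatusReplica:
//		// this object is itself a replica
//	}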
- // For example: x-amz-restore: ongoing-request="false", expiry-date="Fri, 21 Dec - // 2012 00:00:00 GMT" If the object restoration is in progress, the header returns - // the value ongoing-request="true". For more information about archiving objects, - // see Transitioning Objects: General Considerations - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations). + // progress (see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) + // or an archive copy is already restored. If an archive copy is already restored, + // the header value indicates when Amazon S3 is scheduled to delete the object + // copy. For example: x-amz-restore: ongoing-request="false", expiry-date="Fri, 21 + // Dec 2012 00:00:00 GMT" If the object restoration is in progress, the header + // returns the value ongoing-request="true" . For more information about archiving + // objects, see Transitioning Objects: General Considerations (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations) + // . This functionality is not supported for directory buckets. Only the S3 Express + // One Zone storage class is supported by directory buckets to store objects. Restore *string - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm used. + // If server-side encryption with a customer-provided encryption key was + // requested, the response will include this header to confirm the encryption + // algorithm that's used. This functionality is not supported for directory + // buckets. SSECustomerAlgorithm *string - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round-trip message integrity - // verification of the customer-provided encryption key. + // If server-side encryption with a customer-provided encryption key was + // requested, the response will include this header to provide the round-trip + // message integrity verification of the customer-provided encryption key. This + // functionality is not supported for directory buckets. SSECustomerKeyMD5 *string - // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed key that was used for the - // object. + // If present, indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. This functionality + // is not supported for directory buckets. SSEKMSKeyId *string - // If the object is stored using server-side encryption either with an Amazon Web - // Services KMS key or an Amazon S3-managed encryption key, the response includes - // this header with the value of the server-side encryption algorithm used when - // storing this object in Amazon S3 (for example, AES256, aws:kms). + // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). For directory buckets, only + // server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is + // supported. ServerSideEncryption types.ServerSideEncryption // Provides storage class information of the object. Amazon S3 returns this header // for all objects except for S3 Standard storage class objects. 
For more - // information, see Storage Classes - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html). + // information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) + // . Directory buckets - Only the S3 Express One Zone storage class is supported by + // directory buckets to store objects. StorageClass types.StorageClass - // Version of the object. + // Version ID of the object. This functionality is not supported for directory + // buckets. VersionId *string // If the bucket is configured as a website, redirects requests for this object to // another object in the same bucket or to an external URL. Amazon S3 stores the - // value of this header in the object metadata. + // value of this header in the object metadata. This functionality is not supported + // for directory buckets. WebsiteRedirectLocation *string // Metadata pertaining to the operation's result. @@ -424,6 +471,9 @@ type HeadObjectOutput struct { } func (c *Client) addOperationHeadObjectMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpHeadObject{}, middleware.After) if err != nil { return err @@ -432,6 +482,13 @@ func (c *Client) addOperationHeadObjectMiddlewares(stack *middleware.Stack, opti if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "HeadObject"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -450,16 +507,13 @@ func (c *Client) addOperationHeadObjectMiddlewares(stack *middleware.Stack, opti if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -468,7 +522,10 @@ func (c *Client) addOperationHeadObjectMiddlewares(stack *middleware.Stack, opti if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpHeadObjectValidationMiddleware(stack); err != nil { @@ -480,6 +537,9 @@ func (c *Client) addOperationHeadObjectMiddlewares(stack *middleware.Stack, opti if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addHeadObjectUpdateEndpoint(stack, options); err != nil { return err } @@ -495,9 +555,22 @@ func (c *Client) addOperationHeadObjectMiddlewares(stack *middleware.Stack, opti if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = 
addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *HeadObjectInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + // HeadObjectAPIClient is a client that implements the HeadObject operation. type HeadObjectAPIClient interface { HeadObject(context.Context, *HeadObjectInput, ...func(*Options)) (*HeadObjectOutput, error) @@ -518,9 +591,9 @@ type ObjectExistsWaiterOptions struct { // MinDelay must resolve to a value lesser than or equal to the MaxDelay. MinDelay time.Duration - // MaxDelay is the maximum amount of time to delay between retries. If unset or set - // to zero, ObjectExistsWaiter will use default max delay of 120 seconds. Note that - // MaxDelay must resolve to value greater than or equal to the MinDelay. + // MaxDelay is the maximum amount of time to delay between retries. If unset or + // set to zero, ObjectExistsWaiter will use default max delay of 120 seconds. Note + // that MaxDelay must resolve to value greater than or equal to the MinDelay. MaxDelay time.Duration // LogWaitAttempts is used to enable logging for waiter retry attempts @@ -649,13 +722,8 @@ func objectExistsStateRetryable(ctx context.Context, input *HeadObjectInput, out } if err != nil { - var apiErr smithy.APIError - ok := errors.As(err, &apiErr) - if !ok { - return false, fmt.Errorf("expected err to be of type smithy.APIError, got %w", err) - } - - if "NotFound" == apiErr.ErrorCode() { + var errorType *types.NotFound + if errors.As(err, &errorType) { return true, nil } } @@ -676,9 +744,9 @@ type ObjectNotExistsWaiterOptions struct { // MinDelay must resolve to a value lesser than or equal to the MaxDelay. MinDelay time.Duration - // MaxDelay is the maximum amount of time to delay between retries. If unset or set - // to zero, ObjectNotExistsWaiter will use default max delay of 120 seconds. Note - // that MaxDelay must resolve to value greater than or equal to the MinDelay. + // MaxDelay is the maximum amount of time to delay between retries. If unset or + // set to zero, ObjectNotExistsWaiter will use default max delay of 120 seconds. + // Note that MaxDelay must resolve to value greater than or equal to the MinDelay. MaxDelay time.Duration // LogWaitAttempts is used to enable logging for waiter retry attempts @@ -718,9 +786,9 @@ func NewObjectNotExistsWaiter(client HeadObjectAPIClient, optFns ...func(*Object } } -// Wait calls the waiter function for ObjectNotExists waiter. The maxWaitDur is the -// maximum wait duration the waiter will wait. The maxWaitDur is required and must -// be greater than zero. +// Wait calls the waiter function for ObjectNotExists waiter. The maxWaitDur is +// the maximum wait duration the waiter will wait. The maxWaitDur is required and +// must be greater than zero. func (w *ObjectNotExistsWaiter) Wait(ctx context.Context, params *HeadObjectInput, maxWaitDur time.Duration, optFns ...func(*ObjectNotExistsWaiterOptions)) error { _, err := w.WaitForOutput(ctx, params, maxWaitDur, optFns...) 
return err @@ -804,13 +872,8 @@ func (w *ObjectNotExistsWaiter) WaitForOutput(ctx context.Context, params *HeadO func objectNotExistsStateRetryable(ctx context.Context, input *HeadObjectInput, output *HeadObjectOutput, err error) (bool, error) { if err != nil { - var apiErr smithy.APIError - ok := errors.As(err, &apiErr) - if !ok { - return false, fmt.Errorf("expected err to be of type smithy.APIError, got %w", err) - } - - if "NotFound" == apiErr.ErrorCode() { + var errorType *types.NotFound + if errors.As(err, &errorType) { return false, nil } } @@ -822,13 +885,13 @@ func newServiceMetadataMiddleware_opHeadObject(region string) *awsmiddleware.Reg return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "HeadObject", } } -// getHeadObjectBucketMember returns a pointer to string denoting a provided bucket -// member valueand a boolean indicating if the input has a modeled bucket name, +// getHeadObjectBucketMember returns a pointer to string denoting a provided +// bucket member value and a boolean indicating if the input has a modeled bucket +// name, func getHeadObjectBucketMember(input interface{}) (*string, bool) { in := input.(*HeadObjectInput) if in.Bucket == nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go index 0a0373f2..6ecf8458 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketAnalyticsConfigurations.go @@ -4,46 +4,37 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Lists the analytics configurations for the bucket. You can have up to 1,000 -// analytics configurations per bucket. This action supports list pagination and -// does not return more than 100 configurations at a time. You should always check -// the IsTruncated element in the response. If there are no more configurations to -// list, IsTruncated is set to false. If there are more configurations to list, -// IsTruncated is set to true, and there will be a value in NextContinuationToken. -// You use the NextContinuationToken value to continue the pagination of the list -// by passing the value in continuation-token in the request to GET the next page. -// To use this operation, you must have permissions to perform the +// This operation is not supported by directory buckets. Lists the analytics +// configurations for the bucket. You can have up to 1,000 analytics configurations +// per bucket. This action supports list pagination and does not return more than +// 100 configurations at a time. You should always check the IsTruncated element +// in the response. If there are no more configurations to list, IsTruncated is +// set to false. If there are more configurations to list, IsTruncated is set to +// true, and there will be a value in NextContinuationToken . You use the +// NextContinuationToken value to continue the pagination of the list by passing +// the value in continuation-token in the request to GET the next page.
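The two waiter hunks above replace string comparison of the smithy.APIError code with errors.As matching on the modeled *types.NotFound error. A usage sketch (not part of the patch; the bucket, key, and delay values are placeholders):

package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// The waiter polls HeadObject; retryability is now decided by matching
	// the modeled *types.NotFound error rather than the "NotFound" code string.
	waiter := s3.NewObjectExistsWaiter(client, func(o *s3.ObjectExistsWaiterOptions) {
		o.MinDelay = 5 * time.Second  // must resolve to <= MaxDelay
		o.MaxDelay = 60 * time.Second // defaults to 120s when left at zero
	})
	err = waiter.Wait(ctx, &s3.HeadObjectInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),    // placeholder
	}, 2*time.Minute) // maxWaitDur is required and must be greater than zero
	if err != nil {
		log.Fatal(err) // wait deadline exceeded or a non-retryable error
	}
}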
To use +// this operation, you must have permissions to perform the // s3:GetAnalyticsConfiguration action. The bucket owner has this permission by // default. The bucket owner can grant this permission to others. For more // information about permissions, see Permissions Related to Bucket Subresource -// Operations -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). -// For information about Amazon S3 analytics feature, see Amazon S3 Analytics – -// Storage Class Analysis -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). -// The following operations are related to ListBucketAnalyticsConfigurations: -// -// * -// GetBucketAnalyticsConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) -// -// * -// DeleteBucketAnalyticsConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) -// -// * -// PutBucketAnalyticsConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) +// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// . For information about Amazon S3 analytics feature, see Amazon S3 Analytics – +// Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) +// . The following operations are related to ListBucketAnalyticsConfigurations : +// - GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) +// - DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) +// - PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) func (c *Client) ListBucketAnalyticsConfigurations(ctx context.Context, params *ListBucketAnalyticsConfigurationsInput, optFns ...func(*Options)) (*ListBucketAnalyticsConfigurationsOutput, error) { if params == nil { params = &ListBucketAnalyticsConfigurationsInput{} @@ -70,14 +61,19 @@ type ListBucketAnalyticsConfigurationsInput struct { // should begin. ContinuationToken *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *ListBucketAnalyticsConfigurationsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type ListBucketAnalyticsConfigurationsOutput struct { // The list of analytics configurations for a bucket. @@ -90,11 +86,11 @@ type ListBucketAnalyticsConfigurationsOutput struct { // Indicates whether the returned list of analytics configurations is complete. 
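The middleware rewiring repeated throughout these hunks (setOperationInputMiddleware added first in the Serialize step, protocol finalizers, addClientUserAgent now taking options) assembles the same smithy-go stack that callers can extend through Options.APIOptions. A minimal sketch; the middleware ID and log message are invented for illustration:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/smithy-go/middleware"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// A trivial Initialize-phase middleware; "LogOperationStart" is an
	// arbitrary ID chosen for this sketch.
	logStart := middleware.InitializeMiddlewareFunc("LogOperationStart",
		func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
		) (middleware.InitializeOutput, middleware.Metadata, error) {
			log.Println("starting S3 operation")
			return next.HandleInitialize(ctx, in)
		})

	// APIOptions functions run against the same stack that the generated
	// addOperation*Middlewares functions build for each call.
	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error {
			return stack.Initialize.Add(logStart, middleware.Before)
		})
	})
	_ = client
}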
A // value of true indicates that the list is not complete and the // NextContinuationToken will be provided for a subsequent request. - IsTruncated bool + IsTruncated *bool // NextContinuationToken is sent when isTruncated is true, which indicates that // there are more analytics configurations to list. The next request must include - // this NextContinuationToken. The token is obfuscated and is not a usable value. + // this NextContinuationToken . The token is obfuscated and is not a usable value. NextContinuationToken *string // Metadata pertaining to the operation's result. @@ -104,6 +100,9 @@ type ListBucketAnalyticsConfigurationsOutput struct { } func (c *Client) addOperationListBucketAnalyticsConfigurationsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpListBucketAnalyticsConfigurations{}, middleware.After) if err != nil { return err @@ -112,6 +111,13 @@ func (c *Client) addOperationListBucketAnalyticsConfigurationsMiddlewares(stack if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListBucketAnalyticsConfigurations"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -130,16 +136,13 @@ func (c *Client) addOperationListBucketAnalyticsConfigurationsMiddlewares(stack if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -148,7 +151,10 @@ func (c *Client) addOperationListBucketAnalyticsConfigurationsMiddlewares(stack if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpListBucketAnalyticsConfigurationsValidationMiddleware(stack); err != nil { @@ -160,6 +166,9 @@ func (c *Client) addOperationListBucketAnalyticsConfigurationsMiddlewares(stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addListBucketAnalyticsConfigurationsUpdateEndpoint(stack, options); err != nil { return err } @@ -175,14 +184,26 @@ func (c *Client) addOperationListBucketAnalyticsConfigurationsMiddlewares(stack if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *ListBucketAnalyticsConfigurationsInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func 
newServiceMetadataMiddleware_opListBucketAnalyticsConfigurations(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "ListBucketAnalyticsConfigurations", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go index 972a69c9..de4a2079 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketIntelligentTieringConfigurations.go @@ -4,15 +4,18 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Lists the S3 Intelligent-Tiering configuration from the specified bucket. The S3 +// This operation is not supported by directory buckets. Lists the S3 +// Intelligent-Tiering configuration from the specified bucket. The S3 // Intelligent-Tiering storage class is designed to optimize storage costs by // automatically moving data to the most cost-effective storage access tier, // without performance impact or operational overhead. S3 Intelligent-Tiering @@ -25,21 +28,11 @@ import ( // monitored and not eligible for auto-tiering. Smaller objects can be stored, but // they are always charged at the Frequent Access tier rates in the S3 // Intelligent-Tiering storage class. For more information, see Storage class for -// automatically optimizing frequently and infrequently accessed objects -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). -// Operations related to ListBucketIntelligentTieringConfigurations include: -// -// * -// DeleteBucketIntelligentTieringConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) -// -// * -// PutBucketIntelligentTieringConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) -// -// * -// GetBucketIntelligentTieringConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) +// automatically optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) +// . 
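A single-call sketch for this operation (not part of the patch); the bucket name is a placeholder, and the generic smithy.APIError match is used because it works even where S3 does not model a typed error for this operation:

package main

import (
	"context"
	"errors"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/smithy-go"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	out, err := client.ListBucketIntelligentTieringConfigurations(ctx,
		&s3.ListBucketIntelligentTieringConfigurationsInput{
			Bucket: aws.String("example-bucket"), // placeholder
		})
	if err != nil {
		// Fall back to the generic error code when no typed error applies.
		var apiErr smithy.APIError
		if errors.As(err, &apiErr) && apiErr.ErrorCode() == "NoSuchBucket" {
			log.Fatal("bucket does not exist")
		}
		log.Fatal(err)
	}
	for _, c := range out.IntelligentTieringConfigurationList {
		fmt.Println(aws.ToString(c.Id))
	}
}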
Operations related to ListBucketIntelligentTieringConfigurations include: +// - DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) +// - PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) +// - GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) func (c *Client) ListBucketIntelligentTieringConfigurations(ctx context.Context, params *ListBucketIntelligentTieringConfigurationsInput, optFns ...func(*Options)) (*ListBucketIntelligentTieringConfigurationsOutput, error) { if params == nil { params = &ListBucketIntelligentTieringConfigurationsInput{} @@ -70,6 +63,11 @@ type ListBucketIntelligentTieringConfigurationsInput struct { noSmithyDocumentSerde } +func (in *ListBucketIntelligentTieringConfigurationsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type ListBucketIntelligentTieringConfigurationsOutput struct { // The ContinuationToken that represents a placeholder from where this request @@ -82,7 +80,7 @@ type ListBucketIntelligentTieringConfigurationsOutput struct { // Indicates whether the returned list of analytics configurations is complete. A // value of true indicates that the list is not complete and the // NextContinuationToken will be provided for a subsequent request. - IsTruncated bool + IsTruncated *bool // The marker used to continue this inventory configuration listing. Use the // NextContinuationToken from this response to continue the listing in a subsequent @@ -96,6 +94,9 @@ type ListBucketIntelligentTieringConfigurationsOutput struct { } func (c *Client) addOperationListBucketIntelligentTieringConfigurationsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpListBucketIntelligentTieringConfigurations{}, middleware.After) if err != nil { return err @@ -104,6 +105,13 @@ func (c *Client) addOperationListBucketIntelligentTieringConfigurationsMiddlewar if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListBucketIntelligentTieringConfigurations"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -122,16 +130,13 @@ func (c *Client) addOperationListBucketIntelligentTieringConfigurationsMiddlewar if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -140,7 +145,10 @@ func (c *Client) addOperationListBucketIntelligentTieringConfigurationsMiddlewar if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = 
swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpListBucketIntelligentTieringConfigurationsValidationMiddleware(stack); err != nil { @@ -152,6 +160,9 @@ func (c *Client) addOperationListBucketIntelligentTieringConfigurationsMiddlewar if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addListBucketIntelligentTieringConfigurationsUpdateEndpoint(stack, options); err != nil { return err } @@ -167,14 +178,26 @@ func (c *Client) addOperationListBucketIntelligentTieringConfigurationsMiddlewar if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *ListBucketIntelligentTieringConfigurationsInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opListBucketIntelligentTieringConfigurations(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "ListBucketIntelligentTieringConfigurations", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go index e6c8c79a..881f7d92 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketInventoryConfigurations.go @@ -4,45 +4,36 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns a list of inventory configurations for the bucket. You can have up to -// 1,000 analytics configurations per bucket. This action supports list pagination -// and does not return more than 100 configurations at a time. Always check the -// IsTruncated element in the response. If there are no more configurations to -// list, IsTruncated is set to false. If there are more configurations to list, -// IsTruncated is set to true, and there is a value in NextContinuationToken. You +// This operation is not supported by directory buckets. Returns a list of +// inventory configurations for the bucket. You can have up to 1,000 analytics +// configurations per bucket. This action supports list pagination and does not +// return more than 100 configurations at a time. Always check the IsTruncated +// element in the response. If there are no more configurations to list, +// IsTruncated is set to false. If there are more configurations to list, +// IsTruncated is set to true, and there is a value in NextContinuationToken . 
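The continuation-token protocol described here can be driven by hand. A rough sketch (placeholder bucket name) that also accounts for IsTruncated becoming *bool in these hunks, via aws.ToBool:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	var token *string
	for {
		out, err := client.ListBucketInventoryConfigurations(ctx,
			&s3.ListBucketInventoryConfigurationsInput{
				Bucket:            aws.String("example-bucket"), // placeholder
				ContinuationToken: token,
			})
		if err != nil {
			log.Fatal(err)
		}
		for _, c := range out.InventoryConfigurationList {
			fmt.Println(aws.ToString(c.Id))
		}
		// IsTruncated is *bool after this upgrade; aws.ToBool treats nil as false.
		if !aws.ToBool(out.IsTruncated) {
			break
		}
		token = out.NextContinuationToken
	}
}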
You // use the NextContinuationToken value to continue the pagination of the list by // passing the value in continuation-token in the request to GET the next page. To // use this operation, you must have permissions to perform the // s3:GetInventoryConfiguration action. The bucket owner has this permission by // default. The bucket owner can grant this permission to others. For more // information about permissions, see Permissions Related to Bucket Subresource -// Operations -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). -// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) The -// following operations are related to ListBucketInventoryConfigurations: -// -// * -// GetBucketInventoryConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) -// -// * -// DeleteBucketInventoryConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) -// -// * -// PutBucketInventoryConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) +// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// . For information about the Amazon S3 inventory feature, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) +// The following operations are related to ListBucketInventoryConfigurations : +// - GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) +// - DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) +// - PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) func (c *Client) ListBucketInventoryConfigurations(ctx context.Context, params *ListBucketInventoryConfigurationsInput, optFns ...func(*Options)) (*ListBucketInventoryConfigurationsOutput, error) { if params == nil { params = &ListBucketInventoryConfigurationsInput{} @@ -71,14 +62,19 @@ type ListBucketInventoryConfigurationsInput struct { // Amazon S3 understands. ContinuationToken *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). 
ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *ListBucketInventoryConfigurationsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type ListBucketInventoryConfigurationsOutput struct { // If sent in the request, the marker that is used as a starting point for this @@ -88,10 +84,10 @@ type ListBucketInventoryConfigurationsOutput struct { // The list of inventory configurations for a bucket. InventoryConfigurationList []types.InventoryConfiguration - // Tells whether the returned list of inventory configurations is complete. A value - // of true indicates that the list is not complete and the NextContinuationToken is - // provided for a subsequent request. - IsTruncated bool + // Tells whether the returned list of inventory configurations is complete. A + // value of true indicates that the list is not complete and the + // NextContinuationToken is provided for a subsequent request. + IsTruncated *bool // The marker used to continue this inventory configuration listing. Use the // NextContinuationToken from this response to continue the listing in a subsequent @@ -105,6 +101,9 @@ type ListBucketInventoryConfigurationsOutput struct { } func (c *Client) addOperationListBucketInventoryConfigurationsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpListBucketInventoryConfigurations{}, middleware.After) if err != nil { return err @@ -113,6 +112,13 @@ func (c *Client) addOperationListBucketInventoryConfigurationsMiddlewares(stack if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListBucketInventoryConfigurations"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -131,16 +137,13 @@ func (c *Client) addOperationListBucketInventoryConfigurationsMiddlewares(stack if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -149,7 +152,10 @@ func (c *Client) addOperationListBucketInventoryConfigurationsMiddlewares(stack if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpListBucketInventoryConfigurationsValidationMiddleware(stack); err != nil { @@ -161,6 +167,9 @@ func (c *Client) addOperationListBucketInventoryConfigurationsMiddlewares(stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addListBucketInventoryConfigurationsUpdateEndpoint(stack, 
options); err != nil { return err } @@ -176,14 +185,26 @@ func (c *Client) addOperationListBucketInventoryConfigurationsMiddlewares(stack if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *ListBucketInventoryConfigurationsInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opListBucketInventoryConfigurations(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "ListBucketInventoryConfigurations", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go index 50b207af..fc2cf728 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBucketMetricsConfigurations.go @@ -4,6 +4,7 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" @@ -12,39 +13,28 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Lists the metrics configurations for the bucket. The metrics configurations are -// only for the request metrics of the bucket and do not provide information on -// daily storage metrics. You can have up to 1,000 configurations per bucket. This -// action supports list pagination and does not return more than 100 configurations -// at a time. Always check the IsTruncated element in the response. If there are no +// This operation is not supported by directory buckets. Lists the metrics +// configurations for the bucket. The metrics configurations are only for the +// request metrics of the bucket and do not provide information on daily storage +// metrics. You can have up to 1,000 configurations per bucket. This action +// supports list pagination and does not return more than 100 configurations at a +// time. Always check the IsTruncated element in the response. If there are no // more configurations to list, IsTruncated is set to false. If there are more // configurations to list, IsTruncated is set to true, and there is a value in -// NextContinuationToken. You use the NextContinuationToken value to continue the -// pagination of the list by passing the value in continuation-token in the request -// to GET the next page. To use this operation, you must have permissions to -// perform the s3:GetMetricsConfiguration action. The bucket owner has this +// NextContinuationToken . You use the NextContinuationToken value to continue the +// pagination of the list by passing the value in continuation-token in the +// request to GET the next page. To use this operation, you must have permissions +// to perform the s3:GetMetricsConfiguration action. The bucket owner has this // permission by default. The bucket owner can grant this permission to others. 
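For the ExpectedBucketOwner semantics documented in this hunk, a hedged sketch (not part of the patch; the account ID and bucket name are placeholders) that surfaces the 403 by unwrapping the transport-level response error:

package main

import (
	"context"
	"errors"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	_, err = client.ListBucketMetricsConfigurations(ctx,
		&s3.ListBucketMetricsConfigurationsInput{
			Bucket:              aws.String("example-bucket"), // placeholder
			ExpectedBucketOwner: aws.String("111122223333"),   // placeholder account ID
		})
	if err != nil {
		// A mismatch between ExpectedBucketOwner and the real owner comes
		// back as an HTTP 403 on the response error.
		var re *awshttp.ResponseError
		if errors.As(err, &re) && re.HTTPStatusCode() == 403 {
			log.Fatal("expected bucket owner mismatch (403 Forbidden)")
		}
		log.Fatal(err)
	}
}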
For // more information about permissions, see Permissions Related to Bucket -// Subresource Operations -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). -// For more information about metrics configurations and CloudWatch request -// metrics, see Monitoring Metrics with Amazon CloudWatch -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). -// The following operations are related to ListBucketMetricsConfigurations: -// -// * -// PutBucketMetricsConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) -// -// * -// GetBucketMetricsConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) -// -// * -// DeleteBucketMetricsConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) +// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// . For more information about metrics configurations and CloudWatch request +// metrics, see Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) +// . The following operations are related to ListBucketMetricsConfigurations : +// - PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) +// - GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) +// - DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) func (c *Client) ListBucketMetricsConfigurations(ctx context.Context, params *ListBucketMetricsConfigurationsInput, optFns ...func(*Options)) (*ListBucketMetricsConfigurationsOutput, error) { if params == nil { params = &ListBucketMetricsConfigurationsInput{} @@ -73,14 +63,19 @@ type ListBucketMetricsConfigurationsInput struct { // Amazon S3 understands. ContinuationToken *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *ListBucketMetricsConfigurationsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + +} + type ListBucketMetricsConfigurationsOutput struct { // The marker that is used as a starting point for this metrics configuration list @@ -90,7 +85,7 @@ type ListBucketMetricsConfigurationsOutput struct { // Indicates whether the returned list of metrics configurations is complete. A // value of true indicates that the list is not complete and the // NextContinuationToken will be provided for a subsequent request. - IsTruncated bool + IsTruncated *bool // The list of metrics configurations for a bucket. 
MetricsConfigurationList []types.MetricsConfiguration @@ -108,6 +103,9 @@ type ListBucketMetricsConfigurationsOutput struct { } func (c *Client) addOperationListBucketMetricsConfigurationsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpListBucketMetricsConfigurations{}, middleware.After) if err != nil { return err @@ -116,6 +114,13 @@ func (c *Client) addOperationListBucketMetricsConfigurationsMiddlewares(stack *m if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListBucketMetricsConfigurations"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -134,16 +139,13 @@ func (c *Client) addOperationListBucketMetricsConfigurationsMiddlewares(stack *m if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -152,7 +154,10 @@ func (c *Client) addOperationListBucketMetricsConfigurationsMiddlewares(stack *m if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpListBucketMetricsConfigurationsValidationMiddleware(stack); err != nil { @@ -164,6 +169,9 @@ func (c *Client) addOperationListBucketMetricsConfigurationsMiddlewares(stack *m if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addListBucketMetricsConfigurationsUpdateEndpoint(stack, options); err != nil { return err } @@ -179,14 +187,26 @@ func (c *Client) addOperationListBucketMetricsConfigurationsMiddlewares(stack *m if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *ListBucketMetricsConfigurationsInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opListBucketMetricsConfigurations(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "ListBucketMetricsConfigurations", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go index 7a3de38f..a6189288 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListBuckets.go @@ -4,6 +4,7 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" @@ -12,8 +13,11 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns a list of all buckets owned by the authenticated sender of the request. -// To use this operation, you must have the s3:ListAllMyBuckets permission. +// This operation is not supported by directory buckets. Returns a list of all +// buckets owned by the authenticated sender of the request. To use this operation, +// you must have the s3:ListAllMyBuckets permission. For information about Amazon +// S3 buckets, see Creating, configuring, and working with Amazon S3 buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html) +// . func (c *Client) ListBuckets(ctx context.Context, params *ListBucketsInput, optFns ...func(*Options)) (*ListBucketsOutput, error) { if params == nil { params = &ListBucketsInput{} @@ -48,6 +52,9 @@ type ListBucketsOutput struct { } func (c *Client) addOperationListBucketsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpListBuckets{}, middleware.After) if err != nil { return err @@ -56,6 +63,13 @@ func (c *Client) addOperationListBucketsMiddlewares(stack *middleware.Stack, opt if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListBuckets"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -74,16 +88,13 @@ func (c *Client) addOperationListBucketsMiddlewares(stack *middleware.Stack, opt if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -92,7 +103,10 @@ func (c *Client) addOperationListBucketsMiddlewares(stack *middleware.Stack, opt if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListBuckets(options.Region), middleware.Before); err != nil { @@ -101,6 +115,9 @@ func (c *Client) addOperationListBucketsMiddlewares(stack *middleware.Stack, opt if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addListBucketsUpdateEndpoint(stack, options); err != nil { return err } @@ -116,6 +133,12 @@ func (c *Client) 
addOperationListBucketsMiddlewares(stack *middleware.Stack, opt if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } @@ -123,7 +146,6 @@ func newServiceMetadataMiddleware_opListBuckets(region string) *awsmiddleware.Re return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "ListBuckets", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListDirectoryBuckets.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListDirectoryBuckets.go new file mode 100644 index 00000000..373531ab --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListDirectoryBuckets.go @@ -0,0 +1,291 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns a list of all Amazon S3 directory buckets owned by the authenticated +// sender of the request. For more information about directory buckets, see +// Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html) +// in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must +// make requests for this API operation to the Regional endpoint. These endpoints +// support path-style requests in the format +// https://s3express-control.region_code.amazonaws.com/bucket-name . +// Virtual-hosted-style requests aren't supported. For more information, see +// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. Permissions You must have the +// s3express:ListAllMyDirectoryBuckets permission in an IAM identity-based policy +// instead of a bucket policy. Cross-account access to this API operation isn't +// supported. This operation can only be performed by the Amazon Web Services +// account that owns the resource. For more information about directory bucket +// policies and permissions, see Amazon Web Services Identity and Access +// Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) +// in the Amazon S3 User Guide. HTTP Host header syntax Directory buckets - The +// HTTP Host header syntax is s3express-control.region.amazonaws.com . +func (c *Client) ListDirectoryBuckets(ctx context.Context, params *ListDirectoryBucketsInput, optFns ...func(*Options)) (*ListDirectoryBucketsOutput, error) { + if params == nil { + params = &ListDirectoryBucketsInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "ListDirectoryBuckets", params, optFns, c.addOperationListDirectoryBucketsMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*ListDirectoryBucketsOutput) + out.ResultMetadata = metadata + return out, nil +} + +type ListDirectoryBucketsInput struct { + + // ContinuationToken indicates to Amazon S3 that the list is being continued on + // this bucket with a token. 
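A consumption sketch for this new operation (not part of the patch), using the ListDirectoryBucketsPaginator defined further down in this file; the page size of 100 is an arbitrary example value:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// The paginator carries ContinuationToken between pages automatically.
	p := s3.NewListDirectoryBucketsPaginator(client, &s3.ListDirectoryBucketsInput{
		MaxDirectoryBuckets: aws.Int32(100), // arbitrary page size
	})
	for p.HasMorePages() {
		page, err := p.NextPage(ctx)
		if err != nil {
			log.Fatal(err)
		}
		for _, b := range page.Buckets {
			fmt.Println(aws.ToString(b.Name))
		}
	}
}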
ContinuationToken is obfuscated and is not a real + // key. You can use this ContinuationToken for pagination of the list results. + ContinuationToken *string + + // Maximum number of buckets to be returned in response. When the number is more + // than the count of buckets that are owned by an Amazon Web Services account, + // return all the buckets in response. + MaxDirectoryBuckets *int32 + + noSmithyDocumentSerde +} + +func (in *ListDirectoryBucketsInput) bindEndpointParams(p *EndpointParameters) { + + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + +type ListDirectoryBucketsOutput struct { + + // The list of buckets owned by the requester. + Buckets []types.Bucket + + // If ContinuationToken was sent with the request, it is included in the response. + // You can use the returned ContinuationToken for pagination of the list response. + ContinuationToken *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationListDirectoryBucketsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsRestxml_serializeOpListDirectoryBuckets{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsRestxml_deserializeOpListDirectoryBuckets{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListDirectoryBuckets"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { + return err + } + if err = addRetryMiddlewares(stack, options); err != nil { + return err + } + if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListDirectoryBuckets(options.Region), middleware.Before); err != nil { + return err + } + if err = addMetadataRetrieverMiddleware(stack); err != nil { + return err + } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } + if err = addListDirectoryBucketsUpdateEndpoint(stack, options); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil { + return err + } + if err = disableAcceptEncodingGzip(stack); err != nil { + return err + } + if err = 
addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + return nil +} + +// ListDirectoryBucketsAPIClient is a client that implements the +// ListDirectoryBuckets operation. +type ListDirectoryBucketsAPIClient interface { + ListDirectoryBuckets(context.Context, *ListDirectoryBucketsInput, ...func(*Options)) (*ListDirectoryBucketsOutput, error) +} + +var _ ListDirectoryBucketsAPIClient = (*Client)(nil) + +// ListDirectoryBucketsPaginatorOptions is the paginator options for +// ListDirectoryBuckets +type ListDirectoryBucketsPaginatorOptions struct { + // Maximum number of buckets to be returned in response. When the number is more + // than the count of buckets that are owned by an Amazon Web Services account, + // return all the buckets in response. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// ListDirectoryBucketsPaginator is a paginator for ListDirectoryBuckets +type ListDirectoryBucketsPaginator struct { + options ListDirectoryBucketsPaginatorOptions + client ListDirectoryBucketsAPIClient + params *ListDirectoryBucketsInput + nextToken *string + firstPage bool +} + +// NewListDirectoryBucketsPaginator returns a new ListDirectoryBucketsPaginator +func NewListDirectoryBucketsPaginator(client ListDirectoryBucketsAPIClient, params *ListDirectoryBucketsInput, optFns ...func(*ListDirectoryBucketsPaginatorOptions)) *ListDirectoryBucketsPaginator { + if params == nil { + params = &ListDirectoryBucketsInput{} + } + + options := ListDirectoryBucketsPaginatorOptions{} + if params.MaxDirectoryBuckets != nil { + options.Limit = *params.MaxDirectoryBuckets + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListDirectoryBucketsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + nextToken: params.ContinuationToken, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListDirectoryBucketsPaginator) HasMorePages() bool { + return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) +} + +// NextPage retrieves the next ListDirectoryBuckets page. +func (p *ListDirectoryBucketsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListDirectoryBucketsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.ContinuationToken = p.nextToken + + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxDirectoryBuckets = limit + + result, err := p.client.ListDirectoryBuckets(ctx, &params, optFns...)
+ if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.nextToken + p.nextToken = result.ContinuationToken + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.nextToken != nil && + *prevToken == *p.nextToken { + p.nextToken = nil + } + + return result, nil +} + +func newServiceMetadataMiddleware_opListDirectoryBuckets(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "ListDirectoryBuckets", + } +} + +func addListDirectoryBucketsUpdateEndpoint(stack *middleware.Stack, options Options) error { + return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{ + Accessor: s3cust.UpdateEndpointParameterAccessor{ + GetBucketFromInput: nopGetBucketAccessor, + }, + UsePathStyle: options.UsePathStyle, + UseAccelerate: options.UseAccelerate, + SupportsAccelerate: true, + TargetS3ObjectLambda: false, + EndpointResolver: options.EndpointResolver, + EndpointResolverOptions: options.EndpointOptions, + UseARNRegion: options.UseARNRegion, + DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go index af281a25..3e6853e6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go @@ -4,6 +4,7 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" @@ -12,46 +13,71 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This action lists in-progress multipart uploads. An in-progress multipart upload -// is a multipart upload that has been initiated using the Initiate Multipart -// Upload request, but has not yet been completed or aborted. This action returns -// at most 1,000 multipart uploads in the response. 1,000 multipart uploads is the -// maximum number of uploads a response can include, which is also the default -// value. You can further limit the number of uploads in a response by specifying -// the max-uploads parameter in the response. If additional multipart uploads -// satisfy the list criteria, the response will contain an IsTruncated element with -// the value true. To list the additional multipart uploads, use the key-marker and -// upload-id-marker request parameters. In the response, the uploads are sorted by -// key. If your application has initiated more than one multipart upload using the -// same object key, then uploads in the response are first sorted by key. -// Additionally, uploads are sorted in ascending order within each key by the -// upload initiation time. For more information on multipart uploads, see Uploading -// Objects Using Multipart Upload -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). For -// information on permissions required to use the multipart upload API, see -// Multipart Upload and Permissions -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). The -// following operations are related to ListMultipartUploads: +// This operation lists in-progress multipart uploads in a bucket. 
An in-progress +// multipart upload is a multipart upload that has been initiated by the +// CreateMultipartUpload request, but has not yet been completed or aborted. +// Directory buckets - If multipart uploads in a directory bucket are in progress, +// you can't delete the bucket until all the in-progress multipart uploads are +// aborted or completed. The ListMultipartUploads operation returns a maximum of +// 1,000 multipart uploads in the response. The limit of 1,000 multipart uploads is +// also the default value. You can further limit the number of uploads in a +// response by specifying the max-uploads request parameter. If there are more +// than 1,000 multipart uploads that satisfy your ListMultipartUploads request, +// the response returns an IsTruncated element with the value of true , a +// NextKeyMarker element, and a NextUploadIdMarker element. To list the remaining +// multipart uploads, you need to make subsequent ListMultipartUploads requests. +// In these requests, include two query parameters: key-marker and upload-id-marker +// . Set the value of key-marker to the NextKeyMarker value from the previous +// response. Similarly, set the value of upload-id-marker to the NextUploadIdMarker +// value from the previous response. Directory buckets - The upload-id-marker +// element and the NextUploadIdMarker element aren't supported by directory +// buckets. To list the additional multipart uploads, you only need to set the +// value of key-marker to the NextKeyMarker value from the previous response. For +// more information about multipart uploads, see Uploading Objects Using Multipart +// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) +// in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must +// make requests for this API operation to the Zonal endpoint. These endpoints +// support virtual-hosted-style requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style +// requests are not supported. For more information, see Regional and Zonal +// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. Permissions +// - General purpose bucket permissions - For information about permissions +// required to use the multipart upload API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon S3 User Guide. +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// . 
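+//
+// A hedged sketch of the key-marker pagination described above (the client
+// variable, bucket name, and error handling are placeholders, not part of
+// this file):
+//
+//	in := &s3.ListMultipartUploadsInput{Bucket: aws.String("amzn-s3-demo-bucket")}
+//	for {
+//		out, err := client.ListMultipartUploads(ctx, in)
+//		if err != nil {
+//			return err
+//		}
+//		// ... inspect out.Uploads ...
+//		if out.IsTruncated == nil || !*out.IsTruncated {
+//			break
+//		}
+//		in.KeyMarker = out.NextKeyMarker
+//		in.UploadIdMarker = out.NextUploadIdMarker
+//	}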
// -// * -// CreateMultipartUpload -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// Sorting of multipart uploads in response +// - General purpose bucket - In the ListMultipartUploads response, the multipart +// uploads are sorted based on two criteria: +// - Key-based sorting - Multipart uploads are initially sorted in ascending +// order based on their object keys. +// - Time-based sorting - For uploads that share the same object key, they are +// further sorted in ascending order based on the upload initiation time. Among +// uploads with the same key, the one that was initiated first will appear before +// the ones that were initiated later. +// - Directory bucket - In the ListMultipartUploads response, the multipart +// uploads aren't sorted lexicographically based on the object keys. // -// * -// UploadPart -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// -// * -// CompleteMultipartUpload -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) -// -// * -// ListParts -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// -// * -// AbortMultipartUpload -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are +// related to ListMultipartUploads : +// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) func (c *Client) ListMultipartUploads(ctx context.Context, params *ListMultipartUploadsInput, optFns ...func(*Options)) (*ListMultipartUploadsOutput, error) { if params == nil { params = &ListMultipartUploadsInput{} @@ -69,124 +95,171 @@ func (c *Client) ListMultipartUploads(ctx context.Context, params *ListMultipart type ListMultipartUploadsInput struct { - // The name of the bucket to which the multipart upload was initiated. When using - // this action with an access point, you must direct requests to the access point - // hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, - // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts - // hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts bucket ARN in place of the bucket name. 
For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the - // Amazon S3 User Guide. + // The name of the bucket to which the multipart upload was initiated. Directory + // buckets - When you use this operation with a directory bucket, you must use + // virtual-hosted-style requests in the format + // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + // supported. Directory bucket names must be unique in the chosen Availability + // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket + // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + // use this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts access point ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // This member is required. Bucket *string // Character you use to group keys. All keys that contain the same string between // the prefix, if specified, and the first occurrence of the delimiter after the - // prefix are grouped under a single result element, CommonPrefixes. If you don't + // prefix are grouped under a single result element, CommonPrefixes . If you don't // specify the prefix parameter, then the substring starts at the beginning of the // key. The keys that are grouped under CommonPrefixes result element are not - // returned elsewhere in the response. + // returned elsewhere in the response. Directory buckets - For directory buckets, / + // is the only supported delimiter. Delimiter *string // Requests Amazon S3 to encode the object keys in the response and specifies the - // encoding method to use. An object key may contain any Unicode character; - // however, XML 1.0 parser cannot parse some characters, such as characters with an - // ASCII value from 0 to 10. For characters that are not supported in XML 1.0, you - // can add this parameter to request that Amazon S3 encode the keys in the + // encoding method to use. 
An object key can contain any Unicode character; + // however, the XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in XML + // 1.0, you can add this parameter to request that Amazon S3 encode the keys in the // response. EncodingType types.EncodingType - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string - // Together with upload-id-marker, this parameter specifies the multipart upload - // after which listing should begin. If upload-id-marker is not specified, only the - // keys lexicographically greater than the specified key-marker will be included in - // the list. If upload-id-marker is specified, any multipart uploads for a key - // equal to the key-marker might also be included, provided those multipart uploads - // have upload IDs lexicographically greater than the specified upload-id-marker. + // Specifies the multipart upload after which listing should begin. + // - General purpose buckets - For general purpose buckets, key-marker is an + // object key. Together with upload-id-marker , this parameter specifies the + // multipart upload after which listing should begin. If upload-id-marker is not + // specified, only the keys lexicographically greater than the specified + // key-marker will be included in the list. If upload-id-marker is specified, any + // multipart uploads for a key equal to the key-marker might also be included, + // provided those multipart uploads have upload IDs lexicographically greater than + // the specified upload-id-marker . + // - Directory buckets - For directory buckets, key-marker is obfuscated and + // isn't a real object key. The upload-id-marker parameter isn't supported by + // directory buckets. To list the additional multipart uploads, you only need to + // set the value of key-marker to the NextKeyMarker value from the previous + // response. In the ListMultipartUploads response, the multipart uploads aren't + // sorted lexicographically based on the object keys. KeyMarker *string // Sets the maximum number of multipart uploads, from 1 to 1,000, to return in the // response body. 1,000 is the maximum number of uploads that can be returned in a // response. - MaxUploads int32 + MaxUploads *int32 // Lists in-progress uploads only for those keys that begin with the specified // prefix. You can use prefixes to separate a bucket into different grouping of - // keys. (You can think of using prefix to make groups in the same way you'd use a - // folder in a file system.) + // keys. (You can think of using prefix to make groups in the same way that you'd + // use a folder in a file system.) Directory buckets - For directory buckets, only + // prefixes that end in a delimiter ( / ) are supported. Prefix *string + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. 
For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. + RequestPayer types.RequestPayer + // Together with key-marker, specifies the multipart upload after which listing // should begin. If key-marker is not specified, the upload-id-marker parameter is // ignored. Otherwise, any multipart uploads for a key equal to the key-marker // might be included in the list only if they have an upload ID lexicographically - // greater than the specified upload-id-marker. + // greater than the specified upload-id-marker . This functionality is not + // supported for directory buckets. UploadIdMarker *string noSmithyDocumentSerde } +func (in *ListMultipartUploadsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.Prefix = in.Prefix + +} + type ListMultipartUploadsOutput struct { // The name of the bucket to which the multipart upload was initiated. Does not // return the access point ARN or access point alias if used. Bucket *string - // If you specify a delimiter in the request, then the result returns each distinct - // key prefix containing the delimiter in a CommonPrefixes element. The distinct - // key prefixes are returned in the Prefix child element. + // If you specify a delimiter in the request, then the result returns each + // distinct key prefix containing the delimiter in a CommonPrefixes element. The + // distinct key prefixes are returned in the Prefix child element. Directory + // buckets - For directory buckets, only prefixes that end in a delimiter ( / ) are + // supported. CommonPrefixes []types.CommonPrefix // Contains the delimiter you specified in the request. If you don't specify a - // delimiter in your request, this element is absent from the response. + // delimiter in your request, this element is absent from the response. Directory + // buckets - For directory buckets, / is the only supported delimiter. Delimiter *string // Encoding type used by Amazon S3 to encode object keys in the response. If you - // specify encoding-type request parameter, Amazon S3 includes this element in the - // response, and returns encoded key name values in the following response - // elements: Delimiter, KeyMarker, Prefix, NextKeyMarker, Key. + // specify the encoding-type request parameter, Amazon S3 includes this element in + // the response, and returns encoded key name values in the following response + // elements: Delimiter , KeyMarker , Prefix , NextKeyMarker , Key . EncodingType types.EncodingType // Indicates whether the returned list of multipart uploads is truncated. A value // of true indicates that the list was truncated. The list can be truncated if the // number of multipart uploads exceeds the limit allowed or specified by max // uploads. - IsTruncated bool + IsTruncated *bool // The key at or after which the listing began. KeyMarker *string // Maximum number of multipart uploads that could have been included in the // response. - MaxUploads int32 + MaxUploads *int32 // When a list is truncated, this element specifies the value that should be used // for the key-marker request parameter in a subsequent request. NextKeyMarker *string // When a list is truncated, this element specifies the value that should be used - // for the upload-id-marker request parameter in a subsequent request. 
+ // for the upload-id-marker request parameter in a subsequent request. This + // functionality is not supported for directory buckets. NextUploadIdMarker *string // When a prefix is provided in the request, this field contains the specified // prefix. The result contains only keys starting with the specified prefix. + // Directory buckets - For directory buckets, only prefixes that end in a delimiter + // ( / ) are supported. Prefix *string - // Upload ID after which listing began. + // If present, indicates that the requester was successfully charged for the + // request. This functionality is not supported for directory buckets. + RequestCharged types.RequestCharged + + // Upload ID after which listing began. This functionality is not supported for + // directory buckets. UploadIdMarker *string // Container for elements related to a particular multipart upload. A response can @@ -200,6 +273,9 @@ type ListMultipartUploadsOutput struct { } func (c *Client) addOperationListMultipartUploadsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpListMultipartUploads{}, middleware.After) if err != nil { return err @@ -208,6 +284,13 @@ func (c *Client) addOperationListMultipartUploadsMiddlewares(stack *middleware.S if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListMultipartUploads"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -226,16 +309,13 @@ func (c *Client) addOperationListMultipartUploadsMiddlewares(stack *middleware.S if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -244,7 +324,10 @@ func (c *Client) addOperationListMultipartUploadsMiddlewares(stack *middleware.S if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpListMultipartUploadsValidationMiddleware(stack); err != nil { @@ -256,6 +339,9 @@ func (c *Client) addOperationListMultipartUploadsMiddlewares(stack *middleware.S if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addListMultipartUploadsUpdateEndpoint(stack, options); err != nil { return err } @@ -271,14 +357,26 @@ func (c *Client) addOperationListMultipartUploadsMiddlewares(stack *middleware.S if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = 
addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *ListMultipartUploadsInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opListMultipartUploads(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "ListMultipartUploads", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go index f2d2b9fa..2a0cd10f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go @@ -4,6 +4,7 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" @@ -12,30 +13,19 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns metadata about all versions of the objects in a bucket. You can also use -// request parameters as selection criteria to return metadata about a subset of -// all the object versions. To use this operation, you must have permissions to -// perform the s3:ListBucketVersions action. Be aware of the name difference. A 200 -// OK response can contain valid or invalid XML. Make sure to design your -// application to parse the contents of the response and handle it appropriately. -// To use this operation, you must have READ access to the bucket. This action is -// not supported by Amazon S3 on Outposts. The following operations are related to -// ListObjectVersions: -// -// * ListObjectsV2 -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) -// -// * -// GetObject -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// -// * -// PutObject -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// -// * -// DeleteObject -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// This operation is not supported by directory buckets. Returns metadata about +// all versions of the objects in a bucket. You can also use request parameters as +// selection criteria to return metadata about a subset of all the object versions. +// To use this operation, you must have permission to perform the +// s3:ListBucketVersions action. Be aware of the name difference. A 200 OK +// response can contain valid or invalid XML. Make sure to design your application +// to parse the contents of the response and handle it appropriately. To use this +// operation, you must have READ access to the bucket. 
The following operations are +// related to ListObjectVersions : +// - ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) func (c *Client) ListObjectVersions(ctx context.Context, params *ListObjectVersionsInput, optFns ...func(*Options)) (*ListObjectVersionsOutput, error) { if params == nil { params = &ListObjectVersionsInput{} @@ -58,49 +48,69 @@ type ListObjectVersionsInput struct { // This member is required. Bucket *string - // A delimiter is a character that you specify to group keys. All keys that contain - // the same string between the prefix and the first occurrence of the delimiter are - // grouped under a single result element in CommonPrefixes. These groups are - // counted as one result against the max-keys limitation. These keys are not - // returned elsewhere in the response. + // A delimiter is a character that you specify to group keys. All keys that + // contain the same string between the prefix and the first occurrence of the + // delimiter are grouped under a single result element in CommonPrefixes . These + // groups are counted as one result against the max-keys limitation. These keys + // are not returned elsewhere in the response. Delimiter *string // Requests Amazon S3 to encode the object keys in the response and specifies the - // encoding method to use. An object key may contain any Unicode character; - // however, XML 1.0 parser cannot parse some characters, such as characters with an - // ASCII value from 0 to 10. For characters that are not supported in XML 1.0, you - // can add this parameter to request that Amazon S3 encode the keys in the + // encoding method to use. An object key can contain any Unicode character; + // however, the XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in XML + // 1.0, you can add this parameter to request that Amazon S3 encode the keys in the // response. EncodingType types.EncodingType - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Specifies the key to start with when listing objects in a bucket. KeyMarker *string - // Sets the maximum number of keys returned in the response. By default the action - // returns up to 1,000 key names. The response might contain fewer keys but will - // never contain more. If additional keys satisfy the search criteria, but were not - // returned because max-keys was exceeded, the response contains true. To return - // the additional keys, see key-marker and version-id-marker. - MaxKeys int32 + // Sets the maximum number of keys returned in the response. By default, the + // action returns up to 1,000 key names. The response might contain fewer keys but + // will never contain more. If additional keys satisfy the search criteria, but + // were not returned because max-keys was exceeded, the response contains true . 
To + // return the additional keys, see key-marker and version-id-marker . + MaxKeys *int32 + + // Specifies the optional fields that you want returned in the response. Fields + // that you do not specify are not returned. + OptionalObjectAttributes []types.OptionalObjectAttributes // Use this parameter to select only those keys that begin with the specified // prefix. You can use prefixes to separate a bucket into different groupings of - // keys. (You can think of using prefix to make groups in the same way you'd use a - // folder in a file system.) You can use prefix with delimiter to roll up numerous - // objects into a single result under CommonPrefixes. + // keys. (You can think of using prefix to make groups in the same way that you'd + // use a folder in a file system.) You can use prefix with delimiter to roll up + // numerous objects into a single result under CommonPrefixes . Prefix *string + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. + RequestPayer types.RequestPayer + // Specifies the object version you want to start listing from. VersionIdMarker *string noSmithyDocumentSerde } +func (in *ListObjectVersionsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.Prefix = in.Prefix + +} + type ListObjectVersionsOutput struct { // All of the keys rolled up into a common prefix count as a single return when @@ -113,38 +123,38 @@ type ListObjectVersionsOutput struct { // The delimiter grouping the included keys. A delimiter is a character that you // specify to group keys. All keys that contain the same string between the prefix // and the first occurrence of the delimiter are grouped under a single result - // element in CommonPrefixes. These groups are counted as one result against the + // element in CommonPrefixes . These groups are counted as one result against the // max-keys limitation. These keys are not returned elsewhere in the response. Delimiter *string // Encoding type used by Amazon S3 to encode object key names in the XML response. - // If you specify encoding-type request parameter, Amazon S3 includes this element - // in the response, and returns encoded key name values in the following response - // elements: KeyMarker, NextKeyMarker, Prefix, Key, and Delimiter. + // If you specify the encoding-type request parameter, Amazon S3 includes this + // element in the response, and returns encoded key name values in the following + // response elements: KeyMarker, NextKeyMarker, Prefix, Key , and Delimiter . EncodingType types.EncodingType // A flag that indicates whether Amazon S3 returned all of the results that // satisfied the search criteria. If your results were truncated, you can make a - // follow-up paginated request using the NextKeyMarker and NextVersionIdMarker + // follow-up paginated request by using the NextKeyMarker and NextVersionIdMarker // response parameters as a starting place in another request to return the rest of // the results. 
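+	// A hedged example of such a follow-up loop (the client variable, bucket
+	// name, and error handling are placeholders):
+	//
+	//	in := &s3.ListObjectVersionsInput{Bucket: aws.String("amzn-s3-demo-bucket")}
+	//	for {
+	//		out, err := client.ListObjectVersions(ctx, in)
+	//		if err != nil {
+	//			return err
+	//		}
+	//		// ... inspect out.Versions and out.DeleteMarkers ...
+	//		if out.IsTruncated == nil || !*out.IsTruncated {
+	//			break
+	//		}
+	//		in.KeyMarker = out.NextKeyMarker
+	//		in.VersionIdMarker = out.NextVersionIdMarker
+	//	}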
- IsTruncated bool + IsTruncated *bool // Marks the last key returned in a truncated response. KeyMarker *string // Specifies the maximum number of objects to return. - MaxKeys int32 + MaxKeys *int32 // The bucket name. Name *string - // When the number of responses exceeds the value of MaxKeys, NextKeyMarker + // When the number of responses exceeds the value of MaxKeys , NextKeyMarker // specifies the first key not returned that satisfies the search criteria. Use // this value for the key-marker request parameter in a subsequent request. NextKeyMarker *string - // When the number of responses exceeds the value of MaxKeys, NextVersionIdMarker + // When the number of responses exceeds the value of MaxKeys , NextVersionIdMarker // specifies the first object version not returned that satisfies the search // criteria. Use this value for the version-id-marker request parameter in a // subsequent request. @@ -153,6 +163,10 @@ type ListObjectVersionsOutput struct { // Selects objects that start with the value supplied by this parameter. Prefix *string + // If present, indicates that the requester was successfully charged for the + // request. This functionality is not supported for directory buckets. + RequestCharged types.RequestCharged + // Marks the last version of the key returned in a truncated response. VersionIdMarker *string @@ -166,6 +180,9 @@ type ListObjectVersionsOutput struct { } func (c *Client) addOperationListObjectVersionsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpListObjectVersions{}, middleware.After) if err != nil { return err @@ -174,6 +191,13 @@ func (c *Client) addOperationListObjectVersionsMiddlewares(stack *middleware.Sta if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListObjectVersions"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -192,16 +216,13 @@ func (c *Client) addOperationListObjectVersionsMiddlewares(stack *middleware.Sta if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -210,7 +231,10 @@ func (c *Client) addOperationListObjectVersionsMiddlewares(stack *middleware.Sta if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpListObjectVersionsValidationMiddleware(stack); err != nil { @@ -222,6 +246,9 @@ func (c *Client) addOperationListObjectVersionsMiddlewares(stack *middleware.Sta if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = 
awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addListObjectVersionsUpdateEndpoint(stack, options); err != nil { return err } @@ -237,14 +264,26 @@ func (c *Client) addOperationListObjectVersionsMiddlewares(stack *middleware.Sta if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *ListObjectVersionsInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opListObjectVersions(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "ListObjectVersions", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go index b2d83ff7..f0732edc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjects.go @@ -4,6 +4,7 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" @@ -12,35 +13,19 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Returns some or all (up to 1,000) of the objects in a bucket. You can use the -// request parameters as selection criteria to return a subset of the objects in a -// bucket. A 200 OK response can contain valid or invalid XML. Be sure to design -// your application to parse the contents of the response and handle it -// appropriately. This action has been revised. We recommend that you use the newer -// version, ListObjectsV2 -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html), when -// developing applications. For backward compatibility, Amazon S3 continues to -// support ListObjects. The following operations are related to ListObjects: -// -// * -// ListObjectsV2 -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) -// -// * -// GetObject -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// -// * -// PutObject -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// -// * -// CreateBucket -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// -// * -// ListBuckets -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) +// This operation is not supported by directory buckets. Returns some or all (up +// to 1,000) of the objects in a bucket. You can use the request parameters as +// selection criteria to return a subset of the objects in a bucket. A 200 OK +// response can contain valid or invalid XML. Be sure to design your application to +// parse the contents of the response and handle it appropriately. This action has +// been revised. We recommend that you use the newer version, ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) +// , when developing applications. For backward compatibility, Amazon S3 continues +// to support ListObjects . 
The following operations are related to ListObjects : +// - ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// - ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) func (c *Client) ListObjects(ctx context.Context, params *ListObjectsInput, optFns ...func(*Options)) (*ListObjectsOutput, error) { if params == nil { params = &ListObjectsInput{} @@ -58,51 +43,63 @@ func (c *Client) ListObjects(ctx context.Context, params *ListObjectsInput, optF type ListObjectsInput struct { - // The name of the bucket containing the objects. When using this action with an - // access point, you must direct requests to the access point hostname. The access - // point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, - // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts - // hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the - // Amazon S3 User Guide. + // The name of the bucket containing the objects. Directory buckets - When you use + // this operation with a directory bucket, you must use virtual-hosted-style + // requests in the format Bucket_name.s3express-az_id.region.amazonaws.com . + // Path-style requests are not supported. Directory bucket names must be unique in + // the chosen Availability Zone. Bucket names must follow the format + // bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 + // ). For information about bucket naming restrictions, see Directory bucket + // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. 
S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + // use this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts access point ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // This member is required. Bucket *string - // A delimiter is a character you use to group keys. + // A delimiter is a character that you use to group keys. Delimiter *string // Requests Amazon S3 to encode the object keys in the response and specifies the - // encoding method to use. An object key may contain any Unicode character; - // however, XML 1.0 parser cannot parse some characters, such as characters with an - // ASCII value from 0 to 10. For characters that are not supported in XML 1.0, you - // can add this parameter to request that Amazon S3 encode the keys in the + // encoding method to use. An object key can contain any Unicode character; + // however, the XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in XML + // 1.0, you can add this parameter to request that Amazon S3 encode the keys in the // response. EncodingType types.EncodingType - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Marker is where you want Amazon S3 to start listing from. Amazon S3 starts // listing after this specified key. Marker can be any key in the bucket. Marker *string - // Sets the maximum number of keys returned in the response. By default the action - // returns up to 1,000 key names. The response might contain fewer keys but will - // never contain more. - MaxKeys int32 + // Sets the maximum number of keys returned in the response. By default, the + // action returns up to 1,000 key names. The response might contain fewer keys but + // will never contain more. + MaxKeys *int32 + + // Specifies the optional fields that you want returned in the response. Fields + // that you do not specify are not returned. + OptionalObjectAttributes []types.OptionalObjectAttributes // Limits the response to keys that begin with the specified prefix. Prefix *string @@ -115,6 +112,12 @@ type ListObjectsInput struct { noSmithyDocumentSerde } +func (in *ListObjectsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.Prefix = in.Prefix + +} + type ListObjectsOutput struct { // All of the keys (up to 1,000) rolled up in a common prefix count as a single @@ -122,10 +125,10 @@ type ListObjectsOutput struct { // CommonPrefixes only if you specify a delimiter. CommonPrefixes contains all (if // there are any) keys between Prefix and the next occurrence of the string // specified by the delimiter. CommonPrefixes lists keys that act like - // subdirectories in the directory specified by Prefix. 
For example, if the prefix - // is notes/ and the delimiter is a slash (/) as in notes/summer/july, the common - // prefix is notes/summer/. All of the keys that roll up into a common prefix count - // as a single return when calculating the number of returns. + // subdirectories in the directory specified by Prefix . For example, if the prefix + // is notes/ and the delimiter is a slash ( / ), as in notes/summer/july , the + // common prefix is notes/summer/ . All of the keys that roll up into a common + // prefix count as a single return when calculating the number of returns. CommonPrefixes []types.CommonPrefix // Metadata about each object returned. @@ -143,30 +146,35 @@ type ListObjectsOutput struct { // A flag that indicates whether Amazon S3 returned all of the results that // satisfied the search criteria. - IsTruncated bool + IsTruncated *bool - // Indicates where in the bucket listing begins. Marker is included in the response - // if it was sent with the request. + // Indicates where in the bucket listing begins. Marker is included in the + // response if it was sent with the request. Marker *string // The maximum number of keys returned in the response body. - MaxKeys int32 + MaxKeys *int32 // The bucket name. Name *string - // When response is truncated (the IsTruncated element value in the response is - // true), you can use the key name in this field as marker in the subsequent - // request to get next set of objects. Amazon S3 lists objects in alphabetical - // order Note: This element is returned only if you have delimiter request - // parameter specified. If response does not include the NextMarker and it is - // truncated, you can use the value of the last Key in the response as the marker - // in the subsequent request to get the next set of object keys. + // When the response is truncated (the IsTruncated element value in the response + // is true ), you can use the key name in this field as the marker parameter in + // the subsequent request to get the next set of objects. Amazon S3 lists objects + // in alphabetical order. This element is returned only if you have the delimiter + // request parameter specified. If the response does not include the NextMarker + // element and it is truncated, you can use the value of the last Key element in + // the response as the marker parameter in the subsequent request to get the next + // set of object keys. NextMarker *string // Keys that begin with the indicated prefix. Prefix *string + // If present, indicates that the requester was successfully charged for the + // request. This functionality is not supported for directory buckets. + RequestCharged types.RequestCharged + // Metadata pertaining to the operation's result. 
ResultMetadata middleware.Metadata @@ -174,6 +182,9 @@ type ListObjectsOutput struct { } func (c *Client) addOperationListObjectsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpListObjects{}, middleware.After) if err != nil { return err @@ -182,6 +193,13 @@ func (c *Client) addOperationListObjectsMiddlewares(stack *middleware.Stack, opt if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListObjects"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -200,16 +218,13 @@ func (c *Client) addOperationListObjectsMiddlewares(stack *middleware.Stack, opt if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -218,7 +233,10 @@ func (c *Client) addOperationListObjectsMiddlewares(stack *middleware.Stack, opt if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpListObjectsValidationMiddleware(stack); err != nil { @@ -230,6 +248,9 @@ func (c *Client) addOperationListObjectsMiddlewares(stack *middleware.Stack, opt if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addListObjectsUpdateEndpoint(stack, options); err != nil { return err } @@ -245,14 +266,26 @@ func (c *Client) addOperationListObjectsMiddlewares(stack *middleware.Stack, opt if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *ListObjectsInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opListObjects(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "ListObjects", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go index 6214d247..b2f182df 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectsV2.go @@ -17,37 +17,51 @@ import ( // You can use the request parameters as selection criteria to return a subset of // the objects in 
a bucket. A 200 OK response can contain valid or invalid XML. // Make sure to design your application to parse the contents of the response and -// handle it appropriately. Objects are returned sorted in an ascending order of -// the respective key names in the list. For more information about listing -// objects, see Listing object keys programmatically -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html) -// To use this operation, you must have READ access to the bucket. To use this -// action in an Identity and Access Management (IAM) policy, you must have -// permissions to perform the s3:ListBucket action. The bucket owner has this -// permission by default and can grant this permission to others. For more -// information about permissions, see Permissions Related to Bucket Subresource -// Operations -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). -// This section describes the latest revision of this action. We recommend that you -// use this revised API for application development. For backward compatibility, -// Amazon S3 continues to support the prior version of this API, ListObjects -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html). To get a -// list of your buckets, see ListBuckets -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html). The -// following operations are related to ListObjectsV2: +// handle it appropriately. For more information about listing objects, see +// Listing object keys programmatically (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html) +// in the Amazon S3 User Guide. To get a list of your buckets, see ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) +// . Directory buckets - For directory buckets, you must make requests for this API +// operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style +// requests are not supported. For more information, see Regional and Zonal +// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. Permissions +// - General purpose bucket permissions - To use this operation, you must have +// READ access to the bucket. You must have permission to perform the +// s3:ListBucket action. The bucket owner has this permission by default and can +// grant this permission to others. For more information about permissions, see +// Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. 
Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// . // -// * GetObject -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// Sorting order of returned objects +// - General purpose bucket - For general purpose buckets, ListObjectsV2 returns +// objects in lexicographical order based on their key names. +// - Directory bucket - For directory buckets, ListObjectsV2 does not return +// objects in lexicographical order. // -// * -// PutObject -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// -// * -// CreateBucket -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . This section describes the +// latest revision of this action. We recommend that you use this revised API +// operation for application development. For backward compatibility, Amazon S3 +// continues to support the prior version of this API operation, ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) +// . The following operations are related to ListObjectsV2 : +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) func (c *Client) ListObjectsV2(ctx context.Context, params *ListObjectsV2Input, optFns ...func(*Options)) (*ListObjectsV2Output, error) { if params == nil { params = &ListObjectsV2Input{} @@ -65,140 +79,174 @@ func (c *Client) ListObjectsV2(ctx context.Context, params *ListObjectsV2Input, type ListObjectsV2Input struct { - // Bucket name to list. When using this action with an access point, you must - // direct requests to the access point hostname. The access point hostname takes - // the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When - // using this action with an access point through the Amazon Web Services SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using access points - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, - // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts - // hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts bucket ARN in place of the bucket name. 
For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the - // Amazon S3 User Guide. + // Directory buckets - When you use this operation with a directory bucket, you + // must use virtual-hosted-style requests in the format + // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + // supported. Directory bucket names must be unique in the chosen Availability + // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket + // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + // use this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts access point ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // This member is required. Bucket *string - // ContinuationToken indicates Amazon S3 that the list is being continued on this - // bucket with a token. ContinuationToken is obfuscated and is not a real key. + // ContinuationToken indicates to Amazon S3 that the list is being continued on + // this bucket with a token. ContinuationToken is obfuscated and is not a real + // key. You can use this ContinuationToken for pagination of the list results. ContinuationToken *string - // A delimiter is a character you use to group keys. + // A delimiter is a character that you use to group keys. + // - Directory buckets - For directory buckets, / is the only supported + // delimiter. + // - Directory buckets - When you query ListObjectsV2 with a delimiter during + // in-progress multipart uploads, the CommonPrefixes response parameter contains + // the prefixes that are associated with the in-progress multipart uploads. For + // more information about multipart uploads, see Multipart Upload Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) + // in the Amazon S3 User Guide. Delimiter *string // Encoding type used by Amazon S3 to encode object keys in the response. 
EncodingType types.EncodingType - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string - // The owner field is not present in listV2 by default, if you want to return owner - // field with each key in the result then set the fetch owner field to true. - FetchOwner bool - - // Sets the maximum number of keys returned in the response. By default the action - // returns up to 1,000 key names. The response might contain fewer keys but will - // never contain more. - MaxKeys int32 - - // Limits the response to keys that begin with the specified prefix. + // The owner field is not present in ListObjectsV2 by default. If you want to + // return the owner field with each key in the result, then set the FetchOwner + // field to true . Directory buckets - For directory buckets, the bucket owner is + // returned as the object owner for all objects. + FetchOwner *bool + + // Sets the maximum number of keys returned in the response. By default, the + // action returns up to 1,000 key names. The response might contain fewer keys but + // will never contain more. + MaxKeys *int32 + + // Specifies the optional fields that you want returned in the response. Fields + // that you do not specify are not returned. This functionality is not supported + // for directory buckets. + OptionalObjectAttributes []types.OptionalObjectAttributes + + // Limits the response to keys that begin with the specified prefix. Directory + // buckets - For directory buckets, only prefixes that end in a delimiter ( / ) are + // supported. Prefix *string // Confirms that the requester knows that she or he will be charged for the list // objects request in V2 style. Bucket owners need not specify this parameter in - // their requests. + // their requests. This functionality is not supported for directory buckets. RequestPayer types.RequestPayer // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts - // listing after this specified key. StartAfter can be any key in the bucket. + // listing after this specified key. StartAfter can be any key in the bucket. This + // functionality is not supported for directory buckets. StartAfter *string noSmithyDocumentSerde } +func (in *ListObjectsV2Input) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.Prefix = in.Prefix + +} + type ListObjectsV2Output struct { - // All of the keys (up to 1,000) rolled up into a common prefix count as a single - // return when calculating the number of returns. A response can contain - // CommonPrefixes only if you specify a delimiter. CommonPrefixes contains all (if - // there are any) keys between Prefix and the next occurrence of the string - // specified by a delimiter. CommonPrefixes lists keys that act like subdirectories - // in the directory specified by Prefix. For example, if the prefix is notes/ and - // the delimiter is a slash (/) as in notes/summer/july, the common prefix is - // notes/summer/. All of the keys that roll up into a common prefix count as a - // single return when calculating the number of returns. + // All of the keys (up to 1,000) that share the same prefix are grouped together. 
+ // When counting the total numbers of returns by this API operation, this group of + // keys is considered as one item. A response can contain CommonPrefixes only if + // you specify a delimiter. CommonPrefixes contains all (if there are any) keys + // between Prefix and the next occurrence of the string specified by a delimiter. + // CommonPrefixes lists keys that act like subdirectories in the directory + // specified by Prefix . For example, if the prefix is notes/ and the delimiter is + // a slash ( / ) as in notes/summer/july , the common prefix is notes/summer/ . All + // of the keys that roll up into a common prefix count as a single return when + // calculating the number of returns. + // - Directory buckets - For directory buckets, only prefixes that end in a + // delimiter ( / ) are supported. + // - Directory buckets - When you query ListObjectsV2 with a delimiter during + // in-progress multipart uploads, the CommonPrefixes response parameter contains + // the prefixes that are associated with the in-progress multipart uploads. For + // more information about multipart uploads, see Multipart Upload Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) + // in the Amazon S3 User Guide. CommonPrefixes []types.CommonPrefix // Metadata about each object returned. Contents []types.Object // If ContinuationToken was sent with the request, it is included in the response. + // You can use the returned ContinuationToken for pagination of the list response. + // You can use this ContinuationToken for pagination of the list results. ContinuationToken *string // Causes keys that contain the same string between the prefix and the first // occurrence of the delimiter to be rolled up into a single result element in the // CommonPrefixes collection. These rolled-up keys are not returned elsewhere in // the response. Each rolled-up result counts as only one return against the - // MaxKeys value. + // MaxKeys value. Directory buckets - For directory buckets, / is the only + // supported delimiter. Delimiter *string // Encoding type used by Amazon S3 to encode object key names in the XML response. // If you specify the encoding-type request parameter, Amazon S3 includes this // element in the response, and returns encoded key name values in the following - // response elements: Delimiter, Prefix, Key, and StartAfter. + // response elements: Delimiter, Prefix, Key, and StartAfter . EncodingType types.EncodingType // Set to false if all of the results were returned. Set to true if more keys are - // available to return. If the number of results exceeds that specified by MaxKeys, - // all of the results might not be returned. - IsTruncated bool + // available to return. If the number of results exceeds that specified by MaxKeys + // , all of the results might not be returned. + IsTruncated *bool // KeyCount is the number of keys returned with this request. KeyCount will always - // be less than or equals to MaxKeys field. Say you ask for 50 keys, your result - // will include less than equals 50 keys - KeyCount int32 - - // Sets the maximum number of keys returned in the response. By default the action - // returns up to 1,000 key names. The response might contain fewer keys but will - // never contain more. - MaxKeys int32 - - // The bucket name. When using this action with an access point, you must direct - // requests to the access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, - // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts - // hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the - // Amazon S3 User Guide. + // be less than or equal to the MaxKeys field. For example, if you ask for 50 + // keys, your result will include 50 keys or fewer. + KeyCount *int32 + + // Sets the maximum number of keys returned in the response. By default, the + // action returns up to 1,000 key names. The response might contain fewer keys but + // will never contain more. + MaxKeys *int32 + + // The bucket name. Name *string // NextContinuationToken is sent when isTruncated is true, which means there are // more keys in the bucket that can be listed. The next list requests to Amazon S3 - // can be continued with this NextContinuationToken. NextContinuationToken is + // can be continued with this NextContinuationToken . NextContinuationToken is // obfuscated and is not a real key NextContinuationToken *string - // Keys that begin with the indicated prefix. + // Keys that begin with the indicated prefix. Directory buckets - For directory + // buckets, only prefixes that end in a delimiter ( / ) are supported. Prefix *string - // If StartAfter was sent with the request, it is included in the response. + // If present, indicates that the requester was successfully charged for the + // request. This functionality is not supported for directory buckets. + RequestCharged types.RequestCharged + + // If StartAfter was sent with the request, it is included in the response. This + // functionality is not supported for directory buckets. StartAfter *string // Metadata pertaining to the operation's result. 
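The hunks below migrate the ListObjectsV2 paginator to the pointer-typed fields this update introduces (MaxKeys *int32, IsTruncated *bool), so downstream callers of the vendored SDK need small adjustments. A minimal sketch of driving the paginator after this bump; the bucket name is a placeholder, and the config/client constructors and aws pointer helpers are the SDK's standard ones, shown only for context:

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
    ctx := context.Background()

    // Standard client setup: shared config resolves region and credentials.
    cfg, err := config.LoadDefaultConfig(ctx)
    if err != nil {
        log.Fatal(err)
    }
    client := s3.NewFromConfig(cfg)

    // MaxKeys is *int32 after this update, so literals need aws.Int32.
    p := s3.NewListObjectsV2Paginator(client, &s3.ListObjectsV2Input{
        Bucket:  aws.String("example-bucket"), // placeholder bucket name
        MaxKeys: aws.Int32(100),
    })

    // The paginator chases NextContinuationToken internally and performs
    // the nil-safe IsTruncated check visible in the hunks that follow.
    for p.HasMorePages() {
        page, err := p.NextPage(ctx)
        if err != nil {
            log.Fatal(err)
        }
        for _, obj := range page.Contents {
            fmt.Println(aws.ToString(obj.Key))
        }
    }
}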
@@ -208,6 +256,9 @@ type ListObjectsV2Output struct { } func (c *Client) addOperationListObjectsV2Middlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpListObjectsV2{}, middleware.After) if err != nil { return err @@ -216,6 +267,13 @@ func (c *Client) addOperationListObjectsV2Middlewares(stack *middleware.Stack, o if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListObjectsV2"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -234,16 +292,13 @@ func (c *Client) addOperationListObjectsV2Middlewares(stack *middleware.Stack, o if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -252,7 +307,10 @@ func (c *Client) addOperationListObjectsV2Middlewares(stack *middleware.Stack, o if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpListObjectsV2ValidationMiddleware(stack); err != nil { @@ -264,6 +322,9 @@ func (c *Client) addOperationListObjectsV2Middlewares(stack *middleware.Stack, o if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addListObjectsV2UpdateEndpoint(stack, options); err != nil { return err } @@ -279,9 +340,22 @@ func (c *Client) addOperationListObjectsV2Middlewares(stack *middleware.Stack, o if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *ListObjectsV2Input) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + // ListObjectsV2APIClient is a client that implements the ListObjectsV2 operation. type ListObjectsV2APIClient interface { ListObjectsV2(context.Context, *ListObjectsV2Input, ...func(*Options)) (*ListObjectsV2Output, error) @@ -291,9 +365,9 @@ var _ ListObjectsV2APIClient = (*Client)(nil) // ListObjectsV2PaginatorOptions is the paginator options for ListObjectsV2 type ListObjectsV2PaginatorOptions struct { - // Sets the maximum number of keys returned in the response. By default the action - // returns up to 1,000 key names. The response might contain fewer keys but will - // never contain more. + // Sets the maximum number of keys returned in the response. By default, the + // action returns up to 1,000 key names. 
The response might contain fewer keys but + // will never contain more. Limit int32 // Set to true if pagination should stop if the service returns a pagination token @@ -317,8 +391,8 @@ func NewListObjectsV2Paginator(client ListObjectsV2APIClient, params *ListObject } options := ListObjectsV2PaginatorOptions{} - if params.MaxKeys != 0 { - options.Limit = params.MaxKeys + if params.MaxKeys != nil { + options.Limit = *params.MaxKeys } for _, fn := range optFns { @@ -348,7 +422,11 @@ func (p *ListObjectsV2Paginator) NextPage(ctx context.Context, optFns ...func(*O params := *p.params params.ContinuationToken = p.nextToken - params.MaxKeys = p.options.Limit + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxKeys = limit result, err := p.client.ListObjectsV2(ctx, &params, optFns...) if err != nil { @@ -358,7 +436,7 @@ func (p *ListObjectsV2Paginator) NextPage(ctx context.Context, optFns ...func(*O prevToken := p.nextToken p.nextToken = nil - if result.IsTruncated { + if result.IsTruncated != nil && *result.IsTruncated { p.nextToken = result.NextContinuationToken } @@ -376,7 +454,6 @@ func newServiceMetadataMiddleware_opListObjectsV2(region string) *awsmiddleware. return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "ListObjectsV2", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go index 36675dcd..11b0d59b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go @@ -14,48 +14,55 @@ import ( "time" ) -// Lists the parts that have been uploaded for a specific multipart upload. This -// operation must include the upload ID, which you obtain by sending the initiate -// multipart upload request (see CreateMultipartUpload -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)). -// This request returns a maximum of 1,000 uploaded parts. The default number of -// parts returned is 1,000 parts. You can restrict the number of parts returned by -// specifying the max-parts request parameter. If your multipart upload consists of -// more than 1,000 parts, the response returns an IsTruncated field with the value -// of true, and a NextPartNumberMarker element. In subsequent ListParts requests -// you can include the part-number-marker query string parameter and set its value -// to the NextPartNumberMarker field value from the previous response. If the -// upload was created using a checksum algorithm, you will need to have permission -// to the kms:Decrypt action for the request to succeed. For more information on -// multipart uploads, see Uploading Objects Using Multipart Upload -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). For -// information on permissions required to use the multipart upload API, see -// Multipart Upload and Permissions -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). The -// following operations are related to ListParts: +// Lists the parts that have been uploaded for a specific multipart upload. To use +// this operation, you must provide the upload ID in the request. You obtain this +// uploadID by sending the initiate multipart upload request through +// CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// . 
The ListParts request returns a maximum of 1,000 uploaded parts. The limit of +// 1,000 parts is also the default value. You can restrict the number of parts in a +// response by specifying the max-parts request parameter. If your multipart +// upload consists of more than 1,000 parts, the response returns an IsTruncated +// field with the value of true , and a NextPartNumberMarker element. To list +// remaining uploaded parts, in subsequent ListParts requests, include the +// part-number-marker query string parameter and set its value to the +// NextPartNumberMarker field value from the previous response. For more +// information on multipart uploads, see Uploading Objects Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) +// in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must +// make requests for this API operation to the Zonal endpoint. These endpoints +// support virtual-hosted-style requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style +// requests are not supported. For more information, see Regional and Zonal +// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. Permissions +// - General purpose bucket permissions - For information about permissions +// required to use the multipart upload API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon S3 User Guide. If the upload was created using server-side +// encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer +// server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), you must +// have permission to the kms:Decrypt action for the ListParts request to +// succeed. +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// . 
// -// * CreateMultipartUpload -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// -// * -// UploadPart -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// -// * -// CompleteMultipartUpload -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) -// -// * -// AbortMultipartUpload -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) -// -// * -// GetObjectAttributes -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) -// -// * -// ListMultipartUploads -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are +// related to ListParts : +// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) func (c *Client) ListParts(ctx context.Context, params *ListPartsInput, optFns ...func(*Options)) (*ListPartsOutput, error) { if params == nil { params = &ListPartsInput{} @@ -73,23 +80,31 @@ func (c *Client) ListParts(ctx context.Context, params *ListPartsInput, optFns . type ListPartsInput struct { - // The name of the bucket to which the parts are being uploaded. When using this - // action with an access point, you must direct requests to the access point - // hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, - // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts - // hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the - // Amazon S3 User Guide. + // The name of the bucket to which the parts are being uploaded. Directory buckets + // - When you use this operation with a directory bucket, you must use + // virtual-hosted-style requests in the format + // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + // supported. Directory bucket names must be unique in the chosen Availability + // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). 
For information about bucket + // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + // use this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts access point ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // This member is required. Bucket *string @@ -104,66 +119,75 @@ type ListPartsInput struct { // This member is required. UploadId *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Sets the maximum number of parts to return. - MaxParts int32 + MaxParts *int32 - // Specifies the part after which listing should begin. Only parts with higher part - // numbers will be listed. + // Specifies the part after which listing should begin. Only parts with higher + // part numbers will be listed. PartNumberMarker *string // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. 
RequestPayer types.RequestPayer // The server-side encryption (SSE) algorithm used to encrypt the object. This // parameter is needed only when the object was created using a checksum algorithm. - // For more information, see Protecting data using SSE-C keys - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. + // For more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. SSECustomerAlgorithm *string // The server-side encryption (SSE) customer managed key. This parameter is needed // only when the object was created using a checksum algorithm. For more - // information, see Protecting data using SSE-C keys - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. + // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. SSECustomerKey *string // The MD5 server-side encryption (SSE) customer managed key. This parameter is // needed only when the object was created using a checksum algorithm. For more - // information, see Protecting data using SSE-C keys - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) - // in the Amazon S3 User Guide. + // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. SSECustomerKeyMD5 *string noSmithyDocumentSerde } +func (in *ListPartsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.Key = in.Key + +} + type ListPartsOutput struct { - // If the bucket has a lifecycle rule configured with an action to abort incomplete - // multipart uploads and the prefix in the lifecycle rule matches the object name - // in the request, then the response includes this header indicating when the - // initiated multipart upload will become eligible for abort operation. For more - // information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle - // Policy - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). - // The response will also include the x-amz-abort-rule-id header that will provide - // the ID of the lifecycle configuration rule that defines this action. + // If the bucket has a lifecycle rule configured with an action to abort + // incomplete multipart uploads and the prefix in the lifecycle rule matches the + // object name in the request, then the response includes this header indicating + // when the initiated multipart upload will become eligible for abort operation. + // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket + // Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) + // . The response will also include the x-amz-abort-rule-id header that will + // provide the ID of the lifecycle configuration rule that defines this action. + // This functionality is not supported for directory buckets. 
AbortDate *time.Time // This header is returned along with the x-amz-abort-date header. It identifies // applicable lifecycle configuration rule that defines the action to abort - // incomplete multipart uploads. + // incomplete multipart uploads. This functionality is not supported for directory + // buckets. AbortRuleId *string // The name of the bucket to which the multipart upload was initiated. Does not @@ -182,22 +206,23 @@ type ListPartsOutput struct { // Indicates whether the returned list of parts is truncated. A true value // indicates that the list was truncated. A list can be truncated if the number of // parts exceeds the limit returned in the MaxParts element. - IsTruncated bool + IsTruncated *bool // Object key for which the multipart upload was initiated. Key *string // Maximum number of parts that were allowed in the response. - MaxParts int32 + MaxParts *int32 // When a list is truncated, this element specifies the last part in the list, as // well as the value to use for the part-number-marker request parameter in a // subsequent request. NextPartNumberMarker *string - // Container element that identifies the object owner, after the object is created. - // If multipart upload is initiated by an IAM user, this element provides the - // parent account ID and display name. + // Container element that identifies the object owner, after the object is + // created. If multipart upload is initiated by an IAM user, this element provides + // the parent account ID and display name. Directory buckets - The bucket owner is + // returned as the object owner for all the parts. Owner *types.Owner // When a list is truncated, this element specifies the last part in the list, as @@ -205,16 +230,17 @@ type ListPartsOutput struct { // subsequent request. PartNumberMarker *string - // Container for elements related to a particular part. A response can contain zero - // or more Part elements. + // Container for elements related to a particular part. A response can contain + // zero or more Part elements. Parts []types.Part // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged - // Class of storage (STANDARD or REDUCED_REDUNDANCY) used to store the uploaded - // object. + // The class of storage used to store the uploaded object. Directory buckets - + // Only the S3 Express One Zone storage class is supported by directory buckets to + // store objects. StorageClass types.StorageClass // Upload ID identifying the multipart upload whose parts are being listed. 
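ListParts undergoes the same value-to-pointer migration as ListObjectsV2 above (IsTruncated *bool, MaxParts *int32), so code that pages through parts by hand must switch to pointer handling. A hedged sketch of manual pagination under the new types; the bucket, key, and upload ID are placeholders, and aws.String/aws.Int32/aws.ToBool are the SDK's standard pointer-conversion helpers:

package example

import (
    "context"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// collectParts pages through ListParts manually under the pointer-typed
// fields this update introduces (MaxParts *int32, IsTruncated *bool).
func collectParts(ctx context.Context, client *s3.Client, bucket, key, uploadID string) ([]types.Part, error) {
    in := &s3.ListPartsInput{
        Bucket:   aws.String(bucket),
        Key:      aws.String(key),
        UploadId: aws.String(uploadID),
        MaxParts: aws.Int32(1000), // was a plain int32 before this bump
    }
    var parts []types.Part
    for {
        out, err := client.ListParts(ctx, in)
        if err != nil {
            return nil, err
        }
        parts = append(parts, out.Parts...)
        // aws.ToBool treats a nil *bool as false, mirroring the paginator's
        // new "result.IsTruncated != nil && *result.IsTruncated" check.
        if !aws.ToBool(out.IsTruncated) {
            return parts, nil
        }
        in.PartNumberMarker = out.NextPartNumberMarker
    }
}

The vendored ListPartsPaginator in the hunks below implements this same loop, so application code would normally prefer it over hand-rolled pagination.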
@@ -227,6 +253,9 @@ type ListPartsOutput struct { } func (c *Client) addOperationListPartsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpListParts{}, middleware.After) if err != nil { return err @@ -235,6 +264,13 @@ func (c *Client) addOperationListPartsMiddlewares(stack *middleware.Stack, optio if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "ListParts"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -253,16 +289,13 @@ func (c *Client) addOperationListPartsMiddlewares(stack *middleware.Stack, optio if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -271,7 +304,10 @@ func (c *Client) addOperationListPartsMiddlewares(stack *middleware.Stack, optio if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpListPartsValidationMiddleware(stack); err != nil { @@ -283,6 +319,9 @@ func (c *Client) addOperationListPartsMiddlewares(stack *middleware.Stack, optio if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addListPartsUpdateEndpoint(stack, options); err != nil { return err } @@ -298,9 +337,22 @@ func (c *Client) addOperationListPartsMiddlewares(stack *middleware.Stack, optio if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *ListPartsInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + // ListPartsAPIClient is a client that implements the ListParts operation. 
type ListPartsAPIClient interface { ListParts(context.Context, *ListPartsInput, ...func(*Options)) (*ListPartsOutput, error) @@ -334,8 +386,8 @@ func NewListPartsPaginator(client ListPartsAPIClient, params *ListPartsInput, op } options := ListPartsPaginatorOptions{} - if params.MaxParts != 0 { - options.Limit = params.MaxParts + if params.MaxParts != nil { + options.Limit = *params.MaxParts } for _, fn := range optFns { @@ -365,7 +417,11 @@ func (p *ListPartsPaginator) NextPage(ctx context.Context, optFns ...func(*Optio params := *p.params params.PartNumberMarker = p.nextToken - params.MaxParts = p.options.Limit + var limit *int32 + if p.options.Limit > 0 { + limit = &p.options.Limit + } + params.MaxParts = limit result, err := p.client.ListParts(ctx, &params, optFns...) if err != nil { @@ -375,7 +431,7 @@ func (p *ListPartsPaginator) NextPage(ctx context.Context, optFns ...func(*Optio prevToken := p.nextToken p.nextToken = nil - if result.IsTruncated { + if result.IsTruncated != nil && *result.IsTruncated { p.nextToken = result.NextPartNumberMarker } @@ -393,7 +449,6 @@ func newServiceMetadataMiddleware_opListParts(region string) *awsmiddleware.Regi return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "ListParts", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go index 7875798f..80344efb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAccelerateConfiguration.go @@ -4,52 +4,41 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer -// Acceleration is a bucket-level feature that enables you to perform faster data -// transfers to Amazon S3. To use this operation, you must have permission to -// perform the s3:PutAccelerateConfiguration action. The bucket owner has this -// permission by default. The bucket owner can grant this permission to others. For -// more information about permissions, see Permissions Related to Bucket -// Subresource Operations -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). -// The Transfer Acceleration state of a bucket can be set to one of the following +// This operation is not supported by directory buckets. Sets the accelerate +// configuration of an existing bucket. Amazon S3 Transfer Acceleration is a +// bucket-level feature that enables you to perform faster data transfers to Amazon +// S3. To use this operation, you must have permission to perform the +// s3:PutAccelerateConfiguration action. The bucket owner has this permission by +// default. The bucket owner can grant this permission to others. 
For more +// information about permissions, see Permissions Related to Bucket Subresource +// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// . The Transfer Acceleration state of a bucket can be set to one of the following // two values: +// - Enabled – Enables accelerated data transfers to the bucket. +// - Suspended – Disables accelerated data transfers to the bucket. // -// * Enabled – Enables accelerated data transfers to the bucket. -// -// * -// Suspended – Disables accelerated data transfers to the bucket. -// -// The -// GetBucketAccelerateConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html) +// The GetBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html) // action returns the transfer acceleration state of a bucket. After setting the // Transfer Acceleration state of a bucket to Enabled, it might take up to thirty // minutes before the data transfer rates to the bucket increase. The name of the // bucket used for Transfer Acceleration must be DNS-compliant and must not contain // periods ("."). For more information about transfer acceleration, see Transfer -// Acceleration -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html). -// The following operations are related to PutBucketAccelerateConfiguration: -// -// * -// GetBucketAccelerateConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html) -// -// * -// CreateBucket -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) +// . The following operations are related to PutBucketAccelerateConfiguration : +// - GetBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html) +// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) func (c *Client) PutBucketAccelerateConfiguration(ctx context.Context, params *PutBucketAccelerateConfigurationInput, optFns ...func(*Options)) (*PutBucketAccelerateConfigurationOutput, error) { if params == nil { params = &PutBucketAccelerateConfigurationInput{} @@ -77,25 +66,29 @@ type PutBucketAccelerateConfigurationInput struct { // This member is required. Bucket *string - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not using - // the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the - // HTTP status code 400 Bad Request. For more information, see Checking object - // integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. 
Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 // ignores any provided ChecksumAlgorithm parameter. ChecksumAlgorithm types.ChecksumAlgorithm - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *PutBucketAccelerateConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type PutBucketAccelerateConfigurationOutput struct { // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata @@ -104,6 +97,9 @@ type PutBucketAccelerateConfigurationOutput struct { } func (c *Client) addOperationPutBucketAccelerateConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketAccelerateConfiguration{}, middleware.After) if err != nil { return err @@ -112,6 +108,13 @@ func (c *Client) addOperationPutBucketAccelerateConfigurationMiddlewares(stack * if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketAccelerateConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -130,16 +133,13 @@ func (c *Client) addOperationPutBucketAccelerateConfigurationMiddlewares(stack * if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -148,7 +148,10 @@ func (c *Client) addOperationPutBucketAccelerateConfigurationMiddlewares(stack * if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpPutBucketAccelerateConfigurationValidationMiddleware(stack); err != nil { @@ -160,6 +163,9 @@ func (c *Client) addOperationPutBucketAccelerateConfigurationMiddlewares(stack * if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = 
addPutBucketAccelerateConfigurationInputChecksumMiddlewares(stack, options); err != nil { return err } @@ -178,14 +184,26 @@ func (c *Client) addOperationPutBucketAccelerateConfigurationMiddlewares(stack * if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *PutBucketAccelerateConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opPutBucketAccelerateConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "PutBucketAccelerateConfiguration", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go index bc28a996..6382d276 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAcl.go @@ -4,156 +4,100 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Sets the permissions on an existing bucket using access control lists (ACL). For -// more information, see Using ACLs -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). To set -// the ACL of a bucket, you must have WRITE_ACP permission. You can use one of the -// following two ways to set a bucket's permissions: -// -// * Specify the ACL in the -// request body -// -// * Specify permissions using request headers -// -// You cannot specify -// access permission using both the body and the request headers. Depending on your -// application needs, you may choose to set the ACL on a bucket using either the -// request body or the headers. For example, if you have an existing application -// that updates a bucket ACL using the request body, then you can continue to use -// that approach. If your bucket uses the bucket owner enforced setting for S3 -// Object Ownership, ACLs are disabled and no longer affect permissions. You must -// use policies to grant access to your bucket and the objects in it. Requests to -// set ACLs or update ACLs fail and return the AccessControlListNotSupported error -// code. Requests to read ACLs are still supported. For more information, see -// Controlling object ownership -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) -// in the Amazon S3 User Guide. Access Permissions You can set access permissions -// using one of the following methods: -// -// * Specify a canned ACL with the x-amz-acl -// request header. Amazon S3 supports a set of predefined ACLs, known as canned -// ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify -// the canned ACL name as the value of x-amz-acl. 
If you use this header, you
-// cannot use other access control-specific headers in your request. For more
-// information, see Canned ACL
-// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
-//
-// *
-// Specify access permissions explicitly with the x-amz-grant-read,
-// x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control
-// headers. When using these headers, you specify explicit access permissions and
-// grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the
-// permission. If you use these ACL-specific headers, you cannot use the x-amz-acl
-// header to set a canned ACL. These parameters map to the set of permissions that
-// Amazon S3 supports in an ACL. For more information, see Access Control List
-// (ACL) Overview
-// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). You specify
-// each grantee as a type=value pair, where the type is one of the following:
-//
-// * id
-// – if the value specified is the canonical user ID of an Amazon Web Services
-// account
-//
-// * uri – if you are granting permissions to a predefined group
-//
-// *
-// emailAddress – if the value specified is the email address of an Amazon Web
-// Services account Using email addresses to specify a grantee is only supported in
-// the following Amazon Web Services Regions:
-//
-// * US East (N. Virginia)
-//
-// * US West
-// (N. California)
-//
-// * US West (Oregon)
-//
-// * Asia Pacific (Singapore)
-//
-// * Asia Pacific
-// (Sydney)
-//
-// * Asia Pacific (Tokyo)
-//
-// * Europe (Ireland)
-//
-// * South America (São
-// Paulo)
-//
-// For a list of all the Amazon S3 supported Regions and endpoints, see
-// Regions and Endpoints
-// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the
-// Amazon Web Services General Reference.
-//
-// For example, the following
-// x-amz-grant-write header grants create, overwrite, and delete objects permission
-// to LogDelivery group predefined by Amazon S3 and two Amazon Web Services
-// accounts identified by their email addresses. x-amz-grant-write:
-// uri="http://acs.amazonaws.com/groups/s3/LogDelivery", id="111122223333",
-// id="555566667777"
-//
-// You can use either a canned ACL or specify access permissions
-// explicitly. You cannot do both. Grantee Values You can specify the person
-// (grantee) to whom you're assigning access rights (using request elements) in the
-// following ways:
-//
-// * By the person's ID: <>ID<><>GranteesEmail<> DisplayName is
-// optional and ignored in the request
-//
-// * By URI:
-// <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<>
-//
-// * By Email
-// address: <>Grantees@email.com<>lt;/Grantee> The grantee is resolved to the
-// CanonicalUser and, in a response to a GET Object acl request, appears as the
-// CanonicalUser. Using email addresses to specify a grantee is only supported in
-// the following Amazon Web Services Regions:
-//
-// * US East (N. Virginia)
-//
-// * US West
-// (N. California)
-//
-// * US West (Oregon)
-//
-// * Asia Pacific (Singapore)
-//
-// * Asia Pacific
-// (Sydney)
-//
-// * Asia Pacific (Tokyo)
-//
-// * Europe (Ireland)
-//
-// * South America (São
-// Paulo)
-//
-// For a list of all the Amazon S3 supported Regions and endpoints, see
-// Regions and Endpoints
-// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the
-// Amazon Web Services General Reference.
-//
-// # Related Resources
-//
-// * CreateBucket
-// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
-//
-// *
-// DeleteBucket
-// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html)
-//
-// *
-// GetObjectAcl
-// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html)
+// This operation is not supported by directory buckets. Sets the permissions on
+// an existing bucket using access control lists (ACL). For more information, see
+// Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html)
+// . To set the ACL of a bucket, you must have the WRITE_ACP permission. You can
+// use one of the following two ways to set a bucket's permissions:
+// - Specify the ACL in the request body
+// - Specify permissions using request headers
+//
+// You cannot specify access permission using both the body and the request
+// headers. Depending on your application needs, you may choose to set the ACL on a
+// bucket using either the request body or the headers. For example, if you have an
+// existing application that updates a bucket ACL using the request body, then you
+// can continue to use that approach. If your bucket uses the bucket owner enforced
+// setting for S3 Object Ownership, ACLs are disabled and no longer affect
+// permissions. You must use policies to grant access to your bucket and the
+// objects in it. Requests to set ACLs or update ACLs fail and return the
+// AccessControlListNotSupported error code. Requests to read ACLs are still
+// supported. For more information, see Controlling object ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
+// in the Amazon S3 User Guide. Permissions You can set access permissions by using
+// one of the following methods:
+// - Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a
+// set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined
+// set of grantees and permissions. Specify the canned ACL name as the value of
+// x-amz-acl . If you use this header, you cannot use other access
+// control-specific headers in your request. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL)
+// .
+// - Specify access permissions explicitly with the x-amz-grant-read ,
+// x-amz-grant-read-acp , x-amz-grant-write-acp , and x-amz-grant-full-control
+// headers. When using these headers, you specify explicit access permissions and
+// grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the
+// permission. If you use these ACL-specific headers, you cannot use the
+// x-amz-acl header to set a canned ACL. These parameters map to the set of
+// permissions that Amazon S3 supports in an ACL. For more information, see
+// Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
+// . You specify each grantee as a type=value pair, where the type is one of the
+// following:
+// - id – if the value specified is the canonical user ID of an Amazon Web
+// Services account
+// - uri – if you are granting permissions to a predefined group
+// - emailAddress – if the value specified is the email address of an Amazon Web
+// Services account Using email addresses to specify a grantee is only supported in
+// the following Amazon Web Services Regions:
+// - US East (N. Virginia)
+// - US West (N. California)
+// - US West (Oregon)
+// - Asia Pacific (Singapore)
+// - Asia Pacific (Sydney)
+// - Asia Pacific (Tokyo)
+// - Europe (Ireland)
+// - South America (São Paulo) For a list of all the Amazon S3 supported Regions
+// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+// in the Amazon Web Services General Reference. For example, the following
+// x-amz-grant-write header grants create, overwrite, and delete objects
+// permission to LogDelivery group predefined by Amazon S3 and two Amazon Web
+// Services accounts identified by their email addresses. x-amz-grant-write:
+// uri="http://acs.amazonaws.com/groups/s3/LogDelivery", id="111122223333",
+// id="555566667777"
+//
+// You can use either a canned ACL or specify access permissions explicitly. You
+// cannot do both. Grantee Values You can specify the person (grantee) to whom
+// you're assigning access rights (using request elements) in the following ways:
+// - By the person's ID: <>ID<><>GranteesEmail<> DisplayName is optional and
+// ignored in the request
+// - By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<>
+// - By Email address: <>Grantees@email.com<>& The grantee is resolved to the
+// CanonicalUser and, in a response to a GET Object acl request, appears as the
+// CanonicalUser. Using email addresses to specify a grantee is only supported in
+// the following Amazon Web Services Regions:
+// - US East (N. Virginia)
+// - US West (N. California)
+// - US West (Oregon)
+// - Asia Pacific (Singapore)
+// - Asia Pacific (Sydney)
+// - Asia Pacific (Tokyo)
+// - Europe (Ireland)
+// - South America (São Paulo) For a list of all the Amazon S3 supported Regions
+// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+// in the Amazon Web Services General Reference.
+//
+// The following operations are related to PutBucketAcl :
+// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html)
+// - GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html)
 func (c *Client) PutBucketAcl(ctx context.Context, params *PutBucketAclInput, optFns ...func(*Options)) (*PutBucketAclOutput, error) {
 	if params == nil {
 		params = &PutBucketAclInput{}
@@ -182,28 +126,26 @@ type PutBucketAclInput struct {
 	// Contains the elements that set the ACL permissions for an object per grantee.
 	AccessControlPolicy *types.AccessControlPolicy

-	// Indicates the algorithm used to create the checksum for the object when using
-	// the SDK. This header will not provide any additional functionality if not using
-	// the SDK. When sending this header, there must be a corresponding x-amz-checksum
-	// or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the
-	// HTTP status code 400 Bad Request. For more information, see Checking object
-	// integrity
-	// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// Indicates the algorithm used to create the checksum for the object when you use
+	// the SDK. This header will not provide any additional functionality if you don't
+	// use the SDK. When you send this header, there must be a corresponding
+	// x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+	// request with the HTTP status code 400 Bad Request . For more information, see
+	// Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
 	// in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3
 	// ignores any provided ChecksumAlgorithm parameter.
 	ChecksumAlgorithm types.ChecksumAlgorithm

-	// The base64-encoded 128-bit MD5 digest of the data. This header must be used as a
-	// message integrity check to verify that the request body was not corrupted in
-	// transit. For more information, go to RFC 1864.
-	// (http://www.ietf.org/rfc/rfc1864.txt) For requests made using the Amazon Web
-	// Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is
-	// calculated automatically.
+	// The base64-encoded 128-bit MD5 digest of the data. This header must be used as
+	// a message integrity check to verify that the request body was not corrupted in
+	// transit. For more information, go to RFC 1864. (http://www.ietf.org/rfc/rfc1864.txt)
+	// For requests made using the Amazon Web Services Command Line Interface (CLI) or
+	// Amazon Web Services SDKs, this field is calculated automatically.
 	ContentMD5 *string

-	// The account ID of the expected bucket owner. If the bucket is owned by a
-	// different account, the request fails with the HTTP status code 403 Forbidden
-	// (access denied).
+	// The account ID of the expected bucket owner. If the account ID that you provide
+	// does not match the actual owner of the bucket, the request fails with the HTTP
+	// status code 403 Forbidden (access denied).
 	ExpectedBucketOwner *string

 	// Allows grantee the read, write, read ACP, and write ACP permissions on the
@@ -227,6 +169,11 @@ type PutBucketAclInput struct {
 	noSmithyDocumentSerde
 }

+func (in *PutBucketAclInput) bindEndpointParams(p *EndpointParameters) {
+	p.Bucket = in.Bucket
+	p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
 type PutBucketAclOutput struct {
 	// Metadata pertaining to the operation's result.
 	ResultMetadata middleware.Metadata
@@ -235,6 +182,9 @@ type PutBucketAclOutput struct {
 }

 func (c *Client) addOperationPutBucketAclMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+		return err
+	}
 	err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketAcl{}, middleware.After)
 	if err != nil {
 		return err
@@ -243,6 +193,13 @@ func (c *Client) addOperationPutBucketAclMiddlewares(stack *middleware.Stack, op
 	if err != nil {
 		return err
 	}
+	if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketAcl"); err != nil {
+		return fmt.Errorf("add protocol finalizers: %v", err)
+	}
+
+	if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+		return err
+	}
 	if err = addSetLoggerMiddleware(stack, options); err != nil {
 		return err
 	}
@@ -261,16 +218,13 @@ func (c *Client) addOperationPutBucketAclMiddlewares(stack *middleware.Stack, op
 	if err = addRetryMiddlewares(stack, options); err != nil {
 		return err
 	}
-	if err = addHTTPSignerV4Middleware(stack, options); err != nil {
-		return err
-	}
 	if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
 		return err
 	}
 	if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
 		return err
 	}
-	if err = addClientUserAgent(stack); err != nil {
+	if err = addClientUserAgent(stack, options); err != nil {
 		return err
 	}
 	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
@@ -279,7 +233,10 @@ func (c *Client) addOperationPutBucketAclMiddlewares(stack *middleware.Stack, op
 	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
 		return err
 	}
-	if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+	if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addPutBucketContextMiddleware(stack); err != nil {
 		return err
 	}
 	if err = addOpPutBucketAclValidationMiddleware(stack); err != nil {
@@ -291,6 +248,9 @@ func (c *Client) addOperationPutBucketAclMiddlewares(stack *middleware.Stack, op
 	if err = addMetadataRetrieverMiddleware(stack); err != nil {
 		return err
 	}
+	if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+		return err
+	}
 	if err = addPutBucketAclInputChecksumMiddlewares(stack, options); err != nil {
 		return err
 	}
@@ -309,14 +269,29 @@ func (c *Client) addOperationPutBucketAclMiddlewares(stack *middleware.Stack, op
 	if err = addRequestResponseLogging(stack, options); err != nil {
 		return err
 	}
+	if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil {
+		return err
+	}
 	return nil
 }

+func (v *PutBucketAclInput) bucket() (string, bool) {
+	if v.Bucket == nil {
+		return "", false
+	}
+	return *v.Bucket, true
+}
+
 func newServiceMetadataMiddleware_opPutBucketAcl(region string) *awsmiddleware.RegisterServiceMetadata {
 	return &awsmiddleware.RegisterServiceMetadata{
 		Region:        region,
 		ServiceID:     ServiceID,
-		SigningName:   "s3",
 		OperationName: "PutBucketAcl",
 	}
 }
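To make the documented behavior concrete, here is a minimal usage sketch of the updated PutBucketAcl operation with a canned ACL. This is not part of the patched SDK source; the bucket name is a hypothetical placeholder, and config.LoadDefaultConfig is one assumed way to obtain credentials and a Region.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	// Load credentials and Region from the default sources.
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Apply a canned ACL. As the doc comment above notes, a canned ACL
	// cannot be combined with the explicit x-amz-grant-* headers.
	_, err = client.PutBucketAcl(context.TODO(), &s3.PutBucketAclInput{
		Bucket: aws.String("example-bucket"), // hypothetical bucket name
		ACL:    types.BucketCannedACLPrivate,
	})
	if err != nil {
		log.Fatal(err)
	}
}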
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go
index da70d547..9b1009f6 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketAnalyticsConfiguration.go
@@ -4,78 +4,55 @@ package s3

 import (
 	"context"
+	"fmt"
 	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
 	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
 	s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
 	"github.com/aws/aws-sdk-go-v2/service/s3/types"
 	"github.com/aws/smithy-go/middleware"
+	"github.com/aws/smithy-go/ptr"
 	smithyhttp "github.com/aws/smithy-go/transport/http"
 )

-// Sets an analytics configuration for the bucket (specified by the analytics
-// configuration ID). You can have up to 1,000 analytics configurations per bucket.
-// You can choose to have storage class analysis export analysis reports sent to a
-// comma-separated values (CSV) flat file. See the DataExport request element.
-// Reports are updated daily and are based on the object filters that you
-// configure. When selecting data export, you specify a destination bucket and an
-// optional destination prefix where the file is written. You can export the data
-// to a destination bucket in a different account. However, the destination bucket
-// must be in the same Region as the bucket that you are making the PUT analytics
-// configuration to. For more information, see Amazon S3 Analytics – Storage Class
-// Analysis
-// (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html).
-// You must create a bucket policy on the destination bucket where the exported
+// This operation is not supported by directory buckets. Sets an analytics
+// configuration for the bucket (specified by the analytics configuration ID). You
+// can have up to 1,000 analytics configurations per bucket. You can choose to have
+// storage class analysis export analysis reports sent to a comma-separated values
+// (CSV) flat file. See the DataExport request element. Reports are updated daily
+// and are based on the object filters that you configure. When selecting data
+// export, you specify a destination bucket and an optional destination prefix
+// where the file is written. You can export the data to a destination bucket in a
+// different account. However, the destination bucket must be in the same Region as
+// the bucket that you are making the PUT analytics configuration to. For more
+// information, see Amazon S3 Analytics – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html)
+// . You must create a bucket policy on the destination bucket where the exported
 // file is written to grant permissions to Amazon S3 to write objects to the
 // bucket. For an example policy, see Granting Permissions for Amazon S3 Inventory
-// and Storage Class Analysis
-// (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9).
-// To use this operation, you must have permissions to perform the
+// and Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9)
+// . To use this operation, you must have permissions to perform the
 // s3:PutAnalyticsConfiguration action. The bucket owner has this permission by
 // default. The bucket owner can grant this permission to others. For more
 // information about permissions, see Permissions Related to Bucket Subresource
-// Operations
-// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources
-// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
-// Special Errors
+// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
+// . PutBucketAnalyticsConfiguration has the following special errors:
+// - HTTP Error: HTTP 400 Bad Request
+// - Code: InvalidArgument
+// - Cause: Invalid argument.
+// - HTTP Error: HTTP 400 Bad Request
+// - Code: TooManyConfigurations
+// - Cause: You are attempting to create a new configuration but have already
+// reached the 1,000-configuration limit.
+// - HTTP Error: HTTP 403 Forbidden
+// - Code: AccessDenied
+// - Cause: You are not the owner of the specified bucket, or you do not have
+// the s3:PutAnalyticsConfiguration bucket permission to set the configuration on
+// the bucket.
 //
-// * HTTP Error: HTTP 400 Bad Request
-//
-// * Code: InvalidArgument
-//
-// *
-// Cause: Invalid argument.
-//
-// * HTTP Error: HTTP 400 Bad Request
-//
-// * Code:
-// TooManyConfigurations
-//
-// * Cause: You are attempting to create a new configuration
-// but have already reached the 1,000-configuration limit.
-//
-// * HTTP Error: HTTP 403
-// Forbidden
-//
-// * Code: AccessDenied
-//
-// * Cause: You are not the owner of the specified
-// bucket, or you do not have the s3:PutAnalyticsConfiguration bucket permission to
-// set the configuration on the bucket.
-//
-// # Related Resources
-//
-// *
-// GetBucketAnalyticsConfiguration
-// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html)
-//
-// *
-// DeleteBucketAnalyticsConfiguration
-// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html)
-//
-// *
-// ListBucketAnalyticsConfigurations
-// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html)
+// The following operations are related to PutBucketAnalyticsConfiguration :
+// - GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html)
+// - DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html)
+// - ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html)
 func (c *Client) PutBucketAnalyticsConfiguration(ctx context.Context, params *PutBucketAnalyticsConfigurationInput, optFns ...func(*Options)) (*PutBucketAnalyticsConfigurationOutput, error) {
 	if params == nil {
 		params = &PutBucketAnalyticsConfigurationInput{}
@@ -108,14 +85,19 @@ type PutBucketAnalyticsConfigurationInput struct {
 	// This member is required.
 	Id *string

-	// The account ID of the expected bucket owner. If the bucket is owned by a
-	// different account, the request fails with the HTTP status code 403 Forbidden
-	// (access denied).
+	// The account ID of the expected bucket owner. If the account ID that you provide
+	// does not match the actual owner of the bucket, the request fails with the HTTP
+	// status code 403 Forbidden (access denied).
 	ExpectedBucketOwner *string

 	noSmithyDocumentSerde
 }

+func (in *PutBucketAnalyticsConfigurationInput) bindEndpointParams(p *EndpointParameters) {
+	p.Bucket = in.Bucket
+	p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
 type PutBucketAnalyticsConfigurationOutput struct {
 	// Metadata pertaining to the operation's result.
 	ResultMetadata middleware.Metadata
@@ -124,6 +106,9 @@ type PutBucketAnalyticsConfigurationOutput struct {
 }

 func (c *Client) addOperationPutBucketAnalyticsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+		return err
+	}
 	err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketAnalyticsConfiguration{}, middleware.After)
 	if err != nil {
 		return err
@@ -132,6 +117,13 @@ func (c *Client) addOperationPutBucketAnalyticsConfigurationMiddlewares(stack *m
 	if err != nil {
 		return err
 	}
+	if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketAnalyticsConfiguration"); err != nil {
+		return fmt.Errorf("add protocol finalizers: %v", err)
+	}
+
+	if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+		return err
+	}
 	if err = addSetLoggerMiddleware(stack, options); err != nil {
 		return err
 	}
@@ -150,16 +142,13 @@ func (c *Client) addOperationPutBucketAnalyticsConfigurationMiddlewares(stack *m
 	if err = addRetryMiddlewares(stack, options); err != nil {
 		return err
 	}
-	if err = addHTTPSignerV4Middleware(stack, options); err != nil {
-		return err
-	}
 	if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
 		return err
 	}
 	if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
 		return err
 	}
-	if err = addClientUserAgent(stack); err != nil {
+	if err = addClientUserAgent(stack, options); err != nil {
 		return err
 	}
 	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
@@ -168,7 +157,10 @@ func (c *Client) addOperationPutBucketAnalyticsConfigurationMiddlewares(stack *m
 	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
 		return err
 	}
-	if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+	if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addPutBucketContextMiddleware(stack); err != nil {
 		return err
 	}
 	if err = addOpPutBucketAnalyticsConfigurationValidationMiddleware(stack); err != nil {
@@ -180,6 +172,9 @@ func (c *Client) addOperationPutBucketAnalyticsConfigurationMiddlewares(stack *m
 	if err = addMetadataRetrieverMiddleware(stack); err != nil {
 		return err
 	}
+	if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+		return err
+	}
 	if err = addPutBucketAnalyticsConfigurationUpdateEndpoint(stack, options); err != nil {
 		return err
 	}
@@ -195,14 +190,26 @@ func (c *Client) addOperationPutBucketAnalyticsConfigurationMiddlewares(stack *m
 	if err = addRequestResponseLogging(stack, options); err != nil {
 		return err
 	}
+	if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+		return err
+	}
 	return nil
 }

+func (v *PutBucketAnalyticsConfigurationInput) bucket() (string, bool) {
+	if v.Bucket == nil {
+		return "", false
+	}
+	return *v.Bucket, true
+}
+
 func newServiceMetadataMiddleware_opPutBucketAnalyticsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
 	return &awsmiddleware.RegisterServiceMetadata{
 		Region:        region,
 		ServiceID:     ServiceID,
-		SigningName:   "s3",
 		OperationName: "PutBucketAnalyticsConfiguration",
 	}
 }
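As a companion to the doc comment above, a minimal sketch of exporting storage class analysis as a CSV report. This is illustrative only; the bucket names, configuration ID, and client setup are hypothetical, and the destination bucket must be in the same Region as the source bucket.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Configure storage class analysis on the source bucket and export
	// the daily report to a destination bucket as a CSV flat file.
	_, err = client.PutBucketAnalyticsConfiguration(context.TODO(), &s3.PutBucketAnalyticsConfigurationInput{
		Bucket: aws.String("example-source-bucket"), // hypothetical
		Id:     aws.String("example-analytics-id"),  // hypothetical
		AnalyticsConfiguration: &types.AnalyticsConfiguration{
			Id: aws.String("example-analytics-id"),
			StorageClassAnalysis: &types.StorageClassAnalysis{
				DataExport: &types.StorageClassAnalysisDataExport{
					OutputSchemaVersion: types.StorageClassAnalysisSchemaVersionV1,
					Destination: &types.AnalyticsExportDestination{
						S3BucketDestination: &types.AnalyticsS3BucketDestination{
							Bucket: aws.String("arn:aws:s3:::example-destination-bucket"),
							Format: types.AnalyticsS3ExportFileFormatCsv,
						},
					},
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}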
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go
index 55a67f18..394a2bad 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketCors.go
@@ -4,22 +4,25 @@ package s3

 import (
 	"context"
+	"fmt"
 	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
 	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
 	internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
 	s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
 	"github.com/aws/aws-sdk-go-v2/service/s3/types"
 	"github.com/aws/smithy-go/middleware"
+	"github.com/aws/smithy-go/ptr"
 	smithyhttp "github.com/aws/smithy-go/transport/http"
 )

-// Sets the cors configuration for your bucket. If the configuration exists, Amazon
-// S3 replaces it. To use this operation, you must be allowed to perform the
-// s3:PutBucketCORS action. By default, the bucket owner has this permission and
-// can grant it to others. You set this configuration on a bucket so that the
-// bucket can service cross-origin requests. For example, you might want to enable
-// a request whose origin is http://www.example.com to access your Amazon S3 bucket
-// at my.example.bucket.com by using the browser's XMLHttpRequest capability. To
+// This operation is not supported by directory buckets. Sets the cors
+// configuration for your bucket. If the configuration exists, Amazon S3 replaces
+// it. To use this operation, you must be allowed to perform the s3:PutBucketCORS
+// action. By default, the bucket owner has this permission and can grant it to
+// others. You set this configuration on a bucket so that the bucket can service
+// cross-origin requests. For example, you might want to enable a request whose
+// origin is http://www.example.com to access your Amazon S3 bucket at
+// my.example.bucket.com by using the browser's XMLHttpRequest capability. To
 // enable cross-origin resource sharing (CORS) on a bucket, you add the cors
 // subresource to the bucket. The cors subresource is an XML document in which you
 // configure rules that identify origins and the HTTP methods that can be executed
@@ -28,33 +31,19 @@ import (
 // bucket, it evaluates the cors configuration on the bucket and uses the first
 // CORSRule rule that matches the incoming browser request to enable a cross-origin
 // request. For a rule to match, the following conditions must be met:
+// - The request's Origin header must match AllowedOrigin elements.
+// - The request method (for example, GET, PUT, HEAD, and so on) or the
+// Access-Control-Request-Method header in case of a pre-flight OPTIONS request
+// must be one of the AllowedMethod elements.
+// - Every header specified in the Access-Control-Request-Headers request header
+// of a pre-flight request must match an AllowedHeader element.
 //
-// * The
-// request's Origin header must match AllowedOrigin elements.
-//
-// * The request method
-// (for example, GET, PUT, HEAD, and so on) or the Access-Control-Request-Method
-// header in case of a pre-flight OPTIONS request must be one of the AllowedMethod
-// elements.
-//
-// * Every header specified in the Access-Control-Request-Headers
-// request header of a pre-flight request must match an AllowedHeader element.
-//
-// For
-// more information about CORS, go to Enabling Cross-Origin Resource Sharing
-// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon S3
-// User Guide. Related Resources
-//
-// * GetBucketCors
-// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketCors.html)
-//
-// *
-// DeleteBucketCors
-// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html)
-//
-// *
-// RESTOPTIONSobject
-// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html)
+// For more information about CORS, go to Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html)
+// in the Amazon S3 User Guide. The following operations are related to
+// PutBucketCors :
+// - GetBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketCors.html)
+// - DeleteBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html)
+// - RESTOPTIONSobject (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html)
 func (c *Client) PutBucketCors(ctx context.Context, params *PutBucketCorsInput, optFns ...func(*Options)) (*PutBucketCorsOutput, error) {
 	if params == nil {
 		params = &PutBucketCorsInput{}
@@ -72,46 +61,48 @@ func (c *Client) PutBucketCors(ctx context.Context, params *PutBucketCorsInput,

 type PutBucketCorsInput struct {

-	// Specifies the bucket impacted by the corsconfiguration.
+	// Specifies the bucket impacted by the cors configuration.
 	//
 	// This member is required.
 	Bucket *string

 	// Describes the cross-origin access configuration for objects in an Amazon S3
-	// bucket. For more information, see Enabling Cross-Origin Resource Sharing
-	// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon S3
-	// User Guide.
+	// bucket. For more information, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html)
+	// in the Amazon S3 User Guide.
 	//
 	// This member is required.
 	CORSConfiguration *types.CORSConfiguration

-	// Indicates the algorithm used to create the checksum for the object when using
-	// the SDK. This header will not provide any additional functionality if not using
-	// the SDK. When sending this header, there must be a corresponding x-amz-checksum
-	// or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the
-	// HTTP status code 400 Bad Request. For more information, see Checking object
-	// integrity
-	// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// Indicates the algorithm used to create the checksum for the object when you use
+	// the SDK. This header will not provide any additional functionality if you don't
+	// use the SDK. When you send this header, there must be a corresponding
+	// x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+	// request with the HTTP status code 400 Bad Request . For more information, see
+	// Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
 	// in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3
 	// ignores any provided ChecksumAlgorithm parameter.
 	ChecksumAlgorithm types.ChecksumAlgorithm

-	// The base64-encoded 128-bit MD5 digest of the data. This header must be used as a
-	// message integrity check to verify that the request body was not corrupted in
-	// transit. For more information, go to RFC 1864.
-	// (http://www.ietf.org/rfc/rfc1864.txt) For requests made using the Amazon Web
-	// Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is
-	// calculated automatically.
+	// The base64-encoded 128-bit MD5 digest of the data. This header must be used as
+	// a message integrity check to verify that the request body was not corrupted in
+	// transit. For more information, go to RFC 1864. (http://www.ietf.org/rfc/rfc1864.txt)
+	// For requests made using the Amazon Web Services Command Line Interface (CLI) or
+	// Amazon Web Services SDKs, this field is calculated automatically.
 	ContentMD5 *string

-	// The account ID of the expected bucket owner. If the bucket is owned by a
-	// different account, the request fails with the HTTP status code 403 Forbidden
-	// (access denied).
+	// The account ID of the expected bucket owner. If the account ID that you provide
+	// does not match the actual owner of the bucket, the request fails with the HTTP
+	// status code 403 Forbidden (access denied).
 	ExpectedBucketOwner *string

 	noSmithyDocumentSerde
 }

+func (in *PutBucketCorsInput) bindEndpointParams(p *EndpointParameters) {
+	p.Bucket = in.Bucket
+	p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
 type PutBucketCorsOutput struct {
 	// Metadata pertaining to the operation's result.
 	ResultMetadata middleware.Metadata
@@ -120,6 +111,9 @@ type PutBucketCorsOutput struct {
 }

 func (c *Client) addOperationPutBucketCorsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+		return err
+	}
 	err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketCors{}, middleware.After)
 	if err != nil {
 		return err
@@ -128,6 +122,13 @@ func (c *Client) addOperationPutBucketCorsMiddlewares(stack *middleware.Stack, o
 	if err != nil {
 		return err
 	}
+	if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketCors"); err != nil {
+		return fmt.Errorf("add protocol finalizers: %v", err)
+	}
+
+	if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+		return err
+	}
 	if err = addSetLoggerMiddleware(stack, options); err != nil {
 		return err
 	}
@@ -146,16 +147,13 @@ func (c *Client) addOperationPutBucketCorsMiddlewares(stack *middleware.Stack, o
 	if err = addRetryMiddlewares(stack, options); err != nil {
 		return err
 	}
-	if err = addHTTPSignerV4Middleware(stack, options); err != nil {
-		return err
-	}
 	if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
 		return err
 	}
 	if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
 		return err
 	}
-	if err = addClientUserAgent(stack); err != nil {
+	if err = addClientUserAgent(stack, options); err != nil {
 		return err
 	}
 	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
@@ -164,7 +162,10 @@ func (c *Client) addOperationPutBucketCorsMiddlewares(stack *middleware.Stack, o
 	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
 		return err
 	}
-	if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+	if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addPutBucketContextMiddleware(stack); err != nil {
 		return err
 	}
 	if err = addOpPutBucketCorsValidationMiddleware(stack); err != nil {
@@ -176,6 +177,9 @@ func (c *Client) addOperationPutBucketCorsMiddlewares(stack *middleware.Stack, o
 	if err = addMetadataRetrieverMiddleware(stack); err != nil {
 		return err
 	}
+	if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+		return err
+	}
 	if err = addPutBucketCorsInputChecksumMiddlewares(stack, options); err != nil {
 		return err
 	}
@@ -194,20 +198,35 @@ func (c *Client) addOperationPutBucketCorsMiddlewares(stack *middleware.Stack, o
 	if err = addRequestResponseLogging(stack, options); err != nil {
 		return err
 	}
+	if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil {
+		return err
+	}
 	return nil
 }

+func (v *PutBucketCorsInput) bucket() (string, bool) {
+	if v.Bucket == nil {
+		return "", false
+	}
+	return *v.Bucket, true
+}
+
 func newServiceMetadataMiddleware_opPutBucketCors(region string) *awsmiddleware.RegisterServiceMetadata {
 	return &awsmiddleware.RegisterServiceMetadata{
 		Region:        region,
 		ServiceID:     ServiceID,
-		SigningName:   "s3",
 		OperationName: "PutBucketCors",
 	}
 }

-// getPutBucketCorsRequestAlgorithmMember gets the request checksum algorithm value
-// provided as input.
+// getPutBucketCorsRequestAlgorithmMember gets the request checksum algorithm
+// value provided as input.
 func getPutBucketCorsRequestAlgorithmMember(input interface{}) (string, bool) {
 	in := input.(*PutBucketCorsInput)
 	if len(in.ChecksumAlgorithm) == 0 {
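A minimal usage sketch of PutBucketCors mirroring the documented scenario (allowing browser requests that originate from http://www.example.com). The bucket name and client setup are hypothetical assumptions, not taken from the patch.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Replace the bucket's CORS configuration with a single rule. A
	// browser request matches when its Origin, method, and requested
	// headers all match the rule, per the doc comment above.
	_, err = client.PutBucketCors(context.TODO(), &s3.PutBucketCorsInput{
		Bucket: aws.String("example-bucket"), // hypothetical
		CORSConfiguration: &types.CORSConfiguration{
			CORSRules: []types.CORSRule{{
				AllowedOrigins: []string{"http://www.example.com"},
				AllowedMethods: []string{"GET", "PUT"},
				AllowedHeaders: []string{"*"},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}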
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go
index 184f0cd3..615e98a3 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go
@@ -4,47 +4,41 @@ package s3

 import (
 	"context"
+	"fmt"
 	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
 	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
 	internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
 	s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
 	"github.com/aws/aws-sdk-go-v2/service/s3/types"
 	"github.com/aws/smithy-go/middleware"
+	"github.com/aws/smithy-go/ptr"
 	smithyhttp "github.com/aws/smithy-go/transport/http"
 )

-// This action uses the encryption subresource to configure default encryption and
-// Amazon S3 Bucket Key for an existing bucket. Default encryption for a bucket can
-// use server-side encryption with Amazon S3-managed keys (SSE-S3) or customer
-// managed keys (SSE-KMS). If you specify default encryption using SSE-KMS, you can
-// also configure Amazon S3 Bucket Key. When the default encryption is SSE-KMS, if
-// you upload an object to the bucket and do not specify the KMS key to use for
-// encryption, Amazon S3 uses the default Amazon Web Services managed KMS key for
-// your account. For information about default encryption, see Amazon S3 default
-// bucket encryption
-// (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) in the
-// Amazon S3 User Guide. For more information about S3 Bucket Keys, see Amazon S3
-// Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) in
-// the Amazon S3 User Guide. This action requires Amazon Web Services Signature
-// Version 4. For more information, see Authenticating Requests (Amazon Web
-// Services Signature Version 4)
-// (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html).
-// To use this operation, you must have permissions to perform the
+// This operation is not supported by directory buckets. This action uses the
+// encryption subresource to configure default encryption and Amazon S3 Bucket Keys
+// for an existing bucket. By default, all buckets have a default encryption
+// configuration that uses server-side encryption with Amazon S3 managed keys
+// (SSE-S3). You can optionally configure default encryption for a bucket by using
+// server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or
+// dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS).
+// If you specify default encryption by using SSE-KMS, you can also configure
+// Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html)
+// . If you use PutBucketEncryption to set your default bucket encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html)
+// to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 does
+// not validate the KMS key ID provided in PutBucketEncryption requests. This
+// action requires Amazon Web Services Signature Version 4. For more information,
+// see Authenticating Requests (Amazon Web Services Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html)
+// . To use this operation, you must have permission to perform the
 // s3:PutEncryptionConfiguration action. The bucket owner has this permission by
 // default. The bucket owner can grant this permission to others. For more
 // information about permissions, see Permissions Related to Bucket Subresource
-// Operations
-// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
-// and Managing Access Permissions to Your Amazon S3 Resources
-// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
-// in the Amazon S3 User Guide. Related Resources
-//
-// * GetBucketEncryption
-// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html)
-//
-// *
-// DeleteBucketEncryption
-// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html)
+// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
+// in the Amazon S3 User Guide. The following operations are related to
+// PutBucketEncryption :
+// - GetBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html)
+// - DeleteBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html)
 func (c *Client) PutBucketEncryption(ctx context.Context, params *PutBucketEncryptionInput, optFns ...func(*Options)) (*PutBucketEncryptionOutput, error) {
 	if params == nil {
 		params = &PutBucketEncryptionInput{}
@@ -63,11 +57,13 @@ func (c *Client) PutBucketEncryption(ctx context.Context, params *PutBucketEncry

 type PutBucketEncryptionInput struct {

 	// Specifies default encryption for a bucket using server-side encryption with
-	// Amazon S3-managed keys (SSE-S3) or customer managed keys (SSE-KMS). For
-	// information about the Amazon S3 default encryption feature, see Amazon S3
-	// Default Bucket Encryption
-	// (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) in the
-	// Amazon S3 User Guide.
+	// different key options. By default, all buckets have a default encryption
+	// configuration that uses server-side encryption with Amazon S3 managed keys
+	// (SSE-S3). You can optionally configure default encryption for a bucket by using
+	// server-side encryption with an Amazon Web Services KMS key (SSE-KMS) or a
+	// customer-provided key (SSE-C). For information about the bucket default
+	// encryption feature, see Amazon S3 Bucket Default Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html)
+	// in the Amazon S3 User Guide.
 	//
 	// This member is required.
 	Bucket *string
@@ -77,13 +73,12 @@ type PutBucketEncryptionInput struct {
 	// This member is required.
 	ServerSideEncryptionConfiguration *types.ServerSideEncryptionConfiguration

-	// Indicates the algorithm used to create the checksum for the object when using
-	// the SDK. This header will not provide any additional functionality if not using
-	// the SDK. When sending this header, there must be a corresponding x-amz-checksum
-	// or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the
-	// HTTP status code 400 Bad Request. For more information, see Checking object
-	// integrity
-	// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// Indicates the algorithm used to create the checksum for the object when you use
+	// the SDK. This header will not provide any additional functionality if you don't
+	// use the SDK. When you send this header, there must be a corresponding
+	// x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+	// request with the HTTP status code 400 Bad Request . For more information, see
+	// Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
 	// in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3
 	// ignores any provided ChecksumAlgorithm parameter.
 	ChecksumAlgorithm types.ChecksumAlgorithm
@@ -94,14 +89,19 @@ type PutBucketEncryptionInput struct {
 	// automatically.
 	ContentMD5 *string

-	// The account ID of the expected bucket owner. If the bucket is owned by a
-	// different account, the request fails with the HTTP status code 403 Forbidden
-	// (access denied).
+	// The account ID of the expected bucket owner. If the account ID that you provide
+	// does not match the actual owner of the bucket, the request fails with the HTTP
+	// status code 403 Forbidden (access denied).
 	ExpectedBucketOwner *string

 	noSmithyDocumentSerde
 }

+func (in *PutBucketEncryptionInput) bindEndpointParams(p *EndpointParameters) {
+	p.Bucket = in.Bucket
+	p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
 type PutBucketEncryptionOutput struct {
 	// Metadata pertaining to the operation's result.
 	ResultMetadata middleware.Metadata
@@ -110,6 +110,9 @@ type PutBucketEncryptionOutput struct {
 }

 func (c *Client) addOperationPutBucketEncryptionMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+		return err
+	}
 	err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketEncryption{}, middleware.After)
 	if err != nil {
 		return err
@@ -118,6 +121,13 @@ func (c *Client) addOperationPutBucketEncryptionMiddlewares(stack *middleware.St
 	if err != nil {
 		return err
 	}
+	if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketEncryption"); err != nil {
+		return fmt.Errorf("add protocol finalizers: %v", err)
+	}
+
+	if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+		return err
+	}
 	if err = addSetLoggerMiddleware(stack, options); err != nil {
 		return err
 	}
@@ -136,16 +146,13 @@ func (c *Client) addOperationPutBucketEncryptionMiddlewares(stack *middleware.St
 	if err = addRetryMiddlewares(stack, options); err != nil {
 		return err
 	}
-	if err = addHTTPSignerV4Middleware(stack, options); err != nil {
-		return err
-	}
 	if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
 		return err
 	}
 	if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
 		return err
 	}
-	if err = addClientUserAgent(stack); err != nil {
+	if err = addClientUserAgent(stack, options); err != nil {
 		return err
 	}
 	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
@@ -154,7 +161,10 @@ func (c *Client) addOperationPutBucketEncryptionMiddlewares(stack *middleware.St
 	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
 		return err
 	}
-	if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+	if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addPutBucketContextMiddleware(stack); err != nil {
 		return err
 	}
 	if err = addOpPutBucketEncryptionValidationMiddleware(stack); err != nil {
@@ -166,6 +176,9 @@ func (c *Client) addOperationPutBucketEncryptionMiddlewares(stack *middleware.St
 	if err = addMetadataRetrieverMiddleware(stack); err != nil {
 		return err
 	}
+	if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+		return err
+	}
 	if err = addPutBucketEncryptionInputChecksumMiddlewares(stack, options); err != nil {
 		return err
 	}
@@ -184,20 +197,35 @@ func (c *Client) addOperationPutBucketEncryptionMiddlewares(stack *middleware.St
 	if err = addRequestResponseLogging(stack, options); err != nil {
 		return err
 	}
+	if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil {
+		return err
+	}
 	return nil
 }

+func (v *PutBucketEncryptionInput) bucket() (string, bool) {
+	if v.Bucket == nil {
+		return "", false
+	}
+	return *v.Bucket, true
+}
+
 func newServiceMetadataMiddleware_opPutBucketEncryption(region string) *awsmiddleware.RegisterServiceMetadata {
 	return &awsmiddleware.RegisterServiceMetadata{
 		Region:        region,
 		ServiceID:     ServiceID,
-		SigningName:   "s3",
 		OperationName: "PutBucketEncryption",
 	}
 }

-// getPutBucketEncryptionRequestAlgorithmMember gets the request checksum algorithm
-// value provided as input.
+// getPutBucketEncryptionRequestAlgorithmMember gets the request checksum
+// algorithm value provided as input.
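A minimal sketch of setting an SSE-KMS default with an S3 Bucket Key, matching the behavior the updated doc comment describes. The bucket name and KMS key alias are hypothetical, the client setup is an assumption, and the pointer shapes (aws.Bool for BucketKeyEnabled) reflect recent SDK versions such as the one vendored here. Note that, per the doc comment, S3 does not validate the KMS key ID you supply.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Set the bucket's default encryption to SSE-KMS and enable an
	// S3 Bucket Key to reduce KMS request costs.
	_, err = client.PutBucketEncryption(context.TODO(), &s3.PutBucketEncryptionInput{
		Bucket: aws.String("example-bucket"), // hypothetical
		ServerSideEncryptionConfiguration: &types.ServerSideEncryptionConfiguration{
			Rules: []types.ServerSideEncryptionRule{{
				ApplyServerSideEncryptionByDefault: &types.ServerSideEncryptionByDefault{
					SSEAlgorithm:   types.ServerSideEncryptionAwsKms,
					KMSMasterKeyID: aws.String("alias/example-key"), // hypothetical alias
				},
				BucketKeyEnabled: aws.Bool(true),
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}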
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go
index edf5d178..a5f8fc7a 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketIntelligentTieringConfiguration.go
@@ -4,16 +4,19 @@ package s3

 import (
 	"context"
+	"fmt"
 	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
 	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
 	s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
 	"github.com/aws/aws-sdk-go-v2/service/s3/types"
 	"github.com/aws/smithy-go/middleware"
+	"github.com/aws/smithy-go/ptr"
 	smithyhttp "github.com/aws/smithy-go/transport/http"
 )

-// Puts a S3 Intelligent-Tiering configuration to the specified bucket. You can
-// have up to 1,000 S3 Intelligent-Tiering configurations per bucket. The S3
+// This operation is not supported by directory buckets. Puts a S3
+// Intelligent-Tiering configuration to the specified bucket. You can have up to
+// 1,000 S3 Intelligent-Tiering configurations per bucket. The S3
 // Intelligent-Tiering storage class is designed to optimize storage costs by
 // automatically moving data to the most cost-effective storage access tier,
 // without performance impact or operational overhead. S3 Intelligent-Tiering
@@ -26,51 +29,22 @@ import (
 // monitored and not eligible for auto-tiering. Smaller objects can be stored, but
 // they are always charged at the Frequent Access tier rates in the S3
 // Intelligent-Tiering storage class. For more information, see Storage class for
-// automatically optimizing frequently and infrequently accessed objects
-// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access).
-// Operations related to PutBucketIntelligentTieringConfiguration include:
+// automatically optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access)
+// . Operations related to PutBucketIntelligentTieringConfiguration include:
+// - DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html)
+// - GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html)
+// - ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html)
 //
-// *
-// DeleteBucketIntelligentTieringConfiguration
-// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html)
-//
-// *
-// GetBucketIntelligentTieringConfiguration
-// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html)
-//
-// *
-// ListBucketIntelligentTieringConfigurations
-// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html)
-//
-// You
-// only need S3 Intelligent-Tiering enabled on a bucket if you want to
+// You only need S3 Intelligent-Tiering enabled on a bucket if you want to
 // automatically move objects stored in the S3 Intelligent-Tiering storage class to
-// the Archive Access or Deep Archive Access tier. Special Errors
-//
-// * HTTP 400 Bad
-// Request Error
-//
-// * Code: InvalidArgument
-//
-// * Cause: Invalid Argument
-//
-// * HTTP 400
-// Bad Request Error
-//
-// * Code: TooManyConfigurations
-//
-// * Cause: You are attempting to
-// create a new configuration but have already reached the 1,000-configuration
-// limit.
-//
-// * HTTP 403 Forbidden Error
-//
-// * Code: AccessDenied
-//
-// * Cause: You are not
-// the owner of the specified bucket, or you do not have the
-// s3:PutIntelligentTieringConfiguration bucket permission to set the configuration
-// on the bucket.
+// the Archive Access or Deep Archive Access tier.
+// PutBucketIntelligentTieringConfiguration has the following special errors: HTTP
+// 400 Bad Request Error Code: InvalidArgument Cause: Invalid Argument HTTP 400 Bad
+// Request Error Code: TooManyConfigurations Cause: You are attempting to create a
+// new configuration but have already reached the 1,000-configuration limit. HTTP
+// 403 Forbidden Error Cause: You are not the owner of the specified bucket, or you
+// do not have the s3:PutIntelligentTieringConfiguration bucket permission to set
+// the configuration on the bucket.
 func (c *Client) PutBucketIntelligentTieringConfiguration(ctx context.Context, params *PutBucketIntelligentTieringConfigurationInput, optFns ...func(*Options)) (*PutBucketIntelligentTieringConfigurationOutput, error) {
 	if params == nil {
 		params = &PutBucketIntelligentTieringConfigurationInput{}
@@ -107,6 +81,11 @@ type PutBucketIntelligentTieringConfigurationInput struct {
 	noSmithyDocumentSerde
 }

+func (in *PutBucketIntelligentTieringConfigurationInput) bindEndpointParams(p *EndpointParameters) {
+	p.Bucket = in.Bucket
+	p.UseS3ExpressControlEndpoint = ptr.Bool(true)
+}
+
 type PutBucketIntelligentTieringConfigurationOutput struct {
 	// Metadata pertaining to the operation's result.
 	ResultMetadata middleware.Metadata
@@ -115,6 +94,9 @@ type PutBucketIntelligentTieringConfigurationOutput struct {
 }

 func (c *Client) addOperationPutBucketIntelligentTieringConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+		return err
+	}
 	err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketIntelligentTieringConfiguration{}, middleware.After)
 	if err != nil {
 		return err
@@ -123,6 +105,13 @@ func (c *Client) addOperationPutBucketIntelligentTieringConfigurationMiddlewares
 	if err != nil {
 		return err
 	}
+	if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketIntelligentTieringConfiguration"); err != nil {
+		return fmt.Errorf("add protocol finalizers: %v", err)
+	}
+
+	if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+		return err
+	}
 	if err = addSetLoggerMiddleware(stack, options); err != nil {
 		return err
 	}
@@ -141,16 +130,13 @@ func (c *Client) addOperationPutBucketIntelligentTieringConfigurationMiddlewares
 	if err = addRetryMiddlewares(stack, options); err != nil {
 		return err
 	}
-	if err = addHTTPSignerV4Middleware(stack, options); err != nil {
-		return err
-	}
 	if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
 		return err
 	}
 	if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
 		return err
 	}
-	if err = addClientUserAgent(stack); err != nil {
+	if err = addClientUserAgent(stack, options); err != nil {
 		return err
 	}
 	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
@@ -159,7 +145,10 @@ func (c *Client) addOperationPutBucketIntelligentTieringConfigurationMiddlewares
 	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
 		return err
 	}
-	if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+	if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addPutBucketContextMiddleware(stack); err != nil {
 		return err
 	}
 	if err = addOpPutBucketIntelligentTieringConfigurationValidationMiddleware(stack); err != nil {
@@ -171,6 +160,9 @@ func (c *Client) addOperationPutBucketIntelligentTieringConfigurationMiddlewares
 	if err = addMetadataRetrieverMiddleware(stack); err != nil {
 		return err
 	}
+	if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+		return err
+	}
 	if err = addPutBucketIntelligentTieringConfigurationUpdateEndpoint(stack, options); err != nil {
 		return err
 	}
@@ -186,14 +178,26 @@ func (c *Client) addOperationPutBucketIntelligentTieringConfigurationMiddlewares
 	if err = addRequestResponseLogging(stack, options); err != nil {
 		return err
 	}
+	if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+		return err
+	}
 	return nil
 }

+func (v *PutBucketIntelligentTieringConfigurationInput) bucket() (string, bool) {
+	if v.Bucket == nil {
+		return "", false
+	}
+	return *v.Bucket, true
+}
+
 func newServiceMetadataMiddleware_opPutBucketIntelligentTieringConfiguration(region string) *awsmiddleware.RegisterServiceMetadata {
 	return &awsmiddleware.RegisterServiceMetadata{
 		Region:        region,
 		ServiceID:     ServiceID,
-		SigningName:   "s3",
 		OperationName: "PutBucketIntelligentTieringConfiguration",
 	}
 }
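A minimal sketch of an Intelligent-Tiering configuration that moves objects to the Archive Access tier after 90 days without access. The bucket name, configuration ID, and client setup are hypothetical assumptions; the pointer shape of Days (aws.Int32) matches recent SDK versions like the one vendored here.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// A tiering configuration is only needed when opting objects in the
	// S3 Intelligent-Tiering storage class into the archive tiers.
	_, err = client.PutBucketIntelligentTieringConfiguration(context.TODO(), &s3.PutBucketIntelligentTieringConfigurationInput{
		Bucket: aws.String("example-bucket"),     // hypothetical
		Id:     aws.String("example-tiering-id"), // hypothetical
		IntelligentTieringConfiguration: &types.IntelligentTieringConfiguration{
			Id:     aws.String("example-tiering-id"),
			Status: types.IntelligentTieringStatusEnabled,
			Tierings: []types.Tiering{{
				AccessTier: types.IntelligentTieringAccessTierArchiveAccess,
				Days:       aws.Int32(90),
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}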
bb1a903b..e2d06796 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketInventoryConfiguration.go @@ -4,79 +4,58 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// This implementation of the PUT action adds an inventory configuration -// (identified by the inventory ID) to the bucket. You can have up to 1,000 -// inventory configurations per bucket. Amazon S3 inventory generates inventories -// of the objects in the bucket on a daily or weekly basis, and the results are -// published to a flat file. The bucket that is inventoried is called the source -// bucket, and the bucket where the inventory flat file is stored is called the -// destination bucket. The destination bucket must be in the same Amazon Web -// Services Region as the source bucket. When you configure an inventory for a -// source bucket, you specify the destination bucket where you want the inventory -// to be stored, and whether to generate the inventory daily or weekly. You can -// also configure what object metadata to include and whether to inventory all -// object versions or only current versions. For more information, see Amazon S3 -// Inventory -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) in the -// Amazon S3 User Guide. You must create a bucket policy on the destination bucket -// to grant permissions to Amazon S3 to write objects to the bucket in the defined -// location. For an example policy, see Granting Permissions for Amazon S3 -// Inventory and Storage Class Analysis -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9). -// To use this operation, you must have permissions to perform the +// This operation is not supported by directory buckets. This implementation of +// the PUT action adds an inventory configuration (identified by the inventory ID) +// to the bucket. You can have up to 1,000 inventory configurations per bucket. +// Amazon S3 inventory generates inventories of the objects in the bucket on a +// daily or weekly basis, and the results are published to a flat file. The bucket +// that is inventoried is called the source bucket, and the bucket where the +// inventory flat file is stored is called the destination bucket. The destination +// bucket must be in the same Amazon Web Services Region as the source bucket. When +// you configure an inventory for a source bucket, you specify the destination +// bucket where you want the inventory to be stored, and whether to generate the +// inventory daily or weekly. You can also configure what object metadata to +// include and whether to inventory all object versions or only current versions. +// For more information, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) +// in the Amazon S3 User Guide. You must create a bucket policy on the destination +// bucket to grant permissions to Amazon S3 to write objects to the bucket in the +// defined location. 
For an example policy, see Granting Permissions for Amazon S3 +// Inventory and Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9) +// . Permissions To use this operation, you must have permission to perform the // s3:PutInventoryConfiguration action. The bucket owner has this permission by -// default and can grant this permission to others. For more information about -// permissions, see Permissions Related to Bucket Subresource Operations -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. Special Errors -// -// * HTTP 400 Bad Request Error -// -// * -// Code: InvalidArgument -// -// * Cause: Invalid Argument -// -// * HTTP 400 Bad Request -// Error -// -// * Code: TooManyConfigurations -// -// * Cause: You are attempting to create a -// new configuration but have already reached the 1,000-configuration limit. -// -// * -// HTTP 403 Forbidden Error -// -// * Code: AccessDenied -// -// * Cause: You are not the owner +// default and can grant this permission to others. The +// s3:PutInventoryConfiguration permission allows a user to create an S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html) +// report that includes all object metadata fields available and to specify the +// destination bucket to store the inventory. A user with read access to objects in +// the destination bucket can also access all object metadata fields that are +// available in the inventory report. To restrict access to an inventory report, +// see Restricting access to an Amazon S3 Inventory report (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html#example-bucket-policies-use-case-10) +// in the Amazon S3 User Guide. For more information about the metadata fields +// available in S3 Inventory, see Amazon S3 Inventory lists (https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html#storage-inventory-contents) +// in the Amazon S3 User Guide. For more information about permissions, see +// Permissions related to bucket subresource operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Identity and access management in Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. PutBucketInventoryConfiguration has the following +// special errors: HTTP 400 Bad Request Error Code: InvalidArgument Cause: Invalid +// Argument HTTP 400 Bad Request Error Code: TooManyConfigurations Cause: You are +// attempting to create a new configuration but have already reached the +// 1,000-configuration limit. HTTP 403 Forbidden Error Cause: You are not the owner // of the specified bucket, or you do not have the s3:PutInventoryConfiguration -// bucket permission to set the configuration on the bucket. 
-// -// # Related Resources -// -// * -// GetBucketInventoryConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) -// -// * -// DeleteBucketInventoryConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) -// -// * -// ListBucketInventoryConfigurations -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) +// bucket permission to set the configuration on the bucket. The following +// operations are related to PutBucketInventoryConfiguration : +// - GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) +// - DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) +// - ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) func (c *Client) PutBucketInventoryConfiguration(ctx context.Context, params *PutBucketInventoryConfigurationInput, optFns ...func(*Options)) (*PutBucketInventoryConfigurationOutput, error) { if params == nil { params = &PutBucketInventoryConfigurationInput{} @@ -109,14 +88,19 @@ type PutBucketInventoryConfigurationInput struct { // This member is required. InventoryConfiguration *types.InventoryConfiguration - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *PutBucketInventoryConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type PutBucketInventoryConfigurationOutput struct { // Metadata pertaining to the operation's result. 
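
For reviewers unfamiliar with this operation, here is a minimal usage sketch of the updated client call. It is not part of the patch: bucket names and the inventory ID are placeholders, and the pointer-typed fields (for example `IsEnabled *bool`) follow the pointerized generated types this dependency bump vendors in.

```go
// Hypothetical sketch: configure a weekly CSV inventory report.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	_, err = client.PutBucketInventoryConfiguration(context.TODO(), &s3.PutBucketInventoryConfigurationInput{
		Bucket: aws.String("amzn-s3-demo-source-bucket"), // placeholder source bucket
		Id:     aws.String("weekly-inventory"),
		InventoryConfiguration: &types.InventoryConfiguration{
			Id:                     aws.String("weekly-inventory"),
			IsEnabled:              aws.Bool(true), // *bool after this update's pointerization
			IncludedObjectVersions: types.InventoryIncludedObjectVersionsCurrent,
			Schedule:               &types.InventorySchedule{Frequency: types.InventoryFrequencyWeekly},
			Destination: &types.InventoryDestination{
				S3BucketDestination: &types.InventoryS3BucketDestination{
					Bucket: aws.String("arn:aws:s3:::amzn-s3-demo-destination-bucket"),
					Format: types.InventoryFormatCsv,
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```

As the doc comment above notes, the destination bucket must already carry a policy that lets Amazon S3 write the report, or delivery will fail.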
ResultMetadata middleware.Metadata @@ -125,6 +109,9 @@ type PutBucketInventoryConfigurationOutput struct { } func (c *Client) addOperationPutBucketInventoryConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketInventoryConfiguration{}, middleware.After) if err != nil { return err @@ -133,6 +120,13 @@ func (c *Client) addOperationPutBucketInventoryConfigurationMiddlewares(stack *m if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketInventoryConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -151,16 +145,13 @@ func (c *Client) addOperationPutBucketInventoryConfigurationMiddlewares(stack *m if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -169,7 +160,10 @@ func (c *Client) addOperationPutBucketInventoryConfigurationMiddlewares(stack *m if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpPutBucketInventoryConfigurationValidationMiddleware(stack); err != nil { @@ -181,6 +175,9 @@ func (c *Client) addOperationPutBucketInventoryConfigurationMiddlewares(stack *m if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addPutBucketInventoryConfigurationUpdateEndpoint(stack, options); err != nil { return err } @@ -196,14 +193,26 @@ func (c *Client) addOperationPutBucketInventoryConfigurationMiddlewares(stack *m if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *PutBucketInventoryConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opPutBucketInventoryConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "PutBucketInventoryConfiguration", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go index ca79b24e..ac2b63eb 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLifecycleConfiguration.go @@ -4,86 +4,68 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Creates a new lifecycle configuration for the bucket or replaces an existing -// lifecycle configuration. Keep in mind that this will overwrite an existing -// lifecycle configuration, so if you want to retain any configuration details, -// they must be included in the new lifecycle configuration. For information about -// lifecycle configuration, see Managing your storage lifecycle -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html). -// Bucket lifecycle configuration now supports specifying a lifecycle rule using an -// object key name prefix, one or more object tags, or a combination of both. +// This operation is not supported by directory buckets. Creates a new lifecycle +// configuration for the bucket or replaces an existing lifecycle configuration. +// Keep in mind that this will overwrite an existing lifecycle configuration, so if +// you want to retain any configuration details, they must be included in the new +// lifecycle configuration. For information about lifecycle configuration, see +// Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html) +// . Bucket lifecycle configuration now supports specifying a lifecycle rule using +// an object key name prefix, one or more object tags, or a combination of both. // Accordingly, this section describes the latest API. The previous version of the // API supported filtering based only on an object key name prefix, which is // supported for backward compatibility. For the related API description, see -// PutBucketLifecycle -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html). -// Rules You specify the lifecycle configuration in your request body. The +// PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html) +// . Rules You specify the lifecycle configuration in your request body. The // lifecycle configuration is specified as XML consisting of one or more rules. An // Amazon S3 Lifecycle configuration can have up to 1,000 rules. This limit is not // adjustable. Each rule consists of the following: // -// * Filter identifying a subset -// of objects to which the rule applies. The filter can be based on a key name -// prefix, object tags, or a combination of both. +// - A filter identifying a subset of objects to which the rule applies. The +// filter can be based on a key name prefix, object tags, or a combination of both. // -// * Status whether the rule is in -// effect. +// - A status indicating whether the rule is in effect. // -// * One or more lifecycle transition and expiration actions that you want -// Amazon S3 to perform on the objects identified by the filter. 
If the state of -// your bucket is versioning-enabled or versioning-suspended, you can have many -// versions of the same object (one current version and zero or more noncurrent -// versions). Amazon S3 provides predefined actions that you can specify for -// current and noncurrent object versions. +// - One or more lifecycle transition and expiration actions that you want +// Amazon S3 to perform on the objects identified by the filter. If the state of +// your bucket is versioning-enabled or versioning-suspended, you can have many +// versions of the same object (one current version and zero or more noncurrent +// versions). Amazon S3 provides predefined actions that you can specify for +// current and noncurrent object versions. // -// For more information, see Object -// Lifecycle Management -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) and -// Lifecycle Configuration Elements -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html). -// Permissions By default, all Amazon S3 resources are private, including buckets, -// objects, and related subresources (for example, lifecycle configuration and -// website configuration). Only the resource owner (that is, the Amazon Web +// For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) +// and Lifecycle Configuration Elements (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html) +// . Permissions By default, all Amazon S3 resources are private, including +// buckets, objects, and related subresources (for example, lifecycle configuration +// and website configuration). Only the resource owner (that is, the Amazon Web // Services account that created it) can access the resource. The resource owner // can optionally grant access permissions to others by writing an access policy. -// For this operation, a user must get the s3:PutLifecycleConfiguration permission. -// You can also explicitly deny permissions. Explicit deny also supersedes any -// other permissions. If you want to block users or accounts from removing or -// deleting objects from your bucket, you must deny them permissions for the -// following actions: +// For this operation, a user must get the s3:PutLifecycleConfiguration +// permission. You can also explicitly deny permissions. An explicit deny also +// supersedes any other permissions. If you want to block users or accounts from +// removing or deleting objects from your bucket, you must deny them permissions +// for the following actions: +// - s3:DeleteObject +// - s3:DeleteObjectVersion +// - s3:PutLifecycleConfiguration // -// * s3:DeleteObject -// -// * s3:DeleteObjectVersion -// -// * -// s3:PutLifecycleConfiguration -// -// For more information about permissions, see -// Managing Access Permissions to Your Amazon S3 Resources -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). 
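
A minimal sketch of the rule structure described above, assuming the same imports and client construction as the inventory sketch earlier. Names are placeholders; note that `LifecycleRuleFilter` is a union type in this module version (later releases changed its shape), so the prefix is set through a union member.

```go
// putExpiryRule is a hypothetical helper: one enabled rule that expires
// objects under the "logs/" prefix after 30 days.
func putExpiryRule(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketLifecycleConfiguration(ctx, &s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("amzn-s3-demo-bucket"),
		LifecycleConfiguration: &types.BucketLifecycleConfiguration{
			Rules: []types.LifecycleRule{{
				ID:     aws.String("expire-old-logs"),
				Status: types.ExpirationStatusEnabled,
				// Union member selecting a key-name prefix filter.
				Filter: &types.LifecycleRuleFilterMemberPrefix{Value: "logs/"},
				// Days is *int32 after this update's pointerization.
				Expiration: &types.LifecycleExpiration{Days: aws.Int32(30)},
			}},
		},
	})
	return err
}
```

Because a PUT replaces the whole configuration, any rules you want to keep must be resent alongside new ones, per the doc comment above.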
-// The following are related to PutBucketLifecycleConfiguration: -// -// * Examples of -// Lifecycle Configuration -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-configuration-examples.html) -// -// * -// GetBucketLifecycleConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) -// -// * -// DeleteBucketLifecycle -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) +// For more information about permissions, see Managing Access Permissions to Your +// Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// . The following operations are related to PutBucketLifecycleConfiguration : +// - Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-configuration-examples.html) +// - GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) +// - DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) func (c *Client) PutBucketLifecycleConfiguration(ctx context.Context, params *PutBucketLifecycleConfigurationInput, optFns ...func(*Options)) (*PutBucketLifecycleConfigurationOutput, error) { if params == nil { params = &PutBucketLifecycleConfigurationInput{} @@ -106,20 +88,19 @@ type PutBucketLifecycleConfigurationInput struct { // This member is required. Bucket *string - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not using - // the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the - // HTTP status code 400 Bad Request. For more information, see Checking object - // integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 // ignores any provided ChecksumAlgorithm parameter. ChecksumAlgorithm types.ChecksumAlgorithm - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Container for lifecycle rules. You can add as many as 1,000 rules. 
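
One concrete note on the `ChecksumAlgorithm` doc just above: the field only tells the SDK which checksum to compute for the XML payload; the input-checksum middleware (wired up later in this file) sends the matching `x-amz-checksum-*` header. A hedged fragment, reusing the lifecycle input from the previous sketch:

```go
// withSHA256 is a hypothetical helper: it opts the request into an
// SDK-computed SHA-256 payload checksum instead of the default algorithm.
func withSHA256(in *s3.PutBucketLifecycleConfigurationInput) *s3.PutBucketLifecycleConfigurationInput {
	in.ChecksumAlgorithm = types.ChecksumAlgorithmSha256
	return in
}
```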
@@ -128,6 +109,11 @@ type PutBucketLifecycleConfigurationInput struct { noSmithyDocumentSerde } +func (in *PutBucketLifecycleConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type PutBucketLifecycleConfigurationOutput struct { // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata @@ -136,6 +122,9 @@ type PutBucketLifecycleConfigurationOutput struct { } func (c *Client) addOperationPutBucketLifecycleConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketLifecycleConfiguration{}, middleware.After) if err != nil { return err @@ -144,6 +133,13 @@ func (c *Client) addOperationPutBucketLifecycleConfigurationMiddlewares(stack *m if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketLifecycleConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -162,16 +158,13 @@ func (c *Client) addOperationPutBucketLifecycleConfigurationMiddlewares(stack *m if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -180,7 +173,10 @@ func (c *Client) addOperationPutBucketLifecycleConfigurationMiddlewares(stack *m if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpPutBucketLifecycleConfigurationValidationMiddleware(stack); err != nil { @@ -192,6 +188,9 @@ func (c *Client) addOperationPutBucketLifecycleConfigurationMiddlewares(stack *m if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addPutBucketLifecycleConfigurationInputChecksumMiddlewares(stack, options); err != nil { return err } @@ -210,14 +209,29 @@ func (c *Client) addOperationPutBucketLifecycleConfigurationMiddlewares(stack *m if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } return nil } +func (v *PutBucketLifecycleConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opPutBucketLifecycleConfiguration(region string) 
*awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "PutBucketLifecycleConfiguration", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go index 0f3ea6d3..e69fa24c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketLogging.go @@ -4,68 +4,50 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Set the logging parameters for a bucket and to specify permissions for who can -// view and modify the logging parameters. All logs are saved to buckets in the -// same Amazon Web Services Region as the source bucket. To set the logging status -// of a bucket, you must be the bucket owner. The bucket owner is automatically -// granted FULL_CONTROL to all logs. You use the Grantee request element to grant -// access to other people. The Permissions request element specifies the kind of -// access the grantee has to the logs. If the target bucket for log delivery uses -// the bucket owner enforced setting for S3 Object Ownership, you can't use the -// Grantee request element to grant access to others. Permissions can only be -// granted using policies. For more information, see Permissions for server access -// log delivery -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general) +// This operation is not supported by directory buckets. Set the logging +// parameters for a bucket and to specify permissions for who can view and modify +// the logging parameters. All logs are saved to buckets in the same Amazon Web +// Services Region as the source bucket. To set the logging status of a bucket, you +// must be the bucket owner. The bucket owner is automatically granted FULL_CONTROL +// to all logs. You use the Grantee request element to grant access to other +// people. The Permissions request element specifies the kind of access the +// grantee has to the logs. If the target bucket for log delivery uses the bucket +// owner enforced setting for S3 Object Ownership, you can't use the Grantee +// request element to grant access to others. Permissions can only be granted using +// policies. For more information, see Permissions for server access log delivery (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general) // in the Amazon S3 User Guide. Grantee Values You can specify the person (grantee) -// to whom you're assigning access rights (using request elements) in the following -// ways: +// to whom you're assigning access rights (by using request elements) in the +// following ways: +// - By the person's ID: <>ID<><>GranteesEmail<> DisplayName is optional and +// ignored in the request. 
+// - By Email address: <>Grantees@email.com<> The grantee is resolved to the +// CanonicalUser and, in a response to a GETObjectAcl request, appears as the +// CanonicalUser. +// - By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> // -// * By the person's ID: <>ID<><>GranteesEmail<> DisplayName is optional -// and ignored in the request. -// -// * By Email address: <>Grantees@email.com<> The -// grantee is resolved to the CanonicalUser and, in a response to a GET Object acl -// request, appears as the CanonicalUser. -// -// * By URI: -// <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> -// -// To enable -// logging, you use LoggingEnabled and its children request elements. To disable -// logging, you use an empty BucketLoggingStatus request element: For more -// information about server access logging, see Server Access Logging -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html) in the -// Amazon S3 User Guide. For more information about creating a bucket, see -// CreateBucket -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html). For -// more information about returning the logging status of a bucket, see -// GetBucketLogging -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html). The -// following operations are related to PutBucketLogging: -// -// * PutObject -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// -// * -// DeleteBucket -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) -// -// * -// CreateBucket -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// -// * -// GetBucketLogging -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html) +// To enable logging, you use LoggingEnabled and its children request elements. To +// disable logging, you use an empty BucketLoggingStatus request element: For +// more information about server access logging, see Server Access Logging (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html) +// in the Amazon S3 User Guide. For more information about creating a bucket, see +// CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// . For more information about returning the logging status of a bucket, see +// GetBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html) +// . The following operations are related to PutBucketLogging : +// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) +// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// - GetBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html) func (c *Client) PutBucketLogging(ctx context.Context, params *PutBucketLoggingInput, optFns ...func(*Options)) (*PutBucketLoggingOutput, error) { if params == nil { params = &PutBucketLoggingInput{} @@ -93,13 +75,12 @@ type PutBucketLoggingInput struct { // This member is required. BucketLoggingStatus *types.BucketLoggingStatus - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not using - // the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. 
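
To make the `LoggingEnabled`/`BucketLoggingStatus` discussion above concrete, a minimal sketch follows, with the same hedges as the earlier sketches (placeholder bucket names, imports and client construction as in the inventory sketch). Sending an empty `BucketLoggingStatus` disables logging, as the doc comment states.

```go
// enableAccessLogging is a hypothetical helper that turns on server access
// logging, delivering logs to a target bucket under a prefix.
func enableAccessLogging(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketLogging(ctx, &s3.PutBucketLoggingInput{
		Bucket: aws.String("amzn-s3-demo-source-bucket"),
		BucketLoggingStatus: &types.BucketLoggingStatus{
			LoggingEnabled: &types.LoggingEnabled{
				TargetBucket: aws.String("amzn-s3-demo-log-bucket"),
				TargetPrefix: aws.String("access-logs/"),
			},
		},
	})
	return err
}
```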
Otherwise, Amazon S3 fails the request with the - // HTTP status code 400 Bad Request. For more information, see Checking object - // integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 // ignores any provided ChecksumAlgorithm parameter. ChecksumAlgorithm types.ChecksumAlgorithm @@ -109,14 +90,19 @@ type PutBucketLoggingInput struct { // this field is calculated automatically. ContentMD5 *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *PutBucketLoggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type PutBucketLoggingOutput struct { // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata @@ -125,6 +111,9 @@ type PutBucketLoggingOutput struct { } func (c *Client) addOperationPutBucketLoggingMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketLogging{}, middleware.After) if err != nil { return err @@ -133,6 +122,13 @@ func (c *Client) addOperationPutBucketLoggingMiddlewares(stack *middleware.Stack if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketLogging"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -151,16 +147,13 @@ func (c *Client) addOperationPutBucketLoggingMiddlewares(stack *middleware.Stack if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -169,7 +162,10 @@ func (c *Client) addOperationPutBucketLoggingMiddlewares(stack *middleware.Stack if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = 
addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpPutBucketLoggingValidationMiddleware(stack); err != nil { @@ -181,6 +177,9 @@ func (c *Client) addOperationPutBucketLoggingMiddlewares(stack *middleware.Stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addPutBucketLoggingInputChecksumMiddlewares(stack, options); err != nil { return err } @@ -199,14 +198,29 @@ func (c *Client) addOperationPutBucketLoggingMiddlewares(stack *middleware.Stack if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } return nil } +func (v *PutBucketLoggingInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opPutBucketLogging(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "PutBucketLogging", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go index 6f0c6fac..099736c1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketMetricsConfiguration.go @@ -4,53 +4,39 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Sets a metrics configuration (specified by the metrics configuration ID) for the -// bucket. You can have up to 1,000 metrics configurations per bucket. If you're -// updating an existing metrics configuration, note that this is a full replacement -// of the existing metrics configuration. If you don't include the elements you -// want to keep, they are erased. To use this operation, you must have permissions -// to perform the s3:PutMetricsConfiguration action. The bucket owner has this +// This operation is not supported by directory buckets. Sets a metrics +// configuration (specified by the metrics configuration ID) for the bucket. You +// can have up to 1,000 metrics configurations per bucket. If you're updating an +// existing metrics configuration, note that this is a full replacement of the +// existing metrics configuration. If you don't include the elements you want to +// keep, they are erased. To use this operation, you must have permissions to +// perform the s3:PutMetricsConfiguration action. The bucket owner has this // permission by default. The bucket owner can grant this permission to others. 
For // more information about permissions, see Permissions Related to Bucket -// Subresource Operations -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). -// For information about CloudWatch request metrics for Amazon S3, see Monitoring -// Metrics with Amazon CloudWatch -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). -// The following operations are related to PutBucketMetricsConfiguration: +// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// . For information about CloudWatch request metrics for Amazon S3, see +// Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) +// . The following operations are related to PutBucketMetricsConfiguration : +// - DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) +// - GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) +// - ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) // -// * -// DeleteBucketMetricsConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) -// -// * -// GetBucketMetricsConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) -// -// * -// ListBucketMetricsConfigurations -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) -// -// GetBucketLifecycle -// has the following special error: -// -// * Error code: TooManyConfigurations -// -// * -// Description: You are attempting to create a new configuration but have already -// reached the 1,000-configuration limit. -// -// * HTTP Status Code: HTTP 400 Bad Request +// PutBucketMetricsConfiguration has the following special error: +// - Error code: TooManyConfigurations +// - Description: You are attempting to create a new configuration but have +// already reached the 1,000-configuration limit. +// - HTTP Status Code: HTTP 400 Bad Request func (c *Client) PutBucketMetricsConfiguration(ctx context.Context, params *PutBucketMetricsConfigurationInput, optFns ...func(*Options)) (*PutBucketMetricsConfigurationOutput, error) { if params == nil { params = &PutBucketMetricsConfigurationInput{} @@ -73,7 +59,8 @@ type PutBucketMetricsConfigurationInput struct { // This member is required. Bucket *string - // The ID used to identify the metrics configuration. + // The ID used to identify the metrics configuration. The ID has a 64 character + // limit and can only contain letters, numbers, periods, dashes, and underscores. // // This member is required. Id *string @@ -83,14 +70,19 @@ type PutBucketMetricsConfigurationInput struct { // This member is required. MetricsConfiguration *types.MetricsConfiguration - // The account ID of the expected bucket owner. 
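
A usage sketch for the metrics operation described above, under the same assumptions as the earlier sketches. A configuration with no `Filter` applies to the entire bucket; the ID shown is an arbitrary placeholder that respects the 64-character limit noted in the new field documentation.

```go
// enableRequestMetrics is a hypothetical helper: publish CloudWatch request
// metrics for the whole bucket (no Filter element).
func enableRequestMetrics(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketMetricsConfiguration(ctx, &s3.PutBucketMetricsConfigurationInput{
		Bucket:               aws.String("amzn-s3-demo-bucket"),
		Id:                   aws.String("EntireBucket"),
		MetricsConfiguration: &types.MetricsConfiguration{Id: aws.String("EntireBucket")},
	})
	return err
}
```

Remember this is a full replacement: any elements omitted from an existing configuration with the same ID are erased, per the doc comment above.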
If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *PutBucketMetricsConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type PutBucketMetricsConfigurationOutput struct { // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata @@ -99,6 +91,9 @@ type PutBucketMetricsConfigurationOutput struct { } func (c *Client) addOperationPutBucketMetricsConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketMetricsConfiguration{}, middleware.After) if err != nil { return err @@ -107,6 +102,13 @@ func (c *Client) addOperationPutBucketMetricsConfigurationMiddlewares(stack *mid if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketMetricsConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -125,16 +127,13 @@ func (c *Client) addOperationPutBucketMetricsConfigurationMiddlewares(stack *mid if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -143,7 +142,10 @@ func (c *Client) addOperationPutBucketMetricsConfigurationMiddlewares(stack *mid if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpPutBucketMetricsConfigurationValidationMiddleware(stack); err != nil { @@ -155,6 +157,9 @@ func (c *Client) addOperationPutBucketMetricsConfigurationMiddlewares(stack *mid if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addPutBucketMetricsConfigurationUpdateEndpoint(stack, options); err != nil { return err } @@ -170,14 +175,26 @@ func (c *Client) addOperationPutBucketMetricsConfigurationMiddlewares(stack *mid if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *PutBucketMetricsConfigurationInput) bucket() (string, bool) 
{ + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opPutBucketMetricsConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "PutBucketMetricsConfiguration", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go index 8e771d6b..7139f6ea 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketNotificationConfiguration.go @@ -4,53 +4,51 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Enables notifications of specified events for a bucket. For more information -// about event notifications, see Configuring Event Notifications -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). Using -// this API, you can replace an existing notification configuration. The +// This operation is not supported by directory buckets. Enables notifications of +// specified events for a bucket. For more information about event notifications, +// see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) +// . Using this API, you can replace an existing notification configuration. The // configuration is an XML file that defines the event types that you want Amazon // S3 to publish and the destination where you want Amazon S3 to publish an event // notification when it detects an event of the specified type. By default, your // bucket has no event notifications configured. That is, the notification -// configuration will be an empty NotificationConfiguration. This action replaces -// the existing notification configuration with the configuration you include in -// the request body. After Amazon S3 receives this request, it first verifies that -// any Amazon Simple Notification Service (Amazon SNS) or Amazon Simple Queue -// Service (Amazon SQS) destination exists, and that the bucket owner has -// permission to publish to it by sending a test notification. In the case of +// configuration will be an empty NotificationConfiguration . This action +// replaces the existing notification configuration with the configuration you +// include in the request body. After Amazon S3 receives this request, it first +// verifies that any Amazon Simple Notification Service (Amazon SNS) or Amazon +// Simple Queue Service (Amazon SQS) destination exists, and that the bucket owner +// has permission to publish to it by sending a test notification. In the case of // Lambda destinations, Amazon S3 verifies that the Lambda function permissions // grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For -// more information, see Configuring Notifications for Amazon S3 Events -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). You -// can disable notifications by adding the empty NotificationConfiguration element. 
-// For more information about the number of event notification configurations that -// you can create per bucket, see Amazon S3 service quotas -// (https://docs.aws.amazon.com/general/latest/gr/s3.html#limits_s3) in Amazon Web -// Services General Reference. By default, only the bucket owner can configure -// notifications on a bucket. However, bucket owners can use a bucket policy to -// grant permission to other users to set this configuration with -// s3:PutBucketNotification permission. The PUT notification is an atomic +// more information, see Configuring Notifications for Amazon S3 Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) +// . You can disable notifications by adding the empty NotificationConfiguration +// element. For more information about the number of event notification +// configurations that you can create per bucket, see Amazon S3 service quotas (https://docs.aws.amazon.com/general/latest/gr/s3.html#limits_s3) +// in Amazon Web Services General Reference. By default, only the bucket owner can +// configure notifications on a bucket. However, bucket owners can use a bucket +// policy to grant permission to other users to set this configuration with the +// required s3:PutBucketNotification permission. The PUT notification is an atomic // operation. For example, suppose your notification configuration includes SNS // topic, SQS queue, and Lambda function configurations. When you send a PUT // request with this configuration, Amazon S3 sends test messages to your SNS // topic. If the message fails, the entire PUT action will fail, and Amazon S3 will -// not add the configuration to your bucket. Responses If the configuration in the -// request body includes only one TopicConfiguration specifying only the +// not add the configuration to your bucket. If the configuration in the request +// body includes only one TopicConfiguration specifying only the // s3:ReducedRedundancyLostObject event type, the response will also include the // x-amz-sns-test-message-id header containing the message ID of the test // notification sent to the topic. The following action is related to -// PutBucketNotificationConfiguration: -// -// * GetBucketNotificationConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) +// PutBucketNotificationConfiguration : +// - GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) func (c *Client) PutBucketNotificationConfiguration(ctx context.Context, params *PutBucketNotificationConfigurationInput, optFns ...func(*Options)) (*PutBucketNotificationConfigurationOutput, error) { if params == nil { params = &PutBucketNotificationConfigurationInput{} @@ -73,24 +71,29 @@ type PutBucketNotificationConfigurationInput struct { // This member is required. Bucket *string - // A container for specifying the notification configuration of the bucket. If this - // element is empty, notifications are turned off for the bucket. + // A container for specifying the notification configuration of the bucket. If + // this element is empty, notifications are turned off for the bucket. // // This member is required. NotificationConfiguration *types.NotificationConfiguration - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. 
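
A sketch of the notification operation described above, same assumptions as the earlier sketches. The queue ARN is a placeholder; the string form of the event name avoids depending on a version-specific enum constant. Sending an empty `NotificationConfiguration` turns notifications off, per the doc comment.

```go
// notifyQueueOnCreate is a hypothetical helper: route ObjectCreated events
// to an SQS queue. S3 sends a test message at PUT time, so the queue policy
// must already allow s3.amazonaws.com to publish, or the whole PUT fails.
func notifyQueueOnCreate(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketNotificationConfiguration(ctx, &s3.PutBucketNotificationConfigurationInput{
		Bucket: aws.String("amzn-s3-demo-bucket"),
		NotificationConfiguration: &types.NotificationConfiguration{
			QueueConfigurations: []types.QueueConfiguration{{
				QueueArn: aws.String("arn:aws:sqs:us-east-1:111122223333:example-queue"),
				Events:   []types.Event{types.Event("s3:ObjectCreated:*")},
			}},
		},
	})
	return err
}
```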
If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Skips validation of Amazon SQS, Amazon SNS, and Lambda destinations. True or // false value. - SkipDestinationValidation bool + SkipDestinationValidation *bool noSmithyDocumentSerde } +func (in *PutBucketNotificationConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type PutBucketNotificationConfigurationOutput struct { // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata @@ -99,6 +102,9 @@ type PutBucketNotificationConfigurationOutput struct { } func (c *Client) addOperationPutBucketNotificationConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketNotificationConfiguration{}, middleware.After) if err != nil { return err @@ -107,6 +113,13 @@ func (c *Client) addOperationPutBucketNotificationConfigurationMiddlewares(stack if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketNotificationConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -125,16 +138,13 @@ func (c *Client) addOperationPutBucketNotificationConfigurationMiddlewares(stack if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -143,7 +153,10 @@ func (c *Client) addOperationPutBucketNotificationConfigurationMiddlewares(stack if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpPutBucketNotificationConfigurationValidationMiddleware(stack); err != nil { @@ -155,6 +168,9 @@ func (c *Client) addOperationPutBucketNotificationConfigurationMiddlewares(stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addPutBucketNotificationConfigurationUpdateEndpoint(stack, options); err != nil { return err } @@ -170,14 +186,26 @@ func (c *Client) addOperationPutBucketNotificationConfigurationMiddlewares(stack if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v 
*PutBucketNotificationConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opPutBucketNotificationConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "PutBucketNotificationConfiguration", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go index 83210cac..f89f86d6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketOwnershipControls.go @@ -4,27 +4,25 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this -// operation, you must have the s3:PutBucketOwnershipControls permission. For more -// information about Amazon S3 permissions, see Specifying permissions in a policy -// (https://docs.aws.amazon.com/AmazonS3/latest/user-guide/using-with-s3-actions.html). -// For information about Amazon S3 Object Ownership, see Using object ownership -// (https://docs.aws.amazon.com/AmazonS3/latest/user-guide/about-object-ownership.html). -// The following operations are related to PutBucketOwnershipControls: -// -// * -// GetBucketOwnershipControls -// -// * DeleteBucketOwnershipControls +// This operation is not supported by directory buckets. Creates or modifies +// OwnershipControls for an Amazon S3 bucket. To use this operation, you must have +// the s3:PutBucketOwnershipControls permission. For more information about Amazon +// S3 permissions, see Specifying permissions in a policy (https://docs.aws.amazon.com/AmazonS3/latest/user-guide/using-with-s3-actions.html) +// . For information about Amazon S3 Object Ownership, see Using object ownership (https://docs.aws.amazon.com/AmazonS3/latest/user-guide/about-object-ownership.html) +// . The following operations are related to PutBucketOwnershipControls : +// - GetBucketOwnershipControls +// - DeleteBucketOwnershipControls func (c *Client) PutBucketOwnershipControls(ctx context.Context, params *PutBucketOwnershipControlsInput, optFns ...func(*Options)) (*PutBucketOwnershipControlsOutput, error) { if params == nil { params = &PutBucketOwnershipControlsInput{} @@ -58,14 +56,19 @@ type PutBucketOwnershipControlsInput struct { // this field is calculated automatically. ContentMD5 *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). 
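
The `bindEndpointParams` and `bucket()` helpers added throughout this patch all serve one pattern: the new `setOperationInputMiddleware` captures the operation input so that endpoint resolution can read per-operation parameters such as `UseS3ExpressControlEndpoint`, which routes S3 Express control-plane calls to the `s3express-control` endpoint. The following is a conceptual sketch only, not the SDK's actual wiring; the real generated code threads these values through the middleware stack rather than a direct call.

```go
// Simplified model of the endpoint-parameter binding added in this patch.
type EndpointParameters struct {
	Bucket                      *string
	UseS3ExpressControlEndpoint *bool
}

// Each operation input updated in this patch gains a method of this shape.
type endpointParamsBinder interface {
	bindEndpointParams(*EndpointParameters)
}

func resolveEndpointParams(input any) *EndpointParameters {
	params := &EndpointParameters{}
	if binder, ok := input.(endpointParamsBinder); ok {
		// e.g. PutBucketOwnershipControlsInput sets Bucket and flags the
		// S3 Express control-plane endpoint before resolution runs.
		binder.bindEndpointParams(params)
	}
	return params
}
```

This also explains the removed `SigningName: "s3"` lines: with the protocol-finalizer middlewares, signing properties now come from endpoint resolution instead of per-operation metadata.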
ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *PutBucketOwnershipControlsInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type PutBucketOwnershipControlsOutput struct { // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata @@ -74,6 +77,9 @@ type PutBucketOwnershipControlsOutput struct { } func (c *Client) addOperationPutBucketOwnershipControlsMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketOwnershipControls{}, middleware.After) if err != nil { return err @@ -82,6 +88,13 @@ func (c *Client) addOperationPutBucketOwnershipControlsMiddlewares(stack *middle if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketOwnershipControls"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -100,16 +113,13 @@ func (c *Client) addOperationPutBucketOwnershipControlsMiddlewares(stack *middle if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -118,7 +128,10 @@ func (c *Client) addOperationPutBucketOwnershipControlsMiddlewares(stack *middle if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpPutBucketOwnershipControlsValidationMiddleware(stack); err != nil { @@ -130,6 +143,9 @@ func (c *Client) addOperationPutBucketOwnershipControlsMiddlewares(stack *middle if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addPutBucketOwnershipControlsInputChecksumMiddlewares(stack, options); err != nil { return err } @@ -148,14 +164,29 @@ func (c *Client) addOperationPutBucketOwnershipControlsMiddlewares(stack *middle if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } return nil } +func (v *PutBucketOwnershipControlsInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opPutBucketOwnershipControls(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: 
ServiceID, - SigningName: "s3", OperationName: "PutBucketOwnershipControls", } } @@ -170,9 +201,9 @@ func addPutBucketOwnershipControlsInputChecksumMiddlewares(stack *middleware.Sta }) } -// getPutBucketOwnershipControlsBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a modeled -// bucket name, +// getPutBucketOwnershipControlsBucketMember returns a pointer to string denoting +// a provided bucket member value and a boolean indicating if the input has a +// modeled bucket name, func getPutBucketOwnershipControlsBucketMember(input interface{}) (*string, bool) { in := input.(*PutBucketOwnershipControlsInput) if in.Bucket == nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go index 8860d3b5..b3da186a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketPolicy.go @@ -4,35 +4,59 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using an -// identity other than the root user of the Amazon Web Services account that owns -// the bucket, the calling identity must have the PutBucketPolicy permissions on -// the specified bucket and belong to the bucket owner's account in order to use -// this operation. If you don't have PutBucketPolicy permissions, Amazon S3 returns -// a 403 Access Denied error. If you have the correct permissions, but you're not +// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. Directory buckets - +// For directory buckets, you must make requests for this API operation to the +// Regional endpoint. These endpoints support path-style requests in the format +// https://s3express-control.region_code.amazonaws.com/bucket-name . +// Virtual-hosted-style requests aren't supported. For more information, see +// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. Permissions If you are using an identity other than +// the root user of the Amazon Web Services account that owns the bucket, the +// calling identity must both have the PutBucketPolicy permissions on the +// specified bucket and belong to the bucket owner's account in order to use this +// operation. If you don't have PutBucketPolicy permissions, Amazon S3 returns a +// 403 Access Denied error. If you have the correct permissions, but you're not // using an identity that belongs to the bucket owner's account, Amazon S3 returns -// a 405 Method Not Allowed error. As a security precaution, the root user of the -// Amazon Web Services account that owns a bucket can always use this operation, -// even if the policy explicitly denies the root user the ability to perform this -// action. For more information, see Bucket policy examples -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html).
-// The following operations are related to PutBucketPolicy: +// a 405 Method Not Allowed error. To ensure that bucket owners don't +// inadvertently lock themselves out of their own buckets, the root principal in a +// bucket owner's Amazon Web Services account can perform the GetBucketPolicy , +// PutBucketPolicy , and DeleteBucketPolicy API actions, even if their bucket +// policy explicitly denies the root principal's access. Bucket owner root +// principals can only be blocked from performing these API actions by VPC endpoint +// policies and Amazon Web Services Organizations policies. +// - General purpose bucket permissions - The s3:PutBucketPolicy permission is +// required in a policy. For more information about general purpose buckets bucket +// policies, see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) +// in the Amazon S3 User Guide. +// - Directory bucket permissions - To grant access to this API operation, you +// must have the s3express:PutBucketPolicy permission in an IAM identity-based +// policy instead of a bucket policy. Cross-account access to this API operation +// isn't supported. This operation can only be performed by the Amazon Web Services +// account that owns the resource. For more information about directory bucket +// policies and permissions, see Amazon Web Services Identity and Access +// Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) +// in the Amazon S3 User Guide. // -// * CreateBucket -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// -// * -// DeleteBucket -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) +// Example bucket policies General purpose buckets example bucket policies - See +// Bucket policy examples (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html) +// in the Amazon S3 User Guide. Directory bucket example bucket policies - See +// Example bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) +// in the Amazon S3 User Guide. HTTP Host header syntax Directory buckets - The +// HTTP Host header syntax is s3express-control.region.amazonaws.com . The +// following operations are related to PutBucketPolicy : +// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) func (c *Client) PutBucketPolicy(ctx context.Context, params *PutBucketPolicyInput, optFns ...func(*Options)) (*PutBucketPolicyOutput, error) { if params == nil { params = &PutBucketPolicyInput{} @@ -50,44 +74,72 @@ func (c *Client) PutBucketPolicy(ctx context.Context, params *PutBucketPolicyInp type PutBucketPolicyInput struct { - // The name of the bucket. + // The name of the bucket. Directory buckets - When you use this operation with a + // directory bucket, you must use path-style requests in the format + // https://s3express-control.region_code.amazonaws.com/bucket-name . + // Virtual-hosted-style requests aren't supported. Directory bucket names must be + // unique in the chosen Availability Zone. Bucket names must also follow the format + // bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 + // ). 
For information about bucket naming restrictions, see Directory bucket + // naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide // // This member is required. Bucket *string - // The bucket policy as a JSON document. + // The bucket policy as a JSON document. For directory buckets, the only IAM + // action supported in the bucket policy is s3express:CreateSession . // // This member is required. Policy *string - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not using - // the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the - // HTTP status code 400 Bad Request. For more information, see Checking object - // integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 + // fails the request with the HTTP status code 400 Bad Request . For the + // x-amz-checksum-algorithm header, replace algorithm with the supported + // algorithm from the following list: + // - CRC32 + // - CRC32C + // - SHA1 + // - SHA256 + // For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If the individual checksum value you provide + // through x-amz-checksum-algorithm doesn't match the checksum algorithm you set + // through x-amz-sdk-checksum-algorithm , Amazon S3 ignores any provided + // ChecksumAlgorithm parameter and uses the checksum algorithm that matches the + // provided value in x-amz-checksum-algorithm . For directory buckets, when you + // use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's + // used for performance. ChecksumAlgorithm types.ChecksumAlgorithm // Set this parameter to true to confirm that you want to remove your permissions - // to change this bucket policy in the future. - ConfirmRemoveSelfBucketAccess bool + // to change this bucket policy in the future. This functionality is not supported + // for directory buckets. + ConfirmRemoveSelfBucketAccess *bool // The MD5 hash of the request body. For requests made using the Amazon Web // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is - // calculated automatically. + // calculated automatically. This functionality is not supported for directory + // buckets. ContentMD5 *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). For directory buckets, this header + // is not supported in this API operation. 
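A sketch of the PutBucketPolicy call documented above, reusing the ctx, client, and imports from the earlier PutBucketOwnershipControls example. Note that this version of the vendored SDK changes ConfirmRemoveSelfBucketAccess from bool to *bool, so callers now pass aws.Bool; the bucket name and policy document below are illustrative only:

func putPolicyExample(ctx context.Context, client *s3.Client) error {
	// Illustrative policy: deny non-TLS access to a placeholder bucket.
	policy := `{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Sid": "DenyInsecureTransport",
	    "Effect": "Deny",
	    "Principal": "*",
	    "Action": "s3:*",
	    "Resource": ["arn:aws:s3:::example-bucket", "arn:aws:s3:::example-bucket/*"],
	    "Condition": {"Bool": {"aws:SecureTransport": "false"}}
	  }]
	}`
	_, err := client.PutBucketPolicy(ctx, &s3.PutBucketPolicyInput{
		Bucket: aws.String("example-bucket"),
		Policy: aws.String(policy),
		// Now *bool in this SDK version (see the hunk above).
		ConfirmRemoveSelfBucketAccess: aws.Bool(false),
	})
	return err
}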
If you specify this header, the request + // fails with the HTTP status code 501 Not Implemented . ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *PutBucketPolicyInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type PutBucketPolicyOutput struct { // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata @@ -96,6 +148,9 @@ type PutBucketPolicyOutput struct { } func (c *Client) addOperationPutBucketPolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketPolicy{}, middleware.After) if err != nil { return err @@ -104,6 +159,13 @@ func (c *Client) addOperationPutBucketPolicyMiddlewares(stack *middleware.Stack, if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketPolicy"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -122,16 +184,13 @@ func (c *Client) addOperationPutBucketPolicyMiddlewares(stack *middleware.Stack, if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -140,7 +199,10 @@ func (c *Client) addOperationPutBucketPolicyMiddlewares(stack *middleware.Stack, if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpPutBucketPolicyValidationMiddleware(stack); err != nil { @@ -152,6 +214,9 @@ func (c *Client) addOperationPutBucketPolicyMiddlewares(stack *middleware.Stack, if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addPutBucketPolicyInputChecksumMiddlewares(stack, options); err != nil { return err } @@ -170,14 +235,29 @@ func (c *Client) addOperationPutBucketPolicyMiddlewares(stack *middleware.Stack, if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } return nil } +func (v *PutBucketPolicyInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opPutBucketPolicy(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - 
SigningName: "s3", OperationName: "PutBucketPolicy", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go index 2213373f..ddf58ad8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go @@ -4,67 +4,58 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Creates a replication configuration or replaces an existing one. For more -// information, see Replication -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) in the Amazon -// S3 User Guide. Specify the replication configuration in the request body. In the -// replication configuration, you provide the name of the destination bucket or -// buckets where you want Amazon S3 to replicate objects, the IAM role that Amazon -// S3 can assume to replicate objects on your behalf, and other relevant -// information. A replication configuration must include at least one rule, and can -// contain a maximum of 1,000. Each rule identifies a subset of objects to +// This operation is not supported by directory buckets. Creates a replication +// configuration or replaces an existing one. For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) +// in the Amazon S3 User Guide. Specify the replication configuration in the +// request body. In the replication configuration, you provide the name of the +// destination bucket or buckets where you want Amazon S3 to replicate objects, the +// IAM role that Amazon S3 can assume to replicate objects on your behalf, and +// other relevant information. You can invoke this request for a specific Amazon +// Web Services Region by using the aws:RequestedRegion (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-requestedregion) +// condition key. A replication configuration must include at least one rule, and +// can contain a maximum of 1,000. Each rule identifies a subset of objects to // replicate by filtering the objects in the source bucket. To choose additional // subsets of objects to replicate, add a rule for each subset. To specify a subset // of the objects in the source bucket to apply a replication rule to, add the // Filter element as a child of the Rule element. You can filter objects based on // an object key prefix, one or more object tags, or both. When you add the Filter // element in the configuration, you must also add the following elements: -// DeleteMarkerReplication, Status, and Priority. If you are using an earlier +// DeleteMarkerReplication , Status , and Priority . If you are using an earlier // version of the replication configuration, Amazon S3 handles replication of -// delete markers differently. For more information, see Backward Compatibility -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations). 
-// For information about enabling versioning on a bucket, see Using Versioning -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html). Handling -// Replication of Encrypted Objects By default, Amazon S3 doesn't replicate objects -// that are stored at rest using server-side encryption with KMS keys. To replicate -// Amazon Web Services KMS-encrypted objects, add the following: -// SourceSelectionCriteria, SseKmsEncryptedObjects, Status, -// EncryptionConfiguration, and ReplicaKmsKeyID. For information about replication -// configuration, see Replicating Objects Created with SSE Using KMS keys -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html). -// For information on PutBucketReplication errors, see List of replication-related -// error codes -// (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList) +// delete markers differently. For more information, see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations) +// . For information about enabling versioning on a bucket, see Using Versioning (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html) +// . Handling Replication of Encrypted Objects By default, Amazon S3 doesn't +// replicate objects that are stored at rest using server-side encryption with KMS +// keys. To replicate Amazon Web Services KMS-encrypted objects, add the following: +// SourceSelectionCriteria , SseKmsEncryptedObjects , Status , +// EncryptionConfiguration , and ReplicaKmsKeyID . For information about +// replication configuration, see Replicating Objects Created with SSE Using KMS +// keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html) +// . For information on PutBucketReplication errors, see List of +// replication-related error codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList) // Permissions To create a PutBucketReplication request, you must have // s3:PutReplicationConfiguration permissions for the bucket. By default, a // resource owner, in this case the Amazon Web Services account that created the // bucket, can perform this operation. The resource owner can also grant others // permissions to perform the operation. For more information about permissions, -// see Specifying Permissions in a Policy -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) and -// Managing Access Permissions to Your Amazon S3 Resources -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). -// To perform this operation, the user or role performing the action must have the -// iam:PassRole -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html) -// permission. The following operations are related to PutBucketReplication: -// -// * -// GetBucketReplication -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html) -// -// * -// DeleteBucketReplication -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html) +// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// . 
To perform this operation, the user or role performing the action must have +// the iam:PassRole (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html) +// permission. The following operations are related to PutBucketReplication : +// - GetBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html) +// - DeleteBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html) func (c *Client) PutBucketReplication(ctx context.Context, params *PutBucketReplicationInput, optFns ...func(*Options)) (*PutBucketReplicationOutput, error) { if params == nil { params = &PutBucketReplicationInput{} @@ -93,28 +84,26 @@ type PutBucketReplicationInput struct { // This member is required. ReplicationConfiguration *types.ReplicationConfiguration - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not using - // the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the - // HTTP status code 400 Bad Request. For more information, see Checking object - // integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 // ignores any provided ChecksumAlgorithm parameter. ChecksumAlgorithm types.ChecksumAlgorithm - // The base64-encoded 128-bit MD5 digest of the data. You must use this header as a - // message integrity check to verify that the request body was not corrupted in - // transit. For more information, see RFC 1864 - // (http://www.ietf.org/rfc/rfc1864.txt). For requests made using the Amazon Web - // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is - // calculated automatically. + // The base64-encoded 128-bit MD5 digest of the data. You must use this header as + // a message integrity check to verify that the request body was not corrupted in + // transit. For more information, see RFC 1864 (http://www.ietf.org/rfc/rfc1864.txt) + // . For requests made using the Amazon Web Services Command Line Interface (CLI) + // or Amazon Web Services SDKs, this field is calculated automatically. ContentMD5 *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // A token to allow Object Lock to be enabled for an existing bucket. 
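A sketch of the PutBucketReplication call documented above, again assuming the ctx, client, and imports from the first example. As the doc notes, a rule that uses Filter must also set DeleteMarkerReplication, Status, and Priority; the role and bucket ARNs are placeholders, and Filter is shown via the Prefix member of the filter union as modeled in this SDK version:

func putReplicationExample(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketReplication(ctx, &s3.PutBucketReplicationInput{
		Bucket: aws.String("example-source-bucket"),
		ReplicationConfiguration: &types.ReplicationConfiguration{
			// IAM role that S3 assumes to replicate on your behalf (placeholder ARN).
			Role: aws.String("arn:aws:iam::123456789012:role/example-replication-role"),
			Rules: []types.ReplicationRule{{
				Status:   types.ReplicationRuleStatusEnabled,
				Priority: aws.Int32(1),
				Filter:   &types.ReplicationRuleFilterMemberPrefix{Value: "logs/"},
				DeleteMarkerReplication: &types.DeleteMarkerReplication{
					Status: types.DeleteMarkerReplicationStatusDisabled,
				},
				Destination: &types.Destination{
					Bucket: aws.String("arn:aws:s3:::example-destination-bucket"),
				},
			}},
		},
	})
	return err
}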
@@ -123,6 +112,11 @@ type PutBucketReplicationInput struct { noSmithyDocumentSerde } +func (in *PutBucketReplicationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type PutBucketReplicationOutput struct { // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata @@ -131,6 +125,9 @@ type PutBucketReplicationOutput struct { } func (c *Client) addOperationPutBucketReplicationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketReplication{}, middleware.After) if err != nil { return err @@ -139,6 +136,13 @@ func (c *Client) addOperationPutBucketReplicationMiddlewares(stack *middleware.S if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketReplication"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -157,16 +161,13 @@ func (c *Client) addOperationPutBucketReplicationMiddlewares(stack *middleware.S if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -175,7 +176,10 @@ func (c *Client) addOperationPutBucketReplicationMiddlewares(stack *middleware.S if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpPutBucketReplicationValidationMiddleware(stack); err != nil { @@ -187,6 +191,9 @@ func (c *Client) addOperationPutBucketReplicationMiddlewares(stack *middleware.S if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addPutBucketReplicationInputChecksumMiddlewares(stack, options); err != nil { return err } @@ -205,14 +212,29 @@ func (c *Client) addOperationPutBucketReplicationMiddlewares(stack *middleware.S if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } return nil } +func (v *PutBucketReplicationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opPutBucketReplication(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: 
"s3", OperationName: "PutBucketReplication", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go index c89d97be..d1dc5a76 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketRequestPayment.go @@ -4,28 +4,25 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Sets the request payment configuration for a bucket. By default, the bucket -// owner pays for downloads from the bucket. This configuration parameter enables -// the bucket owner (only) to specify that the person requesting the download will -// be charged for the download. For more information, see Requester Pays Buckets -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html). The -// following operations are related to PutBucketRequestPayment: -// -// * CreateBucket -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// -// * -// GetBucketRequestPayment -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketRequestPayment.html) +// This operation is not supported by directory buckets. Sets the request payment +// configuration for a bucket. By default, the bucket owner pays for downloads from +// the bucket. This configuration parameter enables the bucket owner (only) to +// specify that the person requesting the download will be charged for the +// download. For more information, see Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html) +// . The following operations are related to PutBucketRequestPayment : +// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// - GetBucketRequestPayment (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketRequestPayment.html) func (c *Client) PutBucketRequestPayment(ctx context.Context, params *PutBucketRequestPaymentInput, optFns ...func(*Options)) (*PutBucketRequestPaymentOutput, error) { if params == nil { params = &PutBucketRequestPaymentInput{} @@ -53,33 +50,36 @@ type PutBucketRequestPaymentInput struct { // This member is required. RequestPaymentConfiguration *types.RequestPaymentConfiguration - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not using - // the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the - // HTTP status code 400 Bad Request. For more information, see Checking object - // integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. 
When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 // ignores any provided ChecksumAlgorithm parameter. ChecksumAlgorithm types.ChecksumAlgorithm - // The base64-encoded 128-bit MD5 digest of the data. You must use this header as a - // message integrity check to verify that the request body was not corrupted in - // transit. For more information, see RFC 1864 - // (http://www.ietf.org/rfc/rfc1864.txt). For requests made using the Amazon Web - // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is - // calculated automatically. + // The base64-encoded 128-bit MD5 digest of the data. You must use this header as + // a message integrity check to verify that the request body was not corrupted in + // transit. For more information, see RFC 1864 (http://www.ietf.org/rfc/rfc1864.txt) + // . For requests made using the Amazon Web Services Command Line Interface (CLI) + // or Amazon Web Services SDKs, this field is calculated automatically. ContentMD5 *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *PutBucketRequestPaymentInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type PutBucketRequestPaymentOutput struct { // Metadata pertaining to the operation's result. 
ResultMetadata middleware.Metadata @@ -88,6 +88,9 @@ type PutBucketRequestPaymentOutput struct { } func (c *Client) addOperationPutBucketRequestPaymentMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketRequestPayment{}, middleware.After) if err != nil { return err @@ -96,6 +99,13 @@ func (c *Client) addOperationPutBucketRequestPaymentMiddlewares(stack *middlewar if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketRequestPayment"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -114,16 +124,13 @@ func (c *Client) addOperationPutBucketRequestPaymentMiddlewares(stack *middlewar if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -132,7 +139,10 @@ func (c *Client) addOperationPutBucketRequestPaymentMiddlewares(stack *middlewar if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpPutBucketRequestPaymentValidationMiddleware(stack); err != nil { @@ -144,6 +154,9 @@ func (c *Client) addOperationPutBucketRequestPaymentMiddlewares(stack *middlewar if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addPutBucketRequestPaymentInputChecksumMiddlewares(stack, options); err != nil { return err } @@ -162,14 +175,29 @@ func (c *Client) addOperationPutBucketRequestPaymentMiddlewares(stack *middlewar if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } return nil } +func (v *PutBucketRequestPaymentInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opPutBucketRequestPayment(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "PutBucketRequestPayment", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go index f4101077..725facc1 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go @@ -4,73 +4,50 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Sets the tags for a bucket. Use tags to organize your Amazon Web Services bill -// to reflect your own cost structure. To do this, sign up to get your Amazon Web -// Services account bill with tag key values included. Then, to see the cost of -// combined resources, organize your billing information according to resources -// with the same tag key values. For example, you can tag several resources with a -// specific application name, and then organize your billing information to see the -// total cost of that application across several services. For more information, -// see Cost Allocation and Tagging -// (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) -// and Using Cost Allocation in Amazon S3 Bucket Tags -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/CostAllocTagging.html). When -// this operation sets the tags for a bucket, it will overwrite any current tags -// the bucket already has. You cannot use this operation to add tags to an existing -// list of tags. To use this operation, you must have permissions to perform the -// s3:PutBucketTagging action. The bucket owner has this permission by default and -// can grant this permission to others. For more information about permissions, see -// Permissions Related to Bucket Subresource Operations -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). -// PutBucketTagging has the following special errors: +// This operation is not supported by directory buckets. Sets the tags for a +// bucket. Use tags to organize your Amazon Web Services bill to reflect your own +// cost structure. To do this, sign up to get your Amazon Web Services account bill +// with tag key values included. Then, to see the cost of combined resources, +// organize your billing information according to resources with the same tag key +// values. For example, you can tag several resources with a specific application +// name, and then organize your billing information to see the total cost of that +// application across several services. For more information, see Cost Allocation +// and Tagging (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) +// and Using Cost Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/userguide/CostAllocTagging.html) +// . When this operation sets the tags for a bucket, it will overwrite any current +// tags the bucket already has. You cannot use this operation to add tags to an +// existing list of tags. To use this operation, you must have permissions to +// perform the s3:PutBucketTagging action. 
The bucket owner has this permission by +// default and can grant this permission to others. For more information about +// permissions, see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// . PutBucketTagging has the following special errors. For more Amazon S3 errors +// see, Error Responses (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html) +// . +// - InvalidTag - The tag provided was not a valid tag. This error can occur if +// the tag did not pass input validation. For more information, see Using Cost +// Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/userguide/CostAllocTagging.html) +// . +// - MalformedXML - The XML provided does not match the schema. +// - OperationAborted - A conflicting conditional action is currently in progress +// against this resource. Please try again. +// - InternalError - The service was unable to apply the provided tag to the +// bucket. // -// * Error code: -// InvalidTagError -// -// * Description: The tag provided was not a valid tag. This error -// can occur if the tag did not pass input validation. For information about tag -// restrictions, see User-Defined Tag Restrictions -// (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html) -// and Amazon Web Services-Generated Cost Allocation Tag Restrictions -// (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/aws-tag-restrictions.html). -// -// * -// Error code: MalformedXMLError -// -// * Description: The XML provided does not match -// the schema. -// -// * Error code: OperationAbortedError -// -// * Description: A conflicting -// conditional action is currently in progress against this resource. Please try -// again. -// -// * Error code: InternalError -// -// * Description: The service was unable to -// apply the provided tag to the bucket. -// -// The following operations are related to -// PutBucketTagging: -// -// * GetBucketTagging -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) -// -// * -// DeleteBucketTagging -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html) +// The following operations are related to PutBucketTagging : +// - GetBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) +// - DeleteBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html) func (c *Client) PutBucketTagging(ctx context.Context, params *PutBucketTaggingInput, optFns ...func(*Options)) (*PutBucketTaggingOutput, error) { if params == nil { params = &PutBucketTaggingInput{} @@ -98,33 +75,36 @@ type PutBucketTaggingInput struct { // This member is required. Tagging *types.Tagging - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not using - // the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the - // HTTP status code 400 Bad Request. 
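A sketch of the PutBucketTagging call documented above, assuming the ctx, client, and imports from the first example; tag keys and values are placeholders:

func putTaggingExample(ctx context.Context, client *s3.Client) error {
	// Per the doc above, this call replaces the bucket's entire tag set;
	// it cannot append to an existing list of tags.
	_, err := client.PutBucketTagging(ctx, &s3.PutBucketTaggingInput{
		Bucket: aws.String("example-bucket"),
		Tagging: &types.Tagging{
			TagSet: []types.Tag{
				{Key: aws.String("project"), Value: aws.String("billing-demo")},
				{Key: aws.String("environment"), Value: aws.String("test")},
			},
		},
	})
	return err
}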
For more information, see Checking object - // integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 // ignores any provided ChecksumAlgorithm parameter. ChecksumAlgorithm types.ChecksumAlgorithm - // The base64-encoded 128-bit MD5 digest of the data. You must use this header as a - // message integrity check to verify that the request body was not corrupted in - // transit. For more information, see RFC 1864 - // (http://www.ietf.org/rfc/rfc1864.txt). For requests made using the Amazon Web - // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is - // calculated automatically. + // The base64-encoded 128-bit MD5 digest of the data. You must use this header as + // a message integrity check to verify that the request body was not corrupted in + // transit. For more information, see RFC 1864 (http://www.ietf.org/rfc/rfc1864.txt) + // . For requests made using the Amazon Web Services Command Line Interface (CLI) + // or Amazon Web Services SDKs, this field is calculated automatically. ContentMD5 *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *PutBucketTaggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type PutBucketTaggingOutput struct { // Metadata pertaining to the operation's result. 
ResultMetadata middleware.Metadata @@ -133,6 +113,9 @@ type PutBucketTaggingOutput struct { } func (c *Client) addOperationPutBucketTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketTagging{}, middleware.After) if err != nil { return err @@ -141,6 +124,13 @@ func (c *Client) addOperationPutBucketTaggingMiddlewares(stack *middleware.Stack if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketTagging"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -159,16 +149,13 @@ func (c *Client) addOperationPutBucketTaggingMiddlewares(stack *middleware.Stack if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -177,7 +164,10 @@ func (c *Client) addOperationPutBucketTaggingMiddlewares(stack *middleware.Stack if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpPutBucketTaggingValidationMiddleware(stack); err != nil { @@ -189,6 +179,9 @@ func (c *Client) addOperationPutBucketTaggingMiddlewares(stack *middleware.Stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addPutBucketTaggingInputChecksumMiddlewares(stack, options); err != nil { return err } @@ -207,14 +200,29 @@ func (c *Client) addOperationPutBucketTaggingMiddlewares(stack *middleware.Stack if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } return nil } +func (v *PutBucketTaggingInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opPutBucketTagging(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "PutBucketTagging", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go index 6d7943e6..c2b751ab 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketVersioning.go @@ -4,46 +4,39 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Sets the versioning state of an existing bucket. You can set the versioning -// state with one of the following values: Enabled—Enables versioning for the -// objects in the bucket. All objects added to the bucket receive a unique version -// ID. Suspended—Disables versioning for the objects in the bucket. All objects -// added to the bucket receive the version ID null. If the versioning state has -// never been set on a bucket, it has no versioning state; a GetBucketVersioning -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) +// This operation is not supported by directory buckets. Sets the versioning state +// of an existing bucket. You can set the versioning state with one of the +// following values: Enabled—Enables versioning for the objects in the bucket. All +// objects added to the bucket receive a unique version ID. Suspended—Disables +// versioning for the objects in the bucket. All objects added to the bucket +// receive the version ID null. If the versioning state has never been set on a +// bucket, it has no versioning state; a GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) // request does not return a versioning state value. In order to enable MFA Delete, // you must be the bucket owner. If you are the bucket owner and want to enable MFA // Delete in the bucket versioning configuration, you must include the x-amz-mfa -// request header and the Status and the MfaDelete request elements in a request to -// set the versioning state of the bucket. If you have an object expiration -// lifecycle policy in your non-versioned bucket and you want to maintain the same -// permanent delete behavior when you enable versioning, you must add a noncurrent -// expiration policy. The noncurrent expiration lifecycle policy will manage the -// deletes of the noncurrent object versions in the version-enabled bucket. (A -// version-enabled bucket maintains one current and zero or more noncurrent object -// versions.) For more information, see Lifecycle and Versioning -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config). -// Related Resources -// -// * CreateBucket -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) -// -// * -// DeleteBucket -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) -// -// * -// GetBucketVersioning -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) +// request header and the Status and the MfaDelete request elements in a request +// to set the versioning state of the bucket. If you have an object expiration +// lifecycle configuration in your non-versioned bucket and you want to maintain +// the same permanent delete behavior when you enable versioning, you must add a +// noncurrent expiration policy. 
The noncurrent expiration lifecycle configuration +// will manage the deletes of the noncurrent object versions in the version-enabled +// bucket. (A version-enabled bucket maintains one current and zero or more +// noncurrent object versions.) For more information, see Lifecycle and Versioning (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config) +// . The following operations are related to PutBucketVersioning : +// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) +// - GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) func (c *Client) PutBucketVersioning(ctx context.Context, params *PutBucketVersioningInput, optFns ...func(*Options)) (*PutBucketVersioningOutput, error) { if params == nil { params = &PutBucketVersioningInput{} @@ -71,37 +64,40 @@ type PutBucketVersioningInput struct { // This member is required. VersioningConfiguration *types.VersioningConfiguration - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not using - // the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the - // HTTP status code 400 Bad Request. For more information, see Checking object - // integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 // ignores any provided ChecksumAlgorithm parameter. ChecksumAlgorithm types.ChecksumAlgorithm // >The base64-encoded 128-bit MD5 digest of the data. You must use this header as // a message integrity check to verify that the request body was not corrupted in - // transit. For more information, see RFC 1864 - // (http://www.ietf.org/rfc/rfc1864.txt). For requests made using the Amazon Web - // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is - // calculated automatically. + // transit. For more information, see RFC 1864 (http://www.ietf.org/rfc/rfc1864.txt) + // . For requests made using the Amazon Web Services Command Line Interface (CLI) + // or Amazon Web Services SDKs, this field is calculated automatically. ContentMD5 *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). 
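A sketch of the PutBucketVersioning call documented above, assuming the ctx, client, and imports from the first example; the bucket name and commented MFA value are placeholders:

func putVersioningExample(ctx context.Context, client *s3.Client) error {
	_, err := client.PutBucketVersioning(ctx, &s3.PutBucketVersioningInput{
		Bucket: aws.String("example-bucket"),
		VersioningConfiguration: &types.VersioningConfiguration{
			Status: types.BucketVersioningStatusEnabled,
		},
		// MFA is only needed when changing MfaDelete, e.g.:
		// MFA: aws.String("arn:aws:iam::123456789012:mfa/root-account-mfa-device 123456"),
	})
	return err
}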
ExpectedBucketOwner *string - // The concatenation of the authentication device's serial number, a space, and the - // value that is displayed on your authentication device. + // The concatenation of the authentication device's serial number, a space, and + // the value that is displayed on your authentication device. MFA *string noSmithyDocumentSerde } +func (in *PutBucketVersioningInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type PutBucketVersioningOutput struct { // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata @@ -110,6 +106,9 @@ type PutBucketVersioningOutput struct { } func (c *Client) addOperationPutBucketVersioningMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketVersioning{}, middleware.After) if err != nil { return err @@ -118,6 +117,13 @@ func (c *Client) addOperationPutBucketVersioningMiddlewares(stack *middleware.St if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketVersioning"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -136,16 +142,13 @@ func (c *Client) addOperationPutBucketVersioningMiddlewares(stack *middleware.St if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -154,7 +157,10 @@ func (c *Client) addOperationPutBucketVersioningMiddlewares(stack *middleware.St if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpPutBucketVersioningValidationMiddleware(stack); err != nil { @@ -166,6 +172,9 @@ func (c *Client) addOperationPutBucketVersioningMiddlewares(stack *middleware.St if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addPutBucketVersioningInputChecksumMiddlewares(stack, options); err != nil { return err } @@ -184,20 +193,35 @@ func (c *Client) addOperationPutBucketVersioningMiddlewares(stack *middleware.St if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } return nil } +func (v *PutBucketVersioningInput) bucket() (string, bool) { + if v.Bucket == nil { + 
return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opPutBucketVersioning(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "PutBucketVersioning", } } -// getPutBucketVersioningRequestAlgorithmMember gets the request checksum algorithm -// value provided as input. +// getPutBucketVersioningRequestAlgorithmMember gets the request checksum +// algorithm value provided as input. func getPutBucketVersioningRequestAlgorithmMember(input interface{}) (string, bool) { in := input.(*PutBucketVersioningInput) if len(in.ChecksumAlgorithm) == 0 { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go index 11cb4a35..27555453 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go @@ -4,86 +4,60 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Sets the configuration of the website that is specified in the website -// subresource. To configure a bucket as a website, you can add this subresource on -// the bucket with website configuration information such as the file name of the -// index document and any redirect rules. For more information, see Hosting -// Websites on Amazon S3 -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). This PUT -// action requires the S3:PutBucketWebsite permission. By default, only the bucket -// owner can configure the website attached to a bucket; however, bucket owners can -// allow other users to set the website configuration by writing a bucket policy -// that grants them the S3:PutBucketWebsite permission. To redirect all website -// requests sent to the bucket's website endpoint, you add a website configuration -// with the following elements. Because all requests are sent to another website, -// you don't need to provide index document name for the bucket. +// This operation is not supported by directory buckets. Sets the configuration of +// the website that is specified in the website subresource. To configure a bucket +// as a website, you can add this subresource on the bucket with website +// configuration information such as the file name of the index document and any +// redirect rules. For more information, see Hosting Websites on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) +// . This PUT action requires the S3:PutBucketWebsite permission. By default, only +// the bucket owner can configure the website attached to a bucket; however, bucket +// owners can allow other users to set the website configuration by writing a +// bucket policy that grants them the S3:PutBucketWebsite permission. To redirect +// all website requests sent to the bucket's website endpoint, you add a website +// configuration with the following elements. 
Because all requests are sent to +// another website, you don't need to provide index document name for the bucket. +// - WebsiteConfiguration +// - RedirectAllRequestsTo +// - HostName +// - Protocol // -// * -// WebsiteConfiguration +// If you want granular control over redirects, you can use the following elements +// to add routing rules that describe conditions for redirecting requests and +// information about the redirect destination. In this case, the website +// configuration must provide an index document for the bucket, because some +// requests might not be redirected. +// - WebsiteConfiguration +// - IndexDocument +// - Suffix +// - ErrorDocument +// - Key +// - RoutingRules +// - RoutingRule +// - Condition +// - HttpErrorCodeReturnedEquals +// - KeyPrefixEquals +// - Redirect +// - Protocol +// - HostName +// - ReplaceKeyPrefixWith +// - ReplaceKeyWith +// - HttpRedirectCode // -// * RedirectAllRequestsTo -// -// * HostName -// -// * Protocol -// -// If you -// want granular control over redirects, you can use the following elements to add -// routing rules that describe conditions for redirecting requests and information -// about the redirect destination. In this case, the website configuration must -// provide an index document for the bucket, because some requests might not be -// redirected. -// -// * WebsiteConfiguration -// -// * IndexDocument -// -// * Suffix -// -// * -// ErrorDocument -// -// * Key -// -// * RoutingRules -// -// * RoutingRule -// -// * Condition -// -// * -// HttpErrorCodeReturnedEquals -// -// * KeyPrefixEquals -// -// * Redirect -// -// * Protocol -// -// * -// HostName -// -// * ReplaceKeyPrefixWith -// -// * ReplaceKeyWith -// -// * HttpRedirectCode -// -// Amazon -// S3 has a limitation of 50 routing rules per website configuration. If you -// require more than 50 routing rules, you can use object redirect. For more -// information, see Configuring an Object Redirect -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html) in -// the Amazon S3 User Guide. +// Amazon S3 has a limitation of 50 routing rules per website configuration. If +// you require more than 50 routing rules, you can use object redirect. For more +// information, see Configuring an Object Redirect (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html) +// in the Amazon S3 User Guide. The maximum request length is limited to 128 KB. func (c *Client) PutBucketWebsite(ctx context.Context, params *PutBucketWebsiteInput, optFns ...func(*Options)) (*PutBucketWebsiteOutput, error) { if params == nil { params = &PutBucketWebsiteInput{} @@ -111,33 +85,36 @@ type PutBucketWebsiteInput struct { // This member is required. WebsiteConfiguration *types.WebsiteConfiguration - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not using - // the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the - // HTTP status code 400 Bad Request. For more information, see Checking object - // integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. 
When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 // ignores any provided ChecksumAlgorithm parameter. ChecksumAlgorithm types.ChecksumAlgorithm - // The base64-encoded 128-bit MD5 digest of the data. You must use this header as a - // message integrity check to verify that the request body was not corrupted in - // transit. For more information, see RFC 1864 - // (http://www.ietf.org/rfc/rfc1864.txt). For requests made using the Amazon Web - // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is - // calculated automatically. + // The base64-encoded 128-bit MD5 digest of the data. You must use this header as + // a message integrity check to verify that the request body was not corrupted in + // transit. For more information, see RFC 1864 (http://www.ietf.org/rfc/rfc1864.txt) + // . For requests made using the Amazon Web Services Command Line Interface (CLI) + // or Amazon Web Services SDKs, this field is calculated automatically. ContentMD5 *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *PutBucketWebsiteInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type PutBucketWebsiteOutput struct { // Metadata pertaining to the operation's result. 
ResultMetadata middleware.Metadata @@ -146,6 +123,9 @@ type PutBucketWebsiteOutput struct { } func (c *Client) addOperationPutBucketWebsiteMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpPutBucketWebsite{}, middleware.After) if err != nil { return err @@ -154,6 +134,13 @@ func (c *Client) addOperationPutBucketWebsiteMiddlewares(stack *middleware.Stack if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutBucketWebsite"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -172,16 +159,13 @@ func (c *Client) addOperationPutBucketWebsiteMiddlewares(stack *middleware.Stack if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -190,7 +174,10 @@ func (c *Client) addOperationPutBucketWebsiteMiddlewares(stack *middleware.Stack if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpPutBucketWebsiteValidationMiddleware(stack); err != nil { @@ -202,6 +189,9 @@ func (c *Client) addOperationPutBucketWebsiteMiddlewares(stack *middleware.Stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addPutBucketWebsiteInputChecksumMiddlewares(stack, options); err != nil { return err } @@ -220,14 +210,29 @@ func (c *Client) addOperationPutBucketWebsiteMiddlewares(stack *middleware.Stack if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } return nil } +func (v *PutBucketWebsiteInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opPutBucketWebsite(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "PutBucketWebsite", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go index aa13f0e7..1bade82e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go @@ -4,6 +4,7 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" @@ -16,89 +17,77 @@ import ( "time" ) -// Adds an object to a bucket. You must have WRITE permissions on a bucket to add -// an object to it. Amazon S3 never adds partial objects; if you receive a success -// response, Amazon S3 added the entire object to the bucket. Amazon S3 is a -// distributed system. If it receives multiple write requests for the same object -// simultaneously, it overwrites all but the last object written. Amazon S3 does -// not provide object locking; if you need this, make sure to build it into your -// application layer or use versioning instead. To ensure that data is not -// corrupted traversing the network, use the Content-MD5 header. When you use this -// header, Amazon S3 checks the object against the provided MD5 value and, if they -// do not match, returns an error. Additionally, you can calculate the MD5 while -// putting an object to Amazon S3 and compare the returned ETag to the calculated -// MD5 value. +// Adds an object to a bucket. +// - Amazon S3 never adds partial objects; if you receive a success response, +// Amazon S3 added the entire object to the bucket. You cannot use PutObject to +// only update a single piece of metadata for an existing object. You must put the +// entire object with updated metadata if you want to update some values. +// - If your bucket uses the bucket owner enforced setting for Object Ownership, +// ACLs are disabled and no longer affect permissions. All objects written to the +// bucket by any account will be owned by the bucket owner. +// - Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support +// virtual-hosted-style requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . +// Path-style requests are not supported. For more information, see Regional and +// Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. // -// * To successfully complete the PutObject request, you must have the -// s3:PutObject in your IAM permissions. +// Amazon S3 is a distributed system. If it receives multiple write requests for +// the same object simultaneously, it overwrites all but the last object written. +// However, Amazon S3 provides features that can modify this behavior: +// - S3 Object Lock - To prevent objects from being deleted or overwritten, you +// can use Amazon S3 Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html) +// in the Amazon S3 User Guide. This functionality is not supported for directory +// buckets. +// - S3 Versioning - When you enable versioning for a bucket, if Amazon S3 +// receives multiple write requests for the same object simultaneously, it stores +// all versions of the objects. For each write request that is made to the same +// object, Amazon S3 automatically generates a unique version ID of that object +// being stored in Amazon S3. You can retrieve, replace, or delete any version of +// the object. 
For more information about versioning, see Adding Objects to +// Versioning-Enabled Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html) +// in the Amazon S3 User Guide. For information about returning the versioning +// state of a bucket, see GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) +// . This functionality is not supported for directory buckets. // -// * To successfully change the objects acl -// of your PutObject request, you must have the s3:PutObjectAcl in your IAM -// permissions. +// Permissions +// - General purpose bucket permissions - The following permissions are required +// in your policies when your PutObject request includes specific headers. +// - s3:PutObject - To successfully complete the PutObject request, you must +// always have the s3:PutObject permission on a bucket to add an object to it. +// - s3:PutObjectAcl - To successfully change the objects ACL of your PutObject +// request, you must have the s3:PutObjectAcl . +// - s3:PutObjectTagging - To successfully set the tag-set with your PutObject +// request, you must have the s3:PutObjectTagging . +// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// . // -// * The Content-MD5 header is required for any request to upload an -// object with a retention period configured using Amazon S3 Object Lock. For more -// information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html) in -// the Amazon S3 User Guide. +// Data integrity with Content-MD5 +// - General purpose bucket - To ensure that data is not corrupted traversing +// the network, use the Content-MD5 header. When you use this header, Amazon S3 +// checks the object against the provided MD5 value and, if they do not match, +// Amazon S3 returns an error. Alternatively, when the object's ETag is its MD5 +// digest, you can calculate the MD5 while putting the object to Amazon S3 and +// compare the returned ETag to the calculated MD5 value. +// - Directory bucket - This functionality is not supported for directory +// buckets. // -// Server-side Encryption You can optionally request -// server-side encryption. With server-side encryption, Amazon S3 encrypts your -// data as it writes it to disks in its data centers and decrypts the data when you -// access it. You have the option to provide your own encryption key or use Amazon -// Web Services managed encryption keys (SSE-S3 or SSE-KMS). 
For more information, -// see Using Server-Side Encryption -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html). -// If you request server-side encryption using Amazon Web Services Key Management -// Service (SSE-KMS), you can enable an S3 Bucket Key at the object-level. For more -// information, see Amazon S3 Bucket Keys -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) in the Amazon -// S3 User Guide. Access Control List (ACL)-Specific Request Headers You can use -// headers to grant ACL- based permissions. By default, all objects are private. -// Only the owner has full access control. When adding a new object, you can grant -// permissions to individual Amazon Web Services accounts or to predefined groups -// defined by Amazon S3. These permissions are then added to the ACL on the object. -// For more information, see Access Control List (ACL) Overview -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) and Managing -// ACLs Using the REST API -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html). If -// the bucket that you're uploading objects to uses the bucket owner enforced -// setting for S3 Object Ownership, ACLs are disabled and no longer affect -// permissions. Buckets that use this setting only accept PUT requests that don't -// specify an ACL or PUT requests that specify bucket owner full control ACLs, such -// as the bucket-owner-full-control canned ACL or an equivalent form of this ACL -// expressed in the XML format. PUT requests that contain other ACLs (for example, -// custom grants to certain Amazon Web Services accounts) fail and return a 400 -// error with the error code AccessControlListNotSupported. For more information, -// see Controlling ownership of objects and disabling ACLs -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) -// in the Amazon S3 User Guide. If your bucket uses the bucket owner enforced -// setting for Object Ownership, all objects written to the bucket by any account -// will be owned by the bucket owner. Storage Class Options By default, Amazon S3 -// uses the STANDARD Storage Class to store newly created objects. The STANDARD -// storage class provides high durability and high availability. Depending on -// performance needs, you can specify a different Storage Class. Amazon S3 on -// Outposts only uses the OUTPOSTS Storage Class. For more information, see Storage -// Classes -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) in -// the Amazon S3 User Guide. Versioning If you enable versioning for a bucket, -// Amazon S3 automatically generates a unique version ID for the object being -// stored. Amazon S3 returns this ID in the response. When you enable versioning -// for a bucket, if Amazon S3 receives multiple write requests for the same object -// simultaneously, it stores all of the objects. For more information about -// versioning, see Adding Objects to Versioning Enabled Buckets -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html). -// For information about returning the versioning state of a bucket, see -// GetBucketVersioning -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html). 
-// Related Resources -// -// * CopyObject -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) -// -// * -// DeleteObject -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . For more information about +// related Amazon S3 APIs, see the following: +// - CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) +// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) func (c *Client) PutObject(ctx context.Context, params *PutObjectInput, optFns ...func(*Options)) (*PutObjectOutput, error) { if params == nil { params = &PutObjectInput{} @@ -116,23 +105,31 @@ func (c *Client) PutObject(ctx context.Context, params *PutObjectInput, optFns . type PutObjectInput struct { - // The bucket name to which the PUT action was initiated. When using this action - // with an access point, you must direct requests to the access point hostname. The - // access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, - // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts - // hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the - // Amazon S3 User Guide. + // The bucket name to which the PUT action was initiated. Directory buckets - When + // you use this operation with a directory bucket, you must use + // virtual-hosted-style requests in the format + // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + // supported. Directory bucket names must be unique in the chosen Availability + // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket + // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. 
For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + // use this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts access point ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // This member is required. Bucket *string @@ -142,79 +139,103 @@ type PutObjectInput struct { // This member is required. Key *string - // The canned ACL to apply to the object. For more information, see Canned ACL - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). - // This action is not supported by Amazon S3 on Outposts. + // The canned ACL to apply to the object. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) + // in the Amazon S3 User Guide. When adding a new object, you can use headers to + // grant ACL-based permissions to individual Amazon Web Services accounts or to + // predefined groups defined by Amazon S3. These permissions are then added to the + // ACL on the object. By default, all objects are private. Only the owner has full + // access control. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) + // and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html) + // in the Amazon S3 User Guide. If the bucket that you're uploading objects to uses + // the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and + // no longer affect permissions. Buckets that use this setting only accept PUT + // requests that don't specify an ACL or PUT requests that specify bucket owner + // full control ACLs, such as the bucket-owner-full-control canned ACL or an + // equivalent form of this ACL expressed in the XML format. PUT requests that + // contain other ACLs (for example, custom grants to certain Amazon Web Services + // accounts) fail and return a 400 error with the error code + // AccessControlListNotSupported . For more information, see Controlling + // ownership of objects and disabling ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) + // in the Amazon S3 User Guide. + // - This functionality is not supported for directory buckets. + // - This functionality is not supported for Amazon S3 on Outposts. ACL types.ObjectCannedACL // Object data. Body io.Reader // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption - // with server-side encryption using AWS KMS (SSE-KMS). Setting this header to true - // causes Amazon S3 to use an S3 Bucket Key for object encryption with SSE-KMS. - // Specifying this header with a PUT action doesn’t affect bucket-level settings - // for S3 Bucket Key. - BucketKeyEnabled bool + // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). 
+ // Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object + // encryption with SSE-KMS. Specifying this header with a PUT action doesn’t affect + // bucket-level settings for S3 Bucket Key. This functionality is not supported for + // directory buckets. + BucketKeyEnabled *bool // Can be used to specify caching behavior along the request/reply chain. For more - // information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 - // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9). + // information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) + // . CacheControl *string - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not using - // the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the - // HTTP status code 400 Bad Request. For more information, see Checking object - // integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) - // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 - // ignores any provided ChecksumAlgorithm parameter. + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3 + // fails the request with the HTTP status code 400 Bad Request . For the + // x-amz-checksum-algorithm header, replace algorithm with the supported + // algorithm from the following list: + // - CRC32 + // - CRC32C + // - SHA1 + // - SHA256 + // For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. If the individual checksum value you provide + // through x-amz-checksum-algorithm doesn't match the checksum algorithm you set + // through x-amz-sdk-checksum-algorithm , Amazon S3 ignores any provided + // ChecksumAlgorithm parameter and uses the checksum algorithm that matches the + // provided value in x-amz-checksum-algorithm . For directory buckets, when you + // use Amazon Web Services SDKs, CRC32 is the default checksum algorithm that's + // used for performance. ChecksumAlgorithm types.ChecksumAlgorithm // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see - // Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumCRC32 *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the // base64-encoded, 32-bit CRC32C checksum of the object. 
For more information, see - // Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumCRC32C *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the // base64-encoded, 160-bit SHA-1 digest of the object. For more information, see - // Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumSHA1 *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see - // Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumSHA256 *string // Specifies presentational information for the object. For more information, see - // http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1 - // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1). + // https://www.rfc-editor.org/rfc/rfc6266#section-4 (https://www.rfc-editor.org/rfc/rfc6266#section-4) + // . ContentDisposition *string // Specifies what content encodings have been applied to the object and thus what // decoding mechanisms must be applied to obtain the media-type referenced by the // Content-Type header field. For more information, see - // http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11 - // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11). + // https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding (https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding) + // . ContentEncoding *string // The language the content is in. @@ -222,219 +243,283 @@ type PutObjectInput struct { // Size of the body in bytes. This parameter is useful when the size of the body // cannot be determined automatically. For more information, see - // http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13 - // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13). - ContentLength int64 + // https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length (https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length) + // . + ContentLength *int64 // The base64-encoded 128-bit MD5 digest of the message (without the headers) // according to RFC 1864. This header can be used as a message integrity check to // verify that the data is the same data that was originally sent. Although it is // optional, we recommend using the Content-MD5 mechanism as an end-to-end // integrity check. For more information about REST request authentication, see - // REST Authentication - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). + // REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) + // . 
The Content-MD5 header is required for any request to upload an object with a + // retention period configured using Amazon S3 Object Lock. For more information + // about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. ContentMD5 *string // A standard MIME type describing the format of the contents. For more - // information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17 - // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17). + // information, see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type (https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type) + // . ContentType *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // The date and time at which the object is no longer cacheable. For more - // information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21 - // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21). + // information, see https://www.rfc-editor.org/rfc/rfc7234#section-5.3 (https://www.rfc-editor.org/rfc/rfc7234#section-5.3) + // . Expires *time.Time - // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. This - // action is not supported by Amazon S3 on Outposts. + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + // - This functionality is not supported for directory buckets. + // - This functionality is not supported for Amazon S3 on Outposts. GrantFullControl *string - // Allows grantee to read the object data and its metadata. This action is not - // supported by Amazon S3 on Outposts. + // Allows grantee to read the object data and its metadata. + // - This functionality is not supported for directory buckets. + // - This functionality is not supported for Amazon S3 on Outposts. GrantRead *string - // Allows grantee to read the object ACL. This action is not supported by Amazon S3 - // on Outposts. + // Allows grantee to read the object ACL. + // - This functionality is not supported for directory buckets. + // - This functionality is not supported for Amazon S3 on Outposts. GrantReadACP *string - // Allows grantee to write the ACL for the applicable object. This action is not - // supported by Amazon S3 on Outposts. + // Allows grantee to write the ACL for the applicable object. + // - This functionality is not supported for directory buckets. + // - This functionality is not supported for Amazon S3 on Outposts. GrantWriteACP *string // A map of metadata to store with the object in S3. Metadata map[string]string // Specifies whether a legal hold will be applied to this object. For more - // information about S3 Object Lock, see Object Lock - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). + // information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. 
ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus - // The Object Lock mode that you want to apply to this object. + // The Object Lock mode that you want to apply to this object. This functionality + // is not supported for directory buckets. ObjectLockMode types.ObjectLockMode // The date and time when you want this object's Object Lock to expire. Must be - // formatted as a timestamp parameter. + // formatted as a timestamp parameter. This functionality is not supported for + // directory buckets. ObjectLockRetainUntilDate *time.Time // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer - // Specifies the algorithm to use to when encrypting the object (for example, - // AES256). + // Specifies the algorithm to use when encrypting the object (for example, AES256 + // ). This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use in // encrypting data. This value is used to store the object and then it is // discarded; Amazon S3 does not store the encryption key. The key must be // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. + // x-amz-server-side-encryption-customer-algorithm header. This functionality is + // not supported for directory buckets. SSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. + // encryption key was transmitted without error. This functionality is not + // supported for directory buckets. SSECustomerKeyMD5 *string // Specifies the Amazon Web Services KMS Encryption Context to use for object // encryption. The value of this header is a base64-encoded UTF-8 string holding - // JSON with the encryption context key-value pairs. + // JSON with the encryption context key-value pairs. This value is stored as object + // metadata and automatically gets passed on to Amazon Web Services KMS for future + // GetObject or CopyObject operations on this object. This value must be + // explicitly added during CopyObject operations. This functionality is not + // supported for directory buckets. SSEKMSEncryptionContext *string - // If x-amz-server-side-encryption is present and has the value of aws:kms, this - // header specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetrical customer managed key that was used for the - // object. 
If you specify x-amz-server-side-encryption:aws:kms, but do not provide - // x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web - // Services managed key to protect the data. If the KMS key does not exist in the - // same account issuing the command, you must use the full ARN and not just the ID. + // If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse , + // this header specifies the ID (Key ID, Key ARN, or Key Alias) of the Key + // Management Service (KMS) symmetric encryption customer managed key that was used + // for the object. If you specify x-amz-server-side-encryption:aws:kms or + // x-amz-server-side-encryption:aws:kms:dsse , but do not provide + // x-amz-server-side-encryption-aws-kms-key-id , Amazon S3 uses the Amazon Web + // Services managed key ( aws/s3 ) to protect the data. If the KMS key does not + // exist in the same account that's issuing the command, you must use the full ARN + // and not just the ID. This functionality is not supported for directory buckets. SSEKMSKeyId *string - // The server-side encryption algorithm used when storing this object in Amazon S3 - // (for example, AES256, aws:kms). + // The server-side encryption algorithm that was used when you store this object + // in Amazon S3 (for example, AES256 , aws:kms , aws:kms:dsse ). General purpose + // buckets - You have four mutually exclusive options to protect data using + // server-side encryption in Amazon S3, depending on how you choose to manage the + // encryption keys. Specifically, the encryption key options are Amazon S3 managed + // keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or DSSE-KMS), and + // customer-provided keys (SSE-C). Amazon S3 encrypts data with server-side + // encryption by using Amazon S3 managed keys (SSE-S3) by default. You can + // optionally tell Amazon S3 to encrypt data at rest by using server-side + // encryption with other key options. For more information, see Using Server-Side + // Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) + // in the Amazon S3 User Guide. Directory buckets - For directory buckets, only the + // server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) value is + // supported. ServerSideEncryption types.ServerSideEncryption // By default, Amazon S3 uses the STANDARD Storage Class to store newly created // objects. The STANDARD storage class provides high durability and high // availability. Depending on performance needs, you can specify a different - // Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For - // more information, see Storage Classes - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) in - // the Amazon S3 User Guide. + // Storage Class. For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) + // in the Amazon S3 User Guide. + // - For directory buckets, only the S3 Express One Zone storage class is + // supported to store newly created objects. + // - Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. StorageClass types.StorageClass - // The tag-set for the object. The tag-set must be encoded as URL Query parameters. - // (For example, "Key1=Value1") + // The tag-set for the object. The tag-set must be encoded as URL Query + // parameters. (For example, "Key1=Value1") This functionality is not supported for + // directory buckets. 
Tagging *string // If the bucket is configured as a website, redirects requests for this object to // another object in the same bucket or to an external URL. Amazon S3 stores the // value of this header in the object metadata. For information about object - // metadata, see Object Key and Metadata - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html). In the - // following example, the request header sets the redirect to an object - // (anotherPage.html) in the same bucket: x-amz-website-redirect-location: - // /anotherPage.html In the following example, the request header sets the object - // redirect to another website: x-amz-website-redirect-location: - // http://www.example.com/ For more information about website hosting in Amazon S3, - // see Hosting Websites on Amazon S3 - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) and How to - // Configure Website Page Redirects - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html). + // metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html) + // in the Amazon S3 User Guide. In the following example, the request header sets + // the redirect to an object (anotherPage.html) in the same bucket: + // x-amz-website-redirect-location: /anotherPage.html In the following example, the + // request header sets the object redirect to another website: + // x-amz-website-redirect-location: http://www.example.com/ For more information + // about website hosting in Amazon S3, see Hosting Websites on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) + // and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. WebsiteRedirectLocation *string noSmithyDocumentSerde } +func (in *PutObjectInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.Key = in.Key + +} + type PutObjectOutput struct { // Indicates whether the uploaded object uses an S3 Bucket Key for server-side - // encryption with Amazon Web Services KMS (SSE-KMS). - BucketKeyEnabled bool + // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality + // is not supported for directory buckets. + BucketKeyEnabled *bool // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32 *string // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. 
With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32C *string // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use the API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA1 *string // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA256 *string - // Entity tag for the uploaded object. + // Entity tag for the uploaded object. General purpose buckets - To ensure that + // data is not corrupted traversing the network, for objects where the ETag is the + // MD5 digest of the object, you can calculate the MD5 while putting an object to + // Amazon S3 and compare the returned ETag to the calculated MD5 value. Directory + // buckets - The ETag for the object in a directory bucket isn't the MD5 digest of + // the object. 
ETag *string // If the expiration is configured for the object (see - // PutBucketLifecycleConfiguration - // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)), - // the response includes this header. It includes the expiry-date and rule-id - // key-value pairs that provide information about object expiration. The value of - // the rule-id is URL-encoded. + // PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) + // ) in the Amazon S3 User Guide, the response includes this header. It includes + // the expiry-date and rule-id key-value pairs that provide information about + // object expiration. The value of the rule-id is URL-encoded. This functionality + // is not supported for directory buckets. Expiration *string // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm used. + // If server-side encryption with a customer-provided encryption key was + // requested, the response will include this header to confirm the encryption + // algorithm that's used. This functionality is not supported for directory + // buckets. SSECustomerAlgorithm *string - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round-trip message integrity - // verification of the customer-provided encryption key. + // If server-side encryption with a customer-provided encryption key was + // requested, the response will include this header to provide the round-trip + // message integrity verification of the customer-provided encryption key. This + // functionality is not supported for directory buckets. SSECustomerKeyMD5 *string - // If present, specifies the Amazon Web Services KMS Encryption Context to use for + // If present, indicates the Amazon Web Services KMS Encryption Context to use for // object encryption. The value of this header is a base64-encoded UTF-8 string - // holding JSON with the encryption context key-value pairs. + // holding JSON with the encryption context key-value pairs. This value is stored + // as object metadata and automatically gets passed on to Amazon Web Services KMS + // for future GetObject or CopyObject operations on this object. This + // functionality is not supported for directory buckets. SSEKMSEncryptionContext *string - // If x-amz-server-side-encryption is present and has the value of aws:kms, this - // header specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed key that was used for the - // object. + // If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse , + // this header indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. This functionality + // is not supported for directory buckets. SSEKMSKeyId *string - // If you specified server-side encryption either with an Amazon Web Services KMS - // key or Amazon S3-managed encryption key in your PUT request, the response - // includes this header. It confirms the encryption algorithm that Amazon S3 used - // to encrypt the object. 
+ // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256 , aws:kms , aws:kms:dsse ). For directory buckets, only + // server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is + // supported. ServerSideEncryption types.ServerSideEncryption - // Version of the object. + // Version ID of the object. If you enable versioning for a bucket, Amazon S3 + // automatically generates a unique version ID for the object being stored. Amazon + // S3 returns this ID in the response. When you enable versioning for a bucket, if + // Amazon S3 receives multiple write requests for the same object simultaneously, + // it stores all of the objects. For more information about versioning, see Adding + // Objects to Versioning-Enabled Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html) + // in the Amazon S3 User Guide. For information about returning the versioning + // state of a bucket, see GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) + // . This functionality is not supported for directory buckets. VersionId *string // Metadata pertaining to the operation's result. @@ -444,6 +529,9 @@ type PutObjectOutput struct { } func (c *Client) addOperationPutObjectMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpPutObject{}, middleware.After) if err != nil { return err @@ -452,6 +540,13 @@ func (c *Client) addOperationPutObjectMiddlewares(stack *middleware.Stack, optio if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutObject"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -470,16 +565,13 @@ func (c *Client) addOperationPutObjectMiddlewares(stack *middleware.Stack, optio if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -488,7 +580,10 @@ func (c *Client) addOperationPutObjectMiddlewares(stack *middleware.Stack, optio if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpPutObjectValidationMiddleware(stack); err != nil { @@ -503,6 +598,9 @@ func (c *Client) addOperationPutObjectMiddlewares(stack *middleware.Stack, optio if err = add100Continue(stack, options); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addPutObjectInputChecksumMiddlewares(stack, options); err != nil { return err } @@ -524,14 +622,26 @@ func (c 
*Client) addOperationPutObjectMiddlewares(stack *middleware.Stack, optio if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *PutObjectInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opPutObject(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "PutObject", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go index 05a377b5..5716f550 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go @@ -4,6 +4,7 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" @@ -13,140 +14,87 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Uses the acl subresource to set the access control list (ACL) permissions for a -// new or existing object in an S3 bucket. You must have WRITE_ACP permission to -// set the ACL of an object. For more information, see What permissions can I -// grant? -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions) -// in the Amazon S3 User Guide. This action is not supported by Amazon S3 on -// Outposts. Depending on your application needs, you can choose to set the ACL on -// an object using either the request body or the headers. For example, if you have -// an existing application that updates a bucket ACL using the request body, you -// can continue to use that approach. For more information, see Access Control List -// (ACL) Overview -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) in the -// Amazon S3 User Guide. If your bucket uses the bucket owner enforced setting for -// S3 Object Ownership, ACLs are disabled and no longer affect permissions. You -// must use policies to grant access to your bucket and the objects in it. Requests -// to set ACLs or update ACLs fail and return the AccessControlListNotSupported -// error code. Requests to read ACLs are still supported. For more information, see -// Controlling object ownership -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) -// in the Amazon S3 User Guide. Access Permissions You can set access permissions -// using one of the following methods: +// This operation is not supported by directory buckets. Uses the acl subresource +// to set the access control list (ACL) permissions for a new or existing object in +// an S3 bucket. You must have the WRITE_ACP permission to set the ACL of an +// object. For more information, see What permissions can I grant? (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions) +// in the Amazon S3 User Guide. This functionality is not supported for Amazon S3 +// on Outposts. Depending on your application needs, you can choose to set the ACL +// on an object using either the request body or the headers. 
For example, if you +// have an existing application that updates a bucket ACL using the request body, +// you can continue to use that approach. For more information, see Access Control +// List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) +// in the Amazon S3 User Guide. If your bucket uses the bucket owner enforced +// setting for S3 Object Ownership, ACLs are disabled and no longer affect +// permissions. You must use policies to grant access to your bucket and the +// objects in it. Requests to set ACLs or update ACLs fail and return the +// AccessControlListNotSupported error code. Requests to read ACLs are still +// supported. For more information, see Controlling object ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon S3 User Guide. Permissions You can set access permissions using +// one of the following methods: +// - Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a +// set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined +// set of grantees and permissions. Specify the canned ACL name as the value of +// x-amz-acl. If you use this header, you cannot use other access +// control-specific headers in your request. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) +// . +// - Specify access permissions explicitly with the x-amz-grant-read , +// x-amz-grant-read-acp , x-amz-grant-write-acp , and x-amz-grant-full-control +// headers. When using these headers, you specify explicit access permissions and +// grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the +// permission. If you use these ACL-specific headers, you cannot use x-amz-acl +// header to set a canned ACL. These parameters map to the set of permissions that +// Amazon S3 supports in an ACL. For more information, see Access Control List +// (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) +// . You specify each grantee as a type=value pair, where the type is one of the +// following: +// - id – if the value specified is the canonical user ID of an Amazon Web +// Services account +// - uri – if you are granting permissions to a predefined group +// - emailAddress – if the value specified is the email address of an Amazon Web +// Services account Using email addresses to specify a grantee is only supported in +// the following Amazon Web Services Regions: +// - US East (N. Virginia) +// - US West (N. California) +// - US West (Oregon) +// - Asia Pacific (Singapore) +// - Asia Pacific (Sydney) +// - Asia Pacific (Tokyo) +// - Europe (Ireland) +// - South America (São Paulo) For a list of all the Amazon S3 supported Regions +// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) +// in the Amazon Web Services General Reference. For example, the following +// x-amz-grant-read header grants list objects permission to the two Amazon Web +// Services accounts identified by their email addresses. x-amz-grant-read: +// emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com" // -// * Specify a canned ACL with the x-amz-acl -// request header. Amazon S3 supports a set of predefined ACLs, known as canned -// ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify -// the canned ACL name as the value of x-amz-acl.
If you use this header, you -// cannot use other access control-specific headers in your request. For more -// information, see Canned ACL -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// You can use either a canned ACL or specify access permissions explicitly. You +// cannot do both. Grantee Values You can specify the person (grantee) to whom +// you're assigning access rights (using request elements) in the following ways: +// - By the person's ID: <Grantee><ID>ID</ID><DisplayName>GranteesEmail</DisplayName></Grantee>. DisplayName is optional and +// ignored in the request. +// - By URI: <Grantee><URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI></Grantee> +// - By Email address: <Grantee><EmailAddress>Grantees@email.com</EmailAddress></Grantee>. The grantee is resolved +// to the CanonicalUser and, in a response to a GET Object acl request, appears as +// the CanonicalUser. Using email addresses to specify a grantee is only supported +// in the following Amazon Web Services Regions: +// - US East (N. Virginia) +// - US West (N. California) +// - US West (Oregon) +// - Asia Pacific (Singapore) +// - Asia Pacific (Sydney) +// - Asia Pacific (Tokyo) +// - Europe (Ireland) +// - South America (São Paulo) For a list of all the Amazon S3 supported Regions +// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) +// in the Amazon Web Services General Reference. // -// * -// Specify access permissions explicitly with the x-amz-grant-read, -// x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control -// headers. When using these headers, you specify explicit access permissions and -// grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the -// permission. If you use these ACL-specific headers, you cannot use x-amz-acl -// header to set a canned ACL. These parameters map to the set of permissions that -// Amazon S3 supports in an ACL. For more information, see Access Control List -// (ACL) Overview -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). You specify -// each grantee as a type=value pair, where the type is one of the following: - -// * id -// – if the value specified is the canonical user ID of an Amazon Web Services -// account - -// * uri – if you are granting permissions to a predefined group - -// * -// emailAddress – if the value specified is the email address of an Amazon Web -// Services account Using email addresses to specify a grantee is only supported in -// the following Amazon Web Services Regions: - -// * US East (N. Virginia) - -// * US West -// (N. California) - -// * US West (Oregon) - -// * Asia Pacific (Singapore) - -// * Asia Pacific -// (Sydney) - -// * Asia Pacific (Tokyo) - -// * Europe (Ireland) - -// * South America (São -// Paulo) - -// For a list of all the Amazon S3 supported Regions and endpoints, see -// Regions and Endpoints -// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the -// Amazon Web Services General Reference. - -// For example, the following -// x-amz-grant-read header grants list objects permission to the two Amazon Web -// Services accounts identified by their email addresses. x-amz-grant-read: -// emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com" - -// You can use either -// a canned ACL or specify access permissions explicitly. You cannot do both.
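To make the canned-ACL versus explicit-grant distinction above concrete, here is a minimal sketch of applying a canned ACL through the updated client surface; the bucket and key names are placeholders, and the snippet is illustrative rather than part of the vendored files:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// ACL serializes to the x-amz-acl header, so it cannot be combined with
	// the explicit x-amz-grant-* fields in the same request.
	_, err = client.PutObjectAcl(ctx, &s3.PutObjectAclInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),    // placeholder
		ACL:    types.ObjectCannedACLPublicRead,
	})
	if err != nil {
		log.Fatal(err)
	}
}
```

Explicit grants would instead populate the GrantRead/GrantWriteACP-style input fields, mirroring the x-amz-grant-* headers described above.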
-// Grantee Values You can specify the person (grantee) to whom you're assigning -// access rights (using request elements) in the following ways: -// -// * By the person's -// ID: <>ID<><>GranteesEmail<> DisplayName is optional and ignored in the -// request. -// -// * By URI: -// <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> -// -// * By Email -// address: <>Grantees@email.com<>lt;/Grantee> The grantee is resolved to the -// CanonicalUser and, in a response to a GET Object acl request, appears as the -// CanonicalUser. Using email addresses to specify a grantee is only supported in -// the following Amazon Web Services Regions: -// -// * US East (N. Virginia) -// -// * US West -// (N. California) -// -// * US West (Oregon) -// -// * Asia Pacific (Singapore) -// -// * Asia Pacific -// (Sydney) -// -// * Asia Pacific (Tokyo) -// -// * Europe (Ireland) -// -// * South America (São -// Paulo) -// -// For a list of all the Amazon S3 supported Regions and endpoints, see -// Regions and Endpoints -// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the -// Amazon Web Services General Reference. -// -// Versioning The ACL of an object is set -// at the object version level. By default, PUT sets the ACL of the current version -// of an object. To set the ACL of a different version, use the versionId -// subresource. Related Resources -// -// * CopyObject -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) -// -// * -// GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// Versioning The ACL of an object is set at the object version level. By default, +// PUT sets the ACL of the current version of an object. To set the ACL of a +// different version, use the versionId subresource. The following operations are +// related to PutObjectAcl : +// - CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) func (c *Client) PutObjectAcl(ctx context.Context, params *PutObjectAclInput, optFns ...func(*Options)) (*PutObjectAclOutput, error) { if params == nil { params = &PutObjectAclInput{} @@ -165,80 +113,70 @@ func (c *Client) PutObjectAcl(ctx context.Context, params *PutObjectAclInput, op type PutObjectAclInput struct { // The bucket name that contains the object to which you want to attach the ACL. - // When using this action with an access point, you must direct requests to the + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the // access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. S3 on Outposts - When you use this action with + // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. 
+ // The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + // use this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts access point ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) // in the Amazon S3 User Guide. // // This member is required. Bucket *string - // Key for which the PUT action was initiated. When using this action with an - // access point, you must direct requests to the access point hostname. The access - // point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, - // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts - // hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the - // Amazon S3 User Guide. + // Key for which the PUT action was initiated. // // This member is required. Key *string - // The canned ACL to apply to the object. For more information, see Canned ACL - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). + // The canned ACL to apply to the object. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) + // . ACL types.ObjectCannedACL // Contains the elements that set the ACL permissions for an object per grantee. AccessControlPolicy *types.AccessControlPolicy - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not using - // the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the - // HTTP status code 400 Bad Request. For more information, see Checking object - // integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 // ignores any provided ChecksumAlgorithm parameter. 
ChecksumAlgorithm types.ChecksumAlgorithm - // The base64-encoded 128-bit MD5 digest of the data. This header must be used as a - // message integrity check to verify that the request body was not corrupted in - // transit. For more information, go to RFC 1864.> - // (http://www.ietf.org/rfc/rfc1864.txt) For requests made using the Amazon Web - // Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is - // calculated automatically. + // The base64-encoded 128-bit MD5 digest of the data. This header must be used as + // a message integrity check to verify that the request body was not corrupted in + // transit. For more information, see RFC 1864 (http://www.ietf.org/rfc/rfc1864.txt). + // For requests made using the Amazon Web Services Command Line Interface (CLI) or + // Amazon Web Services SDKs, this field is calculated automatically. ContentMD5 *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Allows grantee the read, write, read ACP, and write ACP permissions on the - // bucket. This action is not supported by Amazon S3 on Outposts. + // bucket. This functionality is not supported for Amazon S3 on Outposts. GrantFullControl *string - // Allows grantee to list the objects in the bucket. This action is not supported - // by Amazon S3 on Outposts. + // Allows grantee to list the objects in the bucket. This functionality is not + // supported for Amazon S3 on Outposts. GrantRead *string - // Allows grantee to read the bucket ACL. This action is not supported by Amazon S3 - // on Outposts. + // Allows grantee to read the bucket ACL. This functionality is not supported for + // Amazon S3 on Outposts. GrantReadACP *string // Allows grantee to create new objects in the bucket. For the bucket and object @@ -246,28 +184,37 @@ type PutObjectAclInput struct { // objects. GrantWrite *string - // Allows grantee to write the ACL for the applicable bucket. This action is not - // supported by Amazon S3 on Outposts. + // Allows grantee to write the ACL for the applicable bucket. This functionality + // is not supported for Amazon S3 on Outposts. GrantWriteACP *string // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer - // VersionId used to reference a specific version of the object.
+ // Version ID used to reference a specific version of the object. This + // functionality is not supported for directory buckets. VersionId *string noSmithyDocumentSerde } +func (in *PutObjectAclInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.Key = in.Key + +} + type PutObjectAclOutput struct { // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. @@ -277,6 +224,9 @@ type PutObjectAclOutput struct { } func (c *Client) addOperationPutObjectAclMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpPutObjectAcl{}, middleware.After) if err != nil { return err @@ -285,6 +235,13 @@ func (c *Client) addOperationPutObjectAclMiddlewares(stack *middleware.Stack, op if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutObjectAcl"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -303,16 +260,13 @@ func (c *Client) addOperationPutObjectAclMiddlewares(stack *middleware.Stack, op if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -321,7 +275,10 @@ func (c *Client) addOperationPutObjectAclMiddlewares(stack *middleware.Stack, op if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpPutObjectAclValidationMiddleware(stack); err != nil { @@ -333,6 +290,9 @@ func (c *Client) addOperationPutObjectAclMiddlewares(stack *middleware.Stack, op if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addPutObjectAclInputChecksumMiddlewares(stack, options); err != nil { return err } @@ -351,14 +311,29 @@ func (c *Client) addOperationPutObjectAclMiddlewares(stack *middleware.Stack, op if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } return nil } +func (v *PutObjectAclInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opPutObjectAcl(region string) 
*awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "PutObjectAcl", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go index b8004b59..3fa38af3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go @@ -4,6 +4,7 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" @@ -13,10 +14,9 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Applies a legal hold configuration to the specified object. For more -// information, see Locking Objects -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). This action -// is not supported by Amazon S3 on Outposts. +// This operation is not supported by directory buckets. Applies a legal hold +// configuration to the specified object. For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) +// . This functionality is not supported for Amazon S3 on Outposts. func (c *Client) PutObjectLegalHold(ctx context.Context, params *PutObjectLegalHoldInput, optFns ...func(*Options)) (*PutObjectLegalHoldOutput, error) { if params == nil { params = &PutObjectLegalHoldInput{} @@ -35,13 +35,14 @@ func (c *Client) PutObjectLegalHold(ctx context.Context, params *PutObjectLegalH type PutObjectLegalHoldInput struct { // The bucket name containing the object that you want to place a legal hold on. - // When using this action with an access point, you must direct requests to the + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the access + // point ARN. When using the access point ARN, you must direct requests to the // access point hostname. The access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // // This member is required. @@ -52,13 +53,12 @@ type PutObjectLegalHoldInput struct { // This member is required. Key *string - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not using - // the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the - // HTTP status code 400 Bad Request. For more information, see Checking object - // integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. 
This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 // ignores any provided ChecksumAlgorithm parameter. ChecksumAlgorithm types.ChecksumAlgorithm @@ -68,9 +68,9 @@ type PutObjectLegalHoldInput struct { // calculated automatically. ContentMD5 *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Container element for the legal hold configuration you want to apply to the @@ -78,11 +78,13 @@ type PutObjectLegalHoldInput struct { LegalHold *types.ObjectLockLegalHold // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer // The version ID of the object that you want to place a legal hold on. @@ -91,10 +93,15 @@ type PutObjectLegalHoldInput struct { noSmithyDocumentSerde } +func (in *PutObjectLegalHoldInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + +} + type PutObjectLegalHoldOutput struct { // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. 
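As a rough usage sketch for the PutObjectLegalHold surface above, assuming a configured *s3.Client; the helper name and bucket/key values are placeholders, not part of the patch:

```go
package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// placeLegalHold turns a legal hold on for the current object version; set
// VersionId in the input to target a specific version instead.
func placeLegalHold(ctx context.Context, client *s3.Client) error {
	_, err := client.PutObjectLegalHold(ctx, &s3.PutObjectLegalHoldInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),    // placeholder
		LegalHold: &types.ObjectLockLegalHold{
			Status: types.ObjectLockLegalHoldStatusOn,
		},
	})
	return err
}
```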
@@ -104,6 +111,9 @@ type PutObjectLegalHoldOutput struct { } func (c *Client) addOperationPutObjectLegalHoldMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpPutObjectLegalHold{}, middleware.After) if err != nil { return err @@ -112,6 +122,13 @@ func (c *Client) addOperationPutObjectLegalHoldMiddlewares(stack *middleware.Sta if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutObjectLegalHold"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -130,16 +147,13 @@ func (c *Client) addOperationPutObjectLegalHoldMiddlewares(stack *middleware.Sta if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -148,7 +162,10 @@ func (c *Client) addOperationPutObjectLegalHoldMiddlewares(stack *middleware.Sta if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpPutObjectLegalHoldValidationMiddleware(stack); err != nil { @@ -160,6 +177,9 @@ func (c *Client) addOperationPutObjectLegalHoldMiddlewares(stack *middleware.Sta if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addPutObjectLegalHoldInputChecksumMiddlewares(stack, options); err != nil { return err } @@ -178,14 +198,29 @@ func (c *Client) addOperationPutObjectLegalHoldMiddlewares(stack *middleware.Sta if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } return nil } +func (v *PutObjectLegalHoldInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opPutObjectLegalHold(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "PutObjectLegalHold", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go index 9740967a..49425c8f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go @@ -4,6 +4,7 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" @@ -13,21 +14,17 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Places an Object Lock configuration on the specified bucket. The rule specified -// in the Object Lock configuration will be applied by default to every new object -// placed in the specified bucket. For more information, see Locking Objects -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). -// -// * The -// DefaultRetention settings require both a mode and a period. -// -// * The -// DefaultRetention period can be either Days or Years but you must select one. You -// cannot specify Days and Years at the same time. -// -// * You can only enable Object -// Lock for new buckets. If you want to turn on Object Lock for an existing bucket, -// contact Amazon Web Services Support. +// This operation is not supported by directory buckets. Places an Object Lock +// configuration on the specified bucket. The rule specified in the Object Lock +// configuration will be applied by default to every new object placed in the +// specified bucket. For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) +// . +// - The DefaultRetention settings require both a mode and a period. +// - The DefaultRetention period can be either Days or Years but you must select +// one. You cannot specify Days and Years at the same time. +// - You can enable Object Lock for new or existing buckets. For more +// information, see Configuring Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock-configure.html) +// . func (c *Client) PutObjectLockConfiguration(ctx context.Context, params *PutObjectLockConfigurationInput, optFns ...func(*Options)) (*PutObjectLockConfigurationOutput, error) { if params == nil { params = &PutObjectLockConfigurationInput{} @@ -50,13 +47,12 @@ type PutObjectLockConfigurationInput struct { // This member is required. Bucket *string - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not using - // the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the - // HTTP status code 400 Bad Request. For more information, see Checking object - // integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 // ignores any provided ChecksumAlgorithm parameter. 
ChecksumAlgorithm types.ChecksumAlgorithm @@ -66,20 +62,22 @@ type PutObjectLockConfigurationInput struct { // calculated automatically. ContentMD5 *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // The Object Lock configuration that you want to apply to the specified bucket. ObjectLockConfiguration *types.ObjectLockConfiguration // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer // A token to allow Object Lock to be enabled for an existing bucket. @@ -88,10 +86,15 @@ type PutObjectLockConfigurationInput struct { noSmithyDocumentSerde } +func (in *PutObjectLockConfigurationInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + +} + type PutObjectLockConfigurationOutput struct { // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. 
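The DefaultRetention constraints called out in the doc comment above (a mode plus exactly one of Days or Years) look roughly like this from calling code; the bucket name and helper are illustrative placeholders, and this is a sketch rather than part of the vendored patch:

```go
package main

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// setDefaultRetention applies a bucket-level Object Lock rule. DefaultRetention
// needs a mode plus exactly one period field: Days or Years, never both.
func setDefaultRetention(ctx context.Context, client *s3.Client) error {
	_, err := client.PutObjectLockConfiguration(ctx, &s3.PutObjectLockConfigurationInput{
		Bucket: aws.String("example-bucket"), // placeholder
		ObjectLockConfiguration: &types.ObjectLockConfiguration{
			ObjectLockEnabled: types.ObjectLockEnabledEnabled,
			Rule: &types.ObjectLockRule{
				DefaultRetention: &types.DefaultRetention{
					Mode: types.ObjectLockRetentionModeGovernance,
					Days: aws.Int32(30), // Days or Years, not both
				},
			},
		},
	})
	return err
}
```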
@@ -101,6 +104,9 @@ type PutObjectLockConfigurationOutput struct { } func (c *Client) addOperationPutObjectLockConfigurationMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpPutObjectLockConfiguration{}, middleware.After) if err != nil { return err @@ -109,6 +115,13 @@ func (c *Client) addOperationPutObjectLockConfigurationMiddlewares(stack *middle if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutObjectLockConfiguration"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -127,16 +140,13 @@ func (c *Client) addOperationPutObjectLockConfigurationMiddlewares(stack *middle if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -145,7 +155,10 @@ func (c *Client) addOperationPutObjectLockConfigurationMiddlewares(stack *middle if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpPutObjectLockConfigurationValidationMiddleware(stack); err != nil { @@ -157,6 +170,9 @@ func (c *Client) addOperationPutObjectLockConfigurationMiddlewares(stack *middle if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addPutObjectLockConfigurationInputChecksumMiddlewares(stack, options); err != nil { return err } @@ -175,14 +191,29 @@ func (c *Client) addOperationPutObjectLockConfigurationMiddlewares(stack *middle if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } return nil } +func (v *PutObjectLockConfigurationInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opPutObjectLockConfiguration(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "PutObjectLockConfiguration", } } @@ -207,9 +238,9 @@ func addPutObjectLockConfigurationInputChecksumMiddlewares(stack *middleware.Sta }) } -// getPutObjectLockConfigurationBucketMember returns a pointer to string denoting a -// provided bucket member valueand a boolean indicating if the input has a 
modeled -// bucket name, +// getPutObjectLockConfigurationBucketMember returns a pointer to string denoting +// a provided bucket member value and a boolean indicating if the input has a +// modeled bucket name, func getPutObjectLockConfigurationBucketMember(input interface{}) (*string, bool) { in := input.(*PutObjectLockConfigurationInput) if in.Bucket == nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go index c4918f3c..5dfb98f3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go @@ -4,6 +4,7 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" @@ -13,13 +14,12 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Places an Object Retention configuration on an object. For more information, see -// Locking Objects -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). Users or -// accounts require the s3:PutObjectRetention permission in order to place an -// Object Retention configuration on objects. Bypassing a Governance Retention -// configuration requires the s3:BypassGovernanceRetention permission. This action -// is not supported by Amazon S3 on Outposts. +// This operation is not supported by directory buckets. Places an Object +// Retention configuration on an object. For more information, see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) +// . Users or accounts require the s3:PutObjectRetention permission in order to +// place an Object Retention configuration on objects. Bypassing a Governance +// Retention configuration requires the s3:BypassGovernanceRetention permission. +// This functionality is not supported for Amazon S3 on Outposts. func (c *Client) PutObjectRetention(ctx context.Context, params *PutObjectRetentionInput, optFns ...func(*Options)) (*PutObjectRetentionOutput, error) { if params == nil { params = &PutObjectRetentionInput{} @@ -37,14 +37,15 @@ func (c *Client) PutObjectRetention(ctx context.Context, params *PutObjectRetent type PutObjectRetentionInput struct { - // The bucket name that contains the object you want to apply this Object Retention - // configuration to. When using this action with an access point, you must direct - // requests to the access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // The bucket name that contains the object you want to apply this Object + // Retention configuration to. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) // in the Amazon S3 User Guide. // // This member is required. @@ -57,15 +58,14 @@ type PutObjectRetentionInput struct { Key *string // Indicates whether this action should bypass Governance-mode restrictions. - BypassGovernanceRetention bool - - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not using - // the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the - // HTTP status code 400 Bad Request. For more information, see Checking object - // integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + BypassGovernanceRetention *bool + + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 // ignores any provided ChecksumAlgorithm parameter. ChecksumAlgorithm types.ChecksumAlgorithm @@ -75,17 +75,19 @@ type PutObjectRetentionInput struct { // calculated automatically. ContentMD5 *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer // The container element for the Object Retention configuration. 
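A minimal sketch of placing a retention period with the updated, pointerized input (note that BypassGovernanceRetention is now *bool in this diff); the bucket/key values and helper name are placeholders:

```go
package main

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// setRetention locks one object until a fixed date thirty days out.
func setRetention(ctx context.Context, client *s3.Client) error {
	_, err := client.PutObjectRetention(ctx, &s3.PutObjectRetentionInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),    // placeholder
		Retention: &types.ObjectLockRetention{
			Mode:            types.ObjectLockRetentionModeGovernance,
			RetainUntilDate: aws.Time(time.Now().AddDate(0, 0, 30)),
		},
		// After this update the field is *bool; aws.Bool(true) would bypass an
		// existing GOVERNANCE-mode lock (requires s3:BypassGovernanceRetention).
		BypassGovernanceRetention: aws.Bool(false),
	})
	return err
}
```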
@@ -98,10 +100,15 @@ type PutObjectRetentionInput struct { noSmithyDocumentSerde } +func (in *PutObjectRetentionInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + +} + type PutObjectRetentionOutput struct { // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Metadata pertaining to the operation's result. @@ -111,6 +118,9 @@ type PutObjectRetentionOutput struct { } func (c *Client) addOperationPutObjectRetentionMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpPutObjectRetention{}, middleware.After) if err != nil { return err @@ -119,6 +129,13 @@ func (c *Client) addOperationPutObjectRetentionMiddlewares(stack *middleware.Sta if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutObjectRetention"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -137,16 +154,13 @@ func (c *Client) addOperationPutObjectRetentionMiddlewares(stack *middleware.Sta if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -155,7 +169,10 @@ func (c *Client) addOperationPutObjectRetentionMiddlewares(stack *middleware.Sta if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpPutObjectRetentionValidationMiddleware(stack); err != nil { @@ -167,6 +184,9 @@ func (c *Client) addOperationPutObjectRetentionMiddlewares(stack *middleware.Sta if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addPutObjectRetentionInputChecksumMiddlewares(stack, options); err != nil { return err } @@ -185,14 +205,29 @@ func (c *Client) addOperationPutObjectRetentionMiddlewares(stack *middleware.Sta if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } return nil } +func (v *PutObjectRetentionInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opPutObjectRetention(region string) *awsmiddleware.RegisterServiceMetadata { return 
&awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "PutObjectRetention", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go index 43effb9e..8b42d43d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go @@ -4,6 +4,7 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" @@ -13,56 +14,35 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Sets the supplied tag-set to an object that already exists in a bucket. A tag is -// a key-value pair. You can associate tags with an object by sending a PUT request -// against the tagging subresource that is associated with the object. You can -// retrieve tags by sending a GET request. For more information, see -// GetObjectTagging -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html). For -// tagging-related restrictions related to characters and encodings, see Tag -// Restrictions -// (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html). -// Note that Amazon S3 limits the maximum number of tags to 10 tags per object. To -// use this operation, you must have permission to perform the s3:PutObjectTagging -// action. By default, the bucket owner has this permission and can grant this -// permission to others. To put tags of any other version, use the versionId query -// parameter. You also need permission for the s3:PutObjectVersionTagging action. -// For information about the Amazon S3 object tagging feature, see Object Tagging -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). Special -// Errors +// This operation is not supported by directory buckets. Sets the supplied tag-set +// to an object that already exists in a bucket. A tag is a key-value pair. For +// more information, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html) +// . You can associate tags with an object by sending a PUT request against the +// tagging subresource that is associated with the object. You can retrieve tags by +// sending a GET request. For more information, see GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) +// . For tagging-related restrictions related to characters and encodings, see Tag +// Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html) +// . Note that Amazon S3 limits the maximum number of tags to 10 tags per object. +// To use this operation, you must have permission to perform the +// s3:PutObjectTagging action. By default, the bucket owner has this permission and +// can grant this permission to others. To put tags of any other version, use the +// versionId query parameter. You also need permission for the +// s3:PutObjectVersionTagging action. PutObjectTagging has the following special +// errors. For more Amazon S3 errors, see Error Responses (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html) +// . +// - InvalidTag - The tag provided was not a valid tag. This error can occur if +// the tag did not pass input validation.
For more information, see Object +// Tagging (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html) +// . +// - MalformedXML - The XML provided does not match the schema. +// - OperationAborted - A conflicting conditional action is currently in progress +// against this resource. Please try again. +// - InternalError - The service was unable to apply the provided tag to the +// object. // -// * Code: InvalidTagError -// -// * Cause: The tag provided was not a valid tag. -// This error can occur if the tag did not pass input validation. For more -// information, see Object Tagging -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). -// -// * Code: -// MalformedXMLError -// -// * Cause: The XML provided does not match the schema. -// -// * Code: -// OperationAbortedError -// -// * Cause: A conflicting conditional action is currently in -// progress against this resource. Please try again. -// -// * Code: InternalError -// -// * -// Cause: The service was unable to apply the provided tag to the object. -// -// Related -// Resources -// -// * GetObjectTagging -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) -// -// * -// DeleteObjectTagging -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html) +// The following operations are related to PutObjectTagging : +// - GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) +// - DeleteObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html) func (c *Client) PutObjectTagging(ctx context.Context, params *PutObjectTaggingInput, optFns ...func(*Options)) (*PutObjectTaggingOutput, error) { if params == nil { params = &PutObjectTaggingInput{} @@ -80,23 +60,23 @@ func (c *Client) PutObjectTagging(ctx context.Context, params *PutObjectTaggingI type PutObjectTaggingInput struct { - // The bucket name containing the object. When using this action with an access - // point, you must direct requests to the access point hostname. The access point + // The bucket name containing the object. Access points - When you use this action + // with an access point, you must provide the alias of the access point in place of + // the bucket name or specify the access point ARN. When using the access point + // ARN, you must direct requests to the access point hostname. The access point // hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, - // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts - // hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the - // Amazon S3 User Guide. 
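As a usage sketch for the PutObjectTagging operation documented above (the bucket, key, and tag values are hypothetical placeholders, and credentials come from the default configuration chain):

	package main

	import (
		"context"
		"log"

		"github.com/aws/aws-sdk-go-v2/aws"
		"github.com/aws/aws-sdk-go-v2/config"
		"github.com/aws/aws-sdk-go-v2/service/s3"
		"github.com/aws/aws-sdk-go-v2/service/s3/types"
	)

	func main() {
		cfg, err := config.LoadDefaultConfig(context.TODO())
		if err != nil {
			log.Fatal(err)
		}
		client := s3.NewFromConfig(cfg)

		// Replace the tag-set of an existing object. With ChecksumAlgorithm set,
		// the SDK computes and sends the corresponding x-amz-checksum header.
		_, err = client.PutObjectTagging(context.TODO(), &s3.PutObjectTaggingInput{
			Bucket:            aws.String("example-bucket"),
			Key:               aws.String("example-object"),
			ChecksumAlgorithm: types.ChecksumAlgorithmCrc32,
			Tagging: &types.Tagging{TagSet: []types.Tag{
				{Key: aws.String("project"), Value: aws.String("demo")},
			}},
		})
		if err != nil {
			log.Fatal(err)
		}
	}
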
+ // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. S3 on Outposts - When you use this action with + // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. + // The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + // use this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts access point ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // This member is required. Bucket *string @@ -111,13 +91,12 @@ type PutObjectTaggingInput struct { // This member is required. Tagging *types.Tagging - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not using - // the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the - // HTTP status code 400 Bad Request. For more information, see Checking object - // integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 // ignores any provided ChecksumAlgorithm parameter. ChecksumAlgorithm types.ChecksumAlgorithm @@ -127,17 +106,19 @@ type PutObjectTaggingInput struct { // calculated automatically. ContentMD5 *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. 
For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer // The versionId of the object that the tag-set will be added to. @@ -146,6 +127,11 @@ type PutObjectTaggingInput struct { noSmithyDocumentSerde } +func (in *PutObjectTaggingInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + +} + type PutObjectTaggingOutput struct { // The versionId of the object the tag-set was added to. @@ -158,6 +144,9 @@ type PutObjectTaggingOutput struct { } func (c *Client) addOperationPutObjectTaggingMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpPutObjectTagging{}, middleware.After) if err != nil { return err @@ -166,6 +155,13 @@ func (c *Client) addOperationPutObjectTaggingMiddlewares(stack *middleware.Stack if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutObjectTagging"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -184,16 +180,13 @@ func (c *Client) addOperationPutObjectTaggingMiddlewares(stack *middleware.Stack if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -202,7 +195,10 @@ func (c *Client) addOperationPutObjectTaggingMiddlewares(stack *middleware.Stack if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpPutObjectTaggingValidationMiddleware(stack); err != nil { @@ -214,6 +210,9 @@ func (c *Client) addOperationPutObjectTaggingMiddlewares(stack *middleware.Stack if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addPutObjectTaggingInputChecksumMiddlewares(stack, options); err != nil { return err } @@ -232,14 +231,29 @@ func (c *Client) addOperationPutObjectTaggingMiddlewares(stack *middleware.Stack if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } return nil } +func (v *PutObjectTaggingInput) bucket() (string, 
bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opPutObjectTagging(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "PutObjectTagging", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go index 922102b5..ab0b5405 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go @@ -4,44 +4,33 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Creates or modifies the PublicAccessBlock configuration for an Amazon S3 bucket. -// To use this operation, you must have the s3:PutBucketPublicAccessBlock -// permission. For more information about Amazon S3 permissions, see Specifying -// Permissions in a Policy -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). -// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an -// object, it checks the PublicAccessBlock configuration for both the bucket (or -// the bucket that contains the object) and the bucket owner's account. If the +// This operation is not supported by directory buckets. Creates or modifies the +// PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, +// you must have the s3:PutBucketPublicAccessBlock permission. For more +// information about Amazon S3 permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// . When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or +// an object, it checks the PublicAccessBlock configuration for both the bucket +// (or the bucket that contains the object) and the bucket owner's account. If the // PublicAccessBlock configurations are different between the bucket and the // account, Amazon S3 uses the most restrictive combination of the bucket-level and // account-level settings. For more information about when Amazon S3 considers a -// bucket or an object public, see The Meaning of "Public" -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). 
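A matching sketch for the PutPublicAccessBlock operation documented here, reusing client and the aws/types imports from the previous example (the bucket name is again a placeholder):

	// Apply the most restrictive PublicAccessBlock configuration to a bucket.
	_, err = client.PutPublicAccessBlock(context.TODO(), &s3.PutPublicAccessBlockInput{
		Bucket: aws.String("example-bucket"),
		PublicAccessBlockConfiguration: &types.PublicAccessBlockConfiguration{
			BlockPublicAcls:       aws.Bool(true),
			BlockPublicPolicy:     aws.Bool(true),
			IgnorePublicAcls:      aws.Bool(true),
			RestrictPublicBuckets: aws.Bool(true),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
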
-// Related Resources -// -// * GetPublicAccessBlock -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) -// -// * -// DeletePublicAccessBlock -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) -// -// * -// GetBucketPolicyStatus -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html) -// -// * -// Using Amazon S3 Block Public Access -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// bucket or an object public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) +// . The following operations are related to PutPublicAccessBlock : +// - GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// - DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +// - GetBucketPolicyStatus (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html) +// - Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) func (c *Client) PutPublicAccessBlock(ctx context.Context, params *PutPublicAccessBlockInput, optFns ...func(*Options)) (*PutPublicAccessBlockOutput, error) { if params == nil { params = &PutPublicAccessBlockInput{} @@ -68,20 +57,18 @@ type PutPublicAccessBlockInput struct { // The PublicAccessBlock configuration that you want to apply to this Amazon S3 // bucket. You can enable the configuration options in any combination. For more // information about when Amazon S3 considers a bucket or object public, see The - // Meaning of "Public" - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) + // Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) // in the Amazon S3 User Guide. // // This member is required. PublicAccessBlockConfiguration *types.PublicAccessBlockConfiguration - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not using - // the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the - // HTTP status code 400 Bad Request. For more information, see Checking object - // integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 // ignores any provided ChecksumAlgorithm parameter. 
ChecksumAlgorithm types.ChecksumAlgorithm @@ -91,14 +78,19 @@ type PutPublicAccessBlockInput struct { // SDKs, this field is calculated automatically. ContentMD5 *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string noSmithyDocumentSerde } +func (in *PutPublicAccessBlockInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.UseS3ExpressControlEndpoint = ptr.Bool(true) +} + type PutPublicAccessBlockOutput struct { // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata @@ -107,6 +99,9 @@ type PutPublicAccessBlockOutput struct { } func (c *Client) addOperationPutPublicAccessBlockMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpPutPublicAccessBlock{}, middleware.After) if err != nil { return err @@ -115,6 +110,13 @@ func (c *Client) addOperationPutPublicAccessBlockMiddlewares(stack *middleware.S if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutPublicAccessBlock"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -133,16 +135,13 @@ func (c *Client) addOperationPutPublicAccessBlockMiddlewares(stack *middleware.S if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -151,7 +150,10 @@ func (c *Client) addOperationPutPublicAccessBlockMiddlewares(stack *middleware.S if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpPutPublicAccessBlockValidationMiddleware(stack); err != nil { @@ -163,6 +165,9 @@ func (c *Client) addOperationPutPublicAccessBlockMiddlewares(stack *middleware.S if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addPutPublicAccessBlockInputChecksumMiddlewares(stack, options); err != nil { return err } @@ -181,14 +186,29 @@ func (c *Client) addOperationPutPublicAccessBlockMiddlewares(stack *middleware.S if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = 
addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } + if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil { + return err + } return nil } +func (v *PutPublicAccessBlockInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opPutPublicAccessBlock(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "PutPublicAccessBlock", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go index 45a2aee6..a84b5326 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go @@ -4,6 +4,7 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum" @@ -13,224 +14,154 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" ) -// Restores an archived copy of an object back into Amazon S3 This action is not -// supported by Amazon S3 on Outposts. This action performs the following types of -// requests: +// This operation is not supported by directory buckets. Restores an archived copy +// of an object back into Amazon S3. This functionality is not supported for Amazon +// S3 on Outposts. This action performs the following types of requests: +// - select - Perform a select query on an archived object +// - restore an archive - Restore an archived object // -// * select - Perform a select query on an archived object -// -// * restore an -// archive - Restore an archived object -// -// To use this operation, you must have -// permissions to perform the s3:RestoreObject action. The bucket owner has this -// permission by default and can grant this permission to others. For more -// information about permissions, see Permissions Related to Bucket Subresource -// Operations -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// in the Amazon S3 User Guide. Querying Archives with Select Requests You use a -// select type of request to perform SQL queries on archived objects. The archived -// objects that are being queried by the select request must be formatted as -// uncompressed comma-separated values (CSV) files. You can run queries and custom -// analytics on your archived data without having to restore your data to a hotter -// Amazon S3 tier. For an overview about select requests, see Querying Archived -// Objects -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html) -// in the Amazon S3 User Guide. When making a select request, do the following: -// -// * -// Define an output location for the select query's output. This must be an Amazon -// S3 bucket in the same Amazon Web Services Region as the bucket that contains the -// archive object that is being queried. The Amazon Web Services account that -// initiates the job must have permissions to write to the S3 bucket.
You can -// specify the storage class and encryption for the output objects stored in the -// bucket. For more information about output, see Querying Archived Objects -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html) -// in the Amazon S3 User Guide. For more information about the S3 structure in the -// request body, see the following: -// -// * PutObject -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) -// -// * Managing -// Access with ACLs -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) in the -// Amazon S3 User Guide -// -// * Protecting Data Using Server-Side Encryption -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) in -// the Amazon S3 User Guide -// -// * Define the SQL expression for the SELECT type of -// restoration for your query in the request body's SelectParameters structure. You -// can use expressions like the following examples. -// -// * The following expression -// returns all records from the specified object. SELECT * FROM Object -// -// * Assuming -// that you are not using any headers for data stored in the object, you can -// specify columns with positional headers. SELECT s._1, s._2 FROM Object s WHERE -// s._3 > 100 -// -// * If you have headers and you set the fileHeaderInfo in the CSV -// structure in the request body to USE, you can specify headers in the query. (If -// you set the fileHeaderInfo field to IGNORE, the first row is skipped for the -// query.) You cannot mix ordinal positions with header column names. SELECT s.Id, -// s.FirstName, s.SSN FROM S3Object s -// -// For more information about using SQL with S3 -// Glacier Select restore, see SQL Reference for Amazon S3 Select and S3 Glacier -// Select -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) -// in the Amazon S3 User Guide. When making a select request, you can also do the +// For more information about the S3 structure in the request body, see the // following: -// -// * To expedite your queries, specify the Expedited tier. For more -// information about tiers, see "Restoring Archives," later in this topic. -// -// * -// Specify details about the data serialization format of both the input object -// that is being queried and the serialization of the CSV-encoded query -// results. -// -// The following are additional important facts about the select -// feature: -// -// * The output results are new Amazon S3 objects. Unlike archive -// retrievals, they are stored until explicitly deleted-manually or through a -// lifecycle policy. -// -// * You can issue more than one select request on the same -// Amazon S3 object. Amazon S3 doesn't deduplicate requests, so avoid issuing -// duplicate requests. -// -// * Amazon S3 accepts a select request even if the object has -// already been restored. A select request doesn’t return error response -// 409. -// -// Restoring objects Objects that you archive to the S3 Glacier or S3 Glacier -// Deep Archive storage class, and S3 Intelligent-Tiering Archive or S3 -// Intelligent-Tiering Deep Archive tiers are not accessible in real time. 
For -// objects in Archive Access or Deep Archive Access tiers you must first initiate a +// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// - Managing Access with ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) +// in the Amazon S3 User Guide +// - Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) +// in the Amazon S3 User Guide +// +// Define the SQL expression for the SELECT type of restoration for your query in +// the request body's SelectParameters structure. You can use expressions like the +// following examples. +// - The following expression returns all records from the specified object. +// SELECT * FROM Object +// - Assuming that you are not using any headers for data stored in the object, +// you can specify columns with positional headers. SELECT s._1, s._2 FROM +// Object s WHERE s._3 > 100 +// - If you have headers and you set the fileHeaderInfo in the CSV structure in +// the request body to USE , you can specify headers in the query. (If you set +// the fileHeaderInfo field to IGNORE , the first row is skipped for the query.) +// You cannot mix ordinal positions with header column names. SELECT s.Id, +// s.FirstName, s.SSN FROM S3Object s +// +// When making a select request, you can also do the following: +// - To expedite your queries, specify the Expedited tier. For more information +// about tiers, see "Restoring Archives," later in this topic. +// - Specify details about the data serialization format of both the input +// object that is being queried and the serialization of the CSV-encoded query +// results. +// +// The following are additional important facts about the select feature: +// - The output results are new Amazon S3 objects. Unlike archive retrievals, +// they are stored until explicitly deleted, manually or through a lifecycle +// configuration. +// - You can issue more than one select request on the same Amazon S3 object. +// Amazon S3 doesn't deduplicate requests, so avoid issuing duplicate requests. +// - Amazon S3 accepts a select request even if the object has already been +// restored. A select request doesn’t return error response 409 . +// +// Permissions To use this operation, you must have permissions to perform the +// s3:RestoreObject action. The bucket owner has this permission by default and can +// grant this permission to others. For more information about permissions, see +// Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. Restoring objects Objects that you archive to the +// S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive +// storage class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep +// Archive tiers, are not accessible in real time. For objects in the S3 Glacier +// Flexible Retrieval or S3 Glacier Deep Archive storage +// classes, you must first initiate a restore request, and then wait until a +// temporary copy of the object is available. If you want a permanent copy of the +// object, create a copy of it in the Amazon S3 Standard storage class in your S3 +// bucket.
To access an archived object, you must restore the object for the +// duration (number of days) that you specify. For objects in the Archive Access or +// Deep Archive Access tiers of S3 Intelligent-Tiering, you must first initiate a // restore request, and then wait until the object is moved into the Frequent -// Access tier. For objects in S3 Glacier or S3 Glacier Deep Archive storage -// classes you must first initiate a restore request, and then wait until a -// temporary copy of the object is available. To access an archived object, you -// must restore the object for the duration (number of days) that you specify. To -// restore a specific object version, you can provide a version ID. If you don't -// provide a version ID, Amazon S3 restores the current version. When restoring an -// archived object (or using a select request), you can specify one of the -// following data access tier options in the Tier element of the request body: -// -// * -// Expedited - Expedited retrievals allow you to quickly access your data stored in -// the S3 Glacier storage class or S3 Intelligent-Tiering Archive tier when -// occasional urgent requests for a subset of archives are required. For all but -// the largest archived objects (250 MB+), data accessed using Expedited retrievals -// is typically made available within 1–5 minutes. Provisioned capacity ensures -// that retrieval capacity for Expedited retrievals is available when you need it. -// Expedited retrievals and provisioned capacity are not available for objects -// stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering -// Deep Archive tier. -// -// * Standard - Standard retrievals allow you to access any of -// your archived objects within several hours. This is the default option for -// retrieval requests that do not specify the retrieval option. Standard retrievals -// typically finish within 3–5 hours for objects stored in the S3 Glacier storage -// class or S3 Intelligent-Tiering Archive tier. They typically finish within 12 -// hours for objects stored in the S3 Glacier Deep Archive storage class or S3 -// Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects -// stored in S3 Intelligent-Tiering. -// -// * Bulk - Bulk retrievals are the lowest-cost -// retrieval option in S3 Glacier, enabling you to retrieve large amounts, even -// petabytes, of data inexpensively. Bulk retrievals typically finish within 5–12 -// hours for objects stored in the S3 Glacier storage class or S3 -// Intelligent-Tiering Archive tier. They typically finish within 48 hours for -// objects stored in the S3 Glacier Deep Archive storage class or S3 -// Intelligent-Tiering Deep Archive tier. Bulk retrievals are free for objects -// stored in S3 Intelligent-Tiering. -// -// For more information about archive retrieval -// options and provisioned capacity for Expedited data access, see Restoring -// Archived Objects -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) in the -// Amazon S3 User Guide. You can use Amazon S3 restore speed upgrade to change the -// restore speed to a faster speed while it is in progress. For more information, -// see Upgrading the speed of an in-progress restore -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html) +// Access tier. To restore a specific object version, you can provide a version ID. +// If you don't provide a version ID, Amazon S3 restores the current version. 
When +// restoring an archived object, you can specify one of the following data access +// tier options in the Tier element of the request body: +// - Expedited - Expedited retrievals allow you to quickly access your data +// stored in the S3 Glacier Flexible Retrieval storage class or +// S3 Intelligent-Tiering Archive tier when occasional urgent requests for +// restoring archives are required. For all but the largest archived objects (250 +// MB+), data accessed using Expedited retrievals is typically made available +// within 1–5 minutes. Provisioned capacity ensures that retrieval capacity for +// Expedited retrievals is available when you need it. Expedited retrievals and +// provisioned capacity are not available for objects stored in the S3 Glacier Deep +// Archive storage class or S3 Intelligent-Tiering Deep Archive tier. +// - Standard - Standard retrievals allow you to access any of your archived +// objects within several hours. This is the default option for retrieval requests +// that do not specify the retrieval option. Standard retrievals typically finish +// within 3–5 hours for objects stored in the S3 Glacier Flexible Retrieval +// storage class or S3 Intelligent-Tiering Archive tier. They +// typically finish within 12 hours for objects stored in the S3 Glacier Deep +// Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard +// retrievals are free for objects stored in S3 Intelligent-Tiering. +// - Bulk - Bulk retrievals are free for objects stored in the S3 Glacier Flexible +// Retrieval and S3 Intelligent-Tiering storage classes, enabling you to retrieve +// large amounts, even petabytes, of data at no cost. Bulk retrievals typically +// finish within 5–12 hours for objects stored in the S3 Glacier Flexible Retrieval +// storage class or S3 Intelligent-Tiering Archive tier. Bulk +// retrievals are also the lowest-cost retrieval option when restoring objects from +// S3 Glacier Deep Archive. They typically finish within 48 hours for objects +// stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering +// Deep Archive tier. +// +// For more information about archive retrieval options and provisioned capacity +// for Expedited data access, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) +// in the Amazon S3 User Guide. You can use Amazon S3 restore speed upgrade to +// change the restore speed to a faster speed while it is in progress. For more +// information, see Upgrading the speed of an in-progress restore (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html)
You cannot update the restoration -// period when Amazon S3 is actively processing your current restore request for -// the object. If your bucket has a lifecycle configuration with a rule that -// includes an expiration action, the object expiration overrides the life span -// that you specify in a restore request. For example, if you restore an object -// copy for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 -// deletes the object in 3 days. For more information about lifecycle -// configuration, see PutBucketLifecycleConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) -// and Object Lifecycle Management -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) in -// Amazon S3 User Guide. Responses A successful action returns either the 200 OK or -// 202 Accepted status code. -// -// * If the object is not previously restored, then -// Amazon S3 returns 202 Accepted in the response. -// -// * If the object is previously -// restored, Amazon S3 returns 200 OK in the response. -// -// # Special Errors +// more information, see Configuring Amazon S3 Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) +// in the Amazon S3 User Guide. After restoring an archived object, you can update +// the restoration period by reissuing the request with a new period. Amazon S3 +// updates the restoration period relative to the current time and charges only for +// the request; there are no data transfer charges. You cannot update the +// restoration period when Amazon S3 is actively processing your current restore +// request for the object. If your bucket has a lifecycle configuration with a rule +// that includes an expiration action, the object expiration overrides the life +// span that you specify in a restore request. For example, if you restore an +// object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon +// S3 deletes the object in 3 days. For more information about lifecycle +// configuration, see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) +// and Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) +// in the Amazon S3 User Guide. Responses A successful action returns either the 200 OK +// or 202 Accepted status code. // -// * Code: -// RestoreAlreadyInProgress +// - If the object is not previously restored, then Amazon S3 returns 202 +// Accepted in the response. // -// * Cause: Object restore is already in progress. (This -// error does not apply to SELECT type requests.) +// - If the object is previously restored, Amazon S3 returns 200 OK in the +// response. // -// * HTTP Status Code: 409 -// Conflict +// - Special errors: // -// * SOAP Fault Code Prefix: Client +// - Code: RestoreAlreadyInProgress // -// * Code: -// GlacierExpeditedRetrievalNotAvailable +// - Cause: Object restore is already in progress. (This error does not apply to +// SELECT type requests.) // -// * Cause: expedited retrievals are -// currently not available. Try again later. (Returned if there is insufficient -// capacity to process the Expedited request. This error applies only to Expedited -// retrievals and not to S3 Standard or Bulk retrievals.)
+// - HTTP Status Code: 409 Conflict // -// * HTTP Status Code: -// 503 +// - SOAP Fault Code Prefix: Client // -// * SOAP Fault Code Prefix: N/A +// - Code: GlacierExpeditedRetrievalNotAvailable // -// # Related Resources +// - Cause: expedited retrievals are currently not available. Try again later. +// (Returned if there is insufficient capacity to process the Expedited request. +// This error applies only to Expedited retrievals and not to S3 Standard or Bulk +// retrievals.) // -// * -// PutBucketLifecycleConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) +// - HTTP Status Code: 503 // -// * -// GetBucketNotificationConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) +// - SOAP Fault Code Prefix: N/A // -// * -// SQL Reference for Amazon S3 Select and S3 Glacier Select -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) -// in the Amazon S3 User Guide +// The following operations are related to RestoreObject : +// - PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) +// - GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) func (c *Client) RestoreObject(ctx context.Context, params *RestoreObjectInput, optFns ...func(*Options)) (*RestoreObjectOutput, error) { if params == nil { params = &RestoreObjectInput{} @@ -248,23 +179,23 @@ func (c *Client) RestoreObject(ctx context.Context, params *RestoreObjectInput, type RestoreObjectInput struct { - // The bucket name containing the object to restore. When using this action with an - // access point, you must direct requests to the access point hostname. The access - // point hostname takes the form + // The bucket name containing the object to restore. Access points - When you use + // this action with an access point, you must provide the alias of the access point + // in place of the bucket name or specify the access point ARN. When using the + // access point ARN, you must direct requests to the access point hostname. The + // access point hostname takes the form // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this // action with an access point through the Amazon Web Services SDKs, you provide // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, - // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts - // hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the - // Amazon S3 User Guide. + // access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. S3 on Outposts - When you use this action with + // Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. 
+ // The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + // use this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts access point ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // This member is required. Bucket *string @@ -274,28 +205,29 @@ type RestoreObjectInput struct { // This member is required. Key *string - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not using - // the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the - // HTTP status code 400 Bad Request. For more information, see Checking object - // integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Indicates the algorithm used to create the checksum for the object when you use + // the SDK. This header will not provide any additional functionality if you don't + // use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request . For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3 // ignores any provided ChecksumAlgorithm parameter. ChecksumAlgorithm types.ChecksumAlgorithm - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer // Container for restore job parameters. 
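A sketch of the restore request described above, combining the Days element with a Tier from the tier options in the doc comment (bucket and key names are placeholders; client as in the earlier examples):

	// Restore a temporary copy of an archived object for two days via the
	// low-cost Bulk tier. For Requester Pays buckets, additionally set
	// RequestPayer: types.RequestPayerRequester.
	_, err = client.RestoreObject(context.TODO(), &s3.RestoreObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("archived-object"),
		RestoreRequest: &types.RestoreRequest{
			Days:                 aws.Int32(2),
			GlacierJobParameters: &types.GlacierJobParameters{Tier: types.TierBulk},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
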
@@ -307,10 +239,15 @@ type RestoreObjectInput struct { noSmithyDocumentSerde } +func (in *RestoreObjectInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + +} + type RestoreObjectOutput struct { // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged // Indicates the path in the provided S3 output location where Select results will @@ -324,6 +261,9 @@ type RestoreObjectOutput struct { } func (c *Client) addOperationRestoreObjectMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpRestoreObject{}, middleware.After) if err != nil { return err @@ -332,6 +272,13 @@ func (c *Client) addOperationRestoreObjectMiddlewares(stack *middleware.Stack, o if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "RestoreObject"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -350,16 +297,13 @@ func (c *Client) addOperationRestoreObjectMiddlewares(stack *middleware.Stack, o if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -368,7 +312,10 @@ func (c *Client) addOperationRestoreObjectMiddlewares(stack *middleware.Stack, o if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpRestoreObjectValidationMiddleware(stack); err != nil { @@ -380,6 +327,9 @@ func (c *Client) addOperationRestoreObjectMiddlewares(stack *middleware.Stack, o if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addRestoreObjectInputChecksumMiddlewares(stack, options); err != nil { return err } @@ -398,20 +348,32 @@ func (c *Client) addOperationRestoreObjectMiddlewares(stack *middleware.Stack, o if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *RestoreObjectInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opRestoreObject(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", 
OperationName: "RestoreObject", } } -// getRestoreObjectRequestAlgorithmMember gets the request checksum algorithm value -// provided as input. +// getRestoreObjectRequestAlgorithmMember gets the request checksum algorithm +// value provided as input. func getRestoreObjectRequestAlgorithmMember(input interface{}) (string, bool) { in := input.(*RestoreObjectInput) if len(in.ChecksumAlgorithm) == 0 { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_SelectObjectContent.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_SelectObjectContent.go index 1b9dbc7b..888ec9c2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_SelectObjectContent.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_SelectObjectContent.go @@ -4,6 +4,7 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" @@ -14,93 +15,67 @@ import ( "sync" ) -// This action filters the contents of an Amazon S3 object based on a simple -// structured query language (SQL) statement. In the request, along with the SQL -// expression, you must also specify a data serialization format (JSON, CSV, or -// Apache Parquet) of the object. Amazon S3 uses this format to parse object data -// into records, and returns only records that match the specified SQL expression. -// You must also specify the data serialization format for the response. This -// action is not supported by Amazon S3 on Outposts. For more information about -// Amazon S3 Select, see Selecting Content from Objects -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html) -// and SELECT Command -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-glacier-select-sql-reference-select.html) -// in the Amazon S3 User Guide. For more information about using SQL with Amazon S3 -// Select, see SQL Reference for Amazon S3 Select and S3 Glacier Select -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) -// in the Amazon S3 User Guide. Permissions You must have s3:GetObject permission -// for this operation. Amazon S3 Select does not support anonymous access. For more -// information about permissions, see Specifying Permissions in a Policy -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) in -// the Amazon S3 User Guide. Object Data Formats You can use Amazon S3 Select to +// This operation is not supported by directory buckets. This action filters the +// contents of an Amazon S3 object based on a simple structured query language +// (SQL) statement. In the request, along with the SQL expression, you must also +// specify a data serialization format (JSON, CSV, or Apache Parquet) of the +// object. Amazon S3 uses this format to parse object data into records, and +// returns only records that match the specified SQL expression. You must also +// specify the data serialization format for the response. This functionality is +// not supported for Amazon S3 on Outposts. For more information about Amazon S3 +// Select, see Selecting Content from Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html) +// and SELECT Command (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-glacier-select-sql-reference-select.html) +// in the Amazon S3 User Guide. Permissions You must have the s3:GetObject +// permission for this operation. 
Amazon S3 Select does not support anonymous +// access. For more information about permissions, see Specifying Permissions in a +// Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// in the Amazon S3 User Guide. Object Data Formats You can use Amazon S3 Select to // query objects that have the following format properties: +// - CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format. +// - UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports. +// - GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. +// GZIP and BZIP2 are the only compression formats that Amazon S3 Select supports +// for CSV and JSON files. Amazon S3 Select supports columnar compression for +// Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object +// compression for Parquet objects. +// - Server-side encryption - Amazon S3 Select supports querying objects that +// are protected with server-side encryption. For objects that are encrypted with +// customer-provided encryption keys (SSE-C), you must use HTTPS, and you must use +// the headers that are documented in the GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// . For more information about SSE-C, see Server-Side Encryption (Using +// Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) +// in the Amazon S3 User Guide. For objects that are encrypted with Amazon S3 +// managed keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS), server-side +// encryption is handled transparently, so you don't need to specify anything. For +// more information about server-side encryption, including SSE-S3 and SSE-KMS, see +// Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) +// in the Amazon S3 User Guide. // -// * CSV, JSON, and -// Parquet - Objects must be in CSV, JSON, or Parquet format. +// Working with the Response Body Given the response size is unknown, Amazon S3 +// Select streams the response as a series of messages and includes a +// Transfer-Encoding header with chunked as its value in the response. For more +// information, see Appendix: SelectObjectContent Response (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTSelectObjectAppendix.html) +// . GetObject Support The SelectObjectContent action does not support the +// following GetObject functionality. For more information, see GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// . +// - Range : Although you can specify a scan range for an Amazon S3 Select +// request (see SelectObjectContentRequest - ScanRange (https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html#AmazonS3-SelectObjectContent-request-ScanRange) +// in the request parameters), you cannot specify the range of bytes of an object +// to return. +// - The GLACIER , DEEP_ARCHIVE , and REDUCED_REDUNDANCY storage classes, or the +// ARCHIVE_ACCESS and DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING +// storage class: You cannot query objects in the GLACIER , DEEP_ARCHIVE , or +// REDUCED_REDUNDANCY storage classes, nor objects in the ARCHIVE_ACCESS or +// DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING storage class. 
For +// more information about storage classes, see Using Amazon S3 storage classes (https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html) +// in the Amazon S3 User Guide. // -// * UTF-8 - UTF-8 is -// the only encoding type Amazon S3 Select supports. -// -// * GZIP or BZIP2 - CSV and -// JSON files can be compressed using GZIP or BZIP2. GZIP and BZIP2 are the only -// compression formats that Amazon S3 Select supports for CSV and JSON files. -// Amazon S3 Select supports columnar compression for Parquet using GZIP or Snappy. -// Amazon S3 Select does not support whole-object compression for Parquet -// objects. -// -// * Server-side encryption - Amazon S3 Select supports querying objects -// that are protected with server-side encryption. For objects that are encrypted -// with customer-provided encryption keys (SSE-C), you must use HTTPS, and you must -// use the headers that are documented in the GetObject -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html). For more -// information about SSE-C, see Server-Side Encryption (Using Customer-Provided -// Encryption Keys) -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) -// in the Amazon S3 User Guide. For objects that are encrypted with Amazon S3 -// managed encryption keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS), -// server-side encryption is handled transparently, so you don't need to specify -// anything. For more information about server-side encryption, including SSE-S3 -// and SSE-KMS, see Protecting Data Using Server-Side Encryption -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) in -// the Amazon S3 User Guide. -// -// Working with the Response Body Given the response -// size is unknown, Amazon S3 Select streams the response as a series of messages -// and includes a Transfer-Encoding header with chunked as its value in the -// response. For more information, see Appendix: SelectObjectContent Response -// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTSelectObjectAppendix.html). -// GetObject Support The SelectObjectContent action does not support the following -// GetObject functionality. For more information, see GetObject -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html). -// -// * Range: -// Although you can specify a scan range for an Amazon S3 Select request (see -// SelectObjectContentRequest - ScanRange -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html#AmazonS3-SelectObjectContent-request-ScanRange) -// in the request parameters), you cannot specify the range of bytes of an object -// to return. -// -// * GLACIER, DEEP_ARCHIVE and REDUCED_REDUNDANCY storage classes: You -// cannot specify the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes. -// For more information, about storage classes see Storage Classes -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html#storage-class-intro) -// in the Amazon S3 User Guide. 
-// -// Special Errors For a list of special errors for -// this operation, see List of SELECT Object Content Error Codes -// (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#SelectObjectContentErrorCodeList) -// Related Resources -// -// * GetObject -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) -// -// * -// GetBucketLifecycleConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) -// -// * -// PutBucketLifecycleConfiguration -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) +// Special Errors For a list of special errors for this operation, see List of +// SELECT Object Content Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#SelectObjectContentErrorCodeList) +// The following operations are related to SelectObjectContent : +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// - GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) +// - PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) func (c *Client) SelectObjectContent(ctx context.Context, params *SelectObjectContentInput, optFns ...func(*Options)) (*SelectObjectContentOutput, error) { if params == nil { params = &SelectObjectContentInput{} @@ -122,8 +97,8 @@ func (c *Client) SelectObjectContent(ctx context.Context, params *SelectObjectCo // object. Amazon S3 uses this to parse object data into records. It returns only // records that match the specified SQL expression. You must also specify the data // serialization format for the response. For more information, see S3Select API -// Documentation -// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html). +// Documentation (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html) +// . type SelectObjectContentInput struct { // The S3 bucket. @@ -156,9 +131,9 @@ type SelectObjectContentInput struct { // This member is required. OutputSerialization *types.OutputSerialization - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Specifies if periodic request progress information should be enabled. @@ -166,44 +141,41 @@ type SelectObjectContentInput struct { // The server-side encryption (SSE) algorithm used to encrypt the object. This // parameter is needed only when the object was created using a checksum algorithm. - // For more information, see Protecting data using SSE-C keys - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // For more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) // in the Amazon S3 User Guide. SSECustomerAlgorithm *string // The server-side encryption (SSE) customer managed key. This parameter is needed // only when the object was created using a checksum algorithm. 
For more
-	// information, see Protecting data using SSE-C keys
-	// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
+	// information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
	// in the Amazon S3 User Guide.
	SSECustomerKey *string

	// The MD5 server-side encryption (SSE) customer managed key. This parameter is
	// needed only when the object was created using a checksum algorithm. For more
-	// information, see Protecting data using SSE-C keys
-	// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
+	// information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
	// in the Amazon S3 User Guide.
	SSECustomerKeyMD5 *string

	// Specifies the byte range of the object to get the records from. A record is
	// processed when its first byte is contained by the range. This parameter is
	// optional, but when specified, it must not be empty. See RFC 2616, Section
-	// 14.35.1 about how to specify the start and end of the range. ScanRangemay be
+	// 14.35.1 about how to specify the start and end of the range. ScanRange may be
	// used in the following ways:
-	//
-	// * 50100 - process only the records starting between
-	// the bytes 50 and 100 (inclusive, counting from zero)
-	//
-	// * 50 - process only the
-	// records starting after the byte 50
-	//
-	// * 50 - process only the records within the
-	// last 50 bytes of the file.
+	//   - <scanrange><start>50</start><end>100</end></scanrange> - process only
+	//   the records starting between the bytes 50 and 100 (inclusive, counting
+	//   from zero)
+	//   - <scanrange><start>50</start></scanrange> - process only the records
+	//   starting after the byte 50
+	//   - <scanrange><end>50</end></scanrange> - process only the records within
+	//   the last 50 bytes of the file.
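A minimal, hypothetical sketch of a SelectObjectContent call that uses the ScanRange forms listed above to process only bytes 50 through 100. The bucket and object names are placeholders, and the pointer-valued Start/End fields assume the pointer types this dependency update introduces:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	out, err := client.SelectObjectContent(context.TODO(), &s3.SelectObjectContentInput{
		Bucket:         aws.String("my-bucket"), // placeholder
		Key:            aws.String("data.csv"),  // placeholder
		Expression:     aws.String("SELECT * FROM S3Object s"),
		ExpressionType: types.ExpressionTypeSql,
		InputSerialization: &types.InputSerialization{
			CSV: &types.CSVInput{FileHeaderInfo: types.FileHeaderInfoUse},
		},
		OutputSerialization: &types.OutputSerialization{
			CSV: &types.CSVOutput{},
		},
		// Process only records whose first byte lies between bytes 50 and 100.
		ScanRange: &types.ScanRange{Start: aws.Int64(50), End: aws.Int64(100)},
	})
	if err != nil {
		log.Fatal(err)
	}

	// The response arrives as a series of event-stream messages; drain and
	// close the stream, then check Err for any stream-level failure.
	stream := out.GetStream()
	defer stream.Close()
	for event := range stream.Events() {
		if records, ok := event.(*types.SelectObjectContentEventStreamMemberRecords); ok {
			fmt.Print(string(records.Value.Payload))
		}
	}
	if err := stream.Err(); err != nil {
		log.Fatal(err)
	}
}
```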
ScanRange *types.ScanRange noSmithyDocumentSerde } +func (in *SelectObjectContentInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + +} + type SelectObjectContentOutput struct { eventStream *SelectObjectContentEventStream @@ -219,6 +191,9 @@ func (o *SelectObjectContentOutput) GetStream() *SelectObjectContentEventStream } func (c *Client) addOperationSelectObjectContentMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpSelectObjectContent{}, middleware.After) if err != nil { return err @@ -227,6 +202,13 @@ func (c *Client) addOperationSelectObjectContentMiddlewares(stack *middleware.St if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "SelectObjectContent"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addEventStreamSelectObjectContentMiddleware(stack, options); err != nil { return err } @@ -248,19 +230,19 @@ func (c *Client) addOperationSelectObjectContentMiddlewares(stack *middleware.St if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpSelectObjectContentValidationMiddleware(stack); err != nil { @@ -272,6 +254,9 @@ func (c *Client) addOperationSelectObjectContentMiddlewares(stack *middleware.St if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addSelectObjectContentUpdateEndpoint(stack, options); err != nil { return err } @@ -287,14 +272,26 @@ func (c *Client) addOperationSelectObjectContentMiddlewares(stack *middleware.St if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *SelectObjectContentInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opSelectObjectContent(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "SelectObjectContent", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go index 18d30a10..53507fba 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go @@ -4,6 +4,7 @@ package s3 import ( "context" + "fmt" 
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" @@ -15,104 +16,99 @@ import ( "io" ) -// Uploads a part in a multipart upload. In this operation, you provide part data -// in your request. However, you have an option to specify your existing Amazon S3 -// object as a data source for the part you are uploading. To upload a part from an -// existing object, you use the UploadPartCopy -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) -// operation. You must initiate a multipart upload (see CreateMultipartUpload -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)) -// before you can upload any part. In response to your initiate request, Amazon S3 -// returns an upload ID, a unique identifier, that you must include in your upload -// part request. Part numbers can be any number from 1 to 10,000, inclusive. A part -// number uniquely identifies a part and also defines its position within the -// object being created. If you upload a new part using the same part number that -// was used with a previous part, the previously uploaded part is overwritten. For -// information about maximum and minimum part sizes and other multipart upload -// specifications, see Multipart upload limits -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html) in the -// Amazon S3 User Guide. To ensure that data is not corrupted when traversing the -// network, specify the Content-MD5 header in the upload part request. Amazon S3 -// checks the part data against the provided MD5 value. If they do not match, -// Amazon S3 returns an error. If the upload request is signed with Signature -// Version 4, then Amazon Web Services S3 uses the x-amz-content-sha256 header as a -// checksum instead of Content-MD5. For more information see Authenticating -// Requests: Using the Authorization Header (Amazon Web Services Signature Version -// 4) -// (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html). -// Note: After you initiate multipart upload and upload one or more parts, you must -// either complete or abort multipart upload in order to stop getting charged for -// storage of the uploaded parts. Only after you either complete or abort multipart -// upload, Amazon S3 frees up the parts storage and stops charging you for the -// parts storage. For more information on multipart uploads, go to Multipart Upload -// Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in -// the Amazon S3 User Guide . For information on the permissions required to use -// the multipart upload API, go to Multipart Upload and Permissions -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) in the -// Amazon S3 User Guide. You can optionally request server-side encryption where -// Amazon S3 encrypts your data as it writes it to disks in its data centers and -// decrypts it for you when you access it. You have the option of providing your -// own encryption key, or you can use the Amazon Web Services managed encryption -// keys. If you choose to provide your own encryption key, the request headers you -// provide in the request must match the headers you used in the request to -// initiate the upload by using CreateMultipartUpload -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html). 
-// For more information, go to Using Server-Side Encryption -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) -// in the Amazon S3 User Guide. Server-side encryption is supported by the S3 -// Multipart Upload actions. Unless you are using a customer-provided encryption -// key, you don't need to specify the encryption parameters in each UploadPart -// request. Instead, you only need to specify the server-side encryption parameters -// in the initial Initiate Multipart request. For more information, see -// CreateMultipartUpload -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html). -// If you requested server-side encryption using a customer-provided encryption key -// in your initiate multipart upload request, you must provide identical encryption -// information in each part upload using the following headers. +// Uploads a part in a multipart upload. In this operation, you provide new data +// as a part of an object in your request. However, you have an option to specify +// your existing Amazon S3 object as a data source for the part you are uploading. +// To upload a part from an existing object, you use the UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) +// operation. You must initiate a multipart upload (see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// ) before you can upload any part. In response to your initiate request, Amazon +// S3 returns an upload ID, a unique identifier that you must include in your +// upload part request. Part numbers can be any number from 1 to 10,000, inclusive. +// A part number uniquely identifies a part and also defines its position within +// the object being created. If you upload a new part using the same part number +// that was used with a previous part, the previously uploaded part is overwritten. +// For information about maximum and minimum part sizes and other multipart upload +// specifications, see Multipart upload limits (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html) +// in the Amazon S3 User Guide. After you initiate multipart upload and upload one +// or more parts, you must either complete or abort multipart upload in order to +// stop getting charged for storage of the uploaded parts. Only after you either +// complete or abort multipart upload, Amazon S3 frees up the parts storage and +// stops charging you for the parts storage. For more information on multipart +// uploads, go to Multipart Upload Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) +// in the Amazon S3 User Guide . Directory buckets - For directory buckets, you +// must make requests for this API operation to the Zonal endpoint. These endpoints +// support virtual-hosted-style requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style +// requests are not supported. For more information, see Regional and Zonal +// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. Permissions +// - General purpose bucket permissions - For information on the permissions +// required to use the multipart upload API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon S3 User Guide. 
+// - Directory bucket permissions - To grant access to this API operation on a +// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant the +// s3express:CreateSession permission to the directory bucket in a bucket policy +// or an IAM identity-based policy. Then, you make the CreateSession API call on +// the bucket to obtain a session token. With the session token in your request +// header, you can make API requests to this operation. After the session token +// expires, you make another CreateSession API call to generate a new session +// token for use. Amazon Web Services CLI or SDKs create session and refresh the +// session token automatically to avoid service interruptions when a session +// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// . // -// * -// x-amz-server-side-encryption-customer-algorithm +// Data integrity General purpose bucket - To ensure that data is not corrupted +// traversing the network, specify the Content-MD5 header in the upload part +// request. Amazon S3 checks the part data against the provided MD5 value. If they +// do not match, Amazon S3 returns an error. If the upload request is signed with +// Signature Version 4, then Amazon Web Services S3 uses the x-amz-content-sha256 +// header as a checksum instead of Content-MD5 . For more information see +// Authenticating Requests: Using the Authorization Header (Amazon Web Services +// Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html) +// . Directory buckets - MD5 is not supported by directory buckets. You can use +// checksum algorithms to check object integrity. Encryption +// - General purpose bucket - Server-side encryption is for data encryption at +// rest. Amazon S3 encrypts your data as it writes it to disks in its data centers +// and decrypts it when you access it. You have mutually exclusive options to +// protect data using server-side encryption in Amazon S3, depending on how you +// choose to manage the encryption keys. Specifically, the encryption key options +// are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS), and +// Customer-Provided Keys (SSE-C). Amazon S3 encrypts data with server-side +// encryption using Amazon S3 managed keys (SSE-S3) by default. You can optionally +// tell Amazon S3 to encrypt data at rest using server-side encryption with other +// key options. The option you use depends on whether you want to use KMS keys +// (SSE-KMS) or provide your own encryption key (SSE-C). Server-side encryption is +// supported by the S3 Multipart Upload operations. Unless you are using a +// customer-provided encryption key (SSE-C), you don't need to specify the +// encryption parameters in each UploadPart request. Instead, you only need to +// specify the server-side encryption parameters in the initial Initiate Multipart +// request. For more information, see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// . If you request server-side encryption using a customer-provided encryption key +// (SSE-C) in your initiate multipart upload request, you must provide identical +// encryption information in each part upload using the following request headers. 
+// - x-amz-server-side-encryption-customer-algorithm +// - x-amz-server-side-encryption-customer-key +// - x-amz-server-side-encryption-customer-key-MD5 +// - Directory bucket - For directory buckets, only server-side encryption with +// Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. // -// * -// x-amz-server-side-encryption-customer-key +// For more information, see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) +// in the Amazon S3 User Guide. Special errors +// - Error Code: NoSuchUpload +// - Description: The specified multipart upload does not exist. The upload ID +// might be invalid, or the multipart upload might have been aborted or completed. +// - HTTP Status Code: 404 Not Found +// - SOAP Fault Code Prefix: Client // -// * -// x-amz-server-side-encryption-customer-key-MD5 -// -// # Special Errors -// -// * Code: -// NoSuchUpload -// -// * Cause: The specified multipart upload does not exist. The upload -// ID might be invalid, or the multipart upload might have been aborted or -// completed. -// -// * HTTP Status Code: 404 Not Found -// -// * SOAP Fault Code Prefix: -// Client -// -// # Related Resources -// -// * CreateMultipartUpload -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// -// * -// CompleteMultipartUpload -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) -// -// * -// AbortMultipartUpload -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) -// -// * -// ListParts -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// -// * -// ListMultipartUploads -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are +// related to UploadPart : +// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) func (c *Client) UploadPart(ctx context.Context, params *UploadPartInput, optFns ...func(*Options)) (*UploadPartOutput, error) { if params == nil { params = &UploadPartInput{} @@ -130,23 +126,31 @@ func (c *Client) UploadPart(ctx context.Context, params *UploadPartInput, optFns type UploadPartInput struct { - // The name of the bucket to which the multipart upload was initiated. When using - // this action with an access point, you must direct requests to the access point - // hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. 
When using this action with Amazon S3 on Outposts, - // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts - // hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts bucket ARN in place of the bucket name. For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the - // Amazon S3 User Guide. + // The name of the bucket to which the multipart upload was initiated. Directory + // buckets - When you use this operation with a directory bucket, you must use + // virtual-hosted-style requests in the format + // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + // supported. Directory bucket names must be unique in the chosen Availability + // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket + // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + // use this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts access point ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // This member is required. Bucket *string @@ -160,7 +164,7 @@ type UploadPartInput struct { // 10,000. // // This member is required. - PartNumber int32 + PartNumber *int32 // Upload ID identifying the multipart upload whose part is being uploaded. // @@ -170,13 +174,12 @@ type UploadPartInput struct { // Object data. Body io.Reader - // Indicates the algorithm used to create the checksum for the object when using - // the SDK. This header will not provide any additional functionality if not using - // the SDK. When sending this header, there must be a corresponding x-amz-checksum - // or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the - // HTTP status code 400 Bad Request. 
For more information, see Checking object
-	// integrity
-	// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// Indicates the algorithm used to create the checksum for the object when you use
+	// the SDK. This header will not provide any additional functionality if you don't
+	// use the SDK. When you send this header, there must be a corresponding
+	// x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+	// request with the HTTP status code 400 Bad Request . For more information, see
+	// Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
	// in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3
	// ignores any provided ChecksumAlgorithm parameter. This checksum algorithm must
	// be the same for all parts and it must match the checksum value supplied in the
@@ -186,112 +189,124 @@ type UploadPartInput struct {
	// This header can be used as a data integrity check to verify that the data
	// received is the same data that was originally sent. This header specifies the
	// base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
-	// Checking object integrity
-	// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
	// in the Amazon S3 User Guide.
	ChecksumCRC32 *string

	// This header can be used as a data integrity check to verify that the data
	// received is the same data that was originally sent. This header specifies the
	// base64-encoded, 32-bit CRC32C checksum of the object. For more information, see
-	// Checking object integrity
-	// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
	// in the Amazon S3 User Guide.
	ChecksumCRC32C *string

	// This header can be used as a data integrity check to verify that the data
	// received is the same data that was originally sent. This header specifies the
	// base64-encoded, 160-bit SHA-1 digest of the object. For more information, see
-	// Checking object integrity
-	// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
	// in the Amazon S3 User Guide.
	ChecksumSHA1 *string

	// This header can be used as a data integrity check to verify that the data
	// received is the same data that was originally sent. This header specifies the
	// base64-encoded, 256-bit SHA-256 digest of the object. For more information, see
-	// Checking object integrity
-	// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
	// in the Amazon S3 User Guide.
	ChecksumSHA256 *string

	// Size of the body in bytes. This parameter is useful when the size of the body
	// cannot be determined automatically.
-	ContentLength int64
+	ContentLength *int64

	// The base64-encoded 128-bit MD5 digest of the part data. This parameter is
	// auto-populated when using the command from the CLI. This parameter is required
-	// if object lock parameters are specified.
This functionality is not supported for + // directory buckets. ContentMD5 *string - // The account ID of the expected bucket owner. If the bucket is owned by a - // different account, the request fails with the HTTP status code 403 Forbidden - // (access denied). + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the HTTP + // status code 403 Forbidden (access denied). ExpectedBucketOwner *string // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer - // Specifies the algorithm to use to when encrypting the object (for example, - // AES256). + // Specifies the algorithm to use when encrypting the object (for example, + // AES256). This functionality is not supported for directory buckets. SSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use in // encrypting data. This value is used to store the object and then it is // discarded; Amazon S3 does not store the encryption key. The key must be // appropriate for use with the algorithm specified in the - // x-amz-server-side-encryption-customer-algorithm header. This must be the same - // encryption key specified in the initiate multipart upload request. + // x-amz-server-side-encryption-customer-algorithm header . This must be the same + // encryption key specified in the initiate multipart upload request. This + // functionality is not supported for directory buckets. SSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. + // encryption key was transmitted without error. This functionality is not + // supported for directory buckets. SSECustomerKeyMD5 *string noSmithyDocumentSerde } +func (in *UploadPartInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.Key = in.Key + +} + type UploadPartOutput struct { // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Amazon Web Services KMS (SSE-KMS). - BucketKeyEnabled bool + // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality + // is not supported for directory buckets. + BucketKeyEnabled *bool // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. 
For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32 *string // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32C *string // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use the API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA1 *string // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. 
Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA256 *string @@ -299,25 +314,29 @@ type UploadPartOutput struct { ETag *string // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm used. + // If server-side encryption with a customer-provided encryption key was + // requested, the response will include this header to confirm the encryption + // algorithm that's used. This functionality is not supported for directory + // buckets. SSECustomerAlgorithm *string - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round-trip message integrity - // verification of the customer-provided encryption key. + // If server-side encryption with a customer-provided encryption key was + // requested, the response will include this header to provide the round-trip + // message integrity verification of the customer-provided encryption key. This + // functionality is not supported for directory buckets. SSECustomerKeyMD5 *string - // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed key was used for the - // object. + // If present, indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. This functionality + // is not supported for directory buckets. SSEKMSKeyId *string - // The server-side encryption algorithm used when storing this object in Amazon S3 - // (for example, AES256, aws:kms). + // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256 , aws:kms ). For directory buckets, only server-side + // encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. ServerSideEncryption types.ServerSideEncryption // Metadata pertaining to the operation's result. 
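The caller-visible effect of the pointer migration in the hunks above is that PartNumber moves from int32 to *int32, ContentLength from int64 to *int64, and BucketKeyEnabled from bool to *bool, so call sites now wrap literals with the SDK's aws.Int32 and aws.Int64 helpers. A minimal, hypothetical sketch of the adjusted multipart flow under those assumptions (bucket and key names are placeholders):

```go
package main

import (
	"bytes"
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.TODO()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	create, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{
		Bucket: aws.String("my-bucket"), // placeholder
		Key:    aws.String("my-key"),    // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}

	buf := []byte("example part data")
	part, err := client.UploadPart(ctx, &s3.UploadPartInput{
		Bucket:        aws.String("my-bucket"),
		Key:           aws.String("my-key"),
		UploadId:      create.UploadId,
		PartNumber:    aws.Int32(1),               // *int32 after this update (was int32)
		ContentLength: aws.Int64(int64(len(buf))), // *int64 after this update (was int64)
		Body:          bytes.NewReader(buf),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Complete (or abort) the upload so the stored parts stop accruing charges.
	_, err = client.CompleteMultipartUpload(ctx, &s3.CompleteMultipartUploadInput{
		Bucket:   aws.String("my-bucket"),
		Key:      aws.String("my-key"),
		UploadId: create.UploadId,
		MultipartUpload: &types.CompletedMultipartUpload{
			Parts: []types.CompletedPart{{ETag: part.ETag, PartNumber: aws.Int32(1)}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```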
@@ -327,6 +346,9 @@ type UploadPartOutput struct {
}

func (c *Client) addOperationUploadPartMiddlewares(stack *middleware.Stack, options Options) (err error) {
+	if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+		return err
+	}
	err = stack.Serialize.Add(&awsRestxml_serializeOpUploadPart{}, middleware.After)
	if err != nil {
		return err
@@ -335,6 +357,13 @@ func (c *Client) addOperationUploadPartMiddlewares(stack *middleware.Stack, opti
	if err != nil {
		return err
	}
+	if err := addProtocolFinalizerMiddlewares(stack, options, "UploadPart"); err != nil {
+		return fmt.Errorf("add protocol finalizers: %v", err)
+	}
+
+	if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+		return err
+	}
	if err = addSetLoggerMiddleware(stack, options); err != nil {
		return err
	}
@@ -353,16 +382,13 @@ func (c *Client) addOperationUploadPartMiddlewares(stack *middleware.Stack, opti
	if err = addRetryMiddlewares(stack, options); err != nil {
		return err
	}
-	if err = addHTTPSignerV4Middleware(stack, options); err != nil {
-		return err
-	}
	if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
		return err
	}
	if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
		return err
	}
-	if err = addClientUserAgent(stack); err != nil {
+	if err = addClientUserAgent(stack, options); err != nil {
		return err
	}
	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
@@ -371,7 +397,10 @@ func (c *Client) addOperationUploadPartMiddlewares(stack *middleware.Stack, opti
	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
		return err
	}
-	if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
+	if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+		return err
+	}
+	if err = addPutBucketContextMiddleware(stack); err != nil {
		return err
	}
	if err = addOpUploadPartValidationMiddleware(stack); err != nil {
@@ -386,6 +415,9 @@ func (c *Client) addOperationUploadPartMiddlewares(stack *middleware.Stack, opti
	if err = add100Continue(stack, options); err != nil {
		return err
	}
+	if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
+		return err
+	}
	if err = addUploadPartInputChecksumMiddlewares(stack, options); err != nil {
		return err
	}
@@ -407,14 +439,26 @@ func (c *Client) addOperationUploadPartMiddlewares(stack *middleware.Stack, opti
	if err = addRequestResponseLogging(stack, options); err != nil {
		return err
	}
+	if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+		return err
+	}
+	if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
+		return err
+	}
	return nil
}

+func (v *UploadPartInput) bucket() (string, bool) {
+	if v.Bucket == nil {
+		return "", false
+	}
+	return *v.Bucket, true
+}
+
func newServiceMetadataMiddleware_opUploadPart(region string) *awsmiddleware.RegisterServiceMetadata {
	return &awsmiddleware.RegisterServiceMetadata{
		Region: region,
		ServiceID: ServiceID,
-		SigningName: "s3",
		OperationName: "UploadPart",
	}
}
@@ -439,8 +483,9 @@ func addUploadPartInputChecksumMiddlewares(stack *middleware.Stack, options Opti
	})
}

-// getUploadPartBucketMember returns a pointer to string denoting a provided bucket
-// member valueand a boolean indicating if the input has a modeled bucket name,
+// getUploadPartBucketMember returns a pointer to string denoting a provided
+// bucket member value and a boolean indicating if the input has a modeled bucket
+// name.
func getUploadPartBucketMember(input interface{}) (*string, bool) {
	in
:= input.(*UploadPartInput) if in.Bucket == nil { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go index f1fa1395..1d48a7be 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go @@ -4,123 +4,104 @@ package s3 import ( "context" + "fmt" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" "time" ) -// Uploads a part by copying data from an existing object as data source. You -// specify the data source by adding the request header x-amz-copy-source in your -// request and a byte range by adding the request header x-amz-copy-source-range in -// your request. For information about maximum and minimum part sizes and other -// multipart upload specifications, see Multipart upload limits -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html) in the -// Amazon S3 User Guide. Instead of using an existing object as part data, you -// might use the UploadPart -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) action and -// provide data in your request. You must initiate a multipart upload before you -// can upload any part. In response to your initiate request. Amazon S3 returns a -// unique identifier, the upload ID, that you must include in your upload part -// request. For more information about using the UploadPartCopy operation, see the -// following: +// Uploads a part by copying data from an existing object as data source. To +// specify the data source, you add the request header x-amz-copy-source in your +// request. To specify a byte range, you add the request header +// x-amz-copy-source-range in your request. For information about maximum and +// minimum part sizes and other multipart upload specifications, see Multipart +// upload limits (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html) +// in the Amazon S3 User Guide. Instead of copying data from an existing object as +// part data, you might use the UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// action to upload new data as a part of an object in your request. You must +// initiate a multipart upload before you can upload any part. In response to your +// initiate request, Amazon S3 returns the upload ID, a unique identifier that you +// must include in your upload part request. For conceptual information about +// multipart uploads, see Uploading Objects Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) +// in the Amazon S3 User Guide. For information about copying objects using a +// single atomic action vs. a multipart upload, see Operations on Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html) +// in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must +// make requests for this API operation to the Zonal endpoint. These endpoints +// support virtual-hosted-style requests in the format +// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style +// requests are not supported. 
For more information, see Regional and Zonal
+// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
+// in the Amazon S3 User Guide. Authentication and authorization All UploadPartCopy
+// requests must be authenticated and signed by using IAM credentials (access key
+// ID and secret access key for the IAM identities). All headers with the x-amz-
+// prefix, including x-amz-copy-source , must be signed. For more information, see
+// REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html)
+// . Directory buckets - You must use IAM credentials to authenticate and authorize
+// your access to the UploadPartCopy API operation, instead of using the temporary
+// security credentials through the CreateSession API operation. The Amazon Web
+// Services CLI or SDKs handle authentication and authorization on your behalf.
+// Permissions You must have READ access to the source object and WRITE access to
+// the destination bucket.
+// - General purpose bucket permissions - You must have the permissions in a
+// policy based on the bucket types of your source bucket and destination bucket in
+// an UploadPartCopy operation.
+// - If the source object is in a general purpose bucket, you must have the
+// s3:GetObject permission to read the source object that is being copied.
+// - If the destination bucket is a general purpose bucket, you must have the
+// s3:PutObject permission to write the object copy to the destination bucket.
+// For information about permissions required to use the multipart upload API, see
+// Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
+// in the Amazon S3 User Guide.
+// - Directory bucket permissions - You must have permissions in a bucket policy
+// or an IAM identity-based policy based on the source and destination bucket types
+// in an UploadPartCopy operation.
+// - If the source object that you want to copy is in a directory bucket, you
+// must have the s3express:CreateSession permission in the Action element of a
+// policy to read the object. By default, the session is in the ReadWrite mode.
+// If you want to restrict the access, you can explicitly set the
+// s3express:SessionMode condition key to ReadOnly on the copy source bucket.
+// - If the copy destination is a directory bucket, you must have the
+// s3express:CreateSession permission in the Action element of a policy to write
+// the object to the destination. The s3express:SessionMode condition key cannot
+// be set to ReadOnly on the copy destination. For example policies, see Example
+// bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html)
+// and Amazon Web Services Identity and Access Management (IAM) identity-based
+// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html)
+// in the Amazon S3 User Guide.
//
-// * For conceptual information about multipart uploads, see Uploading
-// Objects Using Multipart Upload
-// (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) in the
-// Amazon S3 User Guide.
+// Encryption +// - General purpose buckets - For information about using server-side +// encryption with customer-provided encryption keys with the UploadPartCopy +// operation, see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) +// and UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// . +// - Directory buckets - For directory buckets, only server-side encryption with +// Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. // -// * For information about permissions required to use the -// multipart upload API, see Multipart Upload and Permissions -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) in the -// Amazon S3 User Guide. +// Special errors +// - Error Code: NoSuchUpload +// - Description: The specified multipart upload does not exist. The upload ID +// might be invalid, or the multipart upload might have been aborted or completed. +// - HTTP Status Code: 404 Not Found +// - Error Code: InvalidRequest +// - Description: The specified copy source is not supported as a byte-range +// copy source. +// - HTTP Status Code: 400 Bad Request // -// * For information about copying objects using a single -// atomic action vs. a multipart upload, see Operations on Objects -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html) in the -// Amazon S3 User Guide. -// -// * For information about using server-side encryption with -// customer-provided encryption keys with the UploadPartCopy operation, see -// CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) -// and UploadPart -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html). -// -// Note the -// following additional considerations about the request headers -// x-amz-copy-source-if-match, x-amz-copy-source-if-none-match, -// x-amz-copy-source-if-unmodified-since, and -// x-amz-copy-source-if-modified-since: -// -// * Consideration 1 - If both of the -// x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since headers are -// present in the request as follows: x-amz-copy-source-if-match condition -// evaluates to true, and; x-amz-copy-source-if-unmodified-since condition -// evaluates to false; Amazon S3 returns 200 OK and copies the data. -// -// * -// Consideration 2 - If both of the x-amz-copy-source-if-none-match and -// x-amz-copy-source-if-modified-since headers are present in the request as -// follows: x-amz-copy-source-if-none-match condition evaluates to false, and; -// x-amz-copy-source-if-modified-since condition evaluates to true; Amazon S3 -// returns 412 Precondition Failed response code. -// -// Versioning If your bucket has -// versioning enabled, you could have multiple versions of the same object. By -// default, x-amz-copy-source identifies the current version of the object to copy. -// If the current version is a delete marker and you don't specify a versionId in -// the x-amz-copy-source, Amazon S3 returns a 404 error, because the object does -// not exist. If you specify versionId in the x-amz-copy-source and the versionId -// is a delete marker, Amazon S3 returns an HTTP 400 error, because you are not -// allowed to specify a delete marker as a version for the x-amz-copy-source. 
You -// can optionally specify a specific version of the source object to copy by adding -// the versionId subresource as shown in the following example: x-amz-copy-source: -// /bucket/object?versionId=version id Special Errors -// -// * Code: NoSuchUpload -// -// * -// Cause: The specified multipart upload does not exist. The upload ID might be -// invalid, or the multipart upload might have been aborted or completed. -// -// * HTTP -// Status Code: 404 Not Found -// -// * Code: InvalidRequest -// -// * Cause: The specified copy -// source is not supported as a byte-range copy source. -// -// * HTTP Status Code: 400 -// Bad Request -// -// # Related Resources -// -// * CreateMultipartUpload -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) -// -// * -// UploadPart -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) -// -// * -// CompleteMultipartUpload -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) -// -// * -// AbortMultipartUpload -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) -// -// * -// ListParts -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// -// * -// ListMultipartUploads -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is +// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are +// related to UploadPartCopy : +// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) func (c *Client) UploadPartCopy(ctx context.Context, params *UploadPartCopyInput, optFns ...func(*Options)) (*UploadPartCopyOutput, error) { if params == nil { params = &UploadPartCopyInput{} @@ -138,59 +119,72 @@ func (c *Client) UploadPartCopy(ctx context.Context, params *UploadPartCopyInput type UploadPartCopyInput struct { - // The bucket name. When using this action with an access point, you must direct - // requests to the access point hostname. The access point hostname takes the form - // AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this - // action with an access point through the Amazon Web Services SDKs, you provide - // the access point ARN in place of the bucket name. For more information about - // access point ARNs, see Using access points - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) - // in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts, - // you must direct requests to the S3 on Outposts hostname. The S3 on Outposts - // hostname takes the form - // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using - // this action with S3 on Outposts through the Amazon Web Services SDKs, you - // provide the Outposts bucket ARN in place of the bucket name. 
For more - // information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the - // Amazon S3 User Guide. + // The bucket name. Directory buckets - When you use this operation with a + // directory bucket, you must use virtual-hosted-style requests in the format + // Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not + // supported. Directory bucket names must be unique in the chosen Availability + // Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for + // example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket + // naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. Access points - When you use this action with an + // access point, you must provide the alias of the access point in place of the + // bucket name or specify the access point ARN. When using the access point ARN, + // you must direct requests to the access point hostname. The access point hostname + // takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. Access points and Object Lambda access points are + // not supported by directory buckets. S3 on Outposts - When you use this action + // with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts + // hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you + // use this action with S3 on Outposts through the Amazon Web Services SDKs, you + // provide the Outposts access point ARN in place of the bucket name. For more + // information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // This member is required. Bucket *string - // Specifies the source object for the copy operation. You specify the value in one - // of two formats, depending on whether you want to access the source object - // through an access point - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html): - // - // * - // For objects not accessed through an access point, specify the name of the source - // bucket and key of the source object, separated by a slash (/). For example, to - // copy the object reports/january.pdf from the bucket awsexamplebucket, use - // awsexamplebucket/reports/january.pdf. The value must be URL-encoded. - // - // * For - // objects accessed through access points, specify the Amazon Resource Name (ARN) - // of the object as accessed through the access point, in the format - // arn:aws:s3:::accesspoint//object/. For example, to copy the object - // reports/january.pdf through access point my-access-point owned by account - // 123456789012 in Region us-west-2, use the URL encoding of - // arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf. - // The value must be URL encoded. 
Amazon S3 supports copy operations using access
- // points only when the source and destination buckets are in the same Amazon Web
- // Services Region. Alternatively, for objects accessed through Amazon S3 on
- // Outposts, specify the ARN of the object as accessed in the format
- // arn:aws:s3-outposts:::outpost//object/. For example, to copy the object
- // reports/january.pdf through outpost my-outpost owned by account 123456789012 in
- // Region us-west-2, use the URL encoding of
- // arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf.
- // The value must be URL-encoded.
- //
- // To copy a specific version of an object, append
- // ?versionId= to the value (for example,
- // awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893).
- // If you don't specify a version ID, Amazon S3 copies the latest version of the
- // source object.
+ // Specifies the source object for the copy operation. You specify the value in
+ // one of two formats, depending on whether you want to access the source object
+ // through an access point (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html)
+ // :
+ // - For objects not accessed through an access point, specify the name of the
+ // source bucket and key of the source object, separated by a slash (/). For
+ // example, to copy the object reports/january.pdf from the bucket
+ // awsexamplebucket , use awsexamplebucket/reports/january.pdf . The value must
+ // be URL-encoded.
+ // - For objects accessed through access points, specify the Amazon Resource
+ // Name (ARN) of the object as accessed through the access point, in the format
+ // arn:aws:s3:::accesspoint//object/ . For example, to copy the object
+ // reports/january.pdf through access point my-access-point owned by account
+ // 123456789012 in Region us-west-2 , use the URL encoding of
+ // arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf
+ // . The value must be URL encoded.
+ // - Amazon S3 supports copy operations using Access points only when the source
+ // and destination buckets are in the same Amazon Web Services Region.
+ // - Access points are not supported by directory buckets. Alternatively, for
+ // objects accessed through Amazon S3 on Outposts, specify the ARN of the object as
+ // accessed in the format arn:aws:s3-outposts:::outpost//object/ . For example,
+ // to copy the object reports/january.pdf through outpost my-outpost owned by
+ // account 123456789012 in Region us-west-2 , use the URL encoding of
+ // arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf
+ // . The value must be URL-encoded.
+ // If your bucket has versioning enabled, you could have multiple versions of the
+ // same object. By default, x-amz-copy-source identifies the current version of
+ // the source object to copy. To copy a specific version of the source object,
+ // append ?versionId= to the x-amz-copy-source request header (for example,
+ // x-amz-copy-source:
+ // /awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893
+ // ). If the current version is a delete marker and you don't specify a versionId
+ // in the x-amz-copy-source request header, Amazon S3 returns a 404 Not Found
+ // error, because the object does not exist.
If you specify versionId in the
+ // x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns an
+ // HTTP 400 Bad Request error, because you are not allowed to specify a delete
+ // marker as a version for the x-amz-copy-source . Directory buckets - S3
+ // Versioning isn't enabled or supported for directory buckets.
//
// This member is required.
CopySource *string
@@ -204,23 +198,41 @@ type UploadPartCopyInput struct {
// 10,000.
//
// This member is required.
- PartNumber int32
+ PartNumber *int32
// Upload ID identifying the multipart upload whose part is being copied.
//
// This member is required.
UploadId *string
- // Copies the object if its entity tag (ETag) matches the specified tag.
+ // Copies the object if its entity tag (ETag) matches the specified tag. If both
+ // of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since
+ // headers are present in the request as follows: x-amz-copy-source-if-match
+ // condition evaluates to true , and; x-amz-copy-source-if-unmodified-since
+ // condition evaluates to false ; Amazon S3 returns 200 OK and copies the data.
CopySourceIfMatch *string
- // Copies the object if it has been modified since the specified time.
+ // Copies the object if it has been modified since the specified time. If both of
+ // the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since
+ // headers are present in the request as follows: x-amz-copy-source-if-none-match
+ // condition evaluates to false , and; x-amz-copy-source-if-modified-since
+ // condition evaluates to true ; Amazon S3 returns a 412 Precondition Failed
+ // response code.
CopySourceIfModifiedSince *time.Time
- // Copies the object if its entity tag (ETag) is different than the specified ETag.
+ // Copies the object if its entity tag (ETag) is different than the specified
+ // ETag. If both of the x-amz-copy-source-if-none-match and
+ // x-amz-copy-source-if-modified-since headers are present in the request as
+ // follows: x-amz-copy-source-if-none-match condition evaluates to false , and;
+ // x-amz-copy-source-if-modified-since condition evaluates to true ; Amazon S3
+ // returns a 412 Precondition Failed response code.
CopySourceIfNoneMatch *string
- // Copies the object if it hasn't been modified since the specified time.
+ // Copies the object if it hasn't been modified since the specified time. If both
+ // of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since
+ // headers are present in the request as follows: x-amz-copy-source-if-match
+ // condition evaluates to true , and; x-amz-copy-source-if-unmodified-since
+ // condition evaluates to false ; Amazon S3 returns 200 OK and copies the data.
CopySourceIfUnmodifiedSince *time.Time
// The range of bytes to copy from the source object. The range value must use the
@@ -231,39 +243,45 @@ type UploadPartCopyInput struct {
CopySourceRange *string
// Specifies the algorithm to use when decrypting the source object (for example,
- // AES256).
+ // AES256 ). This functionality is not supported when the source object is in a
+ // directory bucket.
CopySourceSSECustomerAlgorithm *string
// Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
// the source object. The encryption key provided in this header must be one that
- // was used when the source object was created.
+ // was used when the source object was created. This functionality is not supported
+ // when the source object is in a directory bucket.
CopySourceSSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. + // encryption key was transmitted without error. This functionality is not + // supported when the source object is in a directory bucket. CopySourceSSECustomerKeyMD5 *string - // The account ID of the expected destination bucket owner. If the destination - // bucket is owned by a different account, the request fails with the HTTP status - // code 403 Forbidden (access denied). + // The account ID of the expected destination bucket owner. If the account ID that + // you provide does not match the actual owner of the destination bucket, the + // request fails with the HTTP status code 403 Forbidden (access denied). ExpectedBucketOwner *string - // The account ID of the expected source bucket owner. If the source bucket is - // owned by a different account, the request fails with the HTTP status code 403 - // Forbidden (access denied). + // The account ID of the expected source bucket owner. If the account ID that you + // provide does not match the actual owner of the source bucket, the request fails + // with the HTTP status code 403 Forbidden (access denied). ExpectedSourceBucketOwner *string // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) - // in the Amazon S3 User Guide. + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination S3 bucket has Requester Pays enabled, the requester will + // pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. RequestPayer types.RequestPayer - // Specifies the algorithm to use to when encrypting the object (for example, - // AES256). + // Specifies the algorithm to use when encrypting the object (for example, + // AES256). This functionality is not supported when the destination bucket is a + // directory bucket. SSECustomerAlgorithm *string // Specifies the customer-provided encryption key for Amazon S3 to use in @@ -271,50 +289,64 @@ type UploadPartCopyInput struct { // discarded; Amazon S3 does not store the encryption key. The key must be // appropriate for use with the algorithm specified in the // x-amz-server-side-encryption-customer-algorithm header. This must be the same - // encryption key specified in the initiate multipart upload request. + // encryption key specified in the initiate multipart upload request. This + // functionality is not supported when the destination bucket is a directory + // bucket. SSECustomerKey *string // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure that the - // encryption key was transmitted without error. + // encryption key was transmitted without error. 
This functionality is not + // supported when the destination bucket is a directory bucket. SSECustomerKeyMD5 *string noSmithyDocumentSerde } +func (in *UploadPartCopyInput) bindEndpointParams(p *EndpointParameters) { + p.Bucket = in.Bucket + p.DisableS3ExpressSessionAuth = ptr.Bool(true) +} + type UploadPartCopyOutput struct { // Indicates whether the multipart upload uses an S3 Bucket Key for server-side - // encryption with Amazon Web Services KMS (SSE-KMS). - BucketKeyEnabled bool + // encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality + // is not supported for directory buckets. + BucketKeyEnabled *bool // Container for all response elements. CopyPartResult *types.CopyPartResult - // The version of the source object that was copied, if you have enabled versioning - // on the source bucket. + // The version of the source object that was copied, if you have enabled + // versioning on the source bucket. This functionality is not supported when the + // source object is in a directory bucket. CopySourceVersionId *string // If present, indicates that the requester was successfully charged for the - // request. + // request. This functionality is not supported for directory buckets. RequestCharged types.RequestCharged - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm used. + // If server-side encryption with a customer-provided encryption key was + // requested, the response will include this header to confirm the encryption + // algorithm that's used. This functionality is not supported for directory + // buckets. SSECustomerAlgorithm *string - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round-trip message integrity - // verification of the customer-provided encryption key. + // If server-side encryption with a customer-provided encryption key was + // requested, the response will include this header to provide the round-trip + // message integrity verification of the customer-provided encryption key. This + // functionality is not supported for directory buckets. SSECustomerKeyMD5 *string - // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric customer managed key that was used for the - // object. + // If present, indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. This functionality + // is not supported for directory buckets. SSEKMSKeyId *string - // The server-side encryption algorithm used when storing this object in Amazon S3 - // (for example, AES256, aws:kms). + // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256 , aws:kms ). For directory buckets, only server-side + // encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported. ServerSideEncryption types.ServerSideEncryption // Metadata pertaining to the operation's result. 
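The hunks above and below migrate several UploadPartCopy input and output fields from value types to pointer types (PartNumber int32 becomes *int32, BucketKeyEnabled bool becomes *bool), so call sites now pass and dereference pointers through the aws helpers. A minimal, hypothetical caller sketch under those assumptions; the bucket, key, and upload-ID values are placeholders, not part of this patch:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// PartNumber is now *int32, so aws.Int32 wraps the literal; before this
	// update a bare int32 was assigned here.
	out, err := client.UploadPartCopy(context.TODO(), &s3.UploadPartCopyInput{
		Bucket:     aws.String("example-destination-bucket"), // placeholder
		Key:        aws.String("example-object"),             // placeholder
		UploadId:   aws.String("example-upload-id"),          // placeholder
		CopySource: aws.String("example-source-bucket/example-object"),
		PartNumber: aws.Int32(1),
	})
	if err != nil {
		log.Fatal(err)
	}

	// CopyPartResult.ETag is a *string; aws.ToString dereferences it safely.
	log.Printf("copied part ETag: %s", aws.ToString(out.CopyPartResult.ETag))
}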
@@ -324,6 +356,9 @@ type UploadPartCopyOutput struct { } func (c *Client) addOperationUploadPartCopyMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpUploadPartCopy{}, middleware.After) if err != nil { return err @@ -332,6 +367,13 @@ func (c *Client) addOperationUploadPartCopyMiddlewares(stack *middleware.Stack, if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "UploadPartCopy"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -350,16 +392,13 @@ func (c *Client) addOperationUploadPartCopyMiddlewares(stack *middleware.Stack, if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -368,7 +407,10 @@ func (c *Client) addOperationUploadPartCopyMiddlewares(stack *middleware.Stack, if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addOpUploadPartCopyValidationMiddleware(stack); err != nil { @@ -380,6 +422,9 @@ func (c *Client) addOperationUploadPartCopyMiddlewares(stack *middleware.Stack, if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addUploadPartCopyUpdateEndpoint(stack, options); err != nil { return err } @@ -398,14 +443,26 @@ func (c *Client) addOperationUploadPartCopyMiddlewares(stack *middleware.Stack, if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } +func (v *UploadPartCopyInput) bucket() (string, bool) { + if v.Bucket == nil { + return "", false + } + return *v.Bucket, true +} + func newServiceMetadataMiddleware_opUploadPartCopy(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "UploadPartCopy", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go index 78eeadd4..ac90ff70 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go @@ -11,28 +11,28 @@ import ( "github.com/aws/aws-sdk-go-v2/service/s3/types" smithy 
"github.com/aws/smithy-go" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" "io" "strings" "time" ) -// Passes transformed objects to a GetObject operation when using Object Lambda -// access points. For information about Object Lambda access points, see -// Transforming objects with Object Lambda access points -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html) +// This operation is not supported by directory buckets. Passes transformed +// objects to a GetObject operation when using Object Lambda access points. For +// information about Object Lambda access points, see Transforming objects with +// Object Lambda access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html) // in the Amazon S3 User Guide. This operation supports metadata that can be -// returned by GetObject -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html), in -// addition to RequestRoute, RequestToken, StatusCode, ErrorCode, and ErrorMessage. -// The GetObject response metadata is supported so that the WriteGetObjectResponse -// caller, typically an Lambda function, can provide the same metadata when it -// internally invokes GetObject. When WriteGetObjectResponse is called by a -// customer-owned Lambda function, the metadata returned to the end user GetObject -// call might differ from what Amazon S3 would normally return. You can include any -// number of metadata headers. When including a metadata header, it should be -// prefaced with x-amz-meta. For example, x-amz-meta-my-custom-header: -// MyCustomValue. The primary use case for this is to forward GetObject metadata. +// returned by GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// , in addition to RequestRoute , RequestToken , StatusCode , ErrorCode , and +// ErrorMessage . The GetObject response metadata is supported so that the +// WriteGetObjectResponse caller, typically an Lambda function, can provide the +// same metadata when it internally invokes GetObject . When WriteGetObjectResponse +// is called by a customer-owned Lambda function, the metadata returned to the end +// user GetObject call might differ from what Amazon S3 would normally return. You +// can include any number of metadata headers. When including a metadata header, it +// should be prefaced with x-amz-meta . For example, x-amz-meta-my-custom-header: +// MyCustomValue . The primary use case for this is to forward GetObject metadata. // Amazon Web Services provides some prebuilt Lambda functions that you can use // with S3 Object Lambda to detect and redact personally identifiable information // (PII) and decompress S3 objects. These Lambda functions are available in the @@ -52,9 +52,8 @@ import ( // equipped to decompress objects stored in S3 in one of six compressed file // formats including bzip2, gzip, snappy, zlib, zstandard and ZIP. For information // on how to view and use these functions, see Using Amazon Web Services built -// Lambda functions -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/olap-examples.html) in -// the Amazon S3 User Guide. +// Lambda functions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/olap-examples.html) +// in the Amazon S3 User Guide. 
func (c *Client) WriteGetObjectResponse(ctx context.Context, params *WriteGetObjectResponseInput, optFns ...func(*Options)) (*WriteGetObjectResponseOutput, error) { if params == nil { params = &WriteGetObjectResponseInput{} @@ -91,7 +90,7 @@ type WriteGetObjectResponseInput struct { // Indicates whether the object stored in Amazon S3 uses an S3 bucket key for // server-side encryption with Amazon Web Services KMS (SSE-KMS). - BucketKeyEnabled bool + BucketKeyEnabled *bool // Specifies caching behavior along the request/reply chain. CacheControl *string @@ -102,8 +101,7 @@ type WriteGetObjectResponseInput struct { // Lambda function. This may not match the checksum for the object stored in Amazon // S3. Amazon S3 will perform validation of the checksum values only when the // original GetObject request required checksum validation. For more information - // about checksums, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // about checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. Only one checksum header can be specified at a // time. If you supply multiple checksum headers, this request will fail. ChecksumCRC32 *string @@ -114,8 +112,7 @@ type WriteGetObjectResponseInput struct { // Lambda function. This may not match the checksum for the object stored in Amazon // S3. Amazon S3 will perform validation of the checksum values only when the // original GetObject request required checksum validation. For more information - // about checksums, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // about checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. Only one checksum header can be specified at a // time. If you supply multiple checksum headers, this request will fail. ChecksumCRC32C *string @@ -126,8 +123,7 @@ type WriteGetObjectResponseInput struct { // function. This may not match the checksum for the object stored in Amazon S3. // Amazon S3 will perform validation of the checksum values only when the original // GetObject request required checksum validation. For more information about - // checksums, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. Only one checksum header can be specified at a // time. If you supply multiple checksum headers, this request will fail. ChecksumSHA1 *string @@ -138,8 +134,7 @@ type WriteGetObjectResponseInput struct { // Lambda function. This may not match the checksum for the object stored in Amazon // S3. Amazon S3 will perform validation of the checksum values only when the // original GetObject request required checksum validation. For more information - // about checksums, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // about checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. Only one checksum header can be specified at a // time. 
If you supply multiple checksum headers, this request will fail.
ChecksumSHA256 *string
@@ -156,7 +151,7 @@ type WriteGetObjectResponseInput struct {
ContentLanguage *string
// The size of the content body in bytes.
- ContentLength int64
+ ContentLength *int64
// The portion of the object returned in the response.
ContentRange *string
@@ -164,24 +159,25 @@
// A standard MIME type describing the format of the object data.
ContentType *string
- // Specifies whether an object stored in Amazon S3 is (true) or is not (false) a
- // delete marker.
- DeleteMarker bool
+ // Specifies whether an object stored in Amazon S3 is ( true ) or is not ( false )
+ // a delete marker.
+ DeleteMarker *bool
// An opaque identifier assigned by a web server to a specific version of a
// resource found at a URL.
ETag *string
// A string that uniquely identifies an error condition. Returned in the tag of
- // the error XML response for a corresponding GetObject call. Cannot be used with a
- // successful StatusCode header or when the transformed object is provided in the
- // body. All error codes from S3 are sentence-cased. The regular expression (regex)
- // value is "^[A-Z][a-zA-Z]+$".
+ // the error XML response for a corresponding GetObject call. Cannot be used with
+ // a successful StatusCode header or when the transformed object is provided in
+ // the body. All error codes from S3 are sentence-cased. The regular expression
+ // (regex) value is "^[A-Z][a-zA-Z]+$" .
ErrorCode *string
// Contains a generic description of the error condition. Returned in the tag of
- // the error XML response for a corresponding GetObject call. Cannot be used with a
- // successful StatusCode header or when the transformed object is provided in body.
+ // the error XML response for a corresponding GetObject call. Cannot be used with
+ // a successful StatusCode header or when the transformed object is provided in
+ // body.
ErrorMessage *string
// If the object expiration is configured (see PUT Bucket lifecycle), the response
@@ -203,29 +199,29 @@ type WriteGetObjectResponseInput struct {
// can happen if you create metadata using an API like SOAP that supports more
// flexible metadata than the REST API. For example, using SOAP, you can create
// metadata whose values are not legal HTTP headers.
- MissingMeta int32
+ MissingMeta *int32
// Indicates whether an object stored in Amazon S3 has an active legal hold.
ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus
// Indicates whether an object stored in Amazon S3 has Object Lock enabled. For
- // more information about S3 Object Lock, see Object Lock
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html).
+ // more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html)
+ // .
ObjectLockMode types.ObjectLockMode
// The date and time when Object Lock is configured to expire.
ObjectLockRetainUntilDate *time.Time
// The count of parts this object has.
- PartsCount int32
+ PartsCount *int32
- // Indicates if request involves bucket that is either a source or destination in a
- // Replication rule. For more information about S3 Replication, see Replication
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication.html).
+ // Indicates if the request involves a bucket that is either a source or destination in
+ // a Replication rule.
For more information about S3 Replication, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication.html)
+ // .
ReplicationStatus types.ReplicationStatus
// If present, indicates that the requester was successfully charged for the
- // request.
+ // request. This functionality is not supported for directory buckets.
RequestCharged types.RequestCharged
// Provides information about object restoration operation and expiration time of
@@ -238,63 +234,45 @@ type WriteGetObjectResponseInput struct {
// 128-bit MD5 digest of customer-provided encryption key used in Amazon S3 to
// encrypt data stored in S3. For more information, see Protecting data using
- // server-side encryption with customer-provided encryption keys (SSE-C)
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html).
+ // server-side encryption with customer-provided encryption keys (SSE-C) (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html)
+ // .
SSECustomerKeyMD5 *string
- // If present, specifies the ID of the Amazon Web Services Key Management Service
- // (Amazon Web Services KMS) symmetric customer managed key that was used for
- // stored in Amazon S3 object.
+ // If present, specifies the ID (Key ID, Key ARN, or Key Alias) of the Amazon Web
+ // Services Key Management Service (Amazon Web Services KMS) symmetric encryption
+ // customer managed key that was used for the object stored in Amazon S3.
SSEKMSKeyId *string
// The server-side encryption algorithm used when storing requested object in
- // Amazon S3 (for example, AES256, aws:kms).
+ // Amazon S3 (for example, AES256, aws:kms ).
ServerSideEncryption types.ServerSideEncryption
// The integer status code for an HTTP response of a corresponding GetObject
- // request. Status Codes
- //
- // * 200 - OK
- //
- // * 206 - Partial Content
- //
- // * 304 - Not
- // Modified
- //
- // * 400 - Bad Request
- //
- // * 401 - Unauthorized
- //
- // * 403 - Forbidden
- //
- // * 404 -
- // Not Found
- //
- // * 405 - Method Not Allowed
- //
- // * 409 - Conflict
- //
- // * 411 - Length
- // Required
- //
- // * 412 - Precondition Failed
- //
- // * 416 - Range Not Satisfiable
- //
- // * 500 -
- // Internal Server Error
- //
- // * 503 - Service Unavailable
- StatusCode int32
+ // request. The following is a list of status codes.
+ // - 200 - OK
+ // - 206 - Partial Content
+ // - 304 - Not Modified
+ // - 400 - Bad Request
+ // - 401 - Unauthorized
+ // - 403 - Forbidden
+ // - 404 - Not Found
+ // - 405 - Method Not Allowed
+ // - 409 - Conflict
+ // - 411 - Length Required
+ // - 412 - Precondition Failed
+ // - 416 - Range Not Satisfiable
+ // - 500 - Internal Server Error
+ // - 503 - Service Unavailable
+ StatusCode *int32
// Provides storage class information of the object. Amazon S3 returns this header
// for all objects except for S3 Standard storage class objects. For more
- // information, see Storage Classes
- // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html).
+ // information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
+ // .
StorageClass types.StorageClass
// The number of tags, if any, on the object.
- TagCount int32
+ TagCount *int32
// An ID used to reference a specific version of the object.
VersionId *string @@ -302,6 +280,11 @@ type WriteGetObjectResponseInput struct { noSmithyDocumentSerde } +func (in *WriteGetObjectResponseInput) bindEndpointParams(p *EndpointParameters) { + + p.UseObjectLambdaEndpoint = ptr.Bool(true) +} + type WriteGetObjectResponseOutput struct { // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata @@ -310,6 +293,9 @@ type WriteGetObjectResponseOutput struct { } func (c *Client) addOperationWriteGetObjectResponseMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } err = stack.Serialize.Add(&awsRestxml_serializeOpWriteGetObjectResponse{}, middleware.After) if err != nil { return err @@ -318,6 +304,13 @@ func (c *Client) addOperationWriteGetObjectResponseMiddlewares(stack *middleware if err != nil { return err } + if err := addProtocolFinalizerMiddlewares(stack, options, "WriteGetObjectResponse"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } @@ -339,16 +332,13 @@ func (c *Client) addOperationWriteGetObjectResponseMiddlewares(stack *middleware if err = addRetryMiddlewares(stack, options); err != nil { return err } - if err = addHTTPSignerV4Middleware(stack, options); err != nil { - return err - } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } - if err = addClientUserAgent(stack); err != nil { + if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { @@ -357,7 +347,10 @@ func (c *Client) addOperationWriteGetObjectResponseMiddlewares(stack *middleware if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } - if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil { + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addPutBucketContextMiddleware(stack); err != nil { return err } if err = addEndpointPrefix_opWriteGetObjectResponseMiddleware(stack); err != nil { @@ -372,6 +365,9 @@ func (c *Client) addOperationWriteGetObjectResponseMiddlewares(stack *middleware if err = addMetadataRetrieverMiddleware(stack); err != nil { return err } + if err = awsmiddleware.AddRecursionDetection(stack); err != nil { + return err + } if err = addWriteGetObjectResponseUpdateEndpoint(stack, options); err != nil { return err } @@ -387,6 +383,12 @@ func (c *Client) addOperationWriteGetObjectResponseMiddlewares(stack *middleware if err = addRequestResponseLogging(stack, options); err != nil { return err } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil { + return err + } return nil } @@ -397,11 +399,11 @@ func (*endpointPrefix_opWriteGetObjectResponseMiddleware) ID() string { return "EndpointHostPrefix" } -func (m *endpointPrefix_opWriteGetObjectResponseMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( - out middleware.SerializeOutput, metadata middleware.Metadata, err error, +func (m *endpointPrefix_opWriteGetObjectResponseMiddleware) 
HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { if smithyhttp.GetHostnameImmutable(ctx) || smithyhttp.IsEndpointHostPrefixDisabled(ctx) { - return next.HandleSerialize(ctx, in) + return next.HandleFinalize(ctx, in) } req, ok := in.Request.(*smithyhttp.Request) @@ -409,9 +411,10 @@ func (m *endpointPrefix_opWriteGetObjectResponseMiddleware) HandleSerialize(ctx return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) } - input, ok := in.Parameters.(*WriteGetObjectResponseInput) + opaqueInput := getOperationInput(ctx) + input, ok := opaqueInput.(*WriteGetObjectResponseInput) if !ok { - return out, metadata, fmt.Errorf("unknown input type %T", in.Parameters) + return out, metadata, fmt.Errorf("unknown input type %T", opaqueInput) } var prefix strings.Builder @@ -425,17 +428,16 @@ func (m *endpointPrefix_opWriteGetObjectResponseMiddleware) HandleSerialize(ctx prefix.WriteString(".") req.URL.Host = prefix.String() + req.URL.Host - return next.HandleSerialize(ctx, in) + return next.HandleFinalize(ctx, in) } func addEndpointPrefix_opWriteGetObjectResponseMiddleware(stack *middleware.Stack) error { - return stack.Serialize.Insert(&endpointPrefix_opWriteGetObjectResponseMiddleware{}, `OperationSerializer`, middleware.After) + return stack.Finalize.Insert(&endpointPrefix_opWriteGetObjectResponseMiddleware{}, "ResolveEndpointV2", middleware.After) } func newServiceMetadataMiddleware_opWriteGetObjectResponse(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, - SigningName: "s3", OperationName: "WriteGetObjectResponse", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/auth.go new file mode 100644 index 00000000..6ef631bd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/auth.go @@ -0,0 +1,318 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. 
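This new auth.go wires up the pluggable auth scheme resolution used by the ResolveAuthScheme, GetIdentity, and Signing finalize steps defined below. As a usage illustration only, a hypothetical decorator around the client's resolver might look like the following sketch, assuming the exported Options.AuthSchemeResolver field that this file resolves against; none of these names are part of the vendored patch:

package example

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	smithyauth "github.com/aws/smithy-go/auth"
)

// loggingResolver is a hypothetical decorator: it logs which operation is
// resolving, then delegates to whatever resolver the client already has.
type loggingResolver struct {
	inner s3.AuthSchemeResolver
}

func (r *loggingResolver) ResolveAuthSchemes(ctx context.Context, params *s3.AuthResolverParameters) ([]*smithyauth.Option, error) {
	log.Printf("resolving auth schemes for %s", params.Operation)
	return r.inner.ResolveAuthSchemes(ctx, params)
}

func newClientWithLoggingResolver(cfg aws.Config) *s3.Client {
	return s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.AuthSchemeResolver = &loggingResolver{inner: o.AuthSchemeResolver}
	})
}

Note that wrapWithAnonymousAuth below only augments the built-in defaultAuthSchemeResolver, so a custom resolver like this one opts out of the implicit anonymous fallback.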
+ +package s3 + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +func bindAuthParamsRegion(params *AuthResolverParameters, _ interface{}, options Options) { + params.Region = options.Region +} + +func bindAuthEndpointParams(params *AuthResolverParameters, input interface{}, options Options) { + params.endpointParams = bindEndpointParams(input, options) +} + +type setLegacyContextSigningOptionsMiddleware struct { +} + +func (*setLegacyContextSigningOptionsMiddleware) ID() string { + return "setLegacyContextSigningOptions" +} + +func (m *setLegacyContextSigningOptionsMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + rscheme := getResolvedAuthScheme(ctx) + schemeID := rscheme.Scheme.SchemeID() + + if sn := awsmiddleware.GetSigningName(ctx); sn != "" { + if schemeID == "aws.auth#sigv4" { + smithyhttp.SetSigV4SigningName(&rscheme.SignerProperties, sn) + } else if schemeID == "aws.auth#sigv4a" { + smithyhttp.SetSigV4ASigningName(&rscheme.SignerProperties, sn) + } + } + + if sr := awsmiddleware.GetSigningRegion(ctx); sr != "" { + if schemeID == "aws.auth#sigv4" { + smithyhttp.SetSigV4SigningRegion(&rscheme.SignerProperties, sr) + } else if schemeID == "aws.auth#sigv4a" { + smithyhttp.SetSigV4ASigningRegions(&rscheme.SignerProperties, []string{sr}) + } + } + + return next.HandleFinalize(ctx, in) +} + +func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error { + return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before) +} + +type withAnonymous struct { + resolver AuthSchemeResolver +} + +var _ AuthSchemeResolver = (*withAnonymous)(nil) + +func (v *withAnonymous) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { + opts, err := v.resolver.ResolveAuthSchemes(ctx, params) + if err != nil { + return nil, err + } + + opts = append(opts, &smithyauth.Option{ + SchemeID: smithyauth.SchemeIDAnonymous, + }) + return opts, nil +} + +func wrapWithAnonymousAuth(options *Options) { + if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok { + return + } + + options.AuthSchemeResolver = &withAnonymous{ + resolver: options.AuthSchemeResolver, + } +} + +// AuthResolverParameters contains the set of inputs necessary for auth scheme +// resolution. +type AuthResolverParameters struct { + // The name of the operation being invoked. + Operation string + + // The endpoint resolver parameters for this operation. This service's default + // resolver delegates to endpoint rules. + endpointParams *EndpointParameters + + // The region in which the operation is being invoked. + Region string +} + +func bindAuthResolverParams(operation string, input interface{}, options Options) *AuthResolverParameters { + params := &AuthResolverParameters{ + Operation: operation, + } + + bindAuthEndpointParams(params, input, options) + bindAuthParamsRegion(params, input, options) + + return params +} + +// AuthSchemeResolver returns a set of possible authentication options for an +// operation. 
+type AuthSchemeResolver interface { + ResolveAuthSchemes(context.Context, *AuthResolverParameters) ([]*smithyauth.Option, error) +} + +type defaultAuthSchemeResolver struct{} + +var _ AuthSchemeResolver = (*defaultAuthSchemeResolver)(nil) + +func (*defaultAuthSchemeResolver) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) { + if overrides, ok := operationAuthOptions[params.Operation]; ok { + return overrides(params), nil + } + return serviceAuthOptions(params), nil +} + +var operationAuthOptions = map[string]func(*AuthResolverParameters) []*smithyauth.Option{ + "WriteGetObjectResponse": func(params *AuthResolverParameters) []*smithyauth.Option { + return []*smithyauth.Option{ + { + SchemeID: smithyauth.SchemeIDSigV4, + SignerProperties: func() smithy.Properties { + var props smithy.Properties + smithyhttp.SetSigV4SigningName(&props, "s3") + smithyhttp.SetSigV4SigningRegion(&props, params.Region) + smithyhttp.SetIsUnsignedPayload(&props, true) + return props + }(), + }, + } + }, +} + +func serviceAuthOptions(params *AuthResolverParameters) []*smithyauth.Option { + return []*smithyauth.Option{ + { + SchemeID: smithyauth.SchemeIDSigV4, + SignerProperties: func() smithy.Properties { + var props smithy.Properties + smithyhttp.SetSigV4SigningName(&props, "s3") + smithyhttp.SetSigV4SigningRegion(&props, params.Region) + return props + }(), + }, + + { + SchemeID: smithyauth.SchemeIDSigV4A, + SignerProperties: func() smithy.Properties { + var props smithy.Properties + smithyhttp.SetSigV4ASigningName(&props, "s3") + smithyhttp.SetSigV4ASigningRegions(&props, []string{params.Region}) + return props + }(), + }, + } +} + +type resolveAuthSchemeMiddleware struct { + operation string + options Options +} + +func (*resolveAuthSchemeMiddleware) ID() string { + return "ResolveAuthScheme" +} + +func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + params := bindAuthResolverParams(m.operation, getOperationInput(ctx), m.options) + options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params) + if err != nil { + return out, metadata, fmt.Errorf("resolve auth scheme: %w", err) + } + + scheme, ok := m.selectScheme(options) + if !ok { + return out, metadata, fmt.Errorf("could not select an auth scheme") + } + + ctx = setResolvedAuthScheme(ctx, scheme) + return next.HandleFinalize(ctx, in) +} + +func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) (*resolvedAuthScheme, bool) { + for _, option := range options { + if option.SchemeID == smithyauth.SchemeIDAnonymous { + return newResolvedAuthScheme(smithyhttp.NewAnonymousScheme(), option), true + } + + for _, scheme := range m.options.AuthSchemes { + if scheme.SchemeID() != option.SchemeID { + continue + } + + if scheme.IdentityResolver(m.options) != nil { + return newResolvedAuthScheme(scheme, option), true + } + } + } + + return nil, false +} + +type resolvedAuthSchemeKey struct{} + +type resolvedAuthScheme struct { + Scheme smithyhttp.AuthScheme + IdentityProperties smithy.Properties + SignerProperties smithy.Properties +} + +func newResolvedAuthScheme(scheme smithyhttp.AuthScheme, option *smithyauth.Option) *resolvedAuthScheme { + return &resolvedAuthScheme{ + Scheme: scheme, + IdentityProperties: option.IdentityProperties, + SignerProperties: option.SignerProperties, + } +} + +func 
setResolvedAuthScheme(ctx context.Context, scheme *resolvedAuthScheme) context.Context { + return middleware.WithStackValue(ctx, resolvedAuthSchemeKey{}, scheme) +} + +func getResolvedAuthScheme(ctx context.Context) *resolvedAuthScheme { + v, _ := middleware.GetStackValue(ctx, resolvedAuthSchemeKey{}).(*resolvedAuthScheme) + return v +} + +type getIdentityMiddleware struct { + options Options +} + +func (*getIdentityMiddleware) ID() string { + return "GetIdentity" +} + +func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + rscheme := getResolvedAuthScheme(ctx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + resolver := rscheme.Scheme.IdentityResolver(m.options) + if resolver == nil { + return out, metadata, fmt.Errorf("no identity resolver") + } + + identity, err := resolver.GetIdentity(ctx, rscheme.IdentityProperties) + if err != nil { + return out, metadata, fmt.Errorf("get identity: %w", err) + } + + ctx = setIdentity(ctx, identity) + return next.HandleFinalize(ctx, in) +} + +type identityKey struct{} + +func setIdentity(ctx context.Context, identity smithyauth.Identity) context.Context { + return middleware.WithStackValue(ctx, identityKey{}, identity) +} + +func getIdentity(ctx context.Context) smithyauth.Identity { + v, _ := middleware.GetStackValue(ctx, identityKey{}).(smithyauth.Identity) + return v +} + +type signRequestMiddleware struct { +} + +func (*signRequestMiddleware) ID() string { + return "Signing" +} + +func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request) + } + + rscheme := getResolvedAuthScheme(ctx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + identity := getIdentity(ctx) + if identity == nil { + return out, metadata, fmt.Errorf("no identity") + } + + signer := rscheme.Scheme.Signer() + if signer == nil { + return out, metadata, fmt.Errorf("no signer") + } + + if err := signer.SignRequest(ctx, req, identity, rscheme.SignerProperties); err != nil { + return out, metadata, fmt.Errorf("sign request: %w", err) + } + + return next.HandleFinalize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/bucket_context.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/bucket_context.go new file mode 100644 index 00000000..860af056 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/bucket_context.go @@ -0,0 +1,47 @@ +package s3 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + "github.com/aws/smithy-go/middleware" +) + +// putBucketContextMiddleware stores the input bucket name within the request context (if +// present) which is required for a variety of custom S3 behaviors +type putBucketContextMiddleware struct{} + +func (*putBucketContextMiddleware) ID() string { + return "putBucketContext" +} + +func (m *putBucketContextMiddleware) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + if bucket, ok := 
m.bucketFromInput(in.Parameters); ok { + ctx = customizations.SetBucket(ctx, bucket) + } + return next.HandleSerialize(ctx, in) +} + +func (m *putBucketContextMiddleware) bucketFromInput(params interface{}) (string, bool) { + v, ok := params.(bucketer) + if !ok { + return "", false + } + + return v.bucket() +} + +func addPutBucketContextMiddleware(stack *middleware.Stack) error { + // This is essentially a post-Initialize task - only run it once the input + // has received all modifications from that phase. Therefore we add it as + // an early Serialize step. + // + // FUTURE: it would be nice to have explicit phases that only we as SDK + // authors can hook into (such as between phases like this really should + // be) + return stack.Serialize.Add(&putBucketContextMiddleware{}, middleware.Before) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/bucketer.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/bucketer.go new file mode 100644 index 00000000..4e7f7e24 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/bucketer.go @@ -0,0 +1,15 @@ +package s3 + +// implemented by all S3 input structures +type bucketer interface { + bucket() (string, bool) +} + +func bucketFromInput(params interface{}) (string, bool) { + v, ok := params.(bucketer) + if !ok { + return "", false + } + + return v.bucket() +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/create_mpu_checksum.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/create_mpu_checksum.go new file mode 100644 index 00000000..5803b9e4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/create_mpu_checksum.go @@ -0,0 +1,36 @@ +package s3 + +import ( + "context" + "fmt" + + internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" + "github.com/aws/smithy-go/middleware" +) + +// backfills checksum algorithm onto the context for CreateMultipart upload so +// transfer manager can set a checksum header on the request accordingly for +// s3express requests +type setCreateMPUChecksumAlgorithm struct{} + +func (*setCreateMPUChecksumAlgorithm) ID() string { + return "setCreateMPUChecksumAlgorithm" +} + +func (*setCreateMPUChecksumAlgorithm) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateMultipartUploadInput) + if !ok { + return out, metadata, fmt.Errorf("unexpected input type %T", in.Parameters) + } + + ctx = internalcontext.SetChecksumInputAlgorithm(ctx, string(input.ChecksumAlgorithm)) + return next.HandleSerialize(ctx, in) +} + +func addSetCreateMPUChecksumAlgorithm(s *middleware.Stack) error { + return s.Serialize.Add(&setCreateMPUChecksumAlgorithm{}, middleware.Before) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go index 995d909c..2be5df30 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/deserializers.go @@ -230,7 +230,7 @@ func awsRestxml_deserializeOpHttpBindingsCompleteMultipartUploadOutput(v *Comple if err != nil { return err } - v.BucketKeyEnabled = vv + v.BucketKeyEnabled = ptr.Bool(vv) } if headerValues := response.Header.Values("x-amz-expiration"); len(headerValues) != 0 { @@ -516,7 +516,7 @@ func awsRestxml_deserializeOpHttpBindingsCopyObjectOutput(v *CopyObjectOutput, r if err != nil { return err } - v.BucketKeyEnabled = 
vv + v.BucketKeyEnabled = ptr.Bool(vv) } if headerValues := response.Header.Values("x-amz-copy-source-version-id"); len(headerValues) != 0 { @@ -828,7 +828,7 @@ func awsRestxml_deserializeOpHttpBindingsCreateMultipartUploadOutput(v *CreateMu if err != nil { return err } - v.BucketKeyEnabled = vv + v.BucketKeyEnabled = ptr.Bool(vv) } if headerValues := response.Header.Values("x-amz-checksum-algorithm"); len(headerValues) != 0 { @@ -943,6 +943,148 @@ func awsRestxml_deserializeOpDocumentCreateMultipartUploadOutput(v **CreateMulti return nil } +type awsRestxml_deserializeOpCreateSession struct { +} + +func (*awsRestxml_deserializeOpCreateSession) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpCreateSession) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorCreateSession(response, &metadata) + } + output := &CreateSessionOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentCreateSessionOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorCreateSession(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + case strings.EqualFold("NoSuchBucket", errorCode): + return awsRestxml_deserializeErrorNoSuchBucket(response, 
errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpDocumentCreateSessionOutput(v **CreateSessionOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *CreateSessionOutput + if *v == nil { + sv = &CreateSessionOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Credentials", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentSessionCredentials(&sv.Credentials, nodeDecoder); err != nil { + return err + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + type awsRestxml_deserializeOpDeleteBucket struct { } @@ -2003,7 +2145,7 @@ func awsRestxml_deserializeOpHttpBindingsDeleteObjectOutput(v *DeleteObjectOutpu if err != nil { return err } - v.DeleteMarker = vv + v.DeleteMarker = ptr.Bool(vv) } if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { @@ -2369,6 +2511,11 @@ func (m *awsRestxml_deserializeOpGetBucketAccelerateConfiguration) HandleDeseria output := &GetBucketAccelerateConfigurationOutput{} out.Result = output + err = awsRestxml_deserializeOpHttpBindingsGetBucketAccelerateConfigurationOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) body := io.TeeReader(response.Body, ringBuffer) @@ -2440,6 +2587,18 @@ func awsRestxml_deserializeOpErrorGetBucketAccelerateConfiguration(response *smi } } +func awsRestxml_deserializeOpHttpBindingsGetBucketAccelerateConfigurationOutput(v *GetBucketAccelerateConfigurationOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + return nil +} func awsRestxml_deserializeOpDocumentGetBucketAccelerateConfigurationOutput(v **GetBucketAccelerateConfigurationOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -5263,7 +5422,7 @@ func awsRestxml_deserializeOpHttpBindingsGetObjectOutput(v *GetObjectOutput, res if err != nil { return err } - v.BucketKeyEnabled = vv + v.BucketKeyEnabled = ptr.Bool(vv) } if headerValues := response.Header.Values("Cache-Control"); len(headerValues) != 0 { @@ -5312,7 +5471,7 @@ func awsRestxml_deserializeOpHttpBindingsGetObjectOutput(v *GetObjectOutput, res if err != nil { return err } - v.ContentLength = vv + v.ContentLength = ptr.Int64(vv) } if headerValues := response.Header.Values("Content-Range"); len(headerValues) != 0 { @@ -5331,7 +5490,7 @@ func awsRestxml_deserializeOpHttpBindingsGetObjectOutput(v *GetObjectOutput, res if err != nil { return err } - v.DeleteMarker = vv + v.DeleteMarker = ptr.Bool(vv) } if headerValues := 
response.Header.Values("ETag"); len(headerValues) != 0 { @@ -5378,7 +5537,7 @@ func awsRestxml_deserializeOpHttpBindingsGetObjectOutput(v *GetObjectOutput, res if err != nil { return err } - v.MissingMeta = int32(vv) + v.MissingMeta = ptr.Int32(int32(vv)) } if headerValues := response.Header.Values("x-amz-object-lock-legal-hold"); len(headerValues) != 0 { @@ -5406,7 +5565,7 @@ func awsRestxml_deserializeOpHttpBindingsGetObjectOutput(v *GetObjectOutput, res if err != nil { return err } - v.PartsCount = int32(vv) + v.PartsCount = ptr.Int32(int32(vv)) } if headerValues := response.Header.Values("x-amz-replication-status"); len(headerValues) != 0 { @@ -5455,7 +5614,7 @@ func awsRestxml_deserializeOpHttpBindingsGetObjectOutput(v *GetObjectOutput, res if err != nil { return err } - v.TagCount = int32(vv) + v.TagCount = ptr.Int32(int32(vv)) } if headerValues := response.Header.Values("x-amz-version-id"); len(headerValues) != 0 { @@ -5759,7 +5918,7 @@ func awsRestxml_deserializeOpHttpBindingsGetObjectAttributesOutput(v *GetObjectA if err != nil { return err } - v.DeleteMarker = vv + v.DeleteMarker = ptr.Bool(vv) } if headerValues := response.Header.Values("Last-Modified"); len(headerValues) != 0 { @@ -5844,7 +6003,7 @@ func awsRestxml_deserializeOpDocumentGetObjectAttributesOutput(v **GetObjectAttr if err != nil { return err } - sv.ObjectSize = i64 + sv.ObjectSize = ptr.Int64(i64) } case strings.EqualFold("StorageClass", t.Name.Local): @@ -6711,10 +6870,9 @@ func (m *awsRestxml_deserializeOpHeadBucket) HandleDeserialize(ctx context.Conte output := &HeadBucketOutput{} out.Result = output - if _, err = io.Copy(ioutil.Discard, response.Body); err != nil { - return out, metadata, &smithy.DeserializationError{ - Err: fmt.Errorf("failed to discard response body, %w", err), - } + err = awsRestxml_deserializeOpHttpBindingsHeadBucketOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} } return out, metadata, err @@ -6763,6 +6921,38 @@ func awsRestxml_deserializeOpErrorHeadBucket(response *smithyhttp.Response, meta } } +func awsRestxml_deserializeOpHttpBindingsHeadBucketOutput(v *HeadBucketOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-access-point-alias"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + vv, err := strconv.ParseBool(headerValues[0]) + if err != nil { + return err + } + v.AccessPointAlias = ptr.Bool(vv) + } + + if headerValues := response.Header.Values("x-amz-bucket-location-name"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.BucketLocationName = ptr.String(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-bucket-location-type"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.BucketLocationType = types.LocationType(headerValues[0]) + } + + if headerValues := response.Header.Values("x-amz-bucket-region"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.BucketRegion = ptr.String(headerValues[0]) + } + + return nil +} + type awsRestxml_deserializeOpHeadObject struct { } @@ -6827,6 +7017,9 @@ func awsRestxml_deserializeOpErrorHeadObject(response *smithyhttp.Response, meta } errorBody.Seek(0, io.SeekStart) switch { + case strings.EqualFold("NotFound", errorCode): + 
return awsRestxml_deserializeErrorNotFound(response, errorBody) + default: genericError := &smithy.GenericAPIError{ Code: errorCode, @@ -6858,7 +7051,7 @@ func awsRestxml_deserializeOpHttpBindingsHeadObjectOutput(v *HeadObjectOutput, r if err != nil { return err } - v.BucketKeyEnabled = vv + v.BucketKeyEnabled = ptr.Bool(vv) } if headerValues := response.Header.Values("Cache-Control"); len(headerValues) != 0 { @@ -6907,7 +7100,7 @@ func awsRestxml_deserializeOpHttpBindingsHeadObjectOutput(v *HeadObjectOutput, r if err != nil { return err } - v.ContentLength = vv + v.ContentLength = ptr.Int64(vv) } if headerValues := response.Header.Values("Content-Type"); len(headerValues) != 0 { @@ -6921,7 +7114,7 @@ func awsRestxml_deserializeOpHttpBindingsHeadObjectOutput(v *HeadObjectOutput, r if err != nil { return err } - v.DeleteMarker = vv + v.DeleteMarker = ptr.Bool(vv) } if headerValues := response.Header.Values("ETag"); len(headerValues) != 0 { @@ -6968,7 +7161,7 @@ func awsRestxml_deserializeOpHttpBindingsHeadObjectOutput(v *HeadObjectOutput, r if err != nil { return err } - v.MissingMeta = int32(vv) + v.MissingMeta = ptr.Int32(int32(vv)) } if headerValues := response.Header.Values("x-amz-object-lock-legal-hold"); len(headerValues) != 0 { @@ -6996,7 +7189,7 @@ func awsRestxml_deserializeOpHttpBindingsHeadObjectOutput(v *HeadObjectOutput, r if err != nil { return err } - v.PartsCount = int32(vv) + v.PartsCount = ptr.Int32(int32(vv)) } if headerValues := response.Header.Values("x-amz-replication-status"); len(headerValues) != 0 { @@ -7203,7 +7396,7 @@ func awsRestxml_deserializeOpDocumentListBucketAnalyticsConfigurationsOutput(v * if err != nil { return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) } - sv.IsTruncated = xtv + sv.IsTruncated = ptr.Bool(xtv) } case strings.EqualFold("NextContinuationToken", t.Name.Local): @@ -7384,7 +7577,7 @@ func awsRestxml_deserializeOpDocumentListBucketIntelligentTieringConfigurationsO if err != nil { return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) } - sv.IsTruncated = xtv + sv.IsTruncated = ptr.Bool(xtv) } case strings.EqualFold("NextContinuationToken", t.Name.Local): @@ -7565,7 +7758,7 @@ func awsRestxml_deserializeOpDocumentListBucketInventoryConfigurationsOutput(v * if err != nil { return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) } - sv.IsTruncated = xtv + sv.IsTruncated = ptr.Bool(xtv) } case strings.EqualFold("NextContinuationToken", t.Name.Local): @@ -7740,7 +7933,7 @@ func awsRestxml_deserializeOpDocumentListBucketMetricsConfigurationsOutput(v **L if err != nil { return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) } - sv.IsTruncated = xtv + sv.IsTruncated = ptr.Bool(xtv) } case strings.EqualFold("MetricsConfiguration", t.Name.Local): @@ -7921,14 +8114,14 @@ func awsRestxml_deserializeOpDocumentListBucketsOutput(v **ListBucketsOutput, de return nil } -type awsRestxml_deserializeOpListMultipartUploads struct { +type awsRestxml_deserializeOpListDirectoryBuckets struct { } -func (*awsRestxml_deserializeOpListMultipartUploads) ID() string { +func (*awsRestxml_deserializeOpListDirectoryBuckets) ID() string { return "OperationDeserializer" } -func (m *awsRestxml_deserializeOpListMultipartUploads) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( +func (m *awsRestxml_deserializeOpListDirectoryBuckets) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next 
middleware.DeserializeHandler) ( out middleware.DeserializeOutput, metadata middleware.Metadata, err error, ) { out, metadata, err = next.HandleDeserialize(ctx, in) @@ -7942,9 +8135,9 @@ func (m *awsRestxml_deserializeOpListMultipartUploads) HandleDeserialize(ctx con } if response.StatusCode < 200 || response.StatusCode >= 300 { - return out, metadata, awsRestxml_deserializeOpErrorListMultipartUploads(response, &metadata) + return out, metadata, awsRestxml_deserializeOpErrorListDirectoryBuckets(response, &metadata) } - output := &ListMultipartUploadsOutput{} + output := &ListDirectoryBucketsOutput{} out.Result = output var buff [1024]byte @@ -7965,7 +8158,7 @@ func (m *awsRestxml_deserializeOpListMultipartUploads) HandleDeserialize(ctx con } decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) - err = awsRestxml_deserializeOpDocumentListMultipartUploadsOutput(&output, decoder) + err = awsRestxml_deserializeOpDocumentListDirectoryBucketsOutput(&output, decoder) if err != nil { var snapshot bytes.Buffer io.Copy(&snapshot, ringBuffer) @@ -7978,7 +8171,7 @@ func (m *awsRestxml_deserializeOpListMultipartUploads) HandleDeserialize(ctx con return out, metadata, err } -func awsRestxml_deserializeOpErrorListMultipartUploads(response *smithyhttp.Response, metadata *middleware.Metadata) error { +func awsRestxml_deserializeOpErrorListDirectoryBuckets(response *smithyhttp.Response, metadata *middleware.Metadata) error { var errorBuffer bytes.Buffer if _, err := io.Copy(&errorBuffer, response.Body); err != nil { return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} @@ -8018,13 +8211,13 @@ func awsRestxml_deserializeOpErrorListMultipartUploads(response *smithyhttp.Resp } } -func awsRestxml_deserializeOpDocumentListMultipartUploadsOutput(v **ListMultipartUploadsOutput, decoder smithyxml.NodeDecoder) error { +func awsRestxml_deserializeOpDocumentListDirectoryBucketsOutput(v **ListDirectoryBucketsOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) } - var sv *ListMultipartUploadsOutput + var sv *ListDirectoryBucketsOutput if *v == nil { - sv = &ListMultipartUploadsOutput{} + sv = &ListDirectoryBucketsOutput{} } else { sv = *v } @@ -8040,26 +8233,13 @@ func awsRestxml_deserializeOpDocumentListMultipartUploadsOutput(v **ListMultipar originalDecoder := decoder decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) switch { - case strings.EqualFold("Bucket", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.Bucket = ptr.String(xtv) - } - - case strings.EqualFold("CommonPrefixes", t.Name.Local): + case strings.EqualFold("Buckets", t.Name.Local): nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) - if err := awsRestxml_deserializeDocumentCommonPrefixListUnwrapped(&sv.CommonPrefixes, nodeDecoder); err != nil { + if err := awsRestxml_deserializeDocumentBuckets(&sv.Buckets, nodeDecoder); err != nil { return err } - case strings.EqualFold("Delimiter", t.Name.Local): + case strings.EqualFold("ContinuationToken", t.Name.Local): val, err := decoder.Value() if err != nil { return err @@ -8069,53 +8249,235 @@ func awsRestxml_deserializeOpDocumentListMultipartUploadsOutput(v **ListMultipar } { xtv := string(val) - sv.Delimiter = ptr.String(xtv) + sv.ContinuationToken = ptr.String(xtv) } - case strings.EqualFold("EncodingType", t.Name.Local): - val, err := decoder.Value() + default: + // Do nothing and ignore the 
unexpected tag element + err = decoder.Decoder.Skip() if err != nil { return err } - if val == nil { - break - } - { - xtv := string(val) - sv.EncodingType = types.EncodingType(xtv) - } - case strings.EqualFold("IsTruncated", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv, err := strconv.ParseBool(string(val)) - if err != nil { - return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) - } - sv.IsTruncated = xtv - } + } + decoder = originalDecoder + } + *v = sv + return nil +} - case strings.EqualFold("KeyMarker", t.Name.Local): - val, err := decoder.Value() - if err != nil { - return err - } - if val == nil { - break - } - { - xtv := string(val) - sv.KeyMarker = ptr.String(xtv) - } +type awsRestxml_deserializeOpListMultipartUploads struct { +} - case strings.EqualFold("MaxUploads", t.Name.Local): - val, err := decoder.Value() +func (*awsRestxml_deserializeOpListMultipartUploads) ID() string { + return "OperationDeserializer" +} + +func (m *awsRestxml_deserializeOpListMultipartUploads) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsRestxml_deserializeOpErrorListMultipartUploads(response, &metadata) + } + output := &ListMultipartUploadsOutput{} + out.Result = output + + err = awsRestxml_deserializeOpHttpBindingsListMultipartUploadsOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + body := io.TeeReader(response.Body, ringBuffer) + rootDecoder := xml.NewDecoder(body) + t, err := smithyxml.FetchRootElement(rootDecoder) + if err == io.EOF { + return out, metadata, nil + } + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + decoder := smithyxml.WrapNodeDecoder(rootDecoder, t) + err = awsRestxml_deserializeOpDocumentListMultipartUploadsOutput(&output, decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + return out, metadata, &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + } + + return out, metadata, err +} + +func awsRestxml_deserializeOpErrorListMultipartUploads(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + errorComponents, err := s3shared.GetErrorResponseComponents(errorBody, s3shared.ErrorResponseDeserializerOptions{ + UseStatusCode: true, 
StatusCode: response.StatusCode, + }) + if err != nil { + return err + } + if hostID := errorComponents.HostID; len(hostID) != 0 { + s3shared.SetHostIDMetadata(metadata, hostID) + } + if reqID := errorComponents.RequestID; len(reqID) != 0 { + awsmiddleware.SetRequestIDMetadata(metadata, reqID) + } + if len(errorComponents.Code) != 0 { + errorCode = errorComponents.Code + } + if len(errorComponents.Message) != 0 { + errorMessage = errorComponents.Message + } + errorBody.Seek(0, io.SeekStart) + switch { + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + +func awsRestxml_deserializeOpHttpBindingsListMultipartUploadsOutput(v *ListMultipartUploadsOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + return nil +} +func awsRestxml_deserializeOpDocumentListMultipartUploadsOutput(v **ListMultipartUploadsOutput, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *ListMultipartUploadsOutput + if *v == nil { + sv = &ListMultipartUploadsOutput{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("Bucket", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Bucket = ptr.String(xtv) + } + + case strings.EqualFold("CommonPrefixes", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentCommonPrefixListUnwrapped(&sv.CommonPrefixes, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("Delimiter", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.Delimiter = ptr.String(xtv) + } + + case strings.EqualFold("EncodingType", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.EncodingType = types.EncodingType(xtv) + } + + case strings.EqualFold("IsTruncated", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) + } + sv.IsTruncated = ptr.Bool(xtv) + } + + case strings.EqualFold("KeyMarker", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.KeyMarker = ptr.String(xtv) + } + + case strings.EqualFold("MaxUploads", t.Name.Local): + val, err := decoder.Value() if err != nil { return err } @@ -8128,7 +8490,7 @@ func awsRestxml_deserializeOpDocumentListMultipartUploadsOutput(v **ListMultipar if err != nil { return err } - sv.MaxUploads = int32(i64) + sv.MaxUploads = ptr.Int32(int32(i64)) } case strings.EqualFold("NextKeyMarker", t.Name.Local): @@ -8229,6 +8591,11 @@ func (m *awsRestxml_deserializeOpListObjects) HandleDeserialize(ctx 
context.Cont output := &ListObjectsOutput{} out.Result = output + err = awsRestxml_deserializeOpHttpBindingsListObjectsOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) body := io.TeeReader(response.Body, ringBuffer) @@ -8303,6 +8670,18 @@ func awsRestxml_deserializeOpErrorListObjects(response *smithyhttp.Response, met } } +func awsRestxml_deserializeOpHttpBindingsListObjectsOutput(v *ListObjectsOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + return nil +} func awsRestxml_deserializeOpDocumentListObjectsOutput(v **ListObjectsOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -8376,7 +8755,7 @@ func awsRestxml_deserializeOpDocumentListObjectsOutput(v **ListObjectsOutput, de if err != nil { return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) } - sv.IsTruncated = xtv + sv.IsTruncated = ptr.Bool(xtv) } case strings.EqualFold("Marker", t.Name.Local): @@ -8406,7 +8785,7 @@ func awsRestxml_deserializeOpDocumentListObjectsOutput(v **ListObjectsOutput, de if err != nil { return err } - sv.MaxKeys = int32(i64) + sv.MaxKeys = ptr.Int32(int32(i64)) } case strings.EqualFold("Name", t.Name.Local): @@ -8488,6 +8867,11 @@ func (m *awsRestxml_deserializeOpListObjectsV2) HandleDeserialize(ctx context.Co output := &ListObjectsV2Output{} out.Result = output + err = awsRestxml_deserializeOpHttpBindingsListObjectsV2Output(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) body := io.TeeReader(response.Body, ringBuffer) @@ -8562,6 +8946,18 @@ func awsRestxml_deserializeOpErrorListObjectsV2(response *smithyhttp.Response, m } } +func awsRestxml_deserializeOpHttpBindingsListObjectsV2Output(v *ListObjectsV2Output, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + return nil +} func awsRestxml_deserializeOpDocumentListObjectsV2Output(v **ListObjectsV2Output, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -8648,7 +9044,7 @@ func awsRestxml_deserializeOpDocumentListObjectsV2Output(v **ListObjectsV2Output if err != nil { return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) } - sv.IsTruncated = xtv + sv.IsTruncated = ptr.Bool(xtv) } case strings.EqualFold("KeyCount", t.Name.Local): @@ -8665,7 +9061,7 @@ func awsRestxml_deserializeOpDocumentListObjectsV2Output(v **ListObjectsV2Output if err != nil { return err } - sv.KeyCount = int32(i64) + sv.KeyCount = ptr.Int32(int32(i64)) } case strings.EqualFold("MaxKeys", t.Name.Local): @@ -8682,7 +9078,7 @@ func 
awsRestxml_deserializeOpDocumentListObjectsV2Output(v **ListObjectsV2Output if err != nil { return err } - sv.MaxKeys = int32(i64) + sv.MaxKeys = ptr.Int32(int32(i64)) } case strings.EqualFold("Name", t.Name.Local): @@ -8777,6 +9173,11 @@ func (m *awsRestxml_deserializeOpListObjectVersions) HandleDeserialize(ctx conte output := &ListObjectVersionsOutput{} out.Result = output + err = awsRestxml_deserializeOpHttpBindingsListObjectVersionsOutput(output, response) + if err != nil { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("failed to decode response with invalid Http bindings, %w", err)} + } + var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) body := io.TeeReader(response.Body, ringBuffer) @@ -8848,6 +9249,18 @@ func awsRestxml_deserializeOpErrorListObjectVersions(response *smithyhttp.Respon } } +func awsRestxml_deserializeOpHttpBindingsListObjectVersionsOutput(v *ListObjectVersionsOutput, response *smithyhttp.Response) error { + if v == nil { + return fmt.Errorf("unsupported deserialization for nil %T", v) + } + + if headerValues := response.Header.Values("x-amz-request-charged"); len(headerValues) != 0 { + headerValues[0] = strings.TrimSpace(headerValues[0]) + v.RequestCharged = types.RequestCharged(headerValues[0]) + } + + return nil +} func awsRestxml_deserializeOpDocumentListObjectVersionsOutput(v **ListObjectVersionsOutput, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -8921,7 +9334,7 @@ func awsRestxml_deserializeOpDocumentListObjectVersionsOutput(v **ListObjectVers if err != nil { return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) } - sv.IsTruncated = xtv + sv.IsTruncated = ptr.Bool(xtv) } case strings.EqualFold("KeyMarker", t.Name.Local): @@ -8951,7 +9364,7 @@ func awsRestxml_deserializeOpDocumentListObjectVersionsOutput(v **ListObjectVers if err != nil { return err } - sv.MaxKeys = int32(i64) + sv.MaxKeys = ptr.Int32(int32(i64)) } case strings.EqualFold("Name", t.Name.Local): @@ -9234,7 +9647,7 @@ func awsRestxml_deserializeOpDocumentListPartsOutput(v **ListPartsOutput, decode if err != nil { return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) } - sv.IsTruncated = xtv + sv.IsTruncated = ptr.Bool(xtv) } case strings.EqualFold("Key", t.Name.Local): @@ -9264,7 +9677,7 @@ func awsRestxml_deserializeOpDocumentListPartsOutput(v **ListPartsOutput, decode if err != nil { return err } - sv.MaxParts = int32(i64) + sv.MaxParts = ptr.Int32(int32(i64)) } case strings.EqualFold("NextPartNumberMarker", t.Name.Local): @@ -10780,7 +11193,7 @@ func awsRestxml_deserializeOpHttpBindingsPutObjectOutput(v *PutObjectOutput, res if err != nil { return err } - v.BucketKeyEnabled = vv + v.BucketKeyEnabled = ptr.Bool(vv) } if headerValues := response.Header.Values("x-amz-checksum-crc32"); len(headerValues) != 0 { @@ -11613,7 +12026,7 @@ func awsRestxml_deserializeOpHttpBindingsUploadPartOutput(v *UploadPartOutput, r if err != nil { return err } - v.BucketKeyEnabled = vv + v.BucketKeyEnabled = ptr.Bool(vv) } if headerValues := response.Header.Values("x-amz-checksum-crc32"); len(headerValues) != 0 { @@ -11782,7 +12195,7 @@ func awsRestxml_deserializeOpHttpBindingsUploadPartCopyOutput(v *UploadPartCopyO if err != nil { return err } - v.BucketKeyEnabled = vv + v.BucketKeyEnabled = ptr.Bool(vv) } if headerValues := response.Header.Values("x-amz-copy-source-version-id"); len(headerValues) != 0 { @@ -12304,7 +12717,7 @@ func 
awsRestxml_deserializeDocumentProgress(v **types.Progress, decoder smithyxm if err != nil { return err } - sv.BytesProcessed = i64 + sv.BytesProcessed = ptr.Int64(i64) } case strings.EqualFold("BytesReturned", t.Name.Local): @@ -12321,7 +12734,7 @@ func awsRestxml_deserializeDocumentProgress(v **types.Progress, decoder smithyxm if err != nil { return err } - sv.BytesReturned = i64 + sv.BytesReturned = ptr.Int64(i64) } case strings.EqualFold("BytesScanned", t.Name.Local): @@ -12338,7 +12751,7 @@ func awsRestxml_deserializeDocumentProgress(v **types.Progress, decoder smithyxm if err != nil { return err } - sv.BytesScanned = i64 + sv.BytesScanned = ptr.Int64(i64) } default: @@ -12391,7 +12804,7 @@ func awsRestxml_deserializeDocumentStats(v **types.Stats, decoder smithyxml.Node if err != nil { return err } - sv.BytesProcessed = i64 + sv.BytesProcessed = ptr.Int64(i64) } case strings.EqualFold("BytesReturned", t.Name.Local): @@ -12408,7 +12821,7 @@ func awsRestxml_deserializeDocumentStats(v **types.Stats, decoder smithyxml.Node if err != nil { return err } - sv.BytesReturned = i64 + sv.BytesReturned = ptr.Int64(i64) } case strings.EqualFold("BytesScanned", t.Name.Local): @@ -12425,7 +12838,7 @@ func awsRestxml_deserializeDocumentStats(v **types.Stats, decoder smithyxml.Node if err != nil { return err } - sv.BytesScanned = i64 + sv.BytesScanned = ptr.Int64(i64) } default: @@ -12551,7 +12964,7 @@ func awsRestxml_deserializeDocumentAbortIncompleteMultipartUpload(v **types.Abor if err != nil { return err } - sv.DaysAfterInitiation = int32(i64) + sv.DaysAfterInitiation = ptr.Int32(int32(i64)) } default: @@ -14104,7 +14517,7 @@ func awsRestxml_deserializeDocumentCORSRule(v **types.CORSRule, decoder smithyxm if err != nil { return err } - sv.MaxAgeSeconds = int32(i64) + sv.MaxAgeSeconds = ptr.Int32(int32(i64)) } default: @@ -14225,7 +14638,7 @@ func awsRestxml_deserializeDocumentDefaultRetention(v **types.DefaultRetention, if err != nil { return err } - sv.Days = int32(i64) + sv.Days = ptr.Int32(int32(i64)) } case strings.EqualFold("Mode", t.Name.Local): @@ -14255,7 +14668,7 @@ func awsRestxml_deserializeDocumentDefaultRetention(v **types.DefaultRetention, if err != nil { return err } - sv.Years = int32(i64) + sv.Years = ptr.Int32(int32(i64)) } default: @@ -14307,7 +14720,7 @@ func awsRestxml_deserializeDocumentDeletedObject(v **types.DeletedObject, decode if err != nil { return fmt.Errorf("expected DeleteMarker to be of type *bool, got %T instead", val) } - sv.DeleteMarker = xtv + sv.DeleteMarker = ptr.Bool(xtv) } case strings.EqualFold("DeleteMarkerVersionId", t.Name.Local): @@ -14466,7 +14879,7 @@ func awsRestxml_deserializeDocumentDeleteMarkerEntry(v **types.DeleteMarkerEntry if err != nil { return fmt.Errorf("expected IsLatest to be of type *bool, got %T instead", val) } - sv.IsLatest = xtv + sv.IsLatest = ptr.Bool(xtv) } case strings.EqualFold("Key", t.Name.Local): @@ -15412,7 +15825,7 @@ func awsRestxml_deserializeDocumentGetObjectAttributesParts(v **types.GetObjectA if err != nil { return fmt.Errorf("expected IsTruncated to be of type *bool, got %T instead", val) } - sv.IsTruncated = xtv + sv.IsTruncated = ptr.Bool(xtv) } case strings.EqualFold("MaxParts", t.Name.Local): @@ -15429,7 +15842,7 @@ func awsRestxml_deserializeDocumentGetObjectAttributesParts(v **types.GetObjectA if err != nil { return err } - sv.MaxParts = int32(i64) + sv.MaxParts = ptr.Int32(int32(i64)) } case strings.EqualFold("NextPartNumberMarker", t.Name.Local): @@ -15478,7 +15891,7 @@ func 
awsRestxml_deserializeDocumentGetObjectAttributesParts(v **types.GetObjectA if err != nil { return err } - sv.TotalPartsCount = int32(i64) + sv.TotalPartsCount = ptr.Int32(int32(i64)) } default: @@ -16225,7 +16638,7 @@ func awsRestxml_deserializeDocumentInventoryConfiguration(v **types.InventoryCon if err != nil { return fmt.Errorf("expected IsEnabled to be of type *bool, got %T instead", val) } - sv.IsEnabled = xtv + sv.IsEnabled = ptr.Bool(xtv) } case strings.EqualFold("OptionalFields", t.Name.Local): @@ -16879,7 +17292,7 @@ func awsRestxml_deserializeDocumentLifecycleExpiration(v **types.LifecycleExpira if err != nil { return err } - sv.Days = int32(i64) + sv.Days = ptr.Int32(int32(i64)) } case strings.EqualFold("ExpiredObjectDeleteMarker", t.Name.Local): @@ -16895,7 +17308,7 @@ func awsRestxml_deserializeDocumentLifecycleExpiration(v **types.LifecycleExpira if err != nil { return fmt.Errorf("expected ExpiredObjectDeleteMarker to be of type *bool, got %T instead", val) } - sv.ExpiredObjectDeleteMarker = xtv + sv.ExpiredObjectDeleteMarker = ptr.Bool(xtv) } default: @@ -17059,7 +17472,7 @@ func awsRestxml_deserializeDocumentLifecycleRuleAndOperator(v **types.LifecycleR if err != nil { return err } - sv.ObjectSizeGreaterThan = i64 + sv.ObjectSizeGreaterThan = ptr.Int64(i64) } case strings.EqualFold("ObjectSizeLessThan", t.Name.Local): @@ -17076,7 +17489,7 @@ func awsRestxml_deserializeDocumentLifecycleRuleAndOperator(v **types.LifecycleR if err != nil { return err } - sv.ObjectSizeLessThan = i64 + sv.ObjectSizeLessThan = ptr.Int64(i64) } case strings.EqualFold("Prefix", t.Name.Local): @@ -17332,6 +17745,12 @@ func awsRestxml_deserializeDocumentLoggingEnabled(v **types.LoggingEnabled, deco return err } + case strings.EqualFold("TargetObjectKeyFormat", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentTargetObjectKeyFormat(&sv.TargetObjectKeyFormat, nodeDecoder); err != nil { + return err + } + case strings.EqualFold("TargetPrefix", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -17913,7 +18332,7 @@ func awsRestxml_deserializeDocumentNoncurrentVersionExpiration(v **types.Noncurr if err != nil { return err } - sv.NewerNoncurrentVersions = int32(i64) + sv.NewerNoncurrentVersions = ptr.Int32(int32(i64)) } case strings.EqualFold("NoncurrentDays", t.Name.Local): @@ -17930,7 +18349,7 @@ func awsRestxml_deserializeDocumentNoncurrentVersionExpiration(v **types.Noncurr if err != nil { return err } - sv.NoncurrentDays = int32(i64) + sv.NoncurrentDays = ptr.Int32(int32(i64)) } default: @@ -17983,7 +18402,7 @@ func awsRestxml_deserializeDocumentNoncurrentVersionTransition(v **types.Noncurr if err != nil { return err } - sv.NewerNoncurrentVersions = int32(i64) + sv.NewerNoncurrentVersions = ptr.Int32(int32(i64)) } case strings.EqualFold("NoncurrentDays", t.Name.Local): @@ -18000,7 +18419,7 @@ func awsRestxml_deserializeDocumentNoncurrentVersionTransition(v **types.Noncurr if err != nil { return err } - sv.NoncurrentDays = int32(i64) + sv.NoncurrentDays = ptr.Int32(int32(i64)) } case strings.EqualFold("StorageClass", t.Name.Local): @@ -18361,7 +18780,13 @@ func awsRestxml_deserializeDocumentObject(v **types.Object, decoder smithyxml.No return err } - case strings.EqualFold("Size", t.Name.Local): + case strings.EqualFold("RestoreStatus", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentRestoreStatus(&sv.RestoreStatus, nodeDecoder); err != nil { + return 
err + } + + case strings.EqualFold("Size", t.Name.Local): val, err := decoder.Value() if err != nil { return err @@ -18375,7 +18800,7 @@ func awsRestxml_deserializeDocumentObject(v **types.Object, decoder smithyxml.No if err != nil { return err } - sv.Size = i64 + sv.Size = ptr.Int64(i64) } case strings.EqualFold("StorageClass", t.Name.Local): @@ -18845,7 +19270,7 @@ func awsRestxml_deserializeDocumentObjectPart(v **types.ObjectPart, decoder smit if err != nil { return err } - sv.PartNumber = int32(i64) + sv.PartNumber = ptr.Int32(int32(i64)) } case strings.EqualFold("Size", t.Name.Local): @@ -18862,7 +19287,7 @@ func awsRestxml_deserializeDocumentObjectPart(v **types.ObjectPart, decoder smit if err != nil { return err } - sv.Size = i64 + sv.Size = ptr.Int64(i64) } default: @@ -18933,7 +19358,7 @@ func awsRestxml_deserializeDocumentObjectVersion(v **types.ObjectVersion, decode if err != nil { return fmt.Errorf("expected IsLatest to be of type *bool, got %T instead", val) } - sv.IsLatest = xtv + sv.IsLatest = ptr.Bool(xtv) } case strings.EqualFold("Key", t.Name.Local): @@ -18972,6 +19397,12 @@ func awsRestxml_deserializeDocumentObjectVersion(v **types.ObjectVersion, decode return err } + case strings.EqualFold("RestoreStatus", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentRestoreStatus(&sv.RestoreStatus, nodeDecoder); err != nil { + return err + } + case strings.EqualFold("Size", t.Name.Local): val, err := decoder.Value() if err != nil { @@ -18986,7 +19417,7 @@ func awsRestxml_deserializeDocumentObjectVersion(v **types.ObjectVersion, decode if err != nil { return err } - sv.Size = i64 + sv.Size = ptr.Int64(i64) } case strings.EqualFold("StorageClass", t.Name.Local): @@ -19436,7 +19867,7 @@ func awsRestxml_deserializeDocumentPart(v **types.Part, decoder smithyxml.NodeDe if err != nil { return err } - sv.PartNumber = int32(i64) + sv.PartNumber = ptr.Int32(int32(i64)) } case strings.EqualFold("Size", t.Name.Local): @@ -19453,7 +19884,56 @@ func awsRestxml_deserializeDocumentPart(v **types.Part, decoder smithyxml.NodeDe if err != nil { return err } - sv.Size = i64 + sv.Size = ptr.Int64(i64) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentPartitionedPrefix(v **types.PartitionedPrefix, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.PartitionedPrefix + if *v == nil { + sv = &types.PartitionedPrefix{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("PartitionDateSource", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.PartitionDateSource = types.PartitionDateSource(xtv) } default: @@ -19641,7 +20121,7 @@ func awsRestxml_deserializeDocumentPolicyStatus(v **types.PolicyStatus, decoder if err != nil { return fmt.Errorf("expected IsPublic to be of type *bool, got %T instead", val) } - sv.IsPublic = xtv + sv.IsPublic = ptr.Bool(xtv) } default: @@ -19693,7 +20173,7 @@ func awsRestxml_deserializeDocumentPublicAccessBlockConfiguration(v **types.Publ if err != nil { 
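
// Editor's note (illustrative sketch, not part of the vendored patch): the
// recurring change in these deserializer hunks - `v.Field = vv` becoming
// `v.Field = ptr.Bool(vv)`, `ptr.Int32(...)`, or `ptr.Int64(...)` - reflects
// the SDK moving boolean and numeric output members from value types to
// pointer types, so an absent header or XML element can be distinguished from
// a zero value. Calling code that read these members directly must now
// dereference them, typically via the aws value helpers. A minimal sketch,
// assuming an existing *s3.Client named client, a context ctx, and
// hypothetical bucket/key names (imports: github.com/aws/aws-sdk-go-v2/aws,
// github.com/aws/aws-sdk-go-v2/service/s3):
//
//	out, err := client.GetObject(ctx, &s3.GetObjectInput{
//		Bucket: aws.String("example-bucket"), // hypothetical name
//		Key:    aws.String("example-key"),    // hypothetical name
//	})
//	if err != nil {
//		return err
//	}
//	size := aws.ToInt64(out.ContentLength)  // *int64 -> int64 (nil becomes 0)
//	deleted := aws.ToBool(out.DeleteMarker) // *bool -> bool (nil becomes false)
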
return fmt.Errorf("expected Setting to be of type *bool, got %T instead", val) } - sv.BlockPublicAcls = xtv + sv.BlockPublicAcls = ptr.Bool(xtv) } case strings.EqualFold("BlockPublicPolicy", t.Name.Local): @@ -19709,7 +20189,7 @@ func awsRestxml_deserializeDocumentPublicAccessBlockConfiguration(v **types.Publ if err != nil { return fmt.Errorf("expected Setting to be of type *bool, got %T instead", val) } - sv.BlockPublicPolicy = xtv + sv.BlockPublicPolicy = ptr.Bool(xtv) } case strings.EqualFold("IgnorePublicAcls", t.Name.Local): @@ -19725,7 +20205,7 @@ func awsRestxml_deserializeDocumentPublicAccessBlockConfiguration(v **types.Publ if err != nil { return fmt.Errorf("expected Setting to be of type *bool, got %T instead", val) } - sv.IgnorePublicAcls = xtv + sv.IgnorePublicAcls = ptr.Bool(xtv) } case strings.EqualFold("RestrictPublicBuckets", t.Name.Local): @@ -19741,7 +20221,7 @@ func awsRestxml_deserializeDocumentPublicAccessBlockConfiguration(v **types.Publ if err != nil { return fmt.Errorf("expected Setting to be of type *bool, got %T instead", val) } - sv.RestrictPublicBuckets = xtv + sv.RestrictPublicBuckets = ptr.Bool(xtv) } default: @@ -20253,7 +20733,7 @@ func awsRestxml_deserializeDocumentReplicationRule(v **types.ReplicationRule, de if err != nil { return err } - sv.Priority = int32(i64) + sv.Priority = ptr.Int32(int32(i64)) } case strings.EqualFold("SourceSelectionCriteria", t.Name.Local): @@ -20574,7 +21054,76 @@ func awsRestxml_deserializeDocumentReplicationTimeValue(v **types.ReplicationTim if err != nil { return err } - sv.Minutes = int32(i64) + sv.Minutes = ptr.Int32(int32(i64)) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentRestoreStatus(v **types.RestoreStatus, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.RestoreStatus + if *v == nil { + sv = &types.RestoreStatus{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("IsRestoreInProgress", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv, err := strconv.ParseBool(string(val)) + if err != nil { + return fmt.Errorf("expected IsRestoreInProgress to be of type *bool, got %T instead", val) + } + sv.IsRestoreInProgress = ptr.Bool(xtv) + } + + case strings.EqualFold("RestoreExpiryDate", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + return err + } + sv.RestoreExpiryDate = ptr.Time(t) } default: @@ -20894,7 +21443,7 @@ func awsRestxml_deserializeDocumentServerSideEncryptionRule(v **types.ServerSide if err != nil { return fmt.Errorf("expected BucketKeyEnabled to be of type *bool, got %T instead", val) } - sv.BucketKeyEnabled = xtv + sv.BucketKeyEnabled = ptr.Bool(xtv) } default: @@ -20979,6 +21528,134 @@ func awsRestxml_deserializeDocumentServerSideEncryptionRulesUnwrapped(v *[]types *v = sv return nil } +func awsRestxml_deserializeDocumentSessionCredentials(v **types.SessionCredentials, decoder smithyxml.NodeDecoder) error { + if v == nil { + 
return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.SessionCredentials + if *v == nil { + sv = &types.SessionCredentials{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("AccessKeyId", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.AccessKeyId = ptr.String(xtv) + } + + case strings.EqualFold("Expiration", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + t, err := smithytime.ParseDateTime(xtv) + if err != nil { + return err + } + sv.Expiration = ptr.Time(t) + } + + case strings.EqualFold("SecretAccessKey", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SecretAccessKey = ptr.String(xtv) + } + + case strings.EqualFold("SessionToken", t.Name.Local): + val, err := decoder.Value() + if err != nil { + return err + } + if val == nil { + break + } + { + xtv := string(val) + sv.SessionToken = ptr.String(xtv) + } + + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + +func awsRestxml_deserializeDocumentSimplePrefix(v **types.SimplePrefix, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.SimplePrefix + if *v == nil { + sv = &types.SimplePrefix{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsRestxml_deserializeDocumentSourceSelectionCriteria(v **types.SourceSelectionCriteria, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -21511,6 +22188,54 @@ func awsRestxml_deserializeDocumentTargetGrantsUnwrapped(v *[]types.TargetGrant, *v = sv return nil } +func awsRestxml_deserializeDocumentTargetObjectKeyFormat(v **types.TargetObjectKeyFormat, decoder smithyxml.NodeDecoder) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + var sv *types.TargetObjectKeyFormat + if *v == nil { + sv = &types.TargetObjectKeyFormat{} + } else { + sv = *v + } + + for { + t, done, err := decoder.Token() + if err != nil { + return err + } + if done { + break + } + originalDecoder := decoder + decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t) + switch { + case strings.EqualFold("PartitionedPrefix", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentPartitionedPrefix(&sv.PartitionedPrefix, nodeDecoder); err != nil { + return err + } + + case strings.EqualFold("SimplePrefix", t.Name.Local): + nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t) + if err := awsRestxml_deserializeDocumentSimplePrefix(&sv.SimplePrefix, nodeDecoder); err != nil { + return err + } + + 
default: + // Do nothing and ignore the unexpected tag element + err = decoder.Decoder.Skip() + if err != nil { + return err + } + + } + decoder = originalDecoder + } + *v = sv + return nil +} + func awsRestxml_deserializeDocumentTiering(v **types.Tiering, decoder smithyxml.NodeDecoder) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -21560,7 +22285,7 @@ func awsRestxml_deserializeDocumentTiering(v **types.Tiering, decoder smithyxml. if err != nil { return err } - sv.Days = int32(i64) + sv.Days = ptr.Int32(int32(i64)) } default: @@ -21840,7 +22565,7 @@ func awsRestxml_deserializeDocumentTransition(v **types.Transition, decoder smit if err != nil { return err } - sv.Days = int32(i64) + sv.Days = ptr.Int32(int32(i64)) } case strings.EqualFold("StorageClass", t.Name.Local): diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoint_auth_resolver.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoint_auth_resolver.go new file mode 100644 index 00000000..91af48fc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoint_auth_resolver.go @@ -0,0 +1,115 @@ +package s3 + +import ( + "context" + "fmt" + + smithyauth "github.com/aws/smithy-go/auth" +) + +type endpointAuthResolver struct { + EndpointResolver EndpointResolverV2 +} + +var _ AuthSchemeResolver = (*endpointAuthResolver)(nil) + +func (r *endpointAuthResolver) ResolveAuthSchemes( + ctx context.Context, params *AuthResolverParameters, +) ( + []*smithyauth.Option, error, +) { + opts, err := r.resolveAuthSchemes(ctx, params) + if err != nil { + return nil, err + } + + // canonicalize sigv4-s3express ID + for _, opt := range opts { + if opt.SchemeID == "sigv4-s3express" { + opt.SchemeID = "com.amazonaws.s3#sigv4express" + } + } + + // preserve pre-SRA behavior where everything technically had anonymous + return append(opts, &smithyauth.Option{ + SchemeID: smithyauth.SchemeIDAnonymous, + }), nil +} + +func (r *endpointAuthResolver) resolveAuthSchemes( + ctx context.Context, params *AuthResolverParameters, +) ( + []*smithyauth.Option, error, +) { + baseOpts, err := (&defaultAuthSchemeResolver{}).ResolveAuthSchemes(ctx, params) + if err != nil { + return nil, fmt.Errorf("get base options: %w", err) + } + + endpt, err := r.EndpointResolver.ResolveEndpoint(ctx, *params.endpointParams) + if err != nil { + return nil, fmt.Errorf("resolve endpoint: %w", err) + } + + endptOpts, ok := smithyauth.GetAuthOptions(&endpt.Properties) + if !ok { + return baseOpts, nil + } + + // the list of options from the endpoint is authoritative, however, the + // modeled options have some properties that the endpoint ones don't, so we + // start from the latter and merge in + for _, endptOpt := range endptOpts { + if baseOpt := findScheme(baseOpts, endptOpt.SchemeID); baseOpt != nil { + rebaseProps(endptOpt, baseOpt) + } + } + + return endptOpts, nil +} + +// rebase the properties of dst, taking src as the base and overlaying those +// from dst +func rebaseProps(dst, src *smithyauth.Option) { + iprops, sprops := src.IdentityProperties, src.SignerProperties + + iprops.SetAll(&dst.IdentityProperties) + sprops.SetAll(&dst.SignerProperties) + + dst.IdentityProperties = iprops + dst.SignerProperties = sprops +} + +func findScheme(opts []*smithyauth.Option, schemeID string) *smithyauth.Option { + for _, opt := range opts { + if opt.SchemeID == schemeID { + return opt + } + } + return nil +} + +func finalizeServiceEndpointAuthResolver(options *Options) { + if _, ok := 
options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok { + return + } + + options.AuthSchemeResolver = &endpointAuthResolver{ + EndpointResolver: options.EndpointResolverV2, + } +} + +func finalizeOperationEndpointAuthResolver(options *Options) { + resolver, ok := options.AuthSchemeResolver.(*endpointAuthResolver) + if !ok { + return + } + + if resolver.EndpointResolver == options.EndpointResolverV2 { + return + } + + options.AuthSchemeResolver = &endpointAuthResolver{ + EndpointResolver: options.EndpointResolverV2, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go index 8df6368c..a1f2e36d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go @@ -8,10 +8,22 @@ import ( "fmt" "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" + internalcontext "github.com/aws/aws-sdk-go-v2/internal/context" + "github.com/aws/aws-sdk-go-v2/internal/endpoints" + "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" internalendpoints "github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints" + smithy "github.com/aws/smithy-go" + smithyauth "github.com/aws/smithy-go/auth" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/aws/smithy-go/endpoints/private/rulesfn" "github.com/aws/smithy-go/middleware" + "github.com/aws/smithy-go/ptr" smithyhttp "github.com/aws/smithy-go/transport/http" + "net/http" "net/url" + "os" "strings" ) @@ -39,13 +51,6 @@ func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointRe return fn(region, options) } -func resolveDefaultEndpointConfiguration(o *Options) { - if o.EndpointResolver != nil { - return - } - o.EndpointResolver = NewDefaultEndpointResolver() -} - // EndpointResolverFromURL returns an EndpointResolver configured using the // provided endpoint url. 
By default, the resolved endpoint resolver uses the // client region as signing region, and the endpoint source is set to @@ -79,6 +84,10 @@ func (*ResolveEndpoint) ID() string { func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) @@ -94,6 +103,11 @@ func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.Ser var endpoint aws.Endpoint endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo) if err != nil { + nf := (&aws.EndpointNotFoundError{}) + if errors.As(err, &nf) { + ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, false) + return next.HandleSerialize(ctx, in) + } return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) } @@ -129,27 +143,10 @@ func removeResolveEndpointMiddleware(stack *middleware.Stack) error { type wrappedEndpointResolver struct { awsResolver aws.EndpointResolverWithOptions - resolver EndpointResolver } func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) { - if w.awsResolver == nil { - goto fallback - } - endpoint, err = w.awsResolver.ResolveEndpoint(ServiceID, region, options) - if err == nil { - return endpoint, nil - } - - if nf := (&aws.EndpointNotFoundError{}); !errors.As(err, &nf) { - return endpoint, err - } - -fallback: - if w.resolver == nil { - return endpoint, fmt.Errorf("default endpoint resolver provided was nil") - } - return w.resolver.ResolveEndpoint(region, options) + return w.awsResolver.ResolveEndpoint(ServiceID, region, options) } type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error) @@ -160,12 +157,13 @@ func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, opti var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil) -// withEndpointResolver returns an EndpointResolver that first delegates endpoint resolution to the awsResolver. -// If awsResolver returns aws.EndpointNotFoundError error, the resolver will use the the provided -// fallbackResolver for resolution. +// withEndpointResolver returns an aws.EndpointResolverWithOptions that first delegates endpoint resolution to the awsResolver. +// If awsResolver returns aws.EndpointNotFoundError error, the v1 resolver middleware will swallow the error, +// and set an appropriate context flag such that fallback will occur when EndpointResolverV2 is invoked +// via its middleware. // -// fallbackResolver must not be nil -func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions, fallbackResolver EndpointResolver) EndpointResolver { +// If another error (besides aws.EndpointNotFoundError) is returned, then that error will be propagated. 
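
// Editor's note (illustrative sketch, not part of the vendored patch): after
// this change the wrapped v1 resolver no longer carries its own fallback;
// when it returns aws.EndpointNotFoundError, the v1 middleware sets the
// legacy-endpoints context flag to false and resolution falls through to
// EndpointResolverV2 below. For client code, the simplest way to point the
// client at a custom endpoint is now the BaseEndpoint option; the URL here is
// an assumption:
//
//	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
//		o.BaseEndpoint = aws.String("https://s3.internal.example.com")
//	})
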
+func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions) EndpointResolver { + var resolver aws.EndpointResolverWithOptions + if awsResolverWithOptions != nil { @@ -176,7 +174,6 @@ func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptio return &wrappedEndpointResolver{ awsResolver: resolver, - resolver: fallbackResolver, } } @@ -206,3 +203,5542 @@ func finalizeClientEndpointResolverOptions(options *Options) { } } + +func resolveEndpointResolverV2(options *Options) { + if options.EndpointResolverV2 == nil { + options.EndpointResolverV2 = NewDefaultEndpointResolverV2() + } +} + +func resolveBaseEndpoint(cfg aws.Config, o *Options) { + if cfg.BaseEndpoint != nil { + o.BaseEndpoint = cfg.BaseEndpoint + } + + _, g := os.LookupEnv("AWS_ENDPOINT_URL") + _, s := os.LookupEnv("AWS_ENDPOINT_URL_S3") + + if g && !s { + return + } + + value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "S3", cfg.ConfigSources) + if found && err == nil { + o.BaseEndpoint = &value + } +} + +// EndpointParameters provides the parameters that influence how endpoints are +// resolved. +type EndpointParameters struct { + // The S3 bucket used to send the request. This is an optional parameter that will + // be set automatically for operations that are scoped to an S3 bucket. + // + // Parameter + // is required. + Bucket *string + + // The AWS region used to dispatch the request. + // + // Parameter is + // required. + // + // AWS::Region + Region *string + + // When true, send this request to the FIPS-compliant regional endpoint. If the + // configured endpoint does not have a FIPS compliant endpoint, dispatching the + // request will return an error. + // + // Defaults to false if no value is + // provided. + // + // AWS::UseFIPS + UseFIPS *bool + + // When true, use the dual-stack endpoint. If the configured endpoint does not + // support dual-stack, dispatching the request MAY return an error. + // + // Defaults to + // false if no value is provided. + // + // AWS::UseDualStack + UseDualStack *bool + + // Override the endpoint used to send this request + // + // Parameter is + // required. + // + // SDK::Endpoint + Endpoint *string + + // When true, force a path-style endpoint to be used where the bucket name is part + // of the path. + // + // Defaults to false if no value is + // provided. + // + // AWS::S3::ForcePathStyle + ForcePathStyle *bool + + // When true, use S3 Accelerate. NOTE: Not all regions support S3 + // accelerate. + // + // Defaults to false if no value is provided. + // + // AWS::S3::Accelerate + Accelerate *bool + + // Whether the global endpoint should be used, rather than the regional endpoint + // for us-east-1. + // + // Defaults to false if no value is + // provided. + // + // AWS::S3::UseGlobalEndpoint + UseGlobalEndpoint *bool + + // Internal parameter to use object lambda endpoint for an operation (e.g. + // WriteGetObjectResponse) + // + // Parameter is required. + UseObjectLambdaEndpoint *bool + + // The S3 Key used to send the request. This is an optional parameter that will be + // set automatically for operations that are scoped to an S3 Key. + // + // Parameter is + // required. + Key *string + + // The S3 Prefix used to send the request. This is an optional parameter that will + // be set automatically for operations that are scoped to an S3 Prefix. + // + // Parameter + // is required.
+ Prefix *string + + // Internal parameter to disable Access Point Buckets + // + // Parameter is required. + DisableAccessPoints *bool + + // Whether multi-region access points (MRAP) should be disabled. + // + // Defaults to false + // if no value is provided. + // + // AWS::S3::DisableMultiRegionAccessPoints + DisableMultiRegionAccessPoints *bool + + // When an Access Point ARN is provided and this flag is enabled, the SDK MUST use + // the ARN's region when constructing the endpoint instead of the client's + // configured region. + // + // Parameter is required. + // + // AWS::S3::UseArnRegion + UseArnRegion *bool + + // Internal parameter to indicate whether S3Express operation should use control + // plane (e.g. CreateBucket) + // + // Parameter is required. + UseS3ExpressControlEndpoint *bool + + // Parameter to indicate whether S3Express session auth should be + // disabled + // + // Parameter is required. + DisableS3ExpressSessionAuth *bool +} + +// ValidateRequired validates required parameters are set. +func (p EndpointParameters) ValidateRequired() error { + if p.Accelerate == nil { + return fmt.Errorf("parameter Accelerate is required") + } + + if p.DisableMultiRegionAccessPoints == nil { + return fmt.Errorf("parameter DisableMultiRegionAccessPoints is required") + } + + if p.ForcePathStyle == nil { + return fmt.Errorf("parameter ForcePathStyle is required") + } + + if p.UseDualStack == nil { + return fmt.Errorf("parameter UseDualStack is required") + } + + if p.UseFIPS == nil { + return fmt.Errorf("parameter UseFIPS is required") + } + + if p.UseGlobalEndpoint == nil { + return fmt.Errorf("parameter UseGlobalEndpoint is required") + } + + return nil +} + +// WithDefaults returns a shallow copy of EndpointParameters with default values +// applied to members where applicable. +func (p EndpointParameters) WithDefaults() EndpointParameters { + if p.Accelerate == nil { + p.Accelerate = ptr.Bool(false) + } + + if p.DisableMultiRegionAccessPoints == nil { + p.DisableMultiRegionAccessPoints = ptr.Bool(false) + } + + if p.ForcePathStyle == nil { + p.ForcePathStyle = ptr.Bool(false) + } + + if p.UseDualStack == nil { + p.UseDualStack = ptr.Bool(false) + } + + if p.UseFIPS == nil { + p.UseFIPS = ptr.Bool(false) + } + + if p.UseGlobalEndpoint == nil { + p.UseGlobalEndpoint = ptr.Bool(false) + } + return p +} + +// EndpointResolverV2 provides the interface for resolving service endpoints. +type EndpointResolverV2 interface { + // ResolveEndpoint attempts to resolve the endpoint with the provided options, + // returning the endpoint if found. Otherwise an error is returned. + ResolveEndpoint(ctx context.Context, params EndpointParameters) ( + smithyendpoints.Endpoint, error, + ) +} + +// resolver provides the implementation for resolving endpoints. +type resolver struct{} + +func NewDefaultEndpointResolverV2() EndpointResolverV2 { + return &resolver{} +} + +// ResolveEndpoint attempts to resolve the endpoint with the provided options, +// returning the endpoint if found. Otherwise an error is returned.
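As a usage sketch for the V2 resolver defined above: the region and bucket below are placeholders, and ResolveEndpoint applies WithDefaults and ValidateRequired internally, so the six required booleans need not be set by the caller.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/smithy-go/ptr"
)

func main() {
	params := s3.EndpointParameters{
		Region: ptr.String("us-west-2"),
		Bucket: ptr.String("amzn-s3-demo-bucket"), // placeholder, virtual-hostable name
	}
	ep, err := s3.NewDefaultEndpointResolverV2().ResolveEndpoint(context.Background(), params)
	if err != nil {
		// Invalid combinations (e.g. FIPS together with Accelerate) surface
		// as "endpoint rule error, ..." messages from the rules engine.
		log.Fatal(err)
	}
	// With all defaults false, this prints the virtual-hosted regional URL:
	// https://amzn-s3-demo-bucket.s3.us-west-2.amazonaws.com
	fmt.Println(ep.URI.String())
}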
+func (r *resolver) ResolveEndpoint( + ctx context.Context, params EndpointParameters, +) ( + endpoint smithyendpoints.Endpoint, err error, +) { + params = params.WithDefaults() + if err = params.ValidateRequired(); err != nil { + return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err) + } + _UseFIPS := *params.UseFIPS + _UseDualStack := *params.UseDualStack + _ForcePathStyle := *params.ForcePathStyle + _Accelerate := *params.Accelerate + _UseGlobalEndpoint := *params.UseGlobalEndpoint + _DisableMultiRegionAccessPoints := *params.DisableMultiRegionAccessPoints + + if exprVal := params.Region; exprVal != nil { + _Region := *exprVal + _ = _Region + if _Accelerate == true { + if _UseFIPS == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Accelerate cannot be used with FIPS") + } + } + if _UseDualStack == true { + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + return endpoint, fmt.Errorf("endpoint rule error, %s", "Cannot set dual-stack in combination with a custom endpoint.") + } + } + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if _UseFIPS == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "A custom endpoint cannot be combined with FIPS") + } + } + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if _Accelerate == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "A custom endpoint cannot be combined with S3 Accelerate") + } + } + if _UseFIPS == true { + if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { + _partitionResult := *exprVal + _ = _partitionResult + if _partitionResult.Name == "aws-cn" { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Partition does not support FIPS") + } + } + } + if exprVal := params.Bucket; exprVal != nil { + _Bucket := *exprVal + _ = _Bucket + if exprVal := rulesfn.SubString(_Bucket, 0, 6, true); exprVal != nil { + _bucketSuffix := *exprVal + _ = _bucketSuffix + if _bucketSuffix == "--x-s3" { + if _UseDualStack == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express does not support Dual-stack.") + } + if _Accelerate == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express does not support S3 Accelerate.") + } + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { + _url := *exprVal + _ = _url + if exprVal := params.DisableS3ExpressSessionAuth; exprVal != nil { + _DisableS3ExpressSessionAuth := *exprVal + _ = _DisableS3ExpressSessionAuth + if _DisableS3ExpressSessionAuth == true { + if _url.IsIp == true { + _uri_encoded_bucket := rulesfn.URIEncode(_Bucket) + _ = _uri_encoded_bucket + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_url.Authority) + out.WriteString("/") + out.WriteString(_uri_encoded_bucket) + out.WriteString(_url.Path) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + 
smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + if awsrulesfn.IsVirtualHostableS3Bucket(_Bucket, false) { + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_Bucket) + out.WriteString(".") + out.WriteString(_url.Authority) + out.WriteString(_url.Path) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express bucket name is not a valid virtual hostable name.") + } + } + if _url.IsIp == true { + _uri_encoded_bucket := rulesfn.URIEncode(_Bucket) + _ = _uri_encoded_bucket + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_url.Authority) + out.WriteString("/") + out.WriteString(_uri_encoded_bucket) + out.WriteString(_url.Path) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + if awsrulesfn.IsVirtualHostableS3Bucket(_Bucket, false) { + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_Bucket) + out.WriteString(".") + out.WriteString(_url.Authority) + out.WriteString(_url.Path) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + 
}(), + }, + }) + return out + }(), + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express bucket name is not a valid virtual hostable name.") + } + } + if exprVal := params.UseS3ExpressControlEndpoint; exprVal != nil { + _UseS3ExpressControlEndpoint := *exprVal + _ = _UseS3ExpressControlEndpoint + if _UseS3ExpressControlEndpoint == true { + _uri_encoded_bucket := rulesfn.URIEncode(_Bucket) + _ = _uri_encoded_bucket + if !(params.Endpoint != nil) { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3express-control-fips.") + out.WriteString(_Region) + out.WriteString(".amazonaws.com/") + out.WriteString(_uri_encoded_bucket) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3express-control.") + out.WriteString(_Region) + out.WriteString(".amazonaws.com/") + out.WriteString(_uri_encoded_bucket) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + return endpoint, fmt.Errorf("Endpoint resolution failed. 
Invalid operation or environment input.") + } + } + if awsrulesfn.IsVirtualHostableS3Bucket(_Bucket, false) { + if exprVal := params.DisableS3ExpressSessionAuth; exprVal != nil { + _DisableS3ExpressSessionAuth := *exprVal + _ = _DisableS3ExpressSessionAuth + if _DisableS3ExpressSessionAuth == true { + if exprVal := rulesfn.SubString(_Bucket, 6, 14, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 14, 16, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + if exprVal := rulesfn.SubString(_Bucket, 6, 15, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 15, 17, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return 
endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Unrecognized S3Express bucket name format.") + } + } + if exprVal := rulesfn.SubString(_Bucket, 6, 14, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 14, 16, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + 
return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + if exprVal := rulesfn.SubString(_Bucket, 6, 15, true); exprVal != nil { + _s3expressAvailabilityZoneId := *exprVal + _ = _s3expressAvailabilityZoneId + if exprVal := rulesfn.SubString(_Bucket, 15, 17, true); exprVal != nil { + _s3expressAvailabilityZoneDelim := *exprVal + _ = _s3expressAvailabilityZoneDelim + if _s3expressAvailabilityZoneDelim == "--" { + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-fips-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3express-") + out.WriteString(_s3expressAvailabilityZoneId) + out.WriteString(".") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "sigv4-s3express", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Unrecognized S3Express bucket name format.") + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "S3Express bucket name is not a valid virtual hostable name.") + } + } + } + if !(params.Bucket != nil) { + if exprVal := 
params.UseS3ExpressControlEndpoint; exprVal != nil { + _UseS3ExpressControlEndpoint := *exprVal + _ = _UseS3ExpressControlEndpoint + if _UseS3ExpressControlEndpoint == true { + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { + _url := *exprVal + _ = _url + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_url.Authority) + out.WriteString(_url.Path) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3express-control-fips.") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3express-control.") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + out.Set("backend", "S3Express") + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3express") + smithyhttp.SetSigV4ASigningName(&sp, "s3express") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + if exprVal := params.Bucket; exprVal != nil { + _Bucket := *exprVal + _ = _Bucket + if exprVal := rulesfn.SubString(_Bucket, 49, 50, true); exprVal != nil { + _hardwareType := *exprVal + _ = _hardwareType + if exprVal := rulesfn.SubString(_Bucket, 8, 12, true); exprVal != nil { + _regionPrefix := *exprVal + _ = _regionPrefix + if exprVal := 
rulesfn.SubString(_Bucket, 0, 7, true); exprVal != nil { + _bucketAliasSuffix := *exprVal + _ = _bucketAliasSuffix + if exprVal := rulesfn.SubString(_Bucket, 32, 49, true); exprVal != nil { + _outpostId := *exprVal + _ = _outpostId + if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { + _regionPartition := *exprVal + _ = _regionPartition + if _bucketAliasSuffix == "--op-s3" { + if rulesfn.IsValidHostLabel(_outpostId, false) { + if _hardwareType == "e" { + if _regionPrefix == "beta" { + if !(params.Endpoint != nil) { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Expected a endpoint to be specified but no endpoint was found") + } + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { + _url := *exprVal + _ = _url + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".ec2.") + out.WriteString(_url.Authority) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") + smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + return endpoint, fmt.Errorf("Endpoint resolution failed. 
Invalid operation or environment input.") + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".ec2.s3-outposts.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_regionPartition.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") + smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + if _hardwareType == "o" { + if _regionPrefix == "beta" { + if !(params.Endpoint != nil) { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Expected a endpoint to be specified but no endpoint was found") + } + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { + _url := *exprVal + _ = _url + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".op-") + out.WriteString(_outpostId) + out.WriteString(".") + out.WriteString(_url.Authority) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") + smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + return endpoint, fmt.Errorf("Endpoint resolution failed. 
Invalid operation or environment input.") + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".op-") + out.WriteString(_outpostId) + out.WriteString(".s3-outposts.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_regionPartition.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") + smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { + var out strings.Builder + out.WriteString("Unrecognized hardware type: \"Expected hardware type o or e but got ") + out.WriteString(_hardwareType) + out.WriteString("\"") + return out.String() + }()) + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: The outpost Id must only contain a-z, A-Z, 0-9 and `-`.") + } + } + } + } + } + } + } + if exprVal := params.Bucket; exprVal != nil { + _Bucket := *exprVal + _ = _Bucket + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if !(rulesfn.ParseURL(_Endpoint) != nil) { + return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { + var out strings.Builder + out.WriteString("Custom endpoint `") + out.WriteString(_Endpoint) + out.WriteString("` was not a valid URI") + return out.String() + }()) + } + } + if _ForcePathStyle == false { + if awsrulesfn.IsVirtualHostableS3Bucket(_Bucket, false) { + if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { + _partitionResult := *exprVal + _ = _partitionResult + if rulesfn.IsValidHostLabel(_Region, false) { + if _Accelerate == true { + if _partitionResult.Name == "aws-cn" { + return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 Accelerate cannot be used in this region") + } + } + if _UseDualStack == true { + if _UseFIPS == true { + if _Accelerate == false { + if !(params.Endpoint != nil) { + if _Region == "aws-global" { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3-fips.dualstack.us-east-1.") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + if _UseDualStack == true { + if 
_UseFIPS == true { + if _Accelerate == false { + if !(params.Endpoint != nil) { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3-fips.dualstack.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + } + if _UseDualStack == true { + if _UseFIPS == true { + if _Accelerate == false { + if !(params.Endpoint != nil) { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == false { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3-fips.dualstack.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + } + if _UseDualStack == false { + if _UseFIPS == true { + if _Accelerate == false { + if !(params.Endpoint != nil) { + if _Region == "aws-global" { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3-fips.us-east-1.") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + if _UseDualStack == false { + if _UseFIPS == true { + if _Accelerate == false { + if !(params.Endpoint != nil) { + if !(_Region == "aws-global") { + if 
_UseGlobalEndpoint == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + } + if _UseDualStack == false { + if _UseFIPS == true { + if _Accelerate == false { + if !(params.Endpoint != nil) { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == false { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + } + if _UseDualStack == true { + if _UseFIPS == false { + if _Accelerate == true { + if !(params.Endpoint != nil) { + if _Region == "aws-global" { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3-accelerate.dualstack.us-east-1.") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + if _UseDualStack == true { + if _UseFIPS == false { + if _Accelerate == true { + if !(params.Endpoint != nil) { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + 
out.WriteString(_Bucket) + out.WriteString(".s3-accelerate.dualstack.") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + } + if _UseDualStack == true { + if _UseFIPS == false { + if _Accelerate == true { + if !(params.Endpoint != nil) { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == false { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3-accelerate.dualstack.") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + } + if _UseDualStack == true { + if _UseFIPS == false { + if _Accelerate == false { + if !(params.Endpoint != nil) { + if _Region == "aws-global" { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3.dualstack.us-east-1.") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + if _UseDualStack == true { + if _UseFIPS == false { + if _Accelerate == false { + if !(params.Endpoint != nil) { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3.dualstack.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := 
url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + } + if _UseDualStack == true { + if _UseFIPS == false { + if _Accelerate == false { + if !(params.Endpoint != nil) { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == false { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3.dualstack.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + } + if _UseDualStack == false { + if _UseFIPS == false { + if _Accelerate == false { + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { + _url := *exprVal + _ = _url + if _url.IsIp == true { + if _Region == "aws-global" { + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_url.Authority) + out.WriteString(_url.NormalizedPath) + out.WriteString(_Bucket) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + } + } + if _UseDualStack == false { + if _UseFIPS == false { + if _Accelerate == false { + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { + _url := *exprVal + _ = _url + if _url.IsIp == false { + if _Region == "aws-global" { + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + 
out.WriteString("://") + out.WriteString(_Bucket) + out.WriteString(".") + out.WriteString(_url.Authority) + out.WriteString(_url.Path) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + } + } + if _UseDualStack == false { + if _UseFIPS == false { + if _Accelerate == false { + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { + _url := *exprVal + _ = _url + if _url.IsIp == true { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == true { + if _Region == "us-east-1" { + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_url.Authority) + out.WriteString(_url.NormalizedPath) + out.WriteString(_Bucket) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_url.Authority) + out.WriteString(_url.NormalizedPath) + out.WriteString(_Bucket) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + } + } + } + if _UseDualStack == false { + if _UseFIPS == false { + if _Accelerate == false { + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { + _url := *exprVal + _ = _url + if _url.IsIp == false { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == true { + if _Region == "us-east-1" { + 
										uriString := func() string {
+											var out strings.Builder
+											out.WriteString(_url.Scheme)
+											out.WriteString("://")
+											out.WriteString(_Bucket)
+											out.WriteString(".")
+											out.WriteString(_url.Authority)
+											out.WriteString(_url.Path)
+											return out.String()
+										}()
+
+										uri, err := url.Parse(uriString)
+										if err != nil {
+											return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+										}
+
+										return smithyendpoints.Endpoint{
+											URI: *uri,
+											Headers: http.Header{},
+											Properties: func() smithy.Properties {
+												var out smithy.Properties
+												smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+													{
+														SchemeID: "aws.auth#sigv4",
+														SignerProperties: func() smithy.Properties {
+															var sp smithy.Properties
+															smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+															smithyhttp.SetSigV4SigningName(&sp, "s3")
+															smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+															smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+															return sp
+														}(),
+													},
+												})
+												return out
+											}(),
+										}, nil
+									}
+									uriString := func() string {
+										var out strings.Builder
+										out.WriteString(_url.Scheme)
+										out.WriteString("://")
+										out.WriteString(_Bucket)
+										out.WriteString(".")
+										out.WriteString(_url.Authority)
+										out.WriteString(_url.Path)
+										return out.String()
+									}()
+
+									uri, err := url.Parse(uriString)
+									if err != nil {
+										return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+									}
+
+									return smithyendpoints.Endpoint{
+										URI: *uri,
+										Headers: http.Header{},
+										Properties: func() smithy.Properties {
+											var out smithy.Properties
+											smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+												{
+													SchemeID: "aws.auth#sigv4",
+													SignerProperties: func() smithy.Properties {
+														var sp smithy.Properties
+														smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+														smithyhttp.SetSigV4SigningName(&sp, "s3")
+														smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+														smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+														return sp
+													}(),
+												},
+											})
+											return out
+										}(),
+									}, nil
+								}
+							}
+						}
+					}
+				}
+			}
+		}
+	}
+	if _UseDualStack == false {
+		if _UseFIPS == false {
+			if _Accelerate == false {
+				if exprVal := params.Endpoint; exprVal != nil {
+					_Endpoint := *exprVal
+					_ = _Endpoint
+					if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil {
+						_url := *exprVal
+						_ = _url
+						if _url.IsIp == true {
+							if !(_Region == "aws-global") {
+								if _UseGlobalEndpoint == false {
+									uriString := func() string {
+										var out strings.Builder
+										out.WriteString(_url.Scheme)
+										out.WriteString("://")
+										out.WriteString(_url.Authority)
+										out.WriteString(_url.NormalizedPath)
+										out.WriteString(_Bucket)
+										return out.String()
+									}()
+
+									uri, err := url.Parse(uriString)
+									if err != nil {
+										return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+									}
+
+									return smithyendpoints.Endpoint{
+										URI: *uri,
+										Headers: http.Header{},
+										Properties: func() smithy.Properties {
+											var out smithy.Properties
+											smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+												{
+													SchemeID: "aws.auth#sigv4",
+													SignerProperties: func() smithy.Properties {
+														var sp smithy.Properties
+														smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+														smithyhttp.SetSigV4SigningName(&sp, "s3")
+														smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+														smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+														return sp
+													}(),
+												},
+											})
+											return out
+										}(),
+									}, nil
+								}
+							}
+						}
+					}
+				}
+			}
+		}
+	}
+	if _UseDualStack == false {
+		if _UseFIPS == false {
+			if _Accelerate == false {
+				if exprVal := params.Endpoint; exprVal != nil {
+					_Endpoint := *exprVal
+					_ = _Endpoint
+					if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil {
+						_url := *exprVal
+						_ = _url
+						if _url.IsIp == false {
+							if !(_Region == "aws-global") {
+								if _UseGlobalEndpoint == false {
+									uriString := func() string {
+										var out strings.Builder
+										out.WriteString(_url.Scheme)
+										out.WriteString("://")
+										out.WriteString(_Bucket)
+										out.WriteString(".")
+										out.WriteString(_url.Authority)
+										out.WriteString(_url.Path)
+										return out.String()
+									}()
+
+									uri, err := url.Parse(uriString)
+									if err != nil {
+										return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+									}
+
+									return smithyendpoints.Endpoint{
+										URI: *uri,
+										Headers: http.Header{},
+										Properties: func() smithy.Properties {
+											var out smithy.Properties
+											smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+												{
+													SchemeID: "aws.auth#sigv4",
+													SignerProperties: func() smithy.Properties {
+														var sp smithy.Properties
+														smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+														smithyhttp.SetSigV4SigningName(&sp, "s3")
+														smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+														smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+														return sp
+													}(),
+												},
+											})
+											return out
+										}(),
+									}, nil
+								}
+							}
+						}
+					}
+				}
+			}
+		}
+	}
+	if _UseDualStack == false {
+		if _UseFIPS == false {
+			if _Accelerate == true {
+				if !(params.Endpoint != nil) {
+					if _Region == "aws-global" {
+						uriString := func() string {
+							var out strings.Builder
+							out.WriteString("https://")
+							out.WriteString(_Bucket)
+							out.WriteString(".s3-accelerate.")
+							out.WriteString(_partitionResult.DnsSuffix)
+							return out.String()
+						}()
+
+						uri, err := url.Parse(uriString)
+						if err != nil {
+							return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+						}
+
+						return smithyendpoints.Endpoint{
+							URI: *uri,
+							Headers: http.Header{},
+							Properties: func() smithy.Properties {
+								var out smithy.Properties
+								smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+									{
+										SchemeID: "aws.auth#sigv4",
+										SignerProperties: func() smithy.Properties {
+											var sp smithy.Properties
+											smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+											smithyhttp.SetSigV4SigningName(&sp, "s3")
+											smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+											smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+											return sp
+										}(),
+									},
+								})
+								return out
+							}(),
+						}, nil
+					}
+				}
+			}
+		}
+	}
+	if _UseDualStack == false {
+		if _UseFIPS == false {
+			if _Accelerate == true {
+				if !(params.Endpoint != nil) {
+					if !(_Region == "aws-global") {
+						if _UseGlobalEndpoint == true {
+							if _Region == "us-east-1" {
+								uriString := func() string {
+									var out strings.Builder
+									out.WriteString("https://")
+									out.WriteString(_Bucket)
+									out.WriteString(".s3-accelerate.")
+									out.WriteString(_partitionResult.DnsSuffix)
+									return out.String()
+								}()
+
+								uri, err := url.Parse(uriString)
+								if err != nil {
+									return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+								}
+
+								return smithyendpoints.Endpoint{
+									URI: *uri,
+									Headers: http.Header{},
+									Properties: func() smithy.Properties {
+										var out smithy.Properties
+										smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+											{
+												SchemeID: "aws.auth#sigv4",
+												SignerProperties: func() smithy.Properties {
+													var sp smithy.Properties
+													smithyhttp.SetDisableDoubleEncoding(&sp, true)
+
+													smithyhttp.SetSigV4SigningName(&sp, "s3")
+													smithyhttp.SetSigV4ASigningName(&sp, "s3")
+
+													smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+													return sp
+												}(),
+											},
+										})
+										return out
+									}(),
+								}, nil
+							}
+							uriString := func() string {
+								var out strings.Builder
+								out.WriteString("https://")
+								out.WriteString(_Bucket)
+								out.WriteString(".s3-accelerate.")
+								out.WriteString(_partitionResult.DnsSuffix)
+								return out.String()
+							}()
+
+							uri, err := url.Parse(uriString)
+							if err != nil {
+								return endpoint, fmt.Errorf("Failed to parse uri: %s",
uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + } + if _UseDualStack == false { + if _UseFIPS == false { + if _Accelerate == true { + if !(params.Endpoint != nil) { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == false { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3-accelerate.") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + } + if _UseDualStack == false { + if _UseFIPS == false { + if _Accelerate == false { + if !(params.Endpoint != nil) { + if _Region == "aws-global" { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3.") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + if _UseDualStack == false { + if _UseFIPS == false { + if _Accelerate == false { + if !(params.Endpoint != nil) { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == true { + if _Region == "us-east-1" { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3.") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + 
SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + } + if _UseDualStack == false { + if _UseFIPS == false { + if _Accelerate == false { + if !(params.Endpoint != nil) { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == false { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_Bucket) + out.WriteString(".s3.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + } + return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid region: region was not a valid DNS name.") + } + return endpoint, fmt.Errorf("Endpoint resolution failed. 
Invalid operation or environment input.") + } + } + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { + _url := *exprVal + _ = _url + if _url.Scheme == "http" { + if awsrulesfn.IsVirtualHostableS3Bucket(_Bucket, true) { + if _ForcePathStyle == false { + if _UseFIPS == false { + if _UseDualStack == false { + if _Accelerate == false { + if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { + _partitionResult := *exprVal + _ = _partitionResult + if rulesfn.IsValidHostLabel(_Region, false) { + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_Bucket) + out.WriteString(".") + out.WriteString(_url.Authority) + out.WriteString(_url.Path) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid region: region was not a valid DNS name.") + } + return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") + } + } + } + } + } + } + } + } + if _ForcePathStyle == false { + if exprVal := awsrulesfn.ParseARN(_Bucket); exprVal != nil { + _bucketArn := *exprVal + _ = _bucketArn + if exprVal := _bucketArn.ResourceId.Get(0); exprVal != nil { + _arnType := *exprVal + _ = _arnType + if !(_arnType == "") { + if _bucketArn.Service == "s3-object-lambda" { + if _arnType == "accesspoint" { + if exprVal := _bucketArn.ResourceId.Get(1); exprVal != nil { + _accessPointName := *exprVal + _ = _accessPointName + if !(_accessPointName == "") { + if _UseDualStack == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 Object Lambda does not support Dual-stack") + } + if _Accelerate == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 Object Lambda does not support S3 Accelerate") + } + if !(_bucketArn.Region == "") { + if exprVal := params.DisableAccessPoints; exprVal != nil { + _DisableAccessPoints := *exprVal + _ = _DisableAccessPoints + if _DisableAccessPoints == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Access points are not supported for this operation") + } + } + if !(_bucketArn.ResourceId.Get(2) != nil) { + if exprVal := params.UseArnRegion; exprVal != nil { + _UseArnRegion := *exprVal + _ = _UseArnRegion + if _UseArnRegion == false { + if !(_bucketArn.Region == _Region) { + return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { + var out strings.Builder + out.WriteString("Invalid configuration: region from ARN `") + out.WriteString(_bucketArn.Region) + out.WriteString("` does not match client region `") + out.WriteString(_Region) + out.WriteString("` and UseArnRegion is `false`") + return out.String() + }()) + } + } + } + if exprVal := awsrulesfn.GetPartition(_bucketArn.Region); exprVal != nil { + 
_bucketPartition := *exprVal + _ = _bucketPartition + if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { + _partitionResult := *exprVal + _ = _partitionResult + if _bucketPartition.Name == _partitionResult.Name { + if rulesfn.IsValidHostLabel(_bucketArn.Region, true) { + if _bucketArn.AccountId == "" { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: Missing account id") + } + if rulesfn.IsValidHostLabel(_bucketArn.AccountId, false) { + if rulesfn.IsValidHostLabel(_accessPointName, false) { + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { + _url := *exprVal + _ = _url + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_accessPointName) + out.WriteString("-") + out.WriteString(_bucketArn.AccountId) + out.WriteString(".") + out.WriteString(_url.Authority) + out.WriteString(_url.Path) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3-object-lambda") + smithyhttp.SetSigV4ASigningName(&sp, "s3-object-lambda") + + smithyhttp.SetSigV4SigningRegion(&sp, _bucketArn.Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_accessPointName) + out.WriteString("-") + out.WriteString(_bucketArn.AccountId) + out.WriteString(".s3-object-lambda-fips.") + out.WriteString(_bucketArn.Region) + out.WriteString(".") + out.WriteString(_bucketPartition.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3-object-lambda") + smithyhttp.SetSigV4ASigningName(&sp, "s3-object-lambda") + + smithyhttp.SetSigV4SigningRegion(&sp, _bucketArn.Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_accessPointName) + out.WriteString("-") + out.WriteString(_bucketArn.AccountId) + out.WriteString(".s3-object-lambda.") + out.WriteString(_bucketArn.Region) + out.WriteString(".") + out.WriteString(_bucketPartition.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, 
[]*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3-object-lambda") + smithyhttp.SetSigV4ASigningName(&sp, "s3-object-lambda") + + smithyhttp.SetSigV4SigningRegion(&sp, _bucketArn.Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { + var out strings.Builder + out.WriteString("Invalid ARN: The access point name may only contain a-z, A-Z, 0-9 and `-`. Found: `") + out.WriteString(_accessPointName) + out.WriteString("`") + return out.String() + }()) + } + return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { + var out strings.Builder + out.WriteString("Invalid ARN: The account id may only contain a-z, A-Z, 0-9 and `-`. Found: `") + out.WriteString(_bucketArn.AccountId) + out.WriteString("`") + return out.String() + }()) + } + return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { + var out strings.Builder + out.WriteString("Invalid region in ARN: `") + out.WriteString(_bucketArn.Region) + out.WriteString("` (invalid DNS name)") + return out.String() + }()) + } + return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { + var out strings.Builder + out.WriteString("Client was configured for partition `") + out.WriteString(_partitionResult.Name) + out.WriteString("` but ARN (`") + out.WriteString(_Bucket) + out.WriteString("`) has `") + out.WriteString(_bucketPartition.Name) + out.WriteString("`") + return out.String() + }()) + } + return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") + } + return endpoint, fmt.Errorf("Endpoint resolution failed. 
Invalid operation or environment input.") + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: The ARN may only contain a single resource component after `accesspoint`.") + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: bucket ARN is missing a region") + } + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: Expected a resource of the format `accesspoint:` but no name was provided") + } + return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { + var out strings.Builder + out.WriteString("Invalid ARN: Object Lambda ARNs only support `accesspoint` arn types, but found: `") + out.WriteString(_arnType) + out.WriteString("`") + return out.String() + }()) + } + if _arnType == "accesspoint" { + if exprVal := _bucketArn.ResourceId.Get(1); exprVal != nil { + _accessPointName := *exprVal + _ = _accessPointName + if !(_accessPointName == "") { + if !(_bucketArn.Region == "") { + if _arnType == "accesspoint" { + if !(_bucketArn.Region == "") { + if exprVal := params.DisableAccessPoints; exprVal != nil { + _DisableAccessPoints := *exprVal + _ = _DisableAccessPoints + if _DisableAccessPoints == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Access points are not supported for this operation") + } + } + if !(_bucketArn.ResourceId.Get(2) != nil) { + if exprVal := params.UseArnRegion; exprVal != nil { + _UseArnRegion := *exprVal + _ = _UseArnRegion + if _UseArnRegion == false { + if !(_bucketArn.Region == _Region) { + return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { + var out strings.Builder + out.WriteString("Invalid configuration: region from ARN `") + out.WriteString(_bucketArn.Region) + out.WriteString("` does not match client region `") + out.WriteString(_Region) + out.WriteString("` and UseArnRegion is `false`") + return out.String() + }()) + } + } + } + if exprVal := awsrulesfn.GetPartition(_bucketArn.Region); exprVal != nil { + _bucketPartition := *exprVal + _ = _bucketPartition + if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { + _partitionResult := *exprVal + _ = _partitionResult + if _bucketPartition.Name == _partitionResult.Name { + if rulesfn.IsValidHostLabel(_bucketArn.Region, true) { + if _bucketArn.Service == "s3" { + if rulesfn.IsValidHostLabel(_bucketArn.AccountId, false) { + if rulesfn.IsValidHostLabel(_accessPointName, false) { + if _Accelerate == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Access Points do not support S3 Accelerate") + } + if _UseFIPS == true { + if _UseDualStack == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_accessPointName) + out.WriteString("-") + out.WriteString(_bucketArn.AccountId) + out.WriteString(".s3-accesspoint-fips.dualstack.") + out.WriteString(_bucketArn.Region) + out.WriteString(".") + out.WriteString(_bucketPartition.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + 
smithyhttp.SetSigV4SigningRegion(&sp, _bucketArn.Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + if _UseFIPS == true { + if _UseDualStack == false { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_accessPointName) + out.WriteString("-") + out.WriteString(_bucketArn.AccountId) + out.WriteString(".s3-accesspoint-fips.") + out.WriteString(_bucketArn.Region) + out.WriteString(".") + out.WriteString(_bucketPartition.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _bucketArn.Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + if _UseFIPS == false { + if _UseDualStack == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_accessPointName) + out.WriteString("-") + out.WriteString(_bucketArn.AccountId) + out.WriteString(".s3-accesspoint.dualstack.") + out.WriteString(_bucketArn.Region) + out.WriteString(".") + out.WriteString(_bucketPartition.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _bucketArn.Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + if _UseFIPS == false { + if _UseDualStack == false { + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { + _url := *exprVal + _ = _url + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_accessPointName) + out.WriteString("-") + out.WriteString(_bucketArn.AccountId) + out.WriteString(".") + out.WriteString(_url.Authority) + out.WriteString(_url.Path) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, 
_bucketArn.Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + if _UseFIPS == false { + if _UseDualStack == false { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_accessPointName) + out.WriteString("-") + out.WriteString(_bucketArn.AccountId) + out.WriteString(".s3-accesspoint.") + out.WriteString(_bucketArn.Region) + out.WriteString(".") + out.WriteString(_bucketPartition.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _bucketArn.Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") + } + return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { + var out strings.Builder + out.WriteString("Invalid ARN: The access point name may only contain a-z, A-Z, 0-9 and `-`. Found: `") + out.WriteString(_accessPointName) + out.WriteString("`") + return out.String() + }()) + } + return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { + var out strings.Builder + out.WriteString("Invalid ARN: The account id may only contain a-z, A-Z, 0-9 and `-`. Found: `") + out.WriteString(_bucketArn.AccountId) + out.WriteString("`") + return out.String() + }()) + } + return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { + var out strings.Builder + out.WriteString("Invalid ARN: The ARN was not for the S3 service, found: ") + out.WriteString(_bucketArn.Service) + return out.String() + }()) + } + return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { + var out strings.Builder + out.WriteString("Invalid region in ARN: `") + out.WriteString(_bucketArn.Region) + out.WriteString("` (invalid DNS name)") + return out.String() + }()) + } + return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { + var out strings.Builder + out.WriteString("Client was configured for partition `") + out.WriteString(_partitionResult.Name) + out.WriteString("` but ARN (`") + out.WriteString(_Bucket) + out.WriteString("`) has `") + out.WriteString(_bucketPartition.Name) + out.WriteString("`") + return out.String() + }()) + } + return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") + } + return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: The ARN may only contain a single resource component after `accesspoint`.") + } + return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") + } + return endpoint, fmt.Errorf("Endpoint resolution failed. 
Invalid operation or environment input.") + } + if rulesfn.IsValidHostLabel(_accessPointName, true) { + if _UseDualStack == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 MRAP does not support dual-stack") + } + if _UseFIPS == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 MRAP does not support FIPS") + } + if _Accelerate == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 MRAP does not support S3 Accelerate") + } + if _DisableMultiRegionAccessPoints == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid configuration: Multi-Region Access Point ARNs are disabled.") + } + if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { + _mrapPartition := *exprVal + _ = _mrapPartition + if _mrapPartition.Name == _bucketArn.Partition { + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_accessPointName) + out.WriteString(".accesspoint.s3-global.") + out.WriteString(_mrapPartition.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4a", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4ASigningRegions(&sp, []string{"*"}) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { + var out strings.Builder + out.WriteString("Client was configured for partition `") + out.WriteString(_mrapPartition.Name) + out.WriteString("` but bucket referred to partition `") + out.WriteString(_bucketArn.Partition) + out.WriteString("`") + return out.String() + }()) + } + return endpoint, fmt.Errorf("Endpoint resolution failed. 
Invalid operation or environment input.") + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Access Point Name") + } + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: Expected a resource of the format `accesspoint:` but no name was provided") + } + if _bucketArn.Service == "s3-outposts" { + if _UseDualStack == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 Outposts does not support Dual-stack") + } + if _UseFIPS == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 Outposts does not support FIPS") + } + if _Accelerate == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 Outposts does not support S3 Accelerate") + } + if exprVal := _bucketArn.ResourceId.Get(4); exprVal != nil { + _var_275 := *exprVal + _ = _var_275 + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Arn: Outpost Access Point ARN contains sub resources") + } + if exprVal := _bucketArn.ResourceId.Get(1); exprVal != nil { + _outpostId := *exprVal + _ = _outpostId + if rulesfn.IsValidHostLabel(_outpostId, false) { + if exprVal := params.UseArnRegion; exprVal != nil { + _UseArnRegion := *exprVal + _ = _UseArnRegion + if _UseArnRegion == false { + if !(_bucketArn.Region == _Region) { + return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { + var out strings.Builder + out.WriteString("Invalid configuration: region from ARN `") + out.WriteString(_bucketArn.Region) + out.WriteString("` does not match client region `") + out.WriteString(_Region) + out.WriteString("` and UseArnRegion is `false`") + return out.String() + }()) + } + } + } + if exprVal := awsrulesfn.GetPartition(_bucketArn.Region); exprVal != nil { + _bucketPartition := *exprVal + _ = _bucketPartition + if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { + _partitionResult := *exprVal + _ = _partitionResult + if _bucketPartition.Name == _partitionResult.Name { + if rulesfn.IsValidHostLabel(_bucketArn.Region, true) { + if rulesfn.IsValidHostLabel(_bucketArn.AccountId, false) { + if exprVal := _bucketArn.ResourceId.Get(2); exprVal != nil { + _outpostType := *exprVal + _ = _outpostType + if exprVal := _bucketArn.ResourceId.Get(3); exprVal != nil { + _accessPointName := *exprVal + _ = _accessPointName + if _outpostType == "accesspoint" { + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { + _url := *exprVal + _ = _url + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_accessPointName) + out.WriteString("-") + out.WriteString(_bucketArn.AccountId) + out.WriteString(".") + out.WriteString(_outpostId) + out.WriteString(".") + out.WriteString(_url.Authority) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") + smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") + + smithyhttp.SetSigV4SigningRegion(&sp, _bucketArn.Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + 
} + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://") + out.WriteString(_accessPointName) + out.WriteString("-") + out.WriteString(_bucketArn.AccountId) + out.WriteString(".") + out.WriteString(_outpostId) + out.WriteString(".s3-outposts.") + out.WriteString(_bucketArn.Region) + out.WriteString(".") + out.WriteString(_bucketPartition.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3-outposts") + smithyhttp.SetSigV4ASigningName(&sp, "s3-outposts") + + smithyhttp.SetSigV4SigningRegion(&sp, _bucketArn.Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { + var out strings.Builder + out.WriteString("Expected an outpost type `accesspoint`, found ") + out.WriteString(_outpostType) + return out.String() + }()) + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: expected an access point name") + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: Expected a 4-component resource") + } + return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { + var out strings.Builder + out.WriteString("Invalid ARN: The account id may only contain a-z, A-Z, 0-9 and `-`. Found: `") + out.WriteString(_bucketArn.AccountId) + out.WriteString("`") + return out.String() + }()) + } + return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { + var out strings.Builder + out.WriteString("Invalid region in ARN: `") + out.WriteString(_bucketArn.Region) + out.WriteString("` (invalid DNS name)") + return out.String() + }()) + } + return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { + var out strings.Builder + out.WriteString("Client was configured for partition `") + out.WriteString(_partitionResult.Name) + out.WriteString("` but ARN (`") + out.WriteString(_Bucket) + out.WriteString("`) has `") + out.WriteString(_bucketPartition.Name) + out.WriteString("`") + return out.String() + }()) + } + return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") + } + return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") + } + return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { + var out strings.Builder + out.WriteString("Invalid ARN: The outpost Id may only contain a-z, A-Z, 0-9 and `-`. 
Found: `") + out.WriteString(_outpostId) + out.WriteString("`") + return out.String() + }()) + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: The Outpost Id was not set") + } + return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { + var out strings.Builder + out.WriteString("Invalid ARN: Unrecognized format: ") + out.WriteString(_Bucket) + out.WriteString(" (type: ") + out.WriteString(_arnType) + out.WriteString(")") + return out.String() + }()) + } + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid ARN: No ARN type specified") + } + } + if exprVal := rulesfn.SubString(_Bucket, 0, 4, false); exprVal != nil { + _arnPrefix := *exprVal + _ = _arnPrefix + if _arnPrefix == "arn:" { + if !(awsrulesfn.ParseARN(_Bucket) != nil) { + return endpoint, fmt.Errorf("endpoint rule error, %s", func() string { + var out strings.Builder + out.WriteString("Invalid ARN: `") + out.WriteString(_Bucket) + out.WriteString("` was not a valid ARN") + return out.String() + }()) + } + } + } + if _ForcePathStyle == true { + if exprVal := awsrulesfn.ParseARN(_Bucket); exprVal != nil { + _var_288 := *exprVal + _ = _var_288 + return endpoint, fmt.Errorf("endpoint rule error, %s", "Path-style addressing cannot be used with ARN buckets") + } + } + _uri_encoded_bucket := rulesfn.URIEncode(_Bucket) + _ = _uri_encoded_bucket + if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { + _partitionResult := *exprVal + _ = _partitionResult + if _Accelerate == false { + if _UseDualStack == true { + if !(params.Endpoint != nil) { + if _UseFIPS == true { + if _Region == "aws-global" { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3-fips.dualstack.us-east-1.") + out.WriteString(_partitionResult.DnsSuffix) + out.WriteString("/") + out.WriteString(_uri_encoded_bucket) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + if _UseDualStack == true { + if !(params.Endpoint != nil) { + if _UseFIPS == true { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3-fips.dualstack.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + out.WriteString("/") + out.WriteString(_uri_encoded_bucket) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + 
smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + if _UseDualStack == true { + if !(params.Endpoint != nil) { + if _UseFIPS == true { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == false { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3-fips.dualstack.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + out.WriteString("/") + out.WriteString(_uri_encoded_bucket) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + if _UseDualStack == false { + if !(params.Endpoint != nil) { + if _UseFIPS == true { + if _Region == "aws-global" { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3-fips.us-east-1.") + out.WriteString(_partitionResult.DnsSuffix) + out.WriteString("/") + out.WriteString(_uri_encoded_bucket) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + if _UseDualStack == false { + if !(params.Endpoint != nil) { + if _UseFIPS == true { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + out.WriteString("/") + out.WriteString(_uri_encoded_bucket) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + 
} + } + } + } + } + if _UseDualStack == false { + if !(params.Endpoint != nil) { + if _UseFIPS == true { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == false { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + out.WriteString("/") + out.WriteString(_uri_encoded_bucket) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + if _UseDualStack == true { + if !(params.Endpoint != nil) { + if _UseFIPS == false { + if _Region == "aws-global" { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3.dualstack.us-east-1.") + out.WriteString(_partitionResult.DnsSuffix) + out.WriteString("/") + out.WriteString(_uri_encoded_bucket) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + if _UseDualStack == true { + if !(params.Endpoint != nil) { + if _UseFIPS == false { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3.dualstack.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + out.WriteString("/") + out.WriteString(_uri_encoded_bucket) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + if _UseDualStack == true { + if !(params.Endpoint != nil) { + if _UseFIPS == false { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == false { + uriString := func() 
string { + var out strings.Builder + out.WriteString("https://s3.dualstack.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + out.WriteString("/") + out.WriteString(_uri_encoded_bucket) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + if _UseDualStack == false { + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { + _url := *exprVal + _ = _url + if _UseFIPS == false { + if _Region == "aws-global" { + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_url.Authority) + out.WriteString(_url.NormalizedPath) + out.WriteString(_uri_encoded_bucket) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + if _UseDualStack == false { + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { + _url := *exprVal + _ = _url + if _UseFIPS == false { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == true { + if _Region == "us-east-1" { + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_url.Authority) + out.WriteString(_url.NormalizedPath) + out.WriteString(_uri_encoded_bucket) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + 
out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_url.Authority) + out.WriteString(_url.NormalizedPath) + out.WriteString(_uri_encoded_bucket) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + } + if _UseDualStack == false { + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { + _url := *exprVal + _ = _url + if _UseFIPS == false { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == false { + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_url.Authority) + out.WriteString(_url.NormalizedPath) + out.WriteString(_uri_encoded_bucket) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + } + if _UseDualStack == false { + if !(params.Endpoint != nil) { + if _UseFIPS == false { + if _Region == "aws-global" { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3.") + out.WriteString(_partitionResult.DnsSuffix) + out.WriteString("/") + out.WriteString(_uri_encoded_bucket) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + if _UseDualStack == false { + if !(params.Endpoint != nil) { + if _UseFIPS == false { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == true { + if _Region == "us-east-1" { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3.") + out.WriteString(_partitionResult.DnsSuffix) + 
out.WriteString("/") + out.WriteString(_uri_encoded_bucket) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + out.WriteString("/") + out.WriteString(_uri_encoded_bucket) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + if _UseDualStack == false { + if !(params.Endpoint != nil) { + if _UseFIPS == false { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == false { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + out.WriteString("/") + out.WriteString(_uri_encoded_bucket) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Path-style addressing cannot be used with S3 Accelerate") + } + return endpoint, fmt.Errorf("Endpoint resolution failed. 
Invalid operation or environment input.") + } + if exprVal := params.UseObjectLambdaEndpoint; exprVal != nil { + _UseObjectLambdaEndpoint := *exprVal + _ = _UseObjectLambdaEndpoint + if _UseObjectLambdaEndpoint == true { + if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { + _partitionResult := *exprVal + _ = _partitionResult + if rulesfn.IsValidHostLabel(_Region, true) { + if _UseDualStack == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 Object Lambda does not support Dual-stack") + } + if _Accelerate == true { + return endpoint, fmt.Errorf("endpoint rule error, %s", "S3 Object Lambda does not support S3 Accelerate") + } + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { + _url := *exprVal + _ = _url + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_url.Authority) + out.WriteString(_url.Path) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3-object-lambda") + smithyhttp.SetSigV4ASigningName(&sp, "s3-object-lambda") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + if _UseFIPS == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3-object-lambda-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3-object-lambda") + smithyhttp.SetSigV4ASigningName(&sp, "s3-object-lambda") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3-object-lambda.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3-object-lambda") + 
smithyhttp.SetSigV4ASigningName(&sp, "s3-object-lambda") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid region: region was not a valid DNS name.") + } + return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") + } + } + if !(params.Bucket != nil) { + if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil { + _partitionResult := *exprVal + _ = _partitionResult + if rulesfn.IsValidHostLabel(_Region, true) { + if _UseFIPS == true { + if _UseDualStack == true { + if !(params.Endpoint != nil) { + if _Region == "aws-global" { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3-fips.dualstack.us-east-1.") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + if _UseFIPS == true { + if _UseDualStack == true { + if !(params.Endpoint != nil) { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3-fips.dualstack.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + if _UseFIPS == true { + if _UseDualStack == true { + if !(params.Endpoint != nil) { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == false { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3-fips.dualstack.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, 
true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + if _UseFIPS == true { + if _UseDualStack == false { + if !(params.Endpoint != nil) { + if _Region == "aws-global" { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3-fips.us-east-1.") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + if _UseFIPS == true { + if _UseDualStack == false { + if !(params.Endpoint != nil) { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + if _UseFIPS == true { + if _UseDualStack == false { + if !(params.Endpoint != nil) { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == false { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3-fips.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + if _UseFIPS == false { + if _UseDualStack == true { + if !(params.Endpoint != nil) { + if _Region == "aws-global" { + uriString := func() string { + var out 
strings.Builder + out.WriteString("https://s3.dualstack.us-east-1.") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + if _UseFIPS == false { + if _UseDualStack == true { + if !(params.Endpoint != nil) { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == true { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3.dualstack.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + if _UseFIPS == false { + if _UseDualStack == true { + if !(params.Endpoint != nil) { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == false { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3.dualstack.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + if _UseFIPS == false { + if _UseDualStack == false { + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { + _url := *exprVal + _ = _url + if _Region == "aws-global" { + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_url.Authority) + out.WriteString(_url.Path) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return 
endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + if _UseFIPS == false { + if _UseDualStack == false { + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { + _url := *exprVal + _ = _url + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == true { + if _Region == "us-east-1" { + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_url.Authority) + out.WriteString(_url.Path) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_url.Authority) + out.WriteString(_url.Path) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + } + if _UseFIPS == false { + if _UseDualStack == false { + if exprVal := params.Endpoint; exprVal != nil { + _Endpoint := *exprVal + _ = _Endpoint + if exprVal := rulesfn.ParseURL(_Endpoint); exprVal != nil { + _url := *exprVal + _ = _url + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == false { + uriString := func() string { + var out strings.Builder + out.WriteString(_url.Scheme) + out.WriteString("://") + out.WriteString(_url.Authority) + out.WriteString(_url.Path) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out 
smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + } + if _UseFIPS == false { + if _UseDualStack == false { + if !(params.Endpoint != nil) { + if _Region == "aws-global" { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3.") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1") + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + if _UseFIPS == false { + if _UseDualStack == false { + if !(params.Endpoint != nil) { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == true { + if _Region == "us-east-1" { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3.") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + if _UseFIPS == false { + if _UseDualStack == false { + if !(params.Endpoint != nil) { + if !(_Region == "aws-global") { + if _UseGlobalEndpoint == 
false { + uriString := func() string { + var out strings.Builder + out.WriteString("https://s3.") + out.WriteString(_Region) + out.WriteString(".") + out.WriteString(_partitionResult.DnsSuffix) + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + Properties: func() smithy.Properties { + var out smithy.Properties + smithyauth.SetAuthOptions(&out, []*smithyauth.Option{ + { + SchemeID: "aws.auth#sigv4", + SignerProperties: func() smithy.Properties { + var sp smithy.Properties + smithyhttp.SetDisableDoubleEncoding(&sp, true) + + smithyhttp.SetSigV4SigningName(&sp, "s3") + smithyhttp.SetSigV4ASigningName(&sp, "s3") + + smithyhttp.SetSigV4SigningRegion(&sp, _Region) + return sp + }(), + }, + }) + return out + }(), + }, nil + } + } + } + } + } + return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid region: region was not a valid DNS name.") + } + return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") + } + return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.") + } + return endpoint, fmt.Errorf("endpoint rule error, %s", "A region must be set when sending requests to S3.") +} + +type endpointParamsBinder interface { + bindEndpointParams(*EndpointParameters) +} + +func bindEndpointParams(input interface{}, options Options) *EndpointParameters { + params := &EndpointParameters{} + + params.Region = aws.String(endpoints.MapFIPSRegion(options.Region)) + params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled) + params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled) + params.Endpoint = options.BaseEndpoint + params.ForcePathStyle = aws.Bool(options.UsePathStyle) + params.Accelerate = aws.Bool(options.UseAccelerate) + params.DisableMultiRegionAccessPoints = aws.Bool(options.DisableMultiRegionAccessPoints) + params.UseArnRegion = aws.Bool(options.UseARNRegion) + + params.DisableS3ExpressSessionAuth = options.DisableS3ExpressSessionAuth + + if b, ok := input.(endpointParamsBinder); ok { + b.bindEndpointParams(params) + } + + return params +} + +type resolveEndpointV2Middleware struct { + options Options +} + +func (*resolveEndpointV2Middleware) ID() string { + return "ResolveEndpointV2" +} + +func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleFinalize(ctx, in) + } + + req, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + if m.options.EndpointResolverV2 == nil { + return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") + } + + params := bindEndpointParams(getOperationInput(ctx), m.options) + endpt, err := m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params) + if err != nil { + return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) + } + + if endpt.URI.RawPath == "" && req.URL.RawPath != "" { + endpt.URI.RawPath = endpt.URI.Path + } + req.URL.Scheme = 
endpt.URI.Scheme + req.URL.Host = endpt.URI.Host + req.URL.Path = smithyhttp.JoinPath(endpt.URI.Path, req.URL.Path) + req.URL.RawPath = smithyhttp.JoinPath(endpt.URI.RawPath, req.URL.RawPath) + for k := range endpt.Headers { + req.Header.Set(k, endpt.Headers.Get(k)) + } + + rscheme := getResolvedAuthScheme(ctx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + opts, _ := smithyauth.GetAuthOptions(&endpt.Properties) + for _, o := range opts { + rscheme.SignerProperties.SetAll(&o.SignerProperties) + } + + backend := s3cust.GetPropertiesBackend(&endpt.Properties) + ctx = internalcontext.SetS3Backend(ctx, backend) + + return next.HandleFinalize(ctx, in) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express.go new file mode 100644 index 00000000..bbac9ca2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express.go @@ -0,0 +1,9 @@ +package s3 + +import ( + "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" +) + +// ExpressCredentialsProvider retrieves credentials for operations against the +// S3Express storage class. +type ExpressCredentialsProvider = customizations.S3ExpressCredentialsProvider diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_default.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_default.go new file mode 100644 index 00000000..3b35a3e5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_default.go @@ -0,0 +1,170 @@ +package s3 + +import ( + "context" + "crypto/hmac" + "crypto/sha256" + "errors" + "fmt" + "sync" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/aws-sdk-go-v2/internal/sync/singleflight" + "github.com/aws/smithy-go/container/private/cache" + "github.com/aws/smithy-go/container/private/cache/lru" +) + +const s3ExpressCacheCap = 100 + +const s3ExpressRefreshWindow = 1 * time.Minute + +type cacheKey struct { + CredentialsHash string // hmac(sigv4 akid, sigv4 secret) + Bucket string +} + +func (c cacheKey) Slug() string { + return fmt.Sprintf("%s%s", c.CredentialsHash, c.Bucket) +} + +type sessionCredsCache struct { + mu sync.Mutex + cache cache.Cache +} + +func (c *sessionCredsCache) Get(key cacheKey) (*aws.Credentials, bool) { + c.mu.Lock() + defer c.mu.Unlock() + + if v, ok := c.cache.Get(key); ok { + return v.(*aws.Credentials), true + } + return nil, false +} + +func (c *sessionCredsCache) Put(key cacheKey, creds *aws.Credentials) { + c.mu.Lock() + defer c.mu.Unlock() + + c.cache.Put(key, creds) +} + +// The default S3Express provider uses an LRU cache with a capacity of 100. +// +// Credentials will be refreshed asynchronously when a Retrieve() call is made +// for cached credentials within an expiry window (1 minute, currently +// non-configurable). 
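To make the refresh behavior above concrete: cached credentials are served immediately, and a background refresh is kicked off once they fall inside the one-minute window. A minimal, self-contained sketch of that decision, with hypothetical names (this is not the SDK's code):

    package main

    import (
        "fmt"
        "time"
    )

    // shouldRefreshEarly reports whether cached credentials are close enough
    // to expiry that an asynchronous refresh should be started while the
    // cached value is still returned to the caller.
    func shouldRefreshEarly(expires, now time.Time, window time.Duration) bool {
        return expires.Sub(now) <= window
    }

    func main() {
        expires := time.Now().Add(30 * time.Second) // hypothetical session expiry
        if shouldRefreshEarly(expires, time.Now(), time.Minute) {
            fmt.Println("serve cached credentials, refresh in background")
        }
    }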
+type defaultS3ExpressCredentialsProvider struct { + sf singleflight.Group + + client createSessionAPIClient + cache *sessionCredsCache + refreshWindow time.Duration + v4creds aws.CredentialsProvider // underlying credentials used for CreateSession +} + +type createSessionAPIClient interface { + CreateSession(context.Context, *CreateSessionInput, ...func(*Options)) (*CreateSessionOutput, error) +} + +func newDefaultS3ExpressCredentialsProvider() *defaultS3ExpressCredentialsProvider { + return &defaultS3ExpressCredentialsProvider{ + cache: &sessionCredsCache{ + cache: lru.New(s3ExpressCacheCap), + }, + refreshWindow: s3ExpressRefreshWindow, + } +} + +// returns a cloned provider using new base credentials, used when per-op +// config mutations change the credentials provider +func (p *defaultS3ExpressCredentialsProvider) CloneWithBaseCredentials(v4creds aws.CredentialsProvider) *defaultS3ExpressCredentialsProvider { + return &defaultS3ExpressCredentialsProvider{ + client: p.client, + cache: p.cache, + refreshWindow: p.refreshWindow, + v4creds: v4creds, + } +} + +func (p *defaultS3ExpressCredentialsProvider) Retrieve(ctx context.Context, bucket string) (aws.Credentials, error) { + v4creds, err := p.v4creds.Retrieve(ctx) + if err != nil { + return aws.Credentials{}, fmt.Errorf("get sigv4 creds: %w", err) + } + + key := cacheKey{ + CredentialsHash: gethmac(v4creds.AccessKeyID, v4creds.SecretAccessKey), + Bucket: bucket, + } + creds, ok := p.cache.Get(key) + if !ok || creds.Expired() { + return p.awaitDoChanRetrieve(ctx, key) + } + + if creds.Expires.Sub(sdk.NowTime()) <= p.refreshWindow { + p.doChanRetrieve(ctx, key) + } + + return *creds, nil +} + +func (p *defaultS3ExpressCredentialsProvider) doChanRetrieve(ctx context.Context, key cacheKey) <-chan singleflight.Result { + return p.sf.DoChan(key.Slug(), func() (interface{}, error) { + return p.retrieve(ctx, key) + }) +} + +func (p *defaultS3ExpressCredentialsProvider) awaitDoChanRetrieve(ctx context.Context, key cacheKey) (aws.Credentials, error) { + ch := p.doChanRetrieve(ctx, key) + + select { + case r := <-ch: + return r.Val.(aws.Credentials), r.Err + case <-ctx.Done(): + return aws.Credentials{}, errors.New("s3express retrieve credentials canceled") + } +} + +func (p *defaultS3ExpressCredentialsProvider) retrieve(ctx context.Context, key cacheKey) (aws.Credentials, error) { + resp, err := p.client.CreateSession(ctx, &CreateSessionInput{ + Bucket: aws.String(key.Bucket), + }) + if err != nil { + return aws.Credentials{}, err + } + + creds, err := credentialsFromResponse(resp) + if err != nil { + return aws.Credentials{}, err + } + + p.cache.Put(key, creds) + return *creds, nil +} + +func credentialsFromResponse(o *CreateSessionOutput) (*aws.Credentials, error) { + if o.Credentials == nil { + return nil, errors.New("s3express session credentials unset") + } + + if o.Credentials.AccessKeyId == nil || o.Credentials.SecretAccessKey == nil || o.Credentials.SessionToken == nil || o.Credentials.Expiration == nil { + return nil, errors.New("s3express session credentials missing one or more required fields") + } + + return &aws.Credentials{ + AccessKeyID: *o.Credentials.AccessKeyId, + SecretAccessKey: *o.Credentials.SecretAccessKey, + SessionToken: *o.Credentials.SessionToken, + CanExpire: true, + Expires: *o.Credentials.Expiration, + }, nil +} + +func gethmac(p, key string) string { + hash := hmac.New(sha256.New, []byte(key)) + hash.Write([]byte(p)) + return string(hash.Sum(nil)) +} diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_resolve.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_resolve.go new file mode 100644 index 00000000..18d6c06a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/express_resolve.go @@ -0,0 +1,44 @@ +package s3 + +import ( + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" +) + +// If the caller hasn't provided an S3Express provider, we use our default +// which will grab a reference to the S3 client itself in finalization. +func resolveExpressCredentials(o *Options) { + if o.ExpressCredentials == nil { + o.ExpressCredentials = newDefaultS3ExpressCredentialsProvider() + } +} + +// Config finalizer: if we're using the default S3Express implementation, grab +// a reference to the client for its CreateSession API, and the underlying +// sigv4 credentials provider for cache keying. +func finalizeExpressCredentials(o *Options, c *Client) { + if p, ok := o.ExpressCredentials.(*defaultS3ExpressCredentialsProvider); ok { + p.client = c + p.v4creds = o.Credentials + } +} + +// Operation config finalizer: update the sigv4 credentials on the default +// express provider if it changed to ensure different cache keys +func finalizeOperationExpressCredentials(o *Options, c Client) { + p, ok := o.ExpressCredentials.(*defaultS3ExpressCredentialsProvider) + if !ok { + return + } + + if c.options.Credentials != o.Credentials { + o.ExpressCredentials = p.CloneWithBaseCredentials(o.Credentials) + } +} + +// NewFromConfig resolver: pull from opaque sources if it exists. +func resolveDisableExpressAuth(cfg aws.Config, o *Options) { + if v, ok := customizations.ResolveDisableExpressAuth(cfg.ConfigSources); ok { + o.DisableS3ExpressSessionAuth = &v + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/generated.json index 8643d42f..4e666764 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/generated.json @@ -9,7 +9,8 @@ "github.com/aws/aws-sdk-go-v2/service/internal/checksum": "v0.0.0-00010101000000-000000000000", "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url": "v1.0.7", "github.com/aws/aws-sdk-go-v2/service/internal/s3shared": "v1.2.3", - "github.com/aws/smithy-go": "v1.4.0" + "github.com/aws/smithy-go": "v1.4.0", + "github.com/google/go-cmp": "v0.5.4" }, "files": [ "api_client.go", @@ -19,6 +20,7 @@ "api_op_CopyObject.go", "api_op_CreateBucket.go", "api_op_CreateMultipartUpload.go", + "api_op_CreateSession.go", "api_op_DeleteBucket.go", "api_op_DeleteBucketAnalyticsConfiguration.go", "api_op_DeleteBucketCors.go", @@ -72,6 +74,7 @@ "api_op_ListBucketInventoryConfigurations.go", "api_op_ListBucketMetricsConfigurations.go", "api_op_ListBuckets.go", + "api_op_ListDirectoryBuckets.go", "api_op_ListMultipartUploads.go", "api_op_ListObjectVersions.go", "api_op_ListObjects.go", @@ -107,13 +110,17 @@ "api_op_UploadPart.go", "api_op_UploadPartCopy.go", "api_op_WriteGetObjectResponse.go", + "auth.go", "deserializers.go", "doc.go", "endpoints.go", + "endpoints_config_test.go", + "endpoints_test.go", "eventstream.go", "generated.json", "internal/endpoints/endpoints.go", "internal/endpoints/endpoints_test.go", + "options.go", "protocol_test.go", "serializers.go", "types/enums.go", diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go 
b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go index 1e21a6cf..bff6ac9a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go @@ -3,4 +3,4 @@ package s3 // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.31.0" +const goModuleVersion = "1.48.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/handwritten_paginators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/handwritten_paginators.go new file mode 100644 index 00000000..6aae79e7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/handwritten_paginators.go @@ -0,0 +1,214 @@ +package s3 + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" +) + +// ListObjectVersionsAPIClient is a client that implements the ListObjectVersions +// operation +type ListObjectVersionsAPIClient interface { + ListObjectVersions(context.Context, *ListObjectVersionsInput, ...func(*Options)) (*ListObjectVersionsOutput, error) +} + +var _ ListObjectVersionsAPIClient = (*Client)(nil) + +// ListObjectVersionsPaginatorOptions is the paginator options for ListObjectVersions +type ListObjectVersionsPaginatorOptions struct { + // (Optional) The maximum number of Object Versions that you want Amazon S3 to + // return. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// ListObjectVersionsPaginator is a paginator for ListObjectVersions +type ListObjectVersionsPaginator struct { + options ListObjectVersionsPaginatorOptions + client ListObjectVersionsAPIClient + params *ListObjectVersionsInput + firstPage bool + keyMarker *string + versionIDMarker *string + isTruncated bool +} + +// NewListObjectVersionsPaginator returns a new ListObjectVersionsPaginator +func NewListObjectVersionsPaginator(client ListObjectVersionsAPIClient, params *ListObjectVersionsInput, optFns ...func(*ListObjectVersionsPaginatorOptions)) *ListObjectVersionsPaginator { + if params == nil { + params = &ListObjectVersionsInput{} + } + + options := ListObjectVersionsPaginatorOptions{} + if params.MaxKeys != nil { + options.Limit = aws.ToInt32(params.MaxKeys) + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListObjectVersionsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + keyMarker: params.KeyMarker, + versionIDMarker: params.VersionIdMarker, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListObjectVersionsPaginator) HasMorePages() bool { + return p.firstPage || p.isTruncated +} + +// NextPage retrieves the next ListObjectVersions page. +func (p *ListObjectVersionsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListObjectVersionsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.KeyMarker = p.keyMarker + params.VersionIdMarker = p.versionIDMarker + + var limit int32 + if p.options.Limit > 0 { + limit = p.options.Limit + } + if limit > 0 { + params.MaxKeys = aws.Int32(limit) + } + + result, err := p.client.ListObjectVersions(ctx, &params, optFns...) 
+ if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.keyMarker + p.isTruncated = aws.ToBool(result.IsTruncated) + p.keyMarker = nil + p.versionIDMarker = nil + if aws.ToBool(result.IsTruncated) { + p.keyMarker = result.NextKeyMarker + p.versionIDMarker = result.NextVersionIdMarker + } + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.keyMarker != nil && + *prevToken == *p.keyMarker { + p.isTruncated = false + } + + return result, nil +} + +// ListMultipartUploadsAPIClient is a client that implements the ListMultipartUploads +// operation +type ListMultipartUploadsAPIClient interface { + ListMultipartUploads(context.Context, *ListMultipartUploadsInput, ...func(*Options)) (*ListMultipartUploadsOutput, error) +} + +var _ ListMultipartUploadsAPIClient = (*Client)(nil) + +// ListMultipartUploadsPaginatorOptions is the paginator options for ListMultipartUploads +type ListMultipartUploadsPaginatorOptions struct { + // (Optional) The maximum number of Multipart Uploads that you want Amazon S3 to + // return. + Limit int32 + + // Set to true if pagination should stop if the service returns a pagination token + // that matches the most recent token provided to the service. + StopOnDuplicateToken bool +} + +// ListMultipartUploadsPaginator is a paginator for ListMultipartUploads +type ListMultipartUploadsPaginator struct { + options ListMultipartUploadsPaginatorOptions + client ListMultipartUploadsAPIClient + params *ListMultipartUploadsInput + firstPage bool + keyMarker *string + uploadIDMarker *string + isTruncated bool +} + +// NewListMultipartUploadsPaginator returns a new ListMultipartUploadsPaginator +func NewListMultipartUploadsPaginator(client ListMultipartUploadsAPIClient, params *ListMultipartUploadsInput, optFns ...func(*ListMultipartUploadsPaginatorOptions)) *ListMultipartUploadsPaginator { + if params == nil { + params = &ListMultipartUploadsInput{} + } + + options := ListMultipartUploadsPaginatorOptions{} + if params.MaxUploads != nil { + options.Limit = aws.ToInt32(params.MaxUploads) + } + + for _, fn := range optFns { + fn(&options) + } + + return &ListMultipartUploadsPaginator{ + options: options, + client: client, + params: params, + firstPage: true, + keyMarker: params.KeyMarker, + uploadIDMarker: params.UploadIdMarker, + } +} + +// HasMorePages returns a boolean indicating whether more pages are available +func (p *ListMultipartUploadsPaginator) HasMorePages() bool { + return p.firstPage || p.isTruncated +} + +// NextPage retrieves the next ListMultipartUploads page. +func (p *ListMultipartUploadsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListMultipartUploadsOutput, error) { + if !p.HasMorePages() { + return nil, fmt.Errorf("no more pages available") + } + + params := *p.params + params.KeyMarker = p.keyMarker + params.UploadIdMarker = p.uploadIDMarker + + var limit int32 + if p.options.Limit > 0 { + limit = p.options.Limit + } + if limit > 0 { + params.MaxUploads = aws.Int32(limit) + } + + result, err := p.client.ListMultipartUploads(ctx, &params, optFns...) 
+ if err != nil { + return nil, err + } + p.firstPage = false + + prevToken := p.keyMarker + p.isTruncated = aws.ToBool(result.IsTruncated) + p.keyMarker = nil + p.uploadIDMarker = nil + if aws.ToBool(result.IsTruncated) { + p.keyMarker = result.NextKeyMarker + p.uploadIDMarker = result.NextUploadIdMarker + } + + if p.options.StopOnDuplicateToken && + prevToken != nil && + p.keyMarker != nil && + *prevToken == *p.keyMarker { + p.isTruncated = false + } + + return result, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/context.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/context.go new file mode 100644 index 00000000..91b8fde0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/context.go @@ -0,0 +1,21 @@ +package customizations + +import ( + "context" + + "github.com/aws/smithy-go/middleware" +) + +type bucketKey struct{} + +// SetBucket stores a bucket name within the request context, which is required +// for a variety of custom S3 behaviors. +func SetBucket(ctx context.Context, bucket string) context.Context { + return middleware.WithStackValue(ctx, bucketKey{}, bucket) +} + +// GetBucket retrieves a stored bucket name within a context. +func GetBucket(ctx context.Context) string { + v, _ := middleware.GetStackValue(ctx, bucketKey{}).(string) + return v +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express.go new file mode 100644 index 00000000..8cc0b362 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express.go @@ -0,0 +1,44 @@ +package customizations + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" + "github.com/aws/smithy-go" + "github.com/aws/smithy-go/auth" +) + +// S3ExpressCredentialsProvider retrieves credentials for the S3Express storage +// class. +type S3ExpressCredentialsProvider interface { + Retrieve(ctx context.Context, bucket string) (aws.Credentials, error) +} + +// ExpressIdentityResolver retrieves identity for the S3Express storage class. +type ExpressIdentityResolver struct { + Provider S3ExpressCredentialsProvider +} + +var _ (auth.IdentityResolver) = (*ExpressIdentityResolver)(nil) + +// GetIdentity retrieves AWS credentials using the underlying provider. 
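The S3ExpressCredentialsProvider interface above (surfaced to callers as the s3.ExpressCredentialsProvider alias earlier in this patch) is the extension point for custom session-credential sourcing. A minimal static implementation, assuming a test or fixed-session scenario (hypothetical type, not part of the SDK):

    package main

    import (
        "context"
        "fmt"

        "github.com/aws/aws-sdk-go-v2/aws"
    )

    // staticExpressProvider returns the same session credentials for every
    // bucket. A real provider would call CreateSession per bucket and cache
    // the result, as the default provider added in this patch does.
    type staticExpressProvider struct {
        creds aws.Credentials
    }

    func (p *staticExpressProvider) Retrieve(ctx context.Context, bucket string) (aws.Credentials, error) {
        return p.creds, nil
    }

    func main() {
        p := &staticExpressProvider{creds: aws.Credentials{AccessKeyID: "AKID"}}
        c, _ := p.Retrieve(context.Background(), "example-bucket")
        fmt.Println(c.AccessKeyID)
    }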
+func (v *ExpressIdentityResolver) GetIdentity(ctx context.Context, props smithy.Properties) ( + auth.Identity, error, +) { + bucket, ok := GetIdentityPropertiesBucket(&props) + if !ok { + bucket = GetBucket(ctx) + } + if bucket == "" { + return nil, fmt.Errorf("bucket name is missing") + } + + creds, err := v.Provider.Retrieve(ctx, bucket) + if err != nil { + return nil, fmt.Errorf("get credentials: %v", err) + } + + return &internalauthsmithy.CredentialsAdapter{Credentials: creds}, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_config.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_config.go new file mode 100644 index 00000000..bb22d347 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_config.go @@ -0,0 +1,18 @@ +package customizations + +type s3DisableExpressAuthProvider interface { + GetS3DisableExpressAuth() (bool, bool) +} + +// ResolveDisableExpressAuth pulls S3DisableExpressAuth setting from config +// sources. +func ResolveDisableExpressAuth(configs []interface{}) (value bool, exists bool) { + for _, cfg := range configs { + if p, ok := cfg.(s3DisableExpressAuthProvider); ok { + if value, exists = p.GetS3DisableExpressAuth(); exists { + break + } + } + } + return +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_default_checksum.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_default_checksum.go new file mode 100644 index 00000000..cf3ff596 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_default_checksum.go @@ -0,0 +1,42 @@ +package customizations + +import ( + "context" + "fmt" + + ictx "github.com/aws/aws-sdk-go-v2/internal/context" + "github.com/aws/aws-sdk-go-v2/service/internal/checksum" + "github.com/aws/smithy-go/middleware" +) + +type expressDefaultChecksumMiddleware struct{} + +func (*expressDefaultChecksumMiddleware) ID() string { + return "expressDefaultChecksum" +} + +func (*expressDefaultChecksumMiddleware) HandleFinalize( + ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler, +) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + if ictx.GetS3Backend(ctx) == ictx.S3BackendS3Express && ictx.GetChecksumInputAlgorithm(ctx) == "" { + ctx = ictx.SetChecksumInputAlgorithm(ctx, string(checksum.AlgorithmCRC32)) + } + return next.HandleFinalize(ctx, in) +} + +// AddExpressDefaultChecksumMiddleware appends a step to default to CRC32 for +// S3Express requests. This should only be applied to operations where a +// checksum is required (e.g. DeleteObject). 
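AddExpressDefaultChecksumMiddleware below uses a standard smithy-go pattern: a small Finalize-step middleware positioned relative to another step by ID. A self-contained sketch of the same pattern, with a hypothetical no-op middleware standing in for the checksum defaulter:

    package main

    import (
        "context"
        "fmt"

        "github.com/aws/smithy-go/middleware"
    )

    type noopFinalize struct{}

    func (*noopFinalize) ID() string { return "noopFinalize" }

    func (*noopFinalize) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
        middleware.FinalizeOutput, middleware.Metadata, error,
    ) {
        // A real middleware would inspect or default request state here
        // before delegating to the rest of the chain.
        return next.HandleFinalize(ctx, in)
    }

    func main() {
        stack := middleware.NewStack("example", func() interface{} { return struct{}{} })
        if err := stack.Finalize.Add(&noopFinalize{}, middleware.After); err != nil {
            panic(err)
        }
        fmt.Println(stack.Finalize.List()) // [noopFinalize]
    }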
+func AddExpressDefaultChecksumMiddleware(s *middleware.Stack) error { + err := s.Finalize.Insert( + &expressDefaultChecksumMiddleware{}, + "AWSChecksum:ComputeInputPayloadChecksum", + middleware.Before, + ) + if err != nil { + return fmt.Errorf("add expressDefaultChecksum: %v", err) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_properties.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_properties.go new file mode 100644 index 00000000..171de461 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_properties.go @@ -0,0 +1,21 @@ +package customizations + +import "github.com/aws/smithy-go" + +// GetPropertiesBackend returns a resolved endpoint backend from the property +// set. +func GetPropertiesBackend(p *smithy.Properties) string { + v, _ := p.Get("backend").(string) + return v +} + +// GetIdentityPropertiesBucket returns the S3 bucket from identity properties. +func GetIdentityPropertiesBucket(ip *smithy.Properties) (string, bool) { + v, ok := ip.Get(bucketKey{}).(string) + return v, ok +} + +// SetIdentityPropertiesBucket sets the S3 bucket to identity properties. +func SetIdentityPropertiesBucket(ip *smithy.Properties, bucket string) { + ip.Set(bucketKey{}, bucket) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_signer.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_signer.go new file mode 100644 index 00000000..545e5b22 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_signer.go @@ -0,0 +1,109 @@ +package customizations + +import ( + "context" + "net/http" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/smithy-go/middleware" +) + +const ( + s3ExpressSignerVersion = "com.amazonaws.s3#sigv4express" + headerAmzSessionToken = "x-amz-s3session-token" +) + +// adapts a v4 signer for S3Express +type s3ExpressSignerAdapter struct { + v4 v4.HTTPSigner +} + +// SignHTTP performs S3Express signing on a request, which is identical to +// SigV4 signing save for an additional header containing the S3Express +// session token. +func (s *s3ExpressSignerAdapter) SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error { + r.Header.Set(headerAmzSessionToken, credentials.SessionToken) + optFns = append(optFns, func(o *v4.SignerOptions) { + o.DisableSessionToken = true + }) + return s.v4.SignHTTP(ctx, credentials, r, payloadHash, service, region, signingTime, optFns...) +} + +// adapts S3ExpressCredentialsProvider to the standard AWS +// CredentialsProvider interface +type s3ExpressCredentialsAdapter struct { + provider S3ExpressCredentialsProvider + bucket string +} + +func (c *s3ExpressCredentialsAdapter) Retrieve(ctx context.Context) (aws.Credentials, error) { + return c.provider.Retrieve(ctx, c.bucket) +} + +// S3ExpressSignHTTPRequestMiddleware signs S3 S3Express requests. +// +// This is NOT mutually exclusive with existing v4 or v4a signer handling on +// the stack itself, but only one handler will actually perform signing based +// on the provided signing version in the context. 
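The "only one handler actually performs signing" contract described above hinges on a signer version carried in the request context: each signing middleware inspects it and passes through unless its own scheme is selected. A stripped-down sketch of that dispatch (the context key is hypothetical; the scheme IDs are the ones used in this patch):

    package main

    import (
        "context"
        "fmt"
    )

    type signerVersionKey struct{}

    func withSignerVersion(ctx context.Context, v string) context.Context {
        return context.WithValue(ctx, signerVersionKey{}, v)
    }

    func signerVersion(ctx context.Context) string {
        v, _ := ctx.Value(signerVersionKey{}).(string)
        return v
    }

    func main() {
        ctx := withSignerVersion(context.Background(), "com.amazonaws.s3#sigv4express")
        for _, scheme := range []string{"aws.auth#sigv4", "aws.auth#sigv4a", "com.amazonaws.s3#sigv4express"} {
            if signerVersion(ctx) != scheme {
                continue // this handler passes through untouched
            }
            fmt.Println("signing with", scheme)
        }
    }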
+type S3ExpressSignHTTPRequestMiddleware struct { + Credentials S3ExpressCredentialsProvider + Signer v4.HTTPSigner + LogSigning bool +} + +// ID identifies S3ExpressSignHTTPRequestMiddleware. +func (*S3ExpressSignHTTPRequestMiddleware) ID() string { + return "S3ExpressSigning" +} + +// HandleFinalize will sign the request if the S3Express signer has been +// selected. +func (m *S3ExpressSignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, +) { + if GetSignerVersion(ctx) != s3ExpressSignerVersion { + return next.HandleFinalize(ctx, in) + } + + mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{ + CredentialsProvider: m.credentialsAdapter(ctx), + Signer: m.signerAdapter(), + LogSigning: m.LogSigning, + }) + return mw.HandleFinalize(ctx, in, next) +} + +func (m *S3ExpressSignHTTPRequestMiddleware) credentialsAdapter(ctx context.Context) aws.CredentialsProvider { + return &s3ExpressCredentialsAdapter{ + provider: m.Credentials, + bucket: GetBucket(ctx), + } +} + +func (m *S3ExpressSignHTTPRequestMiddleware) signerAdapter() v4.HTTPSigner { + return &s3ExpressSignerAdapter{v4: m.Signer} +} + +type s3ExpressPresignerAdapter struct { + v4 v4.HTTPPresigner +} + +// SignHTTP performs S3Express signing on a request, which is identical to +// SigV4 signing save for an additional header containing the S3Express +// session token. +func (s *s3ExpressPresignerAdapter) PresignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) ( + string, http.Header, error, +) { + r.Header.Set(headerAmzSessionToken, credentials.SessionToken) + optFns = append(optFns, func(o *v4.SignerOptions) { + o.DisableSessionToken = true + }) + return s.v4.PresignHTTP(ctx, credentials, r, payloadHash, service, region, signingTime, optFns...) +} + +var ( + _ aws.CredentialsProvider = &s3ExpressCredentialsAdapter{} + _ v4.HTTPSigner = &s3ExpressSignerAdapter{} +) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_signer_smithy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_signer_smithy.go new file mode 100644 index 00000000..e3ec7f01 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/express_signer_smithy.go @@ -0,0 +1,61 @@ +package customizations + +import ( + "context" + "fmt" + + v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" + "github.com/aws/aws-sdk-go-v2/internal/sdk" + "github.com/aws/smithy-go" + "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/logging" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// ExpressSigner signs requests for the sigv4-s3express auth scheme. +// +// This signer respects the aws.auth#sigv4 properties for signing name and +// region. +type ExpressSigner struct { + Signer v4.HTTPSigner + Logger logging.Logger + LogSigning bool +} + +var _ (smithyhttp.Signer) = (*ExpressSigner)(nil) + +// SignRequest signs the request with the provided identity. 
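SignRequest below fails fast unless the endpoint resolver stamped a SigV4 signing name and region into the auth properties. Those accessors are ordinary smithy-go property helpers, the same ones the generated endpoint rules in this patch call; a short sketch of setting and reading them (values illustrative):

    package main

    import (
        "fmt"

        "github.com/aws/smithy-go"
        smithyhttp "github.com/aws/smithy-go/transport/http"
    )

    func main() {
        var props smithy.Properties
        smithyhttp.SetSigV4SigningName(&props, "s3")
        smithyhttp.SetSigV4SigningRegion(&props, "us-west-2")

        // The signer reads these back and refuses to sign if either is absent.
        name, ok := smithyhttp.GetSigV4SigningName(&props)
        region, _ := smithyhttp.GetSigV4SigningRegion(&props)
        fmt.Println(name, region, ok) // s3 us-west-2 true
    }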
+func (v *ExpressSigner) SignRequest(ctx context.Context, r *smithyhttp.Request, identity auth.Identity, props smithy.Properties) error { + ca, ok := identity.(*internalauthsmithy.CredentialsAdapter) + if !ok { + return fmt.Errorf("unexpected identity type: %T", identity) + } + + name, ok := smithyhttp.GetSigV4SigningName(&props) + if !ok { + return fmt.Errorf("sigv4 signing name is required for s3express variant") + } + + region, ok := smithyhttp.GetSigV4SigningRegion(&props) + if !ok { + return fmt.Errorf("sigv4 signing region is required for s3express variant") + } + + hash := v4.GetPayloadHash(ctx) + + r.Header.Set(headerAmzSessionToken, ca.Credentials.SessionToken) + err := v.Signer.SignHTTP(ctx, ca.Credentials, r.Request, hash, name, region, sdk.NowTime(), func(o *v4.SignerOptions) { + o.DisableSessionToken = true + + o.DisableURIPathEscaping, _ = smithyhttp.GetDisableDoubleEncoding(&props) + + o.Logger = v.Logger + o.LogSigning = v.LogSigning + }) + if err != nil { + return fmt.Errorf("sign http: %v", err) + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/process_arn_resource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/process_arn_resource.go index a232e622..bbc971f2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/process_arn_resource.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/process_arn_resource.go @@ -50,6 +50,10 @@ func (m *processARNResource) HandleSerialize( ) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + // check if arn was provided, if not skip this middleware arnValue, ok := s3shared.GetARNResourceFromContext(ctx) if !ok { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/remove_bucket_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/remove_bucket_middleware.go index 2e030f29..cf3f4dc8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/remove_bucket_middleware.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/remove_bucket_middleware.go @@ -4,6 +4,7 @@ import ( "context" "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/smithy-go/middleware" "github.com/aws/smithy-go/transport/http" ) @@ -21,6 +22,10 @@ func (m *removeBucketFromPathMiddleware) HandleSerialize( ) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + // check if a bucket removal from HTTP path is required bucket, ok := getRemoveBucketFromPath(ctx) if !ok { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/s3_object_lambda.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/s3_object_lambda.go index 325b2d36..6e1d4472 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/s3_object_lambda.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/s3_object_lambda.go @@ -30,6 +30,10 @@ func (t *s3ObjectLambdaEndpoint) HandleSerialize( ) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + if !t.UseEndpoint { 
return next.HandleSerialize(ctx, in) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/signer_wrapper.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/signer_wrapper.go index 6689acb8..756823cb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/signer_wrapper.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/signer_wrapper.go @@ -3,6 +3,7 @@ package customizations import ( "context" "fmt" + "strings" "github.com/aws/aws-sdk-go-v2/aws" v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" @@ -76,15 +77,21 @@ func (s *SignHTTPRequestMiddleware) ID() string { return "Signing" } -// HandleFinalize will take the provided input and sign the request using the SigV4 authentication scheme +// HandleFinalize will take the provided input and handle signing for either +// SigV4 or SigV4A as called for. func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( out middleware.FinalizeOutput, metadata middleware.Metadata, err error, ) { - // fetch signer type from context - signerVersion := GetSignerVersion(ctx) + sv := GetSignerVersion(ctx) - switch signerVersion { - case v4a.Version: + if strings.EqualFold(sv, v4.Version) { + mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{ + CredentialsProvider: s.credentialsProvider, + Signer: s.v4Signer, + LogSigning: s.logSigning, + }) + return mw.HandleFinalize(ctx, in, next) + } else if strings.EqualFold(sv, v4a.Version) { v4aCredentialProvider, ok := s.credentialsProvider.(v4a.CredentialsProvider) if !ok { return out, metadata, fmt.Errorf("invalid credential-provider provided for sigV4a Signer") @@ -96,15 +103,9 @@ func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middl LogSigning: s.logSigning, }) return mw.HandleFinalize(ctx, in, next) - - default: - mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{ - CredentialsProvider: s.credentialsProvider, - Signer: s.v4Signer, - LogSigning: s.logSigning, - }) - return mw.HandleFinalize(ctx, in, next) } + + return next.HandleFinalize(ctx, in) } // RegisterSigningMiddleware registers the wrapper signing middleware to the stack. If a signing middleware is already @@ -124,6 +125,7 @@ func RegisterSigningMiddleware(stack *middleware.Stack, signingMiddleware *SignH // PresignHTTPRequestMiddlewareOptions is the options for the PresignHTTPRequestMiddleware middleware. 
type PresignHTTPRequestMiddlewareOptions struct { CredentialsProvider aws.CredentialsProvider + ExpressCredentials S3ExpressCredentialsProvider V4Presigner v4.HTTPPresigner V4aPresigner v4a.HTTPPresigner LogSigning bool @@ -139,6 +141,9 @@ type PresignHTTPRequestMiddleware struct { // cred provider and signer for sigv4 credentialsProvider aws.CredentialsProvider + // s3Express credentials + expressCredentials S3ExpressCredentialsProvider + // sigV4 signer v4Signer v4.HTTPPresigner @@ -153,6 +158,7 @@ type PresignHTTPRequestMiddleware struct { func NewPresignHTTPRequestMiddleware(options PresignHTTPRequestMiddlewareOptions) *PresignHTTPRequestMiddleware { return &PresignHTTPRequestMiddleware{ credentialsProvider: options.CredentialsProvider, + expressCredentials: options.ExpressCredentials, v4Signer: options.V4Presigner, v4aSigner: options.V4aPresigner, logSigning: options.LogSigning, @@ -175,26 +181,34 @@ func (p *PresignHTTPRequestMiddleware) HandleFinalize( signerVersion := GetSignerVersion(ctx) switch signerVersion { - case v4a.Version: - v4aCredentialProvider, ok := p.credentialsProvider.(v4a.CredentialsProvider) - if !ok { - return out, metadata, fmt.Errorf("invalid credential-provider provided for sigV4a Signer") - } - + case "aws.auth#sigv4a": mw := v4a.NewPresignHTTPRequestMiddleware(v4a.PresignHTTPRequestMiddlewareOptions{ - CredentialsProvider: v4aCredentialProvider, - Presigner: p.v4aSigner, - LogSigning: p.logSigning, + CredentialsProvider: &v4a.SymmetricCredentialAdaptor{ + SymmetricProvider: p.credentialsProvider, + }, + Presigner: p.v4aSigner, + LogSigning: p.logSigning, }) return mw.HandleFinalize(ctx, in, next) - - default: + case "aws.auth#sigv4": mw := v4.NewPresignHTTPRequestMiddleware(v4.PresignHTTPRequestMiddlewareOptions{ CredentialsProvider: p.credentialsProvider, Presigner: p.v4Signer, LogSigning: p.logSigning, }) return mw.HandleFinalize(ctx, in, next) + case s3ExpressSignerVersion: + mw := v4.NewPresignHTTPRequestMiddleware(v4.PresignHTTPRequestMiddlewareOptions{ + CredentialsProvider: &s3ExpressCredentialsAdapter{ + provider: p.expressCredentials, + bucket: GetBucket(ctx), + }, + Presigner: &s3ExpressPresignerAdapter{v4: p.v4Signer}, + LogSigning: p.logSigning, + }) + return mw.HandleFinalize(ctx, in, next) + default: + return out, metadata, fmt.Errorf("unsupported signer type \"%s\"", signerVersion) } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/update_endpoint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/update_endpoint.go index e5f95254..eedfc7ee 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/update_endpoint.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/update_endpoint.go @@ -69,9 +69,9 @@ func UpdateEndpoint(stack *middleware.Stack, options UpdateEndpointOptions) (err const serializerID = "OperationSerializer" // initial arn look up middleware - err = stack.Initialize.Add(&s3shared.ARNLookup{ + err = stack.Initialize.Insert(&s3shared.ARNLookup{ GetARNValue: options.Accessor.GetBucketFromInput, - }, middleware.Before) + }, "legacyEndpointContextSetter", middleware.After) if err != nil { return err } @@ -141,6 +141,10 @@ func (u *updateEndpoint) HandleSerialize( ) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { + if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) { + return next.HandleSerialize(ctx, in) + } + // if arn was processed, skip this middleware if _, ok := 
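processARNResource, removeBucketFromPathMiddleware, s3ObjectLambdaEndpoint, and updateEndpoint all gained the same guard: they act only when the legacyEndpointContextSetter middleware has flagged the operation as needing v1-style endpoint handling, which is also why ARNLookup is now inserted relative to that setter rather than at the front of the Initialize step. A minimal sketch of the pattern; the middleware type is hypothetical, while GetRequiresLegacyEndpoints is the accessor from aws/middleware used above:

    package example

    import (
    	"context"

    	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
    	"github.com/aws/smithy-go/middleware"
    )

    // legacyOnlyMiddleware no-ops whenever endpoints were resolved through
    // EndpointResolverV2, exactly like the customizations above.
    type legacyOnlyMiddleware struct{}

    func (*legacyOnlyMiddleware) ID() string { return "legacyOnlyExample" }

    func (m *legacyOnlyMiddleware) HandleSerialize(
    	ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
    ) (middleware.SerializeOutput, middleware.Metadata, error) {
    	if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
    		return next.HandleSerialize(ctx, in) // EndpointResolverV2 owns endpoint shaping
    	}
    	// ...legacy (v1 resolver) endpoint manipulation would go here...
    	return next.HandleSerialize(ctx, in)
    }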
s3shared.GetARNResourceFromContext(ctx); ok { return next.HandleSerialize(ctx, in) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints/endpoints.go index dc0215c9..f3e6b075 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints/endpoints.go @@ -91,13 +91,17 @@ var partitionRegexp = struct { AwsCn *regexp.Regexp AwsIso *regexp.Regexp AwsIsoB *regexp.Regexp + AwsIsoE *regexp.Regexp + AwsIsoF *regexp.Regexp AwsUsGov *regexp.Regexp }{ - Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$"), + Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$"), AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"), AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"), AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"), + AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"), + AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"), AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"), } @@ -278,6 +282,27 @@ var defaultPartitions = endpoints.Partitions{ }: { Hostname: "s3.dualstack.ca-central-1.amazonaws.com", }, + endpoints.EndpointKey{ + Region: "ca-west-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "ca-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.ca-west-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ca-west-1", + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "s3-fips.dualstack.ca-west-1.amazonaws.com", + }, + endpoints.EndpointKey{ + Region: "ca-west-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.ca-west-1.amazonaws.com", + }, endpoints.EndpointKey{ Region: "eu-central-1", }: endpoints.Endpoint{}, @@ -363,6 +388,15 @@ var defaultPartitions = endpoints.Partitions{ }, Deprecated: aws.TrueTernary, }, + endpoints.EndpointKey{ + Region: "fips-ca-west-1", + }: endpoints.Endpoint{ + Hostname: "s3-fips.ca-west-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ca-west-1", + }, + Deprecated: aws.TrueTernary, + }, endpoints.EndpointKey{ Region: "fips-us-east-1", }: endpoints.Endpoint{ @@ -399,6 +433,15 @@ var defaultPartitions = endpoints.Partitions{ }, Deprecated: aws.TrueTernary, }, + endpoints.EndpointKey{ + Region: "il-central-1", + }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "il-central-1", + Variant: endpoints.DualStackVariant, + }: { + Hostname: "s3.dualstack.il-central-1.amazonaws.com", + }, endpoints.EndpointKey{ Region: "me-central-1", }: endpoints.Endpoint{}, @@ -619,15 +662,61 @@ var defaultPartitions = endpoints.Partitions{ RegionRegex: partitionRegexp.AwsIso, IsRegionalized: true, Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "fips-us-iso-east-1", + }: endpoints.Endpoint{ + Hostname: "s3-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: endpoints.CredentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: aws.TrueTernary, + }, + endpoints.EndpointKey{ + Region: "fips-us-iso-west-1", + }: endpoints.Endpoint{ + Hostname: "s3-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: endpoints.CredentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: aws.TrueTernary, + }, endpoints.EndpointKey{ Region: "us-iso-east-1", }: endpoints.Endpoint{ Protocols: []string{"http", "https"}, SignatureVersions: []string{"s3v4"}, }, + endpoints.EndpointKey{ + Region: "us-iso-east-1", + 
Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "s3-fips.dualstack.us-iso-east-1.c2s.ic.gov", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + endpoints.EndpointKey{ + Region: "us-iso-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.us-iso-east-1.c2s.ic.gov", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, endpoints.EndpointKey{ Region: "us-iso-west-1", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-iso-west-1", + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "s3-fips.dualstack.us-iso-west-1.c2s.ic.gov", + }, + endpoints.EndpointKey{ + Region: "us-iso-west-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.us-iso-west-1.c2s.ic.gov", + }, }, }, { @@ -651,10 +740,73 @@ var defaultPartitions = endpoints.Partitions{ RegionRegex: partitionRegexp.AwsIsoB, IsRegionalized: true, Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "fips-us-isob-east-1", + }: endpoints.Endpoint{ + Hostname: "s3-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: endpoints.CredentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: aws.TrueTernary, + }, endpoints.EndpointKey{ Region: "us-isob-east-1", }: endpoints.Endpoint{}, + endpoints.EndpointKey{ + Region: "us-isob-east-1", + Variant: endpoints.FIPSVariant | endpoints.DualStackVariant, + }: { + Hostname: "s3-fips.dualstack.us-isob-east-1.sc2s.sgov.gov", + }, + endpoints.EndpointKey{ + Region: "us-isob-east-1", + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.us-isob-east-1.sc2s.sgov.gov", + }, + }, + }, + { + ID: "aws-iso-e", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "s3.{region}.cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + RegionRegex: partitionRegexp.AwsIsoE, + IsRegionalized: true, + }, + { + ID: "aws-iso-f", + Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{ + { + Variant: endpoints.FIPSVariant, + }: { + Hostname: "s3-fips.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + { + Variant: 0, + }: { + Hostname: "s3.{region}.csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, }, + RegionRegex: partitionRegexp.AwsIsoF, + IsRegionalized: true, }, { ID: "aws-us-gov", @@ -821,6 +973,32 @@ func GetDNSSuffix(id string, options Options) (string, error) { } + case strings.EqualFold(id, "aws-iso-e"): + switch variant { + case endpoints.FIPSVariant: + return "cloud.adc-e.uk", nil + + case 0: + return "cloud.adc-e.uk", nil + + default: + return "", fmt.Errorf("unsupported endpoint variant %v, in partition %s", variant, id) + + } + + case strings.EqualFold(id, "aws-iso-f"): + switch variant { + case endpoints.FIPSVariant: + return "csp.hci.ic.gov", nil + + case 0: + return "csp.hci.ic.gov", nil + + default: + return "", fmt.Errorf("unsupported endpoint variant %v, in partition %s", variant, id) + + } + case strings.EqualFold(id, "aws-us-gov"): switch variant { case endpoints.DualStackVariant: @@ -862,6 +1040,12 @@ func GetDNSSuffixFromRegion(region string, options Options) (string, error) { case partitionRegexp.AwsIsoB.MatchString(region): return GetDNSSuffix("aws-iso-b", options) + case 
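The endpoint metadata gains two partitions, aws-iso-e (eu-isoe-*, suffix cloud.adc-e.uk) and aws-iso-f (us-isof-*, suffix csp.hci.ic.gov), plus ca-west-1 and il-central-1 endpoints and the il prefix in the aws partition regex. A self-contained sketch of how the region patterns select a DNS suffix, with regexes and suffixes copied from the tables above; dnsSuffixForRegion is hypothetical, and the real resolution in the internal endpoints package also accounts for FIPS and dual-stack variants:

    package example

    import (
    	"fmt"
    	"regexp"
    )

    // partitionSuffixes pairs a partition's region pattern with its default
    // DNS suffix, mirroring a subset of the table above.
    var partitionSuffixes = []struct {
    	pattern *regexp.Regexp
    	suffix  string
    }{
    	{regexp.MustCompile(`^(us|eu|ap|sa|ca|me|af|il)-\w+-\d+$`), "amazonaws.com"},
    	{regexp.MustCompile(`^cn-\w+-\d+$`), "amazonaws.com.cn"},
    	{regexp.MustCompile(`^eu-isoe-\w+-\d+$`), "cloud.adc-e.uk"},
    	{regexp.MustCompile(`^us-isof-\w+-\d+$`), "csp.hci.ic.gov"},
    }

    // dnsSuffixForRegion("eu-isoe-west-1") returns "cloud.adc-e.uk".
    func dnsSuffixForRegion(region string) (string, error) {
    	for _, p := range partitionSuffixes {
    		if p.pattern.MatchString(region) {
    			return p.suffix, nil
    		}
    	}
    	return "", fmt.Errorf("no partition matches region %q", region)
    }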
partitionRegexp.AwsIsoE.MatchString(region): + return GetDNSSuffix("aws-iso-e", options) + + case partitionRegexp.AwsIsoF.MatchString(region): + return GetDNSSuffix("aws-iso-f", options) + case partitionRegexp.AwsUsGov.MatchString(region): return GetDNSSuffix("aws-us-gov", options) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/options.go new file mode 100644 index 00000000..064bcefb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/options.go @@ -0,0 +1,314 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy" + "github.com/aws/aws-sdk-go-v2/internal/v4a" + s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations" + smithyauth "github.com/aws/smithy-go/auth" + "github.com/aws/smithy-go/logging" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "net/http" +) + +type HTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +type Options struct { + // Set of options to modify how an operation is invoked. These apply to all + // operations invoked for this client. Use functional options on operation call to + // modify this list for per operation behavior. + APIOptions []func(*middleware.Stack) error + + // The optional application specific identifier appended to the User-Agent header. + AppID string + + // This endpoint will be given as input to an EndpointResolverV2. It is used for + // providing a custom base endpoint that is subject to modifications by the + // processing EndpointResolverV2. + BaseEndpoint *string + + // Configures the events that will be sent to the configured logger. + ClientLogMode aws.ClientLogMode + + // The threshold ContentLength in bytes for HTTP PUT request to receive {Expect: + // 100-continue} header. Setting to -1 will disable adding the Expect header to + // requests; setting to 0 will set the threshold to default 2MB + ContinueHeaderThresholdBytes int64 + + // The credentials object to use when signing requests. + Credentials aws.CredentialsProvider + + // The configuration DefaultsMode that the SDK should use when constructing the + // clients initial default settings. + DefaultsMode aws.DefaultsMode + + // Allows you to disable S3 Multi-Region access points feature. + DisableMultiRegionAccessPoints bool + + // Disables this client's usage of Session Auth for S3Express buckets and reverts + // to using conventional SigV4 for those. + DisableS3ExpressSessionAuth *bool + + // The endpoint options to be used when attempting to resolve an endpoint. + EndpointOptions EndpointResolverOptions + + // The service endpoint resolver. + // + // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a + // value for this field will likely prevent you from using any endpoint-related + // service features released after the introduction of EndpointResolverV2 and + // BaseEndpoint. To migrate an EndpointResolver implementation that uses a custom + // endpoint, set the client option BaseEndpoint instead. + EndpointResolver EndpointResolver + + // Resolves the endpoint used for a particular service operation. This should be + // used over the deprecated EndpointResolver. + EndpointResolverV2 EndpointResolverV2 + + // The credentials provider for S3Express requests. 
+ ExpressCredentials ExpressCredentialsProvider + + // Signature Version 4 (SigV4) Signer + HTTPSignerV4 HTTPSignerV4 + + // The logger writer interface to write logging messages to. + Logger logging.Logger + + // The region to send requests to. (Required) + Region string + + // RetryMaxAttempts specifies the maximum number attempts an API client will call + // an operation that fails with a retryable error. A value of 0 is ignored, and + // will not be used to configure the API client created default retryer, or modify + // per operation call's retry max attempts. If specified in an operation call's + // functional options with a value that is different than the constructed client's + // Options, the Client's Retryer will be wrapped to use the operation's specific + // RetryMaxAttempts value. + RetryMaxAttempts int + + // RetryMode specifies the retry mode the API client will be created with, if + // Retryer option is not also specified. When creating a new API Clients this + // member will only be used if the Retryer Options member is nil. This value will + // be ignored if Retryer is not nil. Currently does not support per operation call + // overrides, may in the future. + RetryMode aws.RetryMode + + // Retryer guides how HTTP requests should be retried in case of recoverable + // failures. When nil the API client will use a default retryer. The kind of + // default retry created by the API client can be changed with the RetryMode + // option. + Retryer aws.Retryer + + // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set + // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You + // should not populate this structure programmatically, or rely on the values here + // within your applications. + RuntimeEnvironment aws.RuntimeEnvironment + + // Allows you to enable arn region support for the service. + UseARNRegion bool + + // Allows you to enable S3 Accelerate feature. All operations compatible with S3 + // Accelerate will use the accelerate endpoint for requests. Requests not + // compatible will fall back to normal S3 requests. The bucket must be enabled for + // accelerate to be used with S3 client with accelerate enabled. If the bucket is + // not enabled for accelerate an error will be returned. The bucket name must be + // DNS compatible to work with accelerate. + UseAccelerate bool + + // Allows you to enable dual-stack endpoint support for the service. + // + // Deprecated: Set dual-stack by setting UseDualStackEndpoint on + // EndpointResolverOptions. When EndpointResolverOptions' UseDualStackEndpoint + // field is set it overrides this field value. + UseDualstack bool + + // Allows you to enable the client to use path-style addressing, i.e., + // https://s3.amazonaws.com/BUCKET/KEY . By default, the S3 client will use virtual + // hosted bucket addressing when possible( https://BUCKET.s3.amazonaws.com/KEY ). + UsePathStyle bool + + // Signature Version 4a (SigV4a) Signer + httpSignerV4a httpSignerV4a + + // The initial DefaultsMode used when the client options were constructed. If the + // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved + // value was at that point in time. Currently does not support per operation call + // overrides, may in the future. + resolvedDefaultsMode aws.DefaultsMode + + // The HTTP client to invoke API calls with. Defaults to client's default HTTP + // implementation if nil. 
+ HTTPClient HTTPClient + + // The auth scheme resolver which determines how to authenticate for each + // operation. + AuthSchemeResolver AuthSchemeResolver + + // The list of auth schemes supported by the client. + AuthSchemes []smithyhttp.AuthScheme +} + +// Copy creates a clone where the APIOptions list is deep copied. +func (o Options) Copy() Options { + to := o + to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions)) + copy(to.APIOptions, o.APIOptions) + + return to +} + +func (o Options) GetIdentityResolver(schemeID string) smithyauth.IdentityResolver { + if schemeID == "aws.auth#sigv4" { + return getSigV4IdentityResolver(o) + } + if schemeID == "com.amazonaws.s3#sigv4express" { + return getExpressIdentityResolver(o) + } + if schemeID == "aws.auth#sigv4a" { + return getSigV4AIdentityResolver(o) + } + if schemeID == "smithy.api#noAuth" { + return &smithyauth.AnonymousIdentityResolver{} + } + return nil +} + +// WithAPIOptions returns a functional option for setting the Client's APIOptions +// option. +func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) { + return func(o *Options) { + o.APIOptions = append(o.APIOptions, optFns...) + } +} + +// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for +// this field will likely prevent you from using any endpoint-related service +// features released after the introduction of EndpointResolverV2 and BaseEndpoint. +// To migrate an EndpointResolver implementation that uses a custom endpoint, set +// the client option BaseEndpoint instead. +func WithEndpointResolver(v EndpointResolver) func(*Options) { + return func(o *Options) { + o.EndpointResolver = v + } +} + +// WithEndpointResolverV2 returns a functional option for setting the Client's +// EndpointResolverV2 option. +func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) { + return func(o *Options) { + o.EndpointResolverV2 = v + } +} + +func getSigV4IdentityResolver(o Options) smithyauth.IdentityResolver { + if o.Credentials != nil { + return &internalauthsmithy.CredentialsProviderAdapter{Provider: o.Credentials} + } + return nil +} + +// WithSigV4SigningName applies an override to the authentication workflow to +// use the given signing name for SigV4-authenticated operations. +// +// This is an advanced setting. The value here is FINAL, taking precedence over +// the resolved signing name from both auth scheme resolution and endpoint +// resolution. +func WithSigV4SigningName(name string) func(*Options) { + fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, + ) { + return next.HandleInitialize(awsmiddleware.SetSigningName(ctx, name), in) + } + return func(o *Options) { + o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { + return s.Initialize.Add( + middleware.InitializeMiddlewareFunc("withSigV4SigningName", fn), + middleware.Before, + ) + }) + } +} + +// WithSigV4SigningRegion applies an override to the authentication workflow to +// use the given signing region for SigV4-authenticated operations. +// +// This is an advanced setting. The value here is FINAL, taking precedence over +// the resolved signing region from both auth scheme resolution and endpoint +// resolution. 
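The new options put the S3 Express knobs next to the existing signing and endpoint settings. A usage sketch with the standard config loader; the field name comes from the Options struct above, and aws.Bool is the stock pointer helper:

    package main

    import (
    	"context"
    	"log"

    	"github.com/aws/aws-sdk-go-v2/aws"
    	"github.com/aws/aws-sdk-go-v2/config"
    	"github.com/aws/aws-sdk-go-v2/service/s3"
    )

    func main() {
    	cfg, err := config.LoadDefaultConfig(context.Background())
    	if err != nil {
    		log.Fatal(err)
    	}
    	// Opt out of S3 Express session auth; directory-bucket requests then
    	// fall back to conventional SigV4.
    	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
    		o.DisableS3ExpressSessionAuth = aws.Bool(true)
    	})
    	_ = client
    }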
+func WithSigV4SigningRegion(region string) func(*Options) { + fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, + ) { + return next.HandleInitialize(awsmiddleware.SetSigningRegion(ctx, region), in) + } + return func(o *Options) { + o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { + return s.Initialize.Add( + middleware.InitializeMiddlewareFunc("withSigV4SigningRegion", fn), + middleware.Before, + ) + }) + } +} + +func getSigV4AIdentityResolver(o Options) smithyauth.IdentityResolver { + if o.Credentials != nil { + return &v4a.CredentialsProviderAdapter{ + Provider: &v4a.SymmetricCredentialAdaptor{ + SymmetricProvider: o.Credentials, + }, + } + } + return nil +} + +// WithSigV4ASigningRegions applies an override to the authentication workflow to +// use the given signing region set for SigV4A-authenticated operations. +// +// This is an advanced setting. The value here is FINAL, taking precedence over +// the resolved signing region set from both auth scheme resolution and endpoint +// resolution. +func WithSigV4ASigningRegions(regions []string) func(*Options) { + fn := func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) ( + out middleware.FinalizeOutput, metadata middleware.Metadata, err error, + ) { + rscheme := getResolvedAuthScheme(ctx) + if rscheme == nil { + return out, metadata, fmt.Errorf("no resolved auth scheme") + } + + smithyhttp.SetSigV4ASigningRegions(&rscheme.SignerProperties, regions) + return next.HandleFinalize(ctx, in) + } + return func(o *Options) { + o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error { + return s.Finalize.Insert( + middleware.FinalizeMiddlewareFunc("withSigV4ASigningRegions", fn), + "Signing", + middleware.Before, + ) + }) + } +} + +func ignoreAnonymousAuth(options *Options) { + if aws.IsCredentialsProvider(options.Credentials, (*aws.AnonymousCredentials)(nil)) { + options.Credentials = nil + } +} + +func getExpressIdentityResolver(o Options) smithyauth.IdentityResolver { + if o.ExpressCredentials != nil { + return &s3cust.ExpressIdentityResolver{Provider: o.ExpressCredentials} + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serialize_immutable_hostname_bucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serialize_immutable_hostname_bucket.go new file mode 100644 index 00000000..4e34d1a2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serialize_immutable_hostname_bucket.go @@ -0,0 +1,77 @@ +package s3 + +import ( + "context" + "fmt" + "path" + + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + + "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn" + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/encoding/httpbinding" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// serializeImmutableHostnameBucketMiddleware handles injecting the bucket name into +// "immutable" hostnames resolved via v1 EndpointResolvers. This CANNOT be done in +// serialization, since v2 endpoint resolution requires removing the {Bucket} path +// segment from all S3 requests. +// +// This will only be done for non-ARN buckets, as the features that use those require +// virtualhost manipulation to function and we previously (pre-ep2) expected the caller +// to handle that in their resolver. 
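The middleware defined next leans on awsrulesfn.IsVirtualHostableS3Bucket: DNS-compatible bucket names ride in the hostname, anything else forces the bucket back into the path. A rough, simplified stand-in for that predicate (the real rules function additionally rejects IP-address-shaped names and adjacent dots):

    package example

    // virtualHostable approximates awsrulesfn.IsVirtualHostableS3Bucket:
    // 3-63 chars of lowercase letters, digits, and hyphens (dots only when
    // the transport permits them, i.e. non-TLS), not starting or ending
    // with a separator.
    func virtualHostable(bucket string, allowDots bool) bool {
    	if len(bucket) < 3 || len(bucket) > 63 {
    		return false
    	}
    	for i := 0; i < len(bucket); i++ {
    		c := bucket[i]
    		switch {
    		case c >= 'a' && c <= 'z', c >= '0' && c <= '9', c == '-':
    		case c == '.' && allowDots:
    		default:
    			return false
    		}
    	}
    	return bucket[0] != '-' && bucket[0] != '.' &&
    		bucket[len(bucket)-1] != '-' && bucket[len(bucket)-1] != '.'
    }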
+type serializeImmutableHostnameBucketMiddleware struct { + UsePathStyle bool +} + +func (*serializeImmutableHostnameBucketMiddleware) ID() string { + return "serializeImmutableHostnameBucket" +} + +func (m *serializeImmutableHostnameBucketMiddleware) HandleSerialize( + ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler, +) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + bucket, ok := bucketFromInput(in.Parameters) + if !ok { + return next.HandleSerialize(ctx, in) + } + + // a bucket being un-vhostable will also force us to use path style + usePathStyle := m.UsePathStyle || !awsrulesfn.IsVirtualHostableS3Bucket(bucket, request.URL.Scheme != "https") + + if !smithyhttp.GetHostnameImmutable(ctx) && + !(awsmiddleware.GetRequiresLegacyEndpoints(ctx) && usePathStyle) { + return next.HandleSerialize(ctx, in) + } + + parsedBucket := awsrulesfn.ParseARN(bucket) + + // disallow ARN buckets except for MRAP arns + if parsedBucket != nil && len(parsedBucket.Region) > 0 { + return next.HandleSerialize(ctx, in) + } + + request.URL.Path = path.Join(request.URL.Path, bucket) + request.URL.RawPath = path.Join(request.URL.RawPath, httpbinding.EscapePath(bucket, true)) + + return next.HandleSerialize(ctx, in) +} + +func addSerializeImmutableHostnameBucketMiddleware(stack *middleware.Stack, options Options) error { + return stack.Serialize.Insert( + &serializeImmutableHostnameBucketMiddleware{ + UsePathStyle: options.UsePathStyle, + }, + "OperationSerializer", + middleware.Before, + ) +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go index 126dedcf..59524bdc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/serializers.go @@ -39,11 +39,18 @@ func (m *awsRestxml_serializeOpAbortMultipartUpload) HandleSerialize(ctx context return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?x-id=AbortMultipartUpload") + opPath, opQuery := httpbinding.SplitURI("/{Key+}?x-id=AbortMultipartUpload") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "DELETE" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -64,15 +71,6 @@ func awsRestxml_serializeOpHttpBindingsAbortMultipartUploadInput(v *AbortMultipa return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := 
encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) @@ -120,11 +118,18 @@ func (m *awsRestxml_serializeOpCompleteMultipartUpload) HandleSerialize(ctx cont return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?x-id=CompleteMultipartUpload") + opPath, opQuery := httpbinding.SplitURI("/{Key+}?x-id=CompleteMultipartUpload") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "POST" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -169,15 +174,6 @@ func awsRestxml_serializeOpHttpBindingsCompleteMultipartUploadInput(v *CompleteM return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.ChecksumCRC32 != nil && len(*v.ChecksumCRC32) > 0 { locationName := "X-Amz-Checksum-Crc32" encoder.SetHeader(locationName).String(*v.ChecksumCRC32) @@ -260,11 +256,18 @@ func (m *awsRestxml_serializeOpCopyObject) HandleSerialize(ctx context.Context, return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?x-id=CopyObject") + opPath, opQuery := httpbinding.SplitURI("/{Key+}?x-id=CopyObject") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "PUT" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -290,18 +293,9 @@ func awsRestxml_serializeOpHttpBindingsCopyObjectInput(v *CopyObjectInput, encod encoder.SetHeader(locationName).String(string(v.ACL)) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - - if v.BucketKeyEnabled { + if v.BucketKeyEnabled != nil { 
locationName := "X-Amz-Server-Side-Encryption-Bucket-Key-Enabled" - encoder.SetHeader(locationName).Boolean(v.BucketKeyEnabled) + encoder.SetHeader(locationName).Boolean(*v.BucketKeyEnabled) } if v.CacheControl != nil && len(*v.CacheControl) > 0 { @@ -526,11 +520,18 @@ func (m *awsRestxml_serializeOpCreateBucket) HandleSerialize(ctx context.Context return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}") + opPath, opQuery := httpbinding.SplitURI("/") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "PUT" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -580,15 +581,6 @@ func awsRestxml_serializeOpHttpBindingsCreateBucketInput(v *CreateBucketInput, e encoder.SetHeader(locationName).String(string(v.ACL)) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.GrantFullControl != nil && len(*v.GrantFullControl) > 0 { locationName := "X-Amz-Grant-Full-Control" encoder.SetHeader(locationName).String(*v.GrantFullControl) @@ -614,9 +606,9 @@ func awsRestxml_serializeOpHttpBindingsCreateBucketInput(v *CreateBucketInput, e encoder.SetHeader(locationName).String(*v.GrantWriteACP) } - if v.ObjectLockEnabledForBucket { + if v.ObjectLockEnabledForBucket != nil { locationName := "X-Amz-Bucket-Object-Lock-Enabled" - encoder.SetHeader(locationName).Boolean(v.ObjectLockEnabledForBucket) + encoder.SetHeader(locationName).Boolean(*v.ObjectLockEnabledForBucket) } if len(v.ObjectOwnership) > 0 { @@ -648,11 +640,18 @@ func (m *awsRestxml_serializeOpCreateMultipartUpload) HandleSerialize(ctx contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?uploads&x-id=CreateMultipartUpload") + opPath, opQuery := httpbinding.SplitURI("/{Key+}?uploads&x-id=CreateMultipartUpload") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "POST" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -678,18 +677,9 @@ func 
awsRestxml_serializeOpHttpBindingsCreateMultipartUploadInput(v *CreateMulti encoder.SetHeader(locationName).String(string(v.ACL)) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - - if v.BucketKeyEnabled { + if v.BucketKeyEnabled != nil { locationName := "X-Amz-Server-Side-Encryption-Bucket-Key-Enabled" - encoder.SetHeader(locationName).Boolean(v.BucketKeyEnabled) + encoder.SetHeader(locationName).Boolean(*v.BucketKeyEnabled) } if v.CacheControl != nil && len(*v.CacheControl) > 0 { @@ -838,6 +828,67 @@ func awsRestxml_serializeOpHttpBindingsCreateMultipartUploadInput(v *CreateMulti return nil } +type awsRestxml_serializeOpCreateSession struct { +} + +func (*awsRestxml_serializeOpCreateSession) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpCreateSession) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*CreateSessionInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/?session") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if err := awsRestxml_serializeOpHttpBindingsCreateSessionInput(input, restEncoder); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = restEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsCreateSessionInput(v *CreateSessionInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) + } + + if len(v.SessionMode) > 0 { + locationName := "X-Amz-Create-Session-Mode" + encoder.SetHeader(locationName).String(string(v.SessionMode)) + } + + return nil +} + type awsRestxml_serializeOpDeleteBucket struct { } @@ -859,11 +910,18 @@ func (m *awsRestxml_serializeOpDeleteBucket) HandleSerialize(ctx context.Context return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}") + opPath, opQuery := httpbinding.SplitURI("/") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = 
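CreateSession is the new operation behind S3 Express session auth: a GET against /?session on the directory bucket, with the optional X-Amz-Create-Session-Mode header populated from SessionMode. A caller-side sketch, assuming the generated client surface of this revision; the directory-bucket name shown is illustrative:

    package main

    import (
    	"context"
    	"log"

    	"github.com/aws/aws-sdk-go-v2/aws"
    	"github.com/aws/aws-sdk-go-v2/config"
    	"github.com/aws/aws-sdk-go-v2/service/s3"
    	"github.com/aws/aws-sdk-go-v2/service/s3/types"
    )

    func main() {
    	cfg, err := config.LoadDefaultConfig(context.Background())
    	if err != nil {
    		log.Fatal(err)
    	}
    	client := s3.NewFromConfig(cfg)

    	// Serialized as GET /?session with X-Amz-Create-Session-Mode.
    	out, err := client.CreateSession(context.Background(), &s3.CreateSessionInput{
    		Bucket:      aws.String("my-bucket--usw2-az1--x-s3"), // hypothetical directory bucket
    		SessionMode: types.SessionModeReadWrite,
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	log.Printf("session credentials expire: %v", out.Credentials.Expiration)
    }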
"DELETE" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -884,15 +942,6 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketInput(v *DeleteBucketInput, e return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) @@ -922,11 +971,18 @@ func (m *awsRestxml_serializeOpDeleteBucketAnalyticsConfiguration) HandleSeriali return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?analytics") + opPath, opQuery := httpbinding.SplitURI("/?analytics") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "DELETE" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -947,15 +1003,6 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketAnalyticsConfigurationInput(v return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) @@ -989,11 +1036,18 @@ func (m *awsRestxml_serializeOpDeleteBucketCors) HandleSerialize(ctx context.Con return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?cors") + opPath, opQuery := httpbinding.SplitURI("/?cors") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "DELETE" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + 
restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -1014,15 +1068,6 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketCorsInput(v *DeleteBucketCors return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) @@ -1052,11 +1097,18 @@ func (m *awsRestxml_serializeOpDeleteBucketEncryption) HandleSerialize(ctx conte return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?encryption") + opPath, opQuery := httpbinding.SplitURI("/?encryption") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "DELETE" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -1077,15 +1129,6 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketEncryptionInput(v *DeleteBuck return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) @@ -1115,11 +1158,18 @@ func (m *awsRestxml_serializeOpDeleteBucketIntelligentTieringConfiguration) Hand return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?intelligent-tiering") + opPath, opQuery := httpbinding.SplitURI("/?intelligent-tiering") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "DELETE" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = 
smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -1140,15 +1190,6 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketIntelligentTieringConfigurati return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.Id != nil { encoder.SetQuery("id").String(*v.Id) } @@ -1177,11 +1218,18 @@ func (m *awsRestxml_serializeOpDeleteBucketInventoryConfiguration) HandleSeriali return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?inventory") + opPath, opQuery := httpbinding.SplitURI("/?inventory") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "DELETE" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -1202,15 +1250,6 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketInventoryConfigurationInput(v return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) @@ -1244,11 +1283,18 @@ func (m *awsRestxml_serializeOpDeleteBucketLifecycle) HandleSerialize(ctx contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?lifecycle") + opPath, opQuery := httpbinding.SplitURI("/?lifecycle") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "DELETE" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -1269,15 +1315,6 @@ func 
awsRestxml_serializeOpHttpBindingsDeleteBucketLifecycleInput(v *DeleteBucke return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) @@ -1307,11 +1344,18 @@ func (m *awsRestxml_serializeOpDeleteBucketMetricsConfiguration) HandleSerialize return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?metrics") + opPath, opQuery := httpbinding.SplitURI("/?metrics") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "DELETE" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -1332,15 +1376,6 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketMetricsConfigurationInput(v * return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) @@ -1374,11 +1409,18 @@ func (m *awsRestxml_serializeOpDeleteBucketOwnershipControls) HandleSerialize(ct return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?ownershipControls") + opPath, opQuery := httpbinding.SplitURI("/?ownershipControls") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "DELETE" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -1399,15 +1441,6 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketOwnershipControlsInput(v *Del return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || 
len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) @@ -1437,11 +1470,18 @@ func (m *awsRestxml_serializeOpDeleteBucketPolicy) HandleSerialize(ctx context.C return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?policy") + opPath, opQuery := httpbinding.SplitURI("/?policy") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "DELETE" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -1462,15 +1502,6 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketPolicyInput(v *DeleteBucketPo return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) @@ -1500,11 +1531,18 @@ func (m *awsRestxml_serializeOpDeleteBucketReplication) HandleSerialize(ctx cont return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?replication") + opPath, opQuery := httpbinding.SplitURI("/?replication") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "DELETE" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -1525,15 +1563,6 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketReplicationInput(v *DeleteBuc return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := 
encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) @@ -1563,11 +1592,18 @@ func (m *awsRestxml_serializeOpDeleteBucketTagging) HandleSerialize(ctx context. return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?tagging") + opPath, opQuery := httpbinding.SplitURI("/?tagging") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "DELETE" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -1588,15 +1624,6 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketTaggingInput(v *DeleteBucketT return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) @@ -1626,11 +1653,18 @@ func (m *awsRestxml_serializeOpDeleteBucketWebsite) HandleSerialize(ctx context. 
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?website") + opPath, opQuery := httpbinding.SplitURI("/?website") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "DELETE" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -1651,15 +1685,6 @@ func awsRestxml_serializeOpHttpBindingsDeleteBucketWebsiteInput(v *DeleteBucketW return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) @@ -1689,11 +1714,18 @@ func (m *awsRestxml_serializeOpDeleteObject) HandleSerialize(ctx context.Context return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?x-id=DeleteObject") + opPath, opQuery := httpbinding.SplitURI("/{Key+}?x-id=DeleteObject") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "DELETE" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -1714,18 +1746,9 @@ func awsRestxml_serializeOpHttpBindingsDeleteObjectInput(v *DeleteObjectInput, e return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - - if v.BypassGovernanceRetention { + if v.BypassGovernanceRetention != nil { locationName := "X-Amz-Bypass-Governance-Retention" - encoder.SetHeader(locationName).Boolean(v.BypassGovernanceRetention) + encoder.SetHeader(locationName).Boolean(*v.BypassGovernanceRetention) } if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { @@ -1780,11 +1803,18 @@ func (m *awsRestxml_serializeOpDeleteObjects) HandleSerialize(ctx 
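Note: the DeleteObjectInput hunk above also reflects this dependency update's move of primitive input members from value types to pointers (BypassGovernanceRetention is now *bool), so the serializer can distinguish "unset" from false and only emits the X-Amz-Bypass-Governance-Retention header when the caller actually set the field. A minimal caller-side sketch, assuming the aws.Bool/aws.String helpers from github.com/aws/aws-sdk-go-v2/aws and a hypothetical bucket and key:

// Sketch only, not part of the patch.
input := &s3.DeleteObjectInput{
	Bucket: aws.String("example-bucket"), // hypothetical name
	Key:    aws.String("example-key"),    // hypothetical name
	// Leaving this nil means "unset": no X-Amz-Bypass-Governance-Retention header.
	BypassGovernanceRetention: aws.Bool(true),
}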
@@ -1780,11 +1803,18 @@ func (m *awsRestxml_serializeOpDeleteObjects) HandleSerialize(ctx context.Contex
 		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
 	}
 
-	opPath, opQuery := httpbinding.SplitURI("/{Bucket}?delete&x-id=DeleteObjects")
+	opPath, opQuery := httpbinding.SplitURI("/?delete&x-id=DeleteObjects")
 	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
 	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
 	request.Method = "POST"
-	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
 	if err != nil {
 		return out, metadata, &smithy.SerializationError{Err: err}
 	}
@@ -1829,18 +1859,9 @@ func awsRestxml_serializeOpHttpBindingsDeleteObjectsInput(v *DeleteObjectsInput,
 		return fmt.Errorf("unsupported serialization of nil %T", v)
 	}
 
-	if v.Bucket == nil || len(*v.Bucket) == 0 {
-		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
-	}
-	if v.Bucket != nil {
-		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
-			return err
-		}
-	}
-
-	if v.BypassGovernanceRetention {
+	if v.BypassGovernanceRetention != nil {
 		locationName := "X-Amz-Bypass-Governance-Retention"
-		encoder.SetHeader(locationName).Boolean(v.BypassGovernanceRetention)
+		encoder.SetHeader(locationName).Boolean(*v.BypassGovernanceRetention)
 	}
 
 	if len(v.ChecksumAlgorithm) > 0 {
@@ -1887,11 +1908,18 @@ func (m *awsRestxml_serializeOpDeleteObjectTagging) HandleSerialize(ctx context.
 		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
 	}
 
-	opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?tagging")
+	opPath, opQuery := httpbinding.SplitURI("/{Key+}?tagging")
 	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
 	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
 	request.Method = "DELETE"
-	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
 	if err != nil {
 		return out, metadata, &smithy.SerializationError{Err: err}
 	}
@@ -1912,15 +1940,6 @@ func awsRestxml_serializeOpHttpBindingsDeleteObjectTaggingInput(v *DeleteObjectT
 		return fmt.Errorf("unsupported serialization of nil %T", v)
 	}
 
-	if v.Bucket == nil || len(*v.Bucket) == 0 {
-		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
-	}
-	if v.Bucket != nil {
-		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
-			return err
-		}
-	}
-
 	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
 		locationName := "X-Amz-Expected-Bucket-Owner"
 		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
@@ -1963,11 +1982,18 @@ func (m *awsRestxml_serializeOpDeletePublicAccessBlock) HandleSerialize(ctx cont
 		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
 	}
 
-	opPath, opQuery := httpbinding.SplitURI("/{Bucket}?publicAccessBlock")
+	opPath, opQuery := httpbinding.SplitURI("/?publicAccessBlock")
 	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
 	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
 	request.Method = "DELETE"
-	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
 	if err != nil {
 		return out, metadata, &smithy.SerializationError{Err: err}
 	}
@@ -1988,15 +2014,6 @@ func awsRestxml_serializeOpHttpBindingsDeletePublicAccessBlockInput(v *DeletePub
 		return fmt.Errorf("unsupported serialization of nil %T", v)
 	}
 
-	if v.Bucket == nil || len(*v.Bucket) == 0 {
-		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
-	}
-	if v.Bucket != nil {
-		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
-			return err
-		}
-	}
-
 	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
 		locationName := "X-Amz-Expected-Bucket-Owner"
 		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
@@ -2026,11 +2043,18 @@ func (m *awsRestxml_serializeOpGetBucketAccelerateConfiguration) HandleSerialize
 		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
 	}
 
-	opPath, opQuery := httpbinding.SplitURI("/{Bucket}?accelerate")
+	opPath, opQuery := httpbinding.SplitURI("/?accelerate")
 	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
 	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
 	request.Method = "GET"
-	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
 	if err != nil {
 		return out, metadata, &smithy.SerializationError{Err: err}
 	}
@@ -2051,20 +2075,16 @@ func awsRestxml_serializeOpHttpBindingsGetBucketAccelerateConfigurationInput(v *
 		return fmt.Errorf("unsupported serialization of nil %T", v)
 	}
 
-	if v.Bucket == nil || len(*v.Bucket) == 0 {
-		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
-	}
-	if v.Bucket != nil {
-		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
-			return err
-		}
-	}
-
 	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
 		locationName := "X-Amz-Expected-Bucket-Owner"
 		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
 	}
 
+	if len(v.RequestPayer) > 0 {
+		locationName := "X-Amz-Request-Payer"
+		encoder.SetHeader(locationName).String(string(v.RequestPayer))
+	}
+
 	return nil
 }
 
@@ -2089,11 +2109,18 @@ func (m *awsRestxml_serializeOpGetBucketAcl) HandleSerialize(ctx context.Context
 		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
 	}
 
-	opPath, opQuery := httpbinding.SplitURI("/{Bucket}?acl")
+	opPath, opQuery := httpbinding.SplitURI("/?acl")
 	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
 	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
 	request.Method = "GET"
-	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
 	if err != nil {
 		return out, metadata, &smithy.SerializationError{Err: err}
 	}
@@ -2114,15 +2141,6 @@ func awsRestxml_serializeOpHttpBindingsGetBucketAclInput(v *GetBucketAclInput, e
 		return fmt.Errorf("unsupported serialization of nil %T", v)
 	}
 
-	if v.Bucket == nil || len(*v.Bucket) == 0 {
-		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
-	}
-	if v.Bucket != nil {
-		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
-			return err
-		}
-	}
-
 	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
 		locationName := "X-Amz-Expected-Bucket-Owner"
 		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
httpbinding.SplitURI("/{Bucket}?analytics&x-id=GetBucketAnalyticsConfiguration") + opPath, opQuery := httpbinding.SplitURI("/?analytics&x-id=GetBucketAnalyticsConfiguration") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "GET" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -2177,15 +2202,6 @@ func awsRestxml_serializeOpHttpBindingsGetBucketAnalyticsConfigurationInput(v *G return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) @@ -2219,11 +2235,18 @@ func (m *awsRestxml_serializeOpGetBucketCors) HandleSerialize(ctx context.Contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?cors") + opPath, opQuery := httpbinding.SplitURI("/?cors") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "GET" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -2244,15 +2267,6 @@ func awsRestxml_serializeOpHttpBindingsGetBucketCorsInput(v *GetBucketCorsInput, return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) @@ -2282,11 +2296,18 @@ func (m *awsRestxml_serializeOpGetBucketEncryption) HandleSerialize(ctx context. 
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?encryption") + opPath, opQuery := httpbinding.SplitURI("/?encryption") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "GET" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -2307,15 +2328,6 @@ func awsRestxml_serializeOpHttpBindingsGetBucketEncryptionInput(v *GetBucketEncr return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) @@ -2345,11 +2357,18 @@ func (m *awsRestxml_serializeOpGetBucketIntelligentTieringConfiguration) HandleS return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?intelligent-tiering&x-id=GetBucketIntelligentTieringConfiguration") + opPath, opQuery := httpbinding.SplitURI("/?intelligent-tiering&x-id=GetBucketIntelligentTieringConfiguration") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "GET" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -2370,15 +2389,6 @@ func awsRestxml_serializeOpHttpBindingsGetBucketIntelligentTieringConfigurationI return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.Id != nil { encoder.SetQuery("id").String(*v.Id) } @@ -2407,11 +2417,18 @@ func (m *awsRestxml_serializeOpGetBucketInventoryConfiguration) HandleSerialize( return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := 
httpbinding.SplitURI("/{Bucket}?inventory&x-id=GetBucketInventoryConfiguration") + opPath, opQuery := httpbinding.SplitURI("/?inventory&x-id=GetBucketInventoryConfiguration") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "GET" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -2432,15 +2449,6 @@ func awsRestxml_serializeOpHttpBindingsGetBucketInventoryConfigurationInput(v *G return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) @@ -2474,11 +2482,18 @@ func (m *awsRestxml_serializeOpGetBucketLifecycleConfiguration) HandleSerialize( return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?lifecycle") + opPath, opQuery := httpbinding.SplitURI("/?lifecycle") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "GET" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -2499,15 +2514,6 @@ func awsRestxml_serializeOpHttpBindingsGetBucketLifecycleConfigurationInput(v *G return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) @@ -2537,11 +2543,18 @@ func (m *awsRestxml_serializeOpGetBucketLocation) HandleSerialize(ctx context.Co return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?location") + opPath, opQuery := httpbinding.SplitURI("/?location") 
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "GET" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -2562,15 +2575,6 @@ func awsRestxml_serializeOpHttpBindingsGetBucketLocationInput(v *GetBucketLocati return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) @@ -2600,11 +2604,18 @@ func (m *awsRestxml_serializeOpGetBucketLogging) HandleSerialize(ctx context.Con return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?logging") + opPath, opQuery := httpbinding.SplitURI("/?logging") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "GET" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -2625,15 +2636,6 @@ func awsRestxml_serializeOpHttpBindingsGetBucketLoggingInput(v *GetBucketLogging return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) @@ -2663,11 +2665,18 @@ func (m *awsRestxml_serializeOpGetBucketMetricsConfiguration) HandleSerialize(ct return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?metrics&x-id=GetBucketMetricsConfiguration") + opPath, opQuery := httpbinding.SplitURI("/?metrics&x-id=GetBucketMetricsConfiguration") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = 
smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "GET" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -2688,15 +2697,6 @@ func awsRestxml_serializeOpHttpBindingsGetBucketMetricsConfigurationInput(v *Get return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) @@ -2730,11 +2730,18 @@ func (m *awsRestxml_serializeOpGetBucketNotificationConfiguration) HandleSeriali return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?notification") + opPath, opQuery := httpbinding.SplitURI("/?notification") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "GET" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -2755,15 +2762,6 @@ func awsRestxml_serializeOpHttpBindingsGetBucketNotificationConfigurationInput(v return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) @@ -2793,11 +2791,18 @@ func (m *awsRestxml_serializeOpGetBucketOwnershipControls) HandleSerialize(ctx c return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?ownershipControls") + opPath, opQuery := httpbinding.SplitURI("/?ownershipControls") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "GET" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, 
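Note: the recurring encoder change keeps a request's raw (percent-encoded) path in sync with its decoded path. net/url only populates URL.RawPath when the decoded Path would not re-encode to the original string, so the generated code joins opPath into both fields and switches to httpbinding.NewEncoderWithRawPath to preserve pre-encoded characters through serialization. A small self-contained sketch of that net/url behavior (the URL is hypothetical):

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// An object key containing an encoded "/" round-trips differently,
	// so RawPath is set and must be carried through serialization.
	u, err := url.Parse("https://example.com/a%2Fb")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Path)    // "/a/b"   (decoded form)
	fmt.Println(u.RawPath) // "/a%2Fb" (original encoding preserved)
}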
@@ -2793,11 +2791,18 @@ func (m *awsRestxml_serializeOpGetBucketOwnershipControls) HandleSerialize(ctx c
 		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
 	}
 
-	opPath, opQuery := httpbinding.SplitURI("/{Bucket}?ownershipControls")
+	opPath, opQuery := httpbinding.SplitURI("/?ownershipControls")
 	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
 	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
 	request.Method = "GET"
-	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
 	if err != nil {
 		return out, metadata, &smithy.SerializationError{Err: err}
 	}
@@ -2818,15 +2823,6 @@ func awsRestxml_serializeOpHttpBindingsGetBucketOwnershipControlsInput(v *GetBuc
 		return fmt.Errorf("unsupported serialization of nil %T", v)
 	}
 
-	if v.Bucket == nil || len(*v.Bucket) == 0 {
-		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
-	}
-	if v.Bucket != nil {
-		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
-			return err
-		}
-	}
-
 	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
 		locationName := "X-Amz-Expected-Bucket-Owner"
 		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
@@ -2856,11 +2852,18 @@ func (m *awsRestxml_serializeOpGetBucketPolicy) HandleSerialize(ctx context.Cont
 		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
 	}
 
-	opPath, opQuery := httpbinding.SplitURI("/{Bucket}?policy")
+	opPath, opQuery := httpbinding.SplitURI("/?policy")
 	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
 	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
 	request.Method = "GET"
-	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
 	if err != nil {
 		return out, metadata, &smithy.SerializationError{Err: err}
 	}
@@ -2881,15 +2884,6 @@ func awsRestxml_serializeOpHttpBindingsGetBucketPolicyInput(v *GetBucketPolicyIn
 		return fmt.Errorf("unsupported serialization of nil %T", v)
 	}
 
-	if v.Bucket == nil || len(*v.Bucket) == 0 {
-		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
-	}
-	if v.Bucket != nil {
-		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
-			return err
-		}
-	}
-
 	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
 		locationName := "X-Amz-Expected-Bucket-Owner"
 		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
@@ -2919,11 +2913,18 @@ func (m *awsRestxml_serializeOpGetBucketPolicyStatus) HandleSerialize(ctx contex
 		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
 	}
 
-	opPath, opQuery := httpbinding.SplitURI("/{Bucket}?policyStatus")
+	opPath, opQuery := httpbinding.SplitURI("/?policyStatus")
 	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
 	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
 	request.Method = "GET"
-	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
 	if err != nil {
 		return out, metadata, &smithy.SerializationError{Err: err}
 	}
@@ -2944,15 +2945,6 @@ func awsRestxml_serializeOpHttpBindingsGetBucketPolicyStatusInput(v *GetBucketPo
 		return fmt.Errorf("unsupported serialization of nil %T", v)
 	}
 
-	if v.Bucket == nil || len(*v.Bucket) == 0 {
-		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
-	}
-	if v.Bucket != nil {
-		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
-			return err
-		}
-	}
-
 	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
 		locationName := "X-Amz-Expected-Bucket-Owner"
 		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
@@ -2982,11 +2974,18 @@ func (m *awsRestxml_serializeOpGetBucketReplication) HandleSerialize(ctx context
 		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
 	}
 
-	opPath, opQuery := httpbinding.SplitURI("/{Bucket}?replication")
+	opPath, opQuery := httpbinding.SplitURI("/?replication")
 	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
 	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
 	request.Method = "GET"
-	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
 	if err != nil {
 		return out, metadata, &smithy.SerializationError{Err: err}
 	}
@@ -3007,15 +3006,6 @@ func awsRestxml_serializeOpHttpBindingsGetBucketReplicationInput(v *GetBucketRep
 		return fmt.Errorf("unsupported serialization of nil %T", v)
 	}
 
-	if v.Bucket == nil || len(*v.Bucket) == 0 {
-		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
-	}
-	if v.Bucket != nil {
-		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
-			return err
-		}
-	}
-
 	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
 		locationName := "X-Amz-Expected-Bucket-Owner"
 		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
@@ -3045,11 +3035,18 @@ func (m *awsRestxml_serializeOpGetBucketRequestPayment) HandleSerialize(ctx cont
 		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
 	}
 
-	opPath, opQuery := httpbinding.SplitURI("/{Bucket}?requestPayment")
+	opPath, opQuery := httpbinding.SplitURI("/?requestPayment")
 	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
 	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
 	request.Method = "GET"
-	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
 	if err != nil {
 		return out, metadata, &smithy.SerializationError{Err: err}
 	}
@@ -3070,15 +3067,6 @@ func awsRestxml_serializeOpHttpBindingsGetBucketRequestPaymentInput(v *GetBucket
 		return fmt.Errorf("unsupported serialization of nil %T", v)
 	}
 
-	if v.Bucket == nil || len(*v.Bucket) == 0 {
-		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
-	}
-	if v.Bucket != nil {
-		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
-			return err
-		}
-	}
-
 	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
 		locationName := "X-Amz-Expected-Bucket-Owner"
 		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
@@ -3108,11 +3096,18 @@ func (m *awsRestxml_serializeOpGetBucketTagging) HandleSerialize(ctx context.Con
 		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
 	}
 
-	opPath, opQuery := httpbinding.SplitURI("/{Bucket}?tagging")
+	opPath, opQuery := httpbinding.SplitURI("/?tagging")
 	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
 	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
 	request.Method = "GET"
-	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
 	if err != nil {
 		return out, metadata, &smithy.SerializationError{Err: err}
 	}
@@ -3133,15 +3128,6 @@ func awsRestxml_serializeOpHttpBindingsGetBucketTaggingInput(v *GetBucketTagging
 		return fmt.Errorf("unsupported serialization of nil %T", v)
 	}
 
-	if v.Bucket == nil || len(*v.Bucket) == 0 {
-		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
-	}
-	if v.Bucket != nil {
-		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
-			return err
-		}
-	}
-
 	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
 		locationName := "X-Amz-Expected-Bucket-Owner"
 		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
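Note: the branch bodies assign with = rather than := because restEncoder is now declared up front (var restEncoder *httpbinding.Encoder) and err already exists in the generated function's scope; a := inside the if/else would only shadow both. Reduced to its shape (path, rawPath, query, and header are stand-in locals, not the generated names):

// Shape of the recurring replacement, in isolation.
var restEncoder *httpbinding.Encoder
var err error // already in scope in the generated functions
if rawPath == "" {
	restEncoder, err = httpbinding.NewEncoder(path, query, header)
} else {
	restEncoder, err = httpbinding.NewEncoderWithRawPath(path, rawPath, query, header)
}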
@@ -3171,11 +3157,18 @@ func (m *awsRestxml_serializeOpGetBucketVersioning) HandleSerialize(ctx context.
 		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
 	}
 
-	opPath, opQuery := httpbinding.SplitURI("/{Bucket}?versioning")
+	opPath, opQuery := httpbinding.SplitURI("/?versioning")
 	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
 	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
 	request.Method = "GET"
-	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
 	if err != nil {
 		return out, metadata, &smithy.SerializationError{Err: err}
 	}
@@ -3196,15 +3189,6 @@ func awsRestxml_serializeOpHttpBindingsGetBucketVersioningInput(v *GetBucketVers
 		return fmt.Errorf("unsupported serialization of nil %T", v)
 	}
 
-	if v.Bucket == nil || len(*v.Bucket) == 0 {
-		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
-	}
-	if v.Bucket != nil {
-		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
-			return err
-		}
-	}
-
 	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
 		locationName := "X-Amz-Expected-Bucket-Owner"
 		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
@@ -3234,11 +3218,18 @@ func (m *awsRestxml_serializeOpGetBucketWebsite) HandleSerialize(ctx context.Con
 		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
 	}
 
-	opPath, opQuery := httpbinding.SplitURI("/{Bucket}?website")
+	opPath, opQuery := httpbinding.SplitURI("/?website")
 	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
 	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
 	request.Method = "GET"
-	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
 	if err != nil {
 		return out, metadata, &smithy.SerializationError{Err: err}
 	}
@@ -3259,15 +3250,6 @@ func awsRestxml_serializeOpHttpBindingsGetBucketWebsiteInput(v *GetBucketWebsite
 		return fmt.Errorf("unsupported serialization of nil %T", v)
 	}
 
-	if v.Bucket == nil || len(*v.Bucket) == 0 {
-		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
-	}
-	if v.Bucket != nil {
-		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
-			return err
-		}
-	}
-
 	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
 		locationName := "X-Amz-Expected-Bucket-Owner"
 		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
@@ -3297,11 +3279,18 @@ func (m *awsRestxml_serializeOpGetObject) HandleSerialize(ctx context.Context, i
 		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
 	}
 
-	opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?x-id=GetObject")
+	opPath, opQuery := httpbinding.SplitURI("/{Key+}?x-id=GetObject")
 	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
 	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
 	request.Method = "GET"
-	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
 	if err != nil {
 		return out, metadata, &smithy.SerializationError{Err: err}
 	}
@@ -3322,15 +3311,6 @@ func awsRestxml_serializeOpHttpBindingsGetObjectInput(v *GetObjectInput, encoder
 		return fmt.Errorf("unsupported serialization of nil %T", v)
 	}
 
-	if v.Bucket == nil || len(*v.Bucket) == 0 {
-		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
-	}
-	if v.Bucket != nil {
-		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
-			return err
-		}
-	}
-
 	if len(v.ChecksumMode) > 0 {
 		locationName := "X-Amz-Checksum-Mode"
 		encoder.SetHeader(locationName).String(string(v.ChecksumMode))
@@ -3370,8 +3350,8 @@ func awsRestxml_serializeOpHttpBindingsGetObjectInput(v *GetObjectInput, encoder
 		}
 	}
 
-	if v.PartNumber != 0 {
-		encoder.SetQuery("partNumber").Integer(v.PartNumber)
+	if v.PartNumber != nil {
+		encoder.SetQuery("partNumber").Integer(*v.PartNumber)
 	}
 
 	if v.Range != nil && len(*v.Range) > 0 {
@@ -3451,11 +3431,18 @@ func (m *awsRestxml_serializeOpGetObjectAcl) HandleSerialize(ctx context.Context
 		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
 	}
 
-	opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?acl")
+	opPath, opQuery := httpbinding.SplitURI("/{Key+}?acl")
 	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
 	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
 	request.Method = "GET"
-	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
 	if err != nil {
 		return out, metadata, &smithy.SerializationError{Err: err}
 	}
@@ -3476,15 +3463,6 @@ func awsRestxml_serializeOpHttpBindingsGetObjectAclInput(v *GetObjectAclInput, e
 		return fmt.Errorf("unsupported serialization of nil %T", v)
 	}
 
-	if v.Bucket == nil || len(*v.Bucket) == 0 {
-		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
-	}
-	if v.Bucket != nil {
-		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
-			return err
-		}
-	}
-
 	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
 		locationName := "X-Amz-Expected-Bucket-Owner"
 		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
@@ -3532,11 +3510,18 @@ func (m *awsRestxml_serializeOpGetObjectAttributes) HandleSerialize(ctx context.
 		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
 	}
 
-	opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?attributes")
+	opPath, opQuery := httpbinding.SplitURI("/{Key+}?attributes")
 	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
 	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
 	request.Method = "GET"
-	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
 	if err != nil {
 		return out, metadata, &smithy.SerializationError{Err: err}
 	}
@@ -3557,15 +3542,6 @@ func awsRestxml_serializeOpHttpBindingsGetObjectAttributesInput(v *GetObjectAttr
 		return fmt.Errorf("unsupported serialization of nil %T", v)
 	}
 
-	if v.Bucket == nil || len(*v.Bucket) == 0 {
-		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
-	}
-	if v.Bucket != nil {
-		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
-			return err
-		}
-	}
-
 	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
 		locationName := "X-Amz-Expected-Bucket-Owner"
 		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
@@ -3580,9 +3556,9 @@ func awsRestxml_serializeOpHttpBindingsGetObjectAttributesInput(v *GetObjectAttr
 		}
 	}
 
-	if v.MaxParts != 0 {
+	if v.MaxParts != nil {
 		locationName := "X-Amz-Max-Parts"
-		encoder.SetHeader(locationName).Integer(v.MaxParts)
+		encoder.SetHeader(locationName).Integer(*v.MaxParts)
 	}
 
 	if v.ObjectAttributes != nil {
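Note: while "/{Bucket}" disappears from these URI patterns, "/{Key+}" stays: object keys are still bound into the request path ({Key+} is a greedy label, so keys may span "/" separators), and only the bucket moves into endpoint resolution. Combined with the MaxParts hunk above, a hedged caller-side sketch for GetObjectAttributes (bucket/key names are assumptions; types is github.com/aws/aws-sdk-go-v2/service/s3/types):

// Sketch only, not part of the patch.
input := &s3.GetObjectAttributesInput{
	Bucket:           aws.String("example-bucket"),      // resolved into the endpoint, not the path
	Key:              aws.String("photos/2024/cat.png"), // bound into "/{Key+}"; slashes preserved
	MaxParts:         aws.Int32(100),                    // now *int32; header sent only when non-nil
	ObjectAttributes: []types.ObjectAttributes{types.ObjectAttributesObjectParts},
}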
"X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) @@ -3733,11 +3707,18 @@ func (m *awsRestxml_serializeOpGetObjectLockConfiguration) HandleSerialize(ctx c return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?object-lock") + opPath, opQuery := httpbinding.SplitURI("/?object-lock") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "GET" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -3758,15 +3739,6 @@ func awsRestxml_serializeOpHttpBindingsGetObjectLockConfigurationInput(v *GetObj return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) @@ -3796,11 +3768,18 @@ func (m *awsRestxml_serializeOpGetObjectRetention) HandleSerialize(ctx context.C return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?retention") + opPath, opQuery := httpbinding.SplitURI("/{Key+}?retention") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "GET" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -3821,15 +3800,6 @@ func awsRestxml_serializeOpHttpBindingsGetObjectRetentionInput(v *GetObjectReten return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) @@ -3877,11 +3847,18 @@ func (m 
@@ -3877,11 +3847,18 @@ func (m *awsRestxml_serializeOpGetObjectTagging) HandleSerialize(ctx context.Con
 		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
 	}
 
-	opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?tagging")
+	opPath, opQuery := httpbinding.SplitURI("/{Key+}?tagging")
 	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
 	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
 	request.Method = "GET"
-	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
 	if err != nil {
 		return out, metadata, &smithy.SerializationError{Err: err}
 	}
@@ -3902,15 +3879,6 @@ func awsRestxml_serializeOpHttpBindingsGetObjectTaggingInput(v *GetObjectTagging
 		return fmt.Errorf("unsupported serialization of nil %T", v)
 	}
 
-	if v.Bucket == nil || len(*v.Bucket) == 0 {
-		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
-	}
-	if v.Bucket != nil {
-		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
-			return err
-		}
-	}
-
 	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
 		locationName := "X-Amz-Expected-Bucket-Owner"
 		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
@@ -3958,11 +3926,18 @@ func (m *awsRestxml_serializeOpGetObjectTorrent) HandleSerialize(ctx context.Con
 		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
 	}
 
-	opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?torrent")
+	opPath, opQuery := httpbinding.SplitURI("/{Key+}?torrent")
 	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
 	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
 	request.Method = "GET"
-	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
 	if err != nil {
 		return out, metadata, &smithy.SerializationError{Err: err}
 	}
@@ -3983,15 +3958,6 @@ func awsRestxml_serializeOpHttpBindingsGetObjectTorrentInput(v *GetObjectTorrent
 		return fmt.Errorf("unsupported serialization of nil %T", v)
 	}
 
-	if v.Bucket == nil || len(*v.Bucket) == 0 {
-		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
-	}
-	if v.Bucket != nil {
-		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
-			return err
-		}
-	}
-
 	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
 		locationName := "X-Amz-Expected-Bucket-Owner"
 		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
@@ -4035,11 +4001,18 @@ func (m *awsRestxml_serializeOpGetPublicAccessBlock) HandleSerialize(ctx context
 		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
 	}
 
-	opPath, opQuery := httpbinding.SplitURI("/{Bucket}?publicAccessBlock")
+	opPath, opQuery := httpbinding.SplitURI("/?publicAccessBlock")
 	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
 	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
 	request.Method = "GET"
-	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
 	if err != nil {
 		return out, metadata, &smithy.SerializationError{Err: err}
 	}
@@ -4060,15 +4033,6 @@ func awsRestxml_serializeOpHttpBindingsGetPublicAccessBlockInput(v *GetPublicAcc
 		return fmt.Errorf("unsupported serialization of nil %T", v)
 	}
 
-	if v.Bucket == nil || len(*v.Bucket) == 0 {
-		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
-	}
-	if v.Bucket != nil {
-		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
-			return err
-		}
-	}
-
 	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
 		locationName := "X-Amz-Expected-Bucket-Owner"
 		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
@@ -4098,11 +4062,18 @@ func (m *awsRestxml_serializeOpHeadBucket) HandleSerialize(ctx context.Context,
 		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
 	}
 
-	opPath, opQuery := httpbinding.SplitURI("/{Bucket}")
+	opPath, opQuery := httpbinding.SplitURI("/")
 	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
 	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
 	request.Method = "HEAD"
-	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
 	if err != nil {
 		return out, metadata, &smithy.SerializationError{Err: err}
 	}
@@ -4123,15 +4094,6 @@ func awsRestxml_serializeOpHttpBindingsHeadBucketInput(v *HeadBucketInput, encod
 		return fmt.Errorf("unsupported serialization of nil %T", v)
 	}
 
-	if v.Bucket == nil || len(*v.Bucket) == 0 {
-		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
-	}
-	if v.Bucket != nil {
-		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
-			return err
-		}
-	}
-
 	if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 {
 		locationName := "X-Amz-Expected-Bucket-Owner"
 		encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner)
@@ -4161,11 +4123,18 @@ func (m *awsRestxml_serializeOpHeadObject) HandleSerialize(ctx context.Context,
 		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
 	}
 
-	opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}")
+	opPath, opQuery := httpbinding.SplitURI("/{Key+}")
 	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
 	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
 	request.Method = "HEAD"
-	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
 	if err != nil {
 		return out, metadata, &smithy.SerializationError{Err: err}
 	}
@@ -4186,15 +4155,6 @@ func awsRestxml_serializeOpHttpBindingsHeadObjectInput(v *HeadObjectInput, encod
 		return fmt.Errorf("unsupported serialization of nil %T", v)
 	}
 
-	if v.Bucket == nil || len(*v.Bucket) == 0 {
-		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
-	}
-	if v.Bucket != nil {
-		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
-			return err
-		}
-	}
-
 	if len(v.ChecksumMode) > 0 {
 		locationName := "X-Amz-Checksum-Mode"
 		encoder.SetHeader(locationName).String(string(v.ChecksumMode))
@@ -4234,8 +4194,8 @@ func awsRestxml_serializeOpHttpBindingsHeadObjectInput(v *HeadObjectInput, encod
 		}
 	}
 
-	if v.PartNumber != 0 {
-		encoder.SetQuery("partNumber").Integer(v.PartNumber)
+	if v.PartNumber != nil {
+		encoder.SetQuery("partNumber").Integer(*v.PartNumber)
 	}
 
 	if v.Range != nil && len(*v.Range) > 0 {
@@ -4291,11 +4251,18 @@ func (m *awsRestxml_serializeOpListBucketAnalyticsConfigurations) HandleSerializ
 		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
 	}
 
-	opPath, opQuery := httpbinding.SplitURI("/{Bucket}?analytics&x-id=ListBucketAnalyticsConfigurations")
+	opPath, opQuery := httpbinding.SplitURI("/?analytics&x-id=ListBucketAnalyticsConfigurations")
 	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
 	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
 	request.Method = "GET"
-	restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
 	if err != nil {
 		return out, metadata, &smithy.SerializationError{Err: err}
 	}
@@ -4316,15 +4283,6 @@ func awsRestxml_serializeOpHttpBindingsListBucketAnalyticsConfigurationsInput(v
 		return fmt.Errorf("unsupported serialization of nil %T", v)
 	}
 
-	if v.Bucket == nil || len(*v.Bucket) == 0 {
-		return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")}
-	}
-	if v.Bucket != nil {
-		if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil {
-			return err
-		}
-	}
-
 	if v.ContinuationToken != nil {
 		encoder.SetQuery("continuation-token").String(*v.ContinuationToken)
 	}
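Note on `v.PartNumber != 0` becoming `v.PartNumber != nil`: the generated input types in this release move optional numbers and booleans from value types to pointers, so the zero value no longer doubles as "unset" and the serializer only emits the query parameter or header when the field is explicitly set. The same shape change recurs below for MaxUploads, MaxKeys, MaxParts, FetchOwner, and others. On the caller side the stock aws pointer helpers apply; a sketch with hypothetical names:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	// PartNumber is now *int32: zero can no longer be conflated with
	// "unset", so partNumber=0 never leaks into the query string.
	in := &s3.HeadObjectInput{
		Bucket:     aws.String("example-bucket"),
		Key:        aws.String("example-key"),
		PartNumber: aws.Int32(1), // leave nil to omit ?partNumber entirely
	}
	fmt.Println(*in.PartNumber)
}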
httpbinding.SplitURI("/{Bucket}?intelligent-tiering&x-id=ListBucketIntelligentTieringConfigurations") + opPath, opQuery := httpbinding.SplitURI("/?intelligent-tiering&x-id=ListBucketIntelligentTieringConfigurations") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "GET" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -4383,15 +4348,6 @@ func awsRestxml_serializeOpHttpBindingsListBucketIntelligentTieringConfiguration return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.ContinuationToken != nil { encoder.SetQuery("continuation-token").String(*v.ContinuationToken) } @@ -4420,11 +4376,18 @@ func (m *awsRestxml_serializeOpListBucketInventoryConfigurations) HandleSerializ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?inventory&x-id=ListBucketInventoryConfigurations") + opPath, opQuery := httpbinding.SplitURI("/?inventory&x-id=ListBucketInventoryConfigurations") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "GET" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -4445,15 +4408,6 @@ func awsRestxml_serializeOpHttpBindingsListBucketInventoryConfigurationsInput(v return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.ContinuationToken != nil { encoder.SetQuery("continuation-token").String(*v.ContinuationToken) } @@ -4487,11 +4441,18 @@ func (m *awsRestxml_serializeOpListBucketMetricsConfigurations) HandleSerialize( return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?metrics&x-id=ListBucketMetricsConfigurations") + opPath, opQuery := 
httpbinding.SplitURI("/?metrics&x-id=ListBucketMetricsConfigurations") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "GET" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -4512,15 +4473,6 @@ func awsRestxml_serializeOpHttpBindingsListBucketMetricsConfigurationsInput(v *L return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.ContinuationToken != nil { encoder.SetQuery("continuation-token").String(*v.ContinuationToken) } @@ -4554,11 +4506,18 @@ func (m *awsRestxml_serializeOpListBuckets) HandleSerialize(ctx context.Context, return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/") + opPath, opQuery := httpbinding.SplitURI("/?x-id=ListBuckets") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "GET" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -4578,6 +4537,70 @@ func awsRestxml_serializeOpHttpBindingsListBucketsInput(v *ListBucketsInput, enc return nil } +type awsRestxml_serializeOpListDirectoryBuckets struct { +} + +func (*awsRestxml_serializeOpListDirectoryBuckets) ID() string { + return "OperationSerializer" +} + +func (m *awsRestxml_serializeOpListDirectoryBuckets) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*ListDirectoryBucketsInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + opPath, opQuery := httpbinding.SplitURI("/?x-id=ListDirectoryBuckets") + request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) + request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) + request.Method = "GET" 
@@ -4578,6 +4537,70 @@ func awsRestxml_serializeOpHttpBindingsListBucketsInput(v *ListBucketsInput, enc
 	return nil
 }
 
+type awsRestxml_serializeOpListDirectoryBuckets struct {
+}
+
+func (*awsRestxml_serializeOpListDirectoryBuckets) ID() string {
+	return "OperationSerializer"
+}
+
+func (m *awsRestxml_serializeOpListDirectoryBuckets) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ListDirectoryBucketsInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	opPath, opQuery := httpbinding.SplitURI("/?x-id=ListDirectoryBuckets")
+	request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+	request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+	request.Method = "GET"
+	var restEncoder *httpbinding.Encoder
+	if request.URL.RawPath == "" {
+		restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	} else {
+		request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+		restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+	}
+
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if err := awsRestxml_serializeOpHttpBindingsListDirectoryBucketsInput(input, restEncoder); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	return next.HandleSerialize(ctx, in)
+}
+func awsRestxml_serializeOpHttpBindingsListDirectoryBucketsInput(v *ListDirectoryBucketsInput, encoder *httpbinding.Encoder) error {
+	if v == nil {
+		return fmt.Errorf("unsupported serialization of nil %T", v)
+	}
+
+	if v.ContinuationToken != nil {
+		encoder.SetQuery("continuation-token").String(*v.ContinuationToken)
+	}
+
+	if v.MaxDirectoryBuckets != nil {
+		encoder.SetQuery("max-directory-buckets").Integer(*v.MaxDirectoryBuckets)
+	}
+
+	return nil
+}
+
 type awsRestxml_serializeOpListMultipartUploads struct {
 }
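The ListDirectoryBuckets operation added above follows the ListBuckets template exactly; both members are optional, and MaxDirectoryBuckets uses the new pointer-typed paging shape. A hypothetical call sketch:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Both input members are optional; leaving them nil omits the
	// continuation-token and max-directory-buckets query parameters.
	out, err := client.ListDirectoryBuckets(context.TODO(), &s3.ListDirectoryBucketsInput{
		MaxDirectoryBuckets: aws.Int32(10),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range out.Buckets {
		fmt.Println(aws.ToString(b.Name))
	}
}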
encoder.SetQuery("upload-id-marker").String(*v.UploadIdMarker) } @@ -4686,11 +4712,18 @@ func (m *awsRestxml_serializeOpListObjects) HandleSerialize(ctx context.Context, return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}") + opPath, opQuery := httpbinding.SplitURI("/") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "GET" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -4711,15 +4744,6 @@ func awsRestxml_serializeOpHttpBindingsListObjectsInput(v *ListObjectsInput, enc return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.Delimiter != nil { encoder.SetQuery("delimiter").String(*v.Delimiter) } @@ -4737,8 +4761,22 @@ func awsRestxml_serializeOpHttpBindingsListObjectsInput(v *ListObjectsInput, enc encoder.SetQuery("marker").String(*v.Marker) } - if v.MaxKeys != 0 { - encoder.SetQuery("max-keys").Integer(v.MaxKeys) + if v.MaxKeys != nil { + encoder.SetQuery("max-keys").Integer(*v.MaxKeys) + } + + if v.OptionalObjectAttributes != nil { + locationName := "X-Amz-Optional-Object-Attributes" + for i := range v.OptionalObjectAttributes { + if len(v.OptionalObjectAttributes[i]) > 0 { + escaped := string(v.OptionalObjectAttributes[i]) + if strings.Index(string(v.OptionalObjectAttributes[i]), `,`) != -1 || strings.Index(string(v.OptionalObjectAttributes[i]), `"`) != -1 { + escaped = strconv.Quote(string(v.OptionalObjectAttributes[i])) + } + + encoder.AddHeader(locationName).String(string(escaped)) + } + } } if v.Prefix != nil { @@ -4774,11 +4812,18 @@ func (m *awsRestxml_serializeOpListObjectsV2) HandleSerialize(ctx context.Contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?list-type=2") + opPath, opQuery := httpbinding.SplitURI("/?list-type=2") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "GET" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: 
err} } @@ -4799,15 +4844,6 @@ func awsRestxml_serializeOpHttpBindingsListObjectsV2Input(v *ListObjectsV2Input, return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.ContinuationToken != nil { encoder.SetQuery("continuation-token").String(*v.ContinuationToken) } @@ -4825,12 +4861,26 @@ func awsRestxml_serializeOpHttpBindingsListObjectsV2Input(v *ListObjectsV2Input, encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } - if v.FetchOwner { - encoder.SetQuery("fetch-owner").Boolean(v.FetchOwner) + if v.FetchOwner != nil { + encoder.SetQuery("fetch-owner").Boolean(*v.FetchOwner) } - if v.MaxKeys != 0 { - encoder.SetQuery("max-keys").Integer(v.MaxKeys) + if v.MaxKeys != nil { + encoder.SetQuery("max-keys").Integer(*v.MaxKeys) + } + + if v.OptionalObjectAttributes != nil { + locationName := "X-Amz-Optional-Object-Attributes" + for i := range v.OptionalObjectAttributes { + if len(v.OptionalObjectAttributes[i]) > 0 { + escaped := string(v.OptionalObjectAttributes[i]) + if strings.Index(string(v.OptionalObjectAttributes[i]), `,`) != -1 || strings.Index(string(v.OptionalObjectAttributes[i]), `"`) != -1 { + escaped = strconv.Quote(string(v.OptionalObjectAttributes[i])) + } + + encoder.AddHeader(locationName).String(string(escaped)) + } + } } if v.Prefix != nil { @@ -4870,11 +4920,18 @@ func (m *awsRestxml_serializeOpListObjectVersions) HandleSerialize(ctx context.C return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?versions") + opPath, opQuery := httpbinding.SplitURI("/?versions") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "GET" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -4895,15 +4952,6 @@ func awsRestxml_serializeOpHttpBindingsListObjectVersionsInput(v *ListObjectVers return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.Delimiter != nil { encoder.SetQuery("delimiter").String(*v.Delimiter) } @@ -4921,14 +4969,33 @@ func awsRestxml_serializeOpHttpBindingsListObjectVersionsInput(v *ListObjectVers encoder.SetQuery("key-marker").String(*v.KeyMarker) } - if v.MaxKeys != 0 { - encoder.SetQuery("max-keys").Integer(v.MaxKeys) + if v.MaxKeys != nil { + encoder.SetQuery("max-keys").Integer(*v.MaxKeys) + } + + if v.OptionalObjectAttributes != nil { + locationName := "X-Amz-Optional-Object-Attributes" + for i := 
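Note on the X-Amz-Optional-Object-Attributes loops added above: list-valued headers are comma-joined on the wire, so any member value containing a comma or double quote is wrapped with strconv.Quote before AddHeader appends it; otherwise a single value could be misread as two list entries. A standalone sketch of the same rule (helper name hypothetical; it mirrors the generated logic rather than reusing it):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// quoteIfNeeded mirrors the escaping the serializer applies to each
// header value before AddHeader appends it to the list-valued header.
func quoteIfNeeded(v string) string {
	if strings.Contains(v, ",") || strings.Contains(v, `"`) {
		return strconv.Quote(v)
	}
	return v
}

func main() {
	fmt.Println(quoteIfNeeded("RestoreStatus")) // sent as-is
	fmt.Println(quoteIfNeeded("a,b"))           // sent as "a,b" so the comma
	                                            // is not read as a separator
}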
range v.OptionalObjectAttributes { + if len(v.OptionalObjectAttributes[i]) > 0 { + escaped := string(v.OptionalObjectAttributes[i]) + if strings.Index(string(v.OptionalObjectAttributes[i]), `,`) != -1 || strings.Index(string(v.OptionalObjectAttributes[i]), `"`) != -1 { + escaped = strconv.Quote(string(v.OptionalObjectAttributes[i])) + } + + encoder.AddHeader(locationName).String(string(escaped)) + } + } } if v.Prefix != nil { encoder.SetQuery("prefix").String(*v.Prefix) } + if len(v.RequestPayer) > 0 { + locationName := "X-Amz-Request-Payer" + encoder.SetHeader(locationName).String(string(v.RequestPayer)) + } + if v.VersionIdMarker != nil { encoder.SetQuery("version-id-marker").String(*v.VersionIdMarker) } @@ -4957,11 +5024,18 @@ func (m *awsRestxml_serializeOpListParts) HandleSerialize(ctx context.Context, i return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?x-id=ListParts") + opPath, opQuery := httpbinding.SplitURI("/{Key+}?x-id=ListParts") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "GET" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -4982,15 +5056,6 @@ func awsRestxml_serializeOpHttpBindingsListPartsInput(v *ListPartsInput, encoder return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) @@ -5005,8 +5070,8 @@ func awsRestxml_serializeOpHttpBindingsListPartsInput(v *ListPartsInput, encoder } } - if v.MaxParts != 0 { - encoder.SetQuery("max-parts").Integer(v.MaxParts) + if v.MaxParts != nil { + encoder.SetQuery("max-parts").Integer(*v.MaxParts) } if v.PartNumberMarker != nil { @@ -5061,11 +5126,18 @@ func (m *awsRestxml_serializeOpPutBucketAccelerateConfiguration) HandleSerialize return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?accelerate") + opPath, opQuery := httpbinding.SplitURI("/?accelerate") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "PUT" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = 
smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -5110,15 +5182,6 @@ func awsRestxml_serializeOpHttpBindingsPutBucketAccelerateConfigurationInput(v * return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if len(v.ChecksumAlgorithm) > 0 { locationName := "X-Amz-Sdk-Checksum-Algorithm" encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) @@ -5153,11 +5216,18 @@ func (m *awsRestxml_serializeOpPutBucketAcl) HandleSerialize(ctx context.Context return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?acl") + opPath, opQuery := httpbinding.SplitURI("/?acl") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "PUT" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -5207,15 +5277,6 @@ func awsRestxml_serializeOpHttpBindingsPutBucketAclInput(v *PutBucketAclInput, e encoder.SetHeader(locationName).String(string(v.ACL)) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if len(v.ChecksumAlgorithm) > 0 { locationName := "X-Amz-Sdk-Checksum-Algorithm" encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) @@ -5280,11 +5341,18 @@ func (m *awsRestxml_serializeOpPutBucketAnalyticsConfiguration) HandleSerialize( return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?analytics") + opPath, opQuery := httpbinding.SplitURI("/?analytics") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "PUT" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: 
err} } @@ -5329,15 +5397,6 @@ func awsRestxml_serializeOpHttpBindingsPutBucketAnalyticsConfigurationInput(v *P return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) @@ -5371,11 +5430,18 @@ func (m *awsRestxml_serializeOpPutBucketCors) HandleSerialize(ctx context.Contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?cors") + opPath, opQuery := httpbinding.SplitURI("/?cors") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "PUT" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -5420,15 +5486,6 @@ func awsRestxml_serializeOpHttpBindingsPutBucketCorsInput(v *PutBucketCorsInput, return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if len(v.ChecksumAlgorithm) > 0 { locationName := "X-Amz-Sdk-Checksum-Algorithm" encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) @@ -5468,11 +5525,18 @@ func (m *awsRestxml_serializeOpPutBucketEncryption) HandleSerialize(ctx context. 
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?encryption") + opPath, opQuery := httpbinding.SplitURI("/?encryption") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "PUT" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -5517,15 +5581,6 @@ func awsRestxml_serializeOpHttpBindingsPutBucketEncryptionInput(v *PutBucketEncr return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if len(v.ChecksumAlgorithm) > 0 { locationName := "X-Amz-Sdk-Checksum-Algorithm" encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) @@ -5565,11 +5620,18 @@ func (m *awsRestxml_serializeOpPutBucketIntelligentTieringConfiguration) HandleS return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?intelligent-tiering") + opPath, opQuery := httpbinding.SplitURI("/?intelligent-tiering") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "PUT" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -5614,15 +5676,6 @@ func awsRestxml_serializeOpHttpBindingsPutBucketIntelligentTieringConfigurationI return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.Id != nil { encoder.SetQuery("id").String(*v.Id) } @@ -5651,11 +5704,18 @@ func (m *awsRestxml_serializeOpPutBucketInventoryConfiguration) HandleSerialize( return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?inventory") + opPath, opQuery := httpbinding.SplitURI("/?inventory") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) 
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "PUT" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -5700,15 +5760,6 @@ func awsRestxml_serializeOpHttpBindingsPutBucketInventoryConfigurationInput(v *P return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) @@ -5742,11 +5793,18 @@ func (m *awsRestxml_serializeOpPutBucketLifecycleConfiguration) HandleSerialize( return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?lifecycle") + opPath, opQuery := httpbinding.SplitURI("/?lifecycle") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "PUT" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -5791,15 +5849,6 @@ func awsRestxml_serializeOpHttpBindingsPutBucketLifecycleConfigurationInput(v *P return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if len(v.ChecksumAlgorithm) > 0 { locationName := "X-Amz-Sdk-Checksum-Algorithm" encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) @@ -5834,11 +5883,18 @@ func (m *awsRestxml_serializeOpPutBucketLogging) HandleSerialize(ctx context.Con return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?logging") + opPath, opQuery := httpbinding.SplitURI("/?logging") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "PUT" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var 
restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -5883,15 +5939,6 @@ func awsRestxml_serializeOpHttpBindingsPutBucketLoggingInput(v *PutBucketLogging return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if len(v.ChecksumAlgorithm) > 0 { locationName := "X-Amz-Sdk-Checksum-Algorithm" encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) @@ -5931,11 +5978,18 @@ func (m *awsRestxml_serializeOpPutBucketMetricsConfiguration) HandleSerialize(ct return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?metrics") + opPath, opQuery := httpbinding.SplitURI("/?metrics") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "PUT" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -5980,15 +6034,6 @@ func awsRestxml_serializeOpHttpBindingsPutBucketMetricsConfigurationInput(v *Put return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) @@ -6022,11 +6067,18 @@ func (m *awsRestxml_serializeOpPutBucketNotificationConfiguration) HandleSeriali return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?notification") + opPath, opQuery := httpbinding.SplitURI("/?notification") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "PUT" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = 
smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -6071,23 +6123,14 @@ func awsRestxml_serializeOpHttpBindingsPutBucketNotificationConfigurationInput(v return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) } - if v.SkipDestinationValidation { + if v.SkipDestinationValidation != nil { locationName := "X-Amz-Skip-Destination-Validation" - encoder.SetHeader(locationName).Boolean(v.SkipDestinationValidation) + encoder.SetHeader(locationName).Boolean(*v.SkipDestinationValidation) } return nil @@ -6114,11 +6157,18 @@ func (m *awsRestxml_serializeOpPutBucketOwnershipControls) HandleSerialize(ctx c return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?ownershipControls") + opPath, opQuery := httpbinding.SplitURI("/?ownershipControls") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "PUT" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -6163,15 +6213,6 @@ func awsRestxml_serializeOpHttpBindingsPutBucketOwnershipControlsInput(v *PutBuc return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { locationName := "Content-Md5" encoder.SetHeader(locationName).String(*v.ContentMD5) @@ -6206,11 +6247,18 @@ func (m *awsRestxml_serializeOpPutBucketPolicy) HandleSerialize(ctx context.Cont return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?policy") + opPath, opQuery := httpbinding.SplitURI("/?policy") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "PUT" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = 
httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -6236,20 +6284,11 @@ func (m *awsRestxml_serializeOpPutBucketPolicy) HandleSerialize(ctx context.Cont } in.Request = request - return next.HandleSerialize(ctx, in) -} -func awsRestxml_serializeOpHttpBindingsPutBucketPolicyInput(v *PutBucketPolicyInput, encoder *httpbinding.Encoder) error { - if v == nil { - return fmt.Errorf("unsupported serialization of nil %T", v) - } - - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } + return next.HandleSerialize(ctx, in) +} +func awsRestxml_serializeOpHttpBindingsPutBucketPolicyInput(v *PutBucketPolicyInput, encoder *httpbinding.Encoder) error { + if v == nil { + return fmt.Errorf("unsupported serialization of nil %T", v) } if len(v.ChecksumAlgorithm) > 0 { @@ -6257,9 +6296,9 @@ func awsRestxml_serializeOpHttpBindingsPutBucketPolicyInput(v *PutBucketPolicyIn encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) } - if v.ConfirmRemoveSelfBucketAccess { + if v.ConfirmRemoveSelfBucketAccess != nil { locationName := "X-Amz-Confirm-Remove-Self-Bucket-Access" - encoder.SetHeader(locationName).Boolean(v.ConfirmRemoveSelfBucketAccess) + encoder.SetHeader(locationName).Boolean(*v.ConfirmRemoveSelfBucketAccess) } if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { @@ -6296,11 +6335,18 @@ func (m *awsRestxml_serializeOpPutBucketReplication) HandleSerialize(ctx context return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?replication") + opPath, opQuery := httpbinding.SplitURI("/?replication") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "PUT" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -6345,15 +6391,6 @@ func awsRestxml_serializeOpHttpBindingsPutBucketReplicationInput(v *PutBucketRep return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if len(v.ChecksumAlgorithm) > 0 { locationName := "X-Amz-Sdk-Checksum-Algorithm" encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) @@ -6398,11 +6435,18 @@ func (m *awsRestxml_serializeOpPutBucketRequestPayment) 
HandleSerialize(ctx cont return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?requestPayment") + opPath, opQuery := httpbinding.SplitURI("/?requestPayment") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "PUT" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -6447,15 +6491,6 @@ func awsRestxml_serializeOpHttpBindingsPutBucketRequestPaymentInput(v *PutBucket return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if len(v.ChecksumAlgorithm) > 0 { locationName := "X-Amz-Sdk-Checksum-Algorithm" encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) @@ -6495,11 +6530,18 @@ func (m *awsRestxml_serializeOpPutBucketTagging) HandleSerialize(ctx context.Con return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?tagging") + opPath, opQuery := httpbinding.SplitURI("/?tagging") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "PUT" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -6544,15 +6586,6 @@ func awsRestxml_serializeOpHttpBindingsPutBucketTaggingInput(v *PutBucketTagging return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if len(v.ChecksumAlgorithm) > 0 { locationName := "X-Amz-Sdk-Checksum-Algorithm" encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) @@ -6592,11 +6625,18 @@ func (m *awsRestxml_serializeOpPutBucketVersioning) HandleSerialize(ctx context. 
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?versioning") + opPath, opQuery := httpbinding.SplitURI("/?versioning") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "PUT" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -6641,15 +6681,6 @@ func awsRestxml_serializeOpHttpBindingsPutBucketVersioningInput(v *PutBucketVers return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if len(v.ChecksumAlgorithm) > 0 { locationName := "X-Amz-Sdk-Checksum-Algorithm" encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) @@ -6694,11 +6725,18 @@ func (m *awsRestxml_serializeOpPutBucketWebsite) HandleSerialize(ctx context.Con return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?website") + opPath, opQuery := httpbinding.SplitURI("/?website") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "PUT" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -6743,15 +6781,6 @@ func awsRestxml_serializeOpHttpBindingsPutBucketWebsiteInput(v *PutBucketWebsite return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if len(v.ChecksumAlgorithm) > 0 { locationName := "X-Amz-Sdk-Checksum-Algorithm" encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) @@ -6791,11 +6820,18 @@ func (m *awsRestxml_serializeOpPutObject) HandleSerialize(ctx context.Context, i return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?x-id=PutObject") + opPath, opQuery := 
httpbinding.SplitURI("/{Key+}?x-id=PutObject") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "PUT" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -6833,18 +6869,9 @@ func awsRestxml_serializeOpHttpBindingsPutObjectInput(v *PutObjectInput, encoder encoder.SetHeader(locationName).String(string(v.ACL)) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - - if v.BucketKeyEnabled { + if v.BucketKeyEnabled != nil { locationName := "X-Amz-Server-Side-Encryption-Bucket-Key-Enabled" - encoder.SetHeader(locationName).Boolean(v.BucketKeyEnabled) + encoder.SetHeader(locationName).Boolean(*v.BucketKeyEnabled) } if v.CacheControl != nil && len(*v.CacheControl) > 0 { @@ -6892,9 +6919,9 @@ func awsRestxml_serializeOpHttpBindingsPutObjectInput(v *PutObjectInput, encoder encoder.SetHeader(locationName).String(*v.ContentLanguage) } - if v.ContentLength != 0 { + if v.ContentLength != nil { locationName := "Content-Length" - encoder.SetHeader(locationName).Long(v.ContentLength) + encoder.SetHeader(locationName).Long(*v.ContentLength) } if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { @@ -7044,11 +7071,18 @@ func (m *awsRestxml_serializeOpPutObjectAcl) HandleSerialize(ctx context.Context return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?acl") + opPath, opQuery := httpbinding.SplitURI("/{Key+}?acl") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "PUT" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -7098,15 +7132,6 @@ func awsRestxml_serializeOpHttpBindingsPutObjectAclInput(v *PutObjectAclInput, e encoder.SetHeader(locationName).String(string(v.ACL)) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if len(v.ChecksumAlgorithm) > 0 { locationName := "X-Amz-Sdk-Checksum-Algorithm" 
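// Two mechanical changes repeat across all of the serializer hunks in this
// file. First, the bucket is no longer rendered into the operation path: the
// URI templates drop the leading "/{Bucket}" segment, and the matching
// SetURI("Bucket") bindings are deleted, because the bucket is now placed into
// the request endpoint (host or path) by endpoint resolution rather than by
// the serializer. Second, encoder construction now branches on
// request.URL.RawPath: when a raw, still percent-encoded path is present,
// NewEncoderWithRawPath keeps it in sync with the decoded Path. In net/url,
// RawPath is only set when the encoded form is not canonical; for an object
// key like "a b/c%2Fd", Path holds the decoded "/a b/c%2Fd" and cannot
// distinguish an escaped "%2F" from a real "/", so serializing from Path
// alone would corrupt such keys.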
encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) @@ -7189,11 +7214,18 @@ func (m *awsRestxml_serializeOpPutObjectLegalHold) HandleSerialize(ctx context.C return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?legal-hold") + opPath, opQuery := httpbinding.SplitURI("/{Key+}?legal-hold") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "PUT" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -7238,15 +7270,6 @@ func awsRestxml_serializeOpHttpBindingsPutObjectLegalHoldInput(v *PutObjectLegal return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if len(v.ChecksumAlgorithm) > 0 { locationName := "X-Amz-Sdk-Checksum-Algorithm" encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) @@ -7304,11 +7327,18 @@ func (m *awsRestxml_serializeOpPutObjectLockConfiguration) HandleSerialize(ctx c return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?object-lock") + opPath, opQuery := httpbinding.SplitURI("/?object-lock") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "PUT" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -7353,15 +7383,6 @@ func awsRestxml_serializeOpHttpBindingsPutObjectLockConfigurationInput(v *PutObj return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if len(v.ChecksumAlgorithm) > 0 { locationName := "X-Amz-Sdk-Checksum-Algorithm" encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) @@ -7411,11 +7432,18 @@ func (m *awsRestxml_serializeOpPutObjectRetention) HandleSerialize(ctx context.C return out, metadata, 
&smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?retention") + opPath, opQuery := httpbinding.SplitURI("/{Key+}?retention") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "PUT" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -7460,18 +7488,9 @@ func awsRestxml_serializeOpHttpBindingsPutObjectRetentionInput(v *PutObjectReten return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - - if v.BypassGovernanceRetention { + if v.BypassGovernanceRetention != nil { locationName := "X-Amz-Bypass-Governance-Retention" - encoder.SetHeader(locationName).Boolean(v.BypassGovernanceRetention) + encoder.SetHeader(locationName).Boolean(*v.BypassGovernanceRetention) } if len(v.ChecksumAlgorithm) > 0 { @@ -7531,11 +7550,18 @@ func (m *awsRestxml_serializeOpPutObjectTagging) HandleSerialize(ctx context.Con return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?tagging") + opPath, opQuery := httpbinding.SplitURI("/{Key+}?tagging") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "PUT" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -7580,15 +7606,6 @@ func awsRestxml_serializeOpHttpBindingsPutObjectTaggingInput(v *PutObjectTagging return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if len(v.ChecksumAlgorithm) > 0 { locationName := "X-Amz-Sdk-Checksum-Algorithm" encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) @@ -7646,11 +7663,18 @@ func (m *awsRestxml_serializeOpPutPublicAccessBlock) HandleSerialize(ctx context return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input 
parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}?publicAccessBlock") + opPath, opQuery := httpbinding.SplitURI("/?publicAccessBlock") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "PUT" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -7695,15 +7719,6 @@ func awsRestxml_serializeOpHttpBindingsPutPublicAccessBlockInput(v *PutPublicAcc return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if len(v.ChecksumAlgorithm) > 0 { locationName := "X-Amz-Sdk-Checksum-Algorithm" encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) @@ -7743,11 +7758,18 @@ func (m *awsRestxml_serializeOpRestoreObject) HandleSerialize(ctx context.Contex return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?restore&x-id=RestoreObject") + opPath, opQuery := httpbinding.SplitURI("/{Key+}?restore&x-id=RestoreObject") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "POST" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -7792,15 +7814,6 @@ func awsRestxml_serializeOpHttpBindingsRestoreObjectInput(v *RestoreObjectInput, return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if len(v.ChecksumAlgorithm) > 0 { locationName := "X-Amz-Sdk-Checksum-Algorithm" encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) @@ -7853,11 +7866,18 @@ func (m *awsRestxml_serializeOpSelectObjectContent) HandleSerialize(ctx context. 
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?select&select-type=2&x-id=SelectObjectContent") + opPath, opQuery := httpbinding.SplitURI("/{Key+}?select&select-type=2&x-id=SelectObjectContent") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "POST" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -7896,15 +7916,6 @@ func awsRestxml_serializeOpHttpBindingsSelectObjectContentInput(v *SelectObjectC return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.ExpectedBucketOwner != nil && len(*v.ExpectedBucketOwner) > 0 { locationName := "X-Amz-Expected-Bucket-Owner" encoder.SetHeader(locationName).String(*v.ExpectedBucketOwner) @@ -8037,11 +8048,18 @@ func (m *awsRestxml_serializeOpUploadPart) HandleSerialize(ctx context.Context, return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?x-id=UploadPart") + opPath, opQuery := httpbinding.SplitURI("/{Key+}?x-id=UploadPart") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "PUT" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -8074,15 +8092,6 @@ func awsRestxml_serializeOpHttpBindingsUploadPartInput(v *UploadPartInput, encod return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if len(v.ChecksumAlgorithm) > 0 { locationName := "X-Amz-Sdk-Checksum-Algorithm" encoder.SetHeader(locationName).String(string(v.ChecksumAlgorithm)) @@ -8108,9 +8117,9 @@ func awsRestxml_serializeOpHttpBindingsUploadPartInput(v *UploadPartInput, encod encoder.SetHeader(locationName).String(*v.ChecksumSHA256) } - if v.ContentLength != 0 { + if 
v.ContentLength != nil { locationName := "Content-Length" - encoder.SetHeader(locationName).Long(v.ContentLength) + encoder.SetHeader(locationName).Long(*v.ContentLength) } if v.ContentMD5 != nil && len(*v.ContentMD5) > 0 { @@ -8132,8 +8141,8 @@ func awsRestxml_serializeOpHttpBindingsUploadPartInput(v *UploadPartInput, encod } } - { - encoder.SetQuery("partNumber").Integer(v.PartNumber) + if v.PartNumber != nil { + encoder.SetQuery("partNumber").Integer(*v.PartNumber) } if len(v.RequestPayer) > 0 { @@ -8184,11 +8193,18 @@ func (m *awsRestxml_serializeOpUploadPartCopy) HandleSerialize(ctx context.Conte return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} } - opPath, opQuery := httpbinding.SplitURI("/{Bucket}/{Key+}?x-id=UploadPartCopy") + opPath, opQuery := httpbinding.SplitURI("/{Key+}?x-id=UploadPartCopy") request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "PUT" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -8209,15 +8225,6 @@ func awsRestxml_serializeOpHttpBindingsUploadPartCopyInput(v *UploadPartCopyInpu return fmt.Errorf("unsupported serialization of nil %T", v) } - if v.Bucket == nil || len(*v.Bucket) == 0 { - return &smithy.SerializationError{Err: fmt.Errorf("input member Bucket must not be empty")} - } - if v.Bucket != nil { - if err := encoder.SetURI("Bucket").String(*v.Bucket); err != nil { - return err - } - } - if v.CopySource != nil && len(*v.CopySource) > 0 { locationName := "X-Amz-Copy-Source" encoder.SetHeader(locationName).String(*v.CopySource) @@ -8282,8 +8289,8 @@ func awsRestxml_serializeOpHttpBindingsUploadPartCopyInput(v *UploadPartCopyInpu } } - { - encoder.SetQuery("partNumber").Integer(v.PartNumber) + if v.PartNumber != nil { + encoder.SetQuery("partNumber").Integer(*v.PartNumber) } if len(v.RequestPayer) > 0 { @@ -8338,7 +8345,14 @@ func (m *awsRestxml_serializeOpWriteGetObjectResponse) HandleSerialize(ctx conte request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath) request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery) request.Method = "POST" - restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + var restEncoder *httpbinding.Encoder + if request.URL.RawPath == "" { + restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + } else { + request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath) + restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header) + } + if err != nil { return out, metadata, &smithy.SerializationError{Err: err} } @@ -8376,9 +8390,9 @@ func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetOb encoder.SetHeader(locationName).String(*v.AcceptRanges) } - if v.BucketKeyEnabled { + if v.BucketKeyEnabled != nil { locationName := 
"X-Amz-Fwd-Header-X-Amz-Server-Side-Encryption-Bucket-Key-Enabled" - encoder.SetHeader(locationName).Boolean(v.BucketKeyEnabled) + encoder.SetHeader(locationName).Boolean(*v.BucketKeyEnabled) } if v.CacheControl != nil && len(*v.CacheControl) > 0 { @@ -8421,9 +8435,9 @@ func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetOb encoder.SetHeader(locationName).String(*v.ContentLanguage) } - if v.ContentLength != 0 { + if v.ContentLength != nil { locationName := "Content-Length" - encoder.SetHeader(locationName).Long(v.ContentLength) + encoder.SetHeader(locationName).Long(*v.ContentLength) } if v.ContentRange != nil && len(*v.ContentRange) > 0 { @@ -8436,9 +8450,9 @@ func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetOb encoder.SetHeader(locationName).String(*v.ContentType) } - if v.DeleteMarker { + if v.DeleteMarker != nil { locationName := "X-Amz-Fwd-Header-X-Amz-Delete-Marker" - encoder.SetHeader(locationName).Boolean(v.DeleteMarker) + encoder.SetHeader(locationName).Boolean(*v.DeleteMarker) } if v.ErrorCode != nil && len(*v.ErrorCode) > 0 { @@ -8480,9 +8494,9 @@ func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetOb } } - if v.MissingMeta != 0 { + if v.MissingMeta != nil { locationName := "X-Amz-Fwd-Header-X-Amz-Missing-Meta" - encoder.SetHeader(locationName).Integer(v.MissingMeta) + encoder.SetHeader(locationName).Integer(*v.MissingMeta) } if len(v.ObjectLockLegalHoldStatus) > 0 { @@ -8500,9 +8514,9 @@ func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetOb encoder.SetHeader(locationName).String(smithytime.FormatDateTime(*v.ObjectLockRetainUntilDate)) } - if v.PartsCount != 0 { + if v.PartsCount != nil { locationName := "X-Amz-Fwd-Header-X-Amz-Mp-Parts-Count" - encoder.SetHeader(locationName).Integer(v.PartsCount) + encoder.SetHeader(locationName).Integer(*v.PartsCount) } if len(v.ReplicationStatus) > 0 { @@ -8550,9 +8564,9 @@ func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetOb encoder.SetHeader(locationName).String(*v.SSEKMSKeyId) } - if v.StatusCode != 0 { + if v.StatusCode != nil { locationName := "X-Amz-Fwd-Status" - encoder.SetHeader(locationName).Integer(v.StatusCode) + encoder.SetHeader(locationName).Integer(*v.StatusCode) } if len(v.StorageClass) > 0 { @@ -8560,9 +8574,9 @@ func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetOb encoder.SetHeader(locationName).String(string(v.StorageClass)) } - if v.TagCount != 0 { + if v.TagCount != nil { locationName := "X-Amz-Fwd-Header-X-Amz-Tagging-Count" - encoder.SetHeader(locationName).Integer(v.TagCount) + encoder.SetHeader(locationName).Integer(*v.TagCount) } if v.VersionId != nil && len(*v.VersionId) > 0 { @@ -8575,7 +8589,7 @@ func awsRestxml_serializeOpHttpBindingsWriteGetObjectResponseInput(v *WriteGetOb func awsRestxml_serializeDocumentAbortIncompleteMultipartUpload(v *types.AbortIncompleteMultipartUpload, value smithyxml.Value) error { defer value.Close() - if v.DaysAfterInitiation != 0 { + if v.DaysAfterInitiation != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -8584,7 +8598,7 @@ func awsRestxml_serializeDocumentAbortIncompleteMultipartUpload(v *types.AbortIn Attr: rootAttr, } el := value.MemberElement(root) - el.Integer(v.DaysAfterInitiation) + el.Integer(*v.DaysAfterInitiation) } return nil } @@ -8876,6 +8890,33 @@ func awsRestxml_serializeDocumentAnalyticsS3BucketDestination(v *types.Analytics return nil } +func 
awsRestxml_serializeDocumentBucketInfo(v *types.BucketInfo, value smithyxml.Value) error { + defer value.Close() + if len(v.DataRedundancy) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "DataRedundancy", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.DataRedundancy)) + } + if len(v.Type) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Type", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Type)) + } + return nil +} + func awsRestxml_serializeDocumentBucketLifecycleConfiguration(v *types.BucketLifecycleConfiguration, value smithyxml.Value) error { defer value.Close() if v.Rules != nil { @@ -8987,7 +9028,7 @@ func awsRestxml_serializeDocumentCompletedPart(v *types.CompletedPart, value smi el := value.MemberElement(root) el.String(*v.ETag) } - if v.PartNumber != 0 { + if v.PartNumber != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -8996,7 +9037,7 @@ func awsRestxml_serializeDocumentCompletedPart(v *types.CompletedPart, value smi Attr: rootAttr, } el := value.MemberElement(root) - el.Integer(v.PartNumber) + el.Integer(*v.PartNumber) } return nil } @@ -9126,7 +9167,7 @@ func awsRestxml_serializeDocumentCORSRule(v *types.CORSRule, value smithyxml.Val el := value.MemberElement(root) el.String(*v.ID) } - if v.MaxAgeSeconds != 0 { + if v.MaxAgeSeconds != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -9135,7 +9176,7 @@ func awsRestxml_serializeDocumentCORSRule(v *types.CORSRule, value smithyxml.Val Attr: rootAttr, } el := value.MemberElement(root) - el.Integer(v.MaxAgeSeconds) + el.Integer(*v.MaxAgeSeconds) } return nil } @@ -9157,6 +9198,32 @@ func awsRestxml_serializeDocumentCORSRules(v []types.CORSRule, value smithyxml.V func awsRestxml_serializeDocumentCreateBucketConfiguration(v *types.CreateBucketConfiguration, value smithyxml.Value) error { defer value.Close() + if v.Bucket != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Bucket", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentBucketInfo(v.Bucket, el); err != nil { + return err + } + } + if v.Location != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Location", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentLocationInfo(v.Location, el); err != nil { + return err + } + } if len(v.LocationConstraint) > 0 { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ @@ -9173,7 +9240,7 @@ func awsRestxml_serializeDocumentCreateBucketConfiguration(v *types.CreateBucket func awsRestxml_serializeDocumentCSVInput(v *types.CSVInput, value smithyxml.Value) error { defer value.Close() - if v.AllowQuotedRecordDelimiter { + if v.AllowQuotedRecordDelimiter != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -9182,7 +9249,7 @@ func awsRestxml_serializeDocumentCSVInput(v *types.CSVInput, value smithyxml.Val Attr: rootAttr, } el := value.MemberElement(root) - el.Boolean(v.AllowQuotedRecordDelimiter) + el.Boolean(*v.AllowQuotedRecordDelimiter) } if v.Comments != nil { rootAttr := []smithyxml.Attr{} @@ -9315,7 +9382,7 @@ func awsRestxml_serializeDocumentCSVOutput(v *types.CSVOutput, value smithyxml.V func 
awsRestxml_serializeDocumentDefaultRetention(v *types.DefaultRetention, value smithyxml.Value) error { defer value.Close() - if v.Days != 0 { + if v.Days != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -9324,7 +9391,7 @@ func awsRestxml_serializeDocumentDefaultRetention(v *types.DefaultRetention, val Attr: rootAttr, } el := value.MemberElement(root) - el.Integer(v.Days) + el.Integer(*v.Days) } if len(v.Mode) > 0 { rootAttr := []smithyxml.Attr{} @@ -9337,7 +9404,7 @@ func awsRestxml_serializeDocumentDefaultRetention(v *types.DefaultRetention, val el := value.MemberElement(root) el.String(string(v.Mode)) } - if v.Years != 0 { + if v.Years != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -9346,7 +9413,7 @@ func awsRestxml_serializeDocumentDefaultRetention(v *types.DefaultRetention, val Attr: rootAttr, } el := value.MemberElement(root) - el.Integer(v.Years) + el.Integer(*v.Years) } return nil } @@ -9366,7 +9433,7 @@ func awsRestxml_serializeDocumentDelete(v *types.Delete, value smithyxml.Value) return err } } - if v.Quiet { + if v.Quiet != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -9375,7 +9442,7 @@ func awsRestxml_serializeDocumentDelete(v *types.Delete, value smithyxml.Value) Attr: rootAttr, } el := value.MemberElement(root) - el.Boolean(v.Quiet) + el.Boolean(*v.Quiet) } return nil } @@ -10012,7 +10079,7 @@ func awsRestxml_serializeDocumentInventoryConfiguration(v *types.InventoryConfig el := value.MemberElement(root) el.String(string(v.IncludedObjectVersions)) } - { + if v.IsEnabled != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -10021,7 +10088,7 @@ func awsRestxml_serializeDocumentInventoryConfiguration(v *types.InventoryConfig Attr: rootAttr, } el := value.MemberElement(root) - el.Boolean(v.IsEnabled) + el.Boolean(*v.IsEnabled) } if v.OptionalFields != nil { rootAttr := []smithyxml.Attr{} @@ -10328,7 +10395,7 @@ func awsRestxml_serializeDocumentLifecycleExpiration(v *types.LifecycleExpiratio el := value.MemberElement(root) el.String(smithytime.FormatDateTime(*v.Date)) } - if v.Days != 0 { + if v.Days != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -10337,9 +10404,9 @@ func awsRestxml_serializeDocumentLifecycleExpiration(v *types.LifecycleExpiratio Attr: rootAttr, } el := value.MemberElement(root) - el.Integer(v.Days) + el.Integer(*v.Days) } - if v.ExpiredObjectDeleteMarker { + if v.ExpiredObjectDeleteMarker != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -10348,7 +10415,7 @@ func awsRestxml_serializeDocumentLifecycleExpiration(v *types.LifecycleExpiratio Attr: rootAttr, } el := value.MemberElement(root) - el.Boolean(v.ExpiredObjectDeleteMarker) + el.Boolean(*v.ExpiredObjectDeleteMarker) } return nil } @@ -10471,7 +10538,7 @@ func awsRestxml_serializeDocumentLifecycleRule(v *types.LifecycleRule, value smi func awsRestxml_serializeDocumentLifecycleRuleAndOperator(v *types.LifecycleRuleAndOperator, value smithyxml.Value) error { defer value.Close() - if v.ObjectSizeGreaterThan != 0 { + if v.ObjectSizeGreaterThan != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -10480,9 +10547,9 @@ func awsRestxml_serializeDocumentLifecycleRuleAndOperator(v *types.LifecycleRule Attr: rootAttr, } el := value.MemberElement(root) - el.Long(v.ObjectSizeGreaterThan) + 
el.Long(*v.ObjectSizeGreaterThan) } - if v.ObjectSizeLessThan != 0 { + if v.ObjectSizeLessThan != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -10491,7 +10558,7 @@ func awsRestxml_serializeDocumentLifecycleRuleAndOperator(v *types.LifecycleRule Attr: rootAttr, } el := value.MemberElement(root) - el.Long(v.ObjectSizeLessThan) + el.Long(*v.ObjectSizeLessThan) } if v.Prefix != nil { rootAttr := []smithyxml.Attr{} @@ -10604,6 +10671,33 @@ func awsRestxml_serializeDocumentLifecycleRules(v []types.LifecycleRule, value s return nil } +func awsRestxml_serializeDocumentLocationInfo(v *types.LocationInfo, value smithyxml.Value) error { + defer value.Close() + if v.Name != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Name", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(*v.Name) + } + if len(v.Type) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "Type", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.Type)) + } + return nil +} + func awsRestxml_serializeDocumentLoggingEnabled(v *types.LoggingEnabled, value smithyxml.Value) error { defer value.Close() if v.TargetBucket != nil { @@ -10630,6 +10724,19 @@ func awsRestxml_serializeDocumentLoggingEnabled(v *types.LoggingEnabled, value s return err } } + if v.TargetObjectKeyFormat != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "TargetObjectKeyFormat", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentTargetObjectKeyFormat(v.TargetObjectKeyFormat, el); err != nil { + return err + } + } if v.TargetPrefix != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ @@ -10829,7 +10936,7 @@ func awsRestxml_serializeDocumentMetricsFilter(v types.MetricsFilter, value smit func awsRestxml_serializeDocumentNoncurrentVersionExpiration(v *types.NoncurrentVersionExpiration, value smithyxml.Value) error { defer value.Close() - if v.NewerNoncurrentVersions != 0 { + if v.NewerNoncurrentVersions != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -10838,9 +10945,9 @@ func awsRestxml_serializeDocumentNoncurrentVersionExpiration(v *types.Noncurrent Attr: rootAttr, } el := value.MemberElement(root) - el.Integer(v.NewerNoncurrentVersions) + el.Integer(*v.NewerNoncurrentVersions) } - if v.NoncurrentDays != 0 { + if v.NoncurrentDays != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -10849,14 +10956,14 @@ func awsRestxml_serializeDocumentNoncurrentVersionExpiration(v *types.Noncurrent Attr: rootAttr, } el := value.MemberElement(root) - el.Integer(v.NoncurrentDays) + el.Integer(*v.NoncurrentDays) } return nil } func awsRestxml_serializeDocumentNoncurrentVersionTransition(v *types.NoncurrentVersionTransition, value smithyxml.Value) error { defer value.Close() - if v.NewerNoncurrentVersions != 0 { + if v.NewerNoncurrentVersions != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -10865,9 +10972,9 @@ func awsRestxml_serializeDocumentNoncurrentVersionTransition(v *types.Noncurrent Attr: rootAttr, } el := value.MemberElement(root) - el.Integer(v.NewerNoncurrentVersions) + el.Integer(*v.NewerNoncurrentVersions) } - if v.NoncurrentDays != 0 { + if v.NoncurrentDays != nil { rootAttr := []smithyxml.Attr{} 
root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -10876,7 +10983,7 @@ func awsRestxml_serializeDocumentNoncurrentVersionTransition(v *types.Noncurrent Attr: rootAttr, } el := value.MemberElement(root) - el.Integer(v.NoncurrentDays) + el.Integer(*v.NoncurrentDays) } if len(v.StorageClass) > 0 { rootAttr := []smithyxml.Attr{} @@ -11244,9 +11351,25 @@ func awsRestxml_serializeDocumentParquetInput(v *types.ParquetInput, value smith return nil } +func awsRestxml_serializeDocumentPartitionedPrefix(v *types.PartitionedPrefix, value smithyxml.Value) error { + defer value.Close() + if len(v.PartitionDateSource) > 0 { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "PartitionDateSource", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + el.String(string(v.PartitionDateSource)) + } + return nil +} + func awsRestxml_serializeDocumentPublicAccessBlockConfiguration(v *types.PublicAccessBlockConfiguration, value smithyxml.Value) error { defer value.Close() - if v.BlockPublicAcls { + if v.BlockPublicAcls != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -11255,9 +11378,9 @@ func awsRestxml_serializeDocumentPublicAccessBlockConfiguration(v *types.PublicA Attr: rootAttr, } el := value.MemberElement(root) - el.Boolean(v.BlockPublicAcls) + el.Boolean(*v.BlockPublicAcls) } - if v.BlockPublicPolicy { + if v.BlockPublicPolicy != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -11266,9 +11389,9 @@ func awsRestxml_serializeDocumentPublicAccessBlockConfiguration(v *types.PublicA Attr: rootAttr, } el := value.MemberElement(root) - el.Boolean(v.BlockPublicPolicy) + el.Boolean(*v.BlockPublicPolicy) } - if v.IgnorePublicAcls { + if v.IgnorePublicAcls != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -11277,9 +11400,9 @@ func awsRestxml_serializeDocumentPublicAccessBlockConfiguration(v *types.PublicA Attr: rootAttr, } el := value.MemberElement(root) - el.Boolean(v.IgnorePublicAcls) + el.Boolean(*v.IgnorePublicAcls) } - if v.RestrictPublicBuckets { + if v.RestrictPublicBuckets != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -11288,7 +11411,7 @@ func awsRestxml_serializeDocumentPublicAccessBlockConfiguration(v *types.PublicA Attr: rootAttr, } el := value.MemberElement(root) - el.Boolean(v.RestrictPublicBuckets) + el.Boolean(*v.RestrictPublicBuckets) } return nil } @@ -11569,7 +11692,7 @@ func awsRestxml_serializeDocumentReplicationRule(v *types.ReplicationRule, value el := value.MemberElement(root) el.String(*v.Prefix) } - if v.Priority != 0 { + if v.Priority != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -11578,7 +11701,7 @@ func awsRestxml_serializeDocumentReplicationRule(v *types.ReplicationRule, value Attr: rootAttr, } el := value.MemberElement(root) - el.Integer(v.Priority) + el.Integer(*v.Priority) } if v.SourceSelectionCriteria != nil { rootAttr := []smithyxml.Attr{} @@ -11729,7 +11852,7 @@ func awsRestxml_serializeDocumentReplicationTime(v *types.ReplicationTime, value func awsRestxml_serializeDocumentReplicationTimeValue(v *types.ReplicationTimeValue, value smithyxml.Value) error { defer value.Close() - if v.Minutes != 0 { + if v.Minutes != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -11738,7 +11861,7 @@ func 
awsRestxml_serializeDocumentReplicationTimeValue(v *types.ReplicationTimeVa Attr: rootAttr, } el := value.MemberElement(root) - el.Integer(v.Minutes) + el.Integer(*v.Minutes) } return nil } @@ -11761,7 +11884,7 @@ func awsRestxml_serializeDocumentRequestPaymentConfiguration(v *types.RequestPay func awsRestxml_serializeDocumentRequestProgress(v *types.RequestProgress, value smithyxml.Value) error { defer value.Close() - if v.Enabled { + if v.Enabled != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -11770,14 +11893,14 @@ func awsRestxml_serializeDocumentRequestProgress(v *types.RequestProgress, value Attr: rootAttr, } el := value.MemberElement(root) - el.Boolean(v.Enabled) + el.Boolean(*v.Enabled) } return nil } func awsRestxml_serializeDocumentRestoreRequest(v *types.RestoreRequest, value smithyxml.Value) error { defer value.Close() - if v.Days != 0 { + if v.Days != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -11786,7 +11909,7 @@ func awsRestxml_serializeDocumentRestoreRequest(v *types.RestoreRequest, value s Attr: rootAttr, } el := value.MemberElement(root) - el.Integer(v.Days) + el.Integer(*v.Days) } if v.Description != nil { rootAttr := []smithyxml.Attr{} @@ -12037,7 +12160,7 @@ func awsRestxml_serializeDocumentS3Location(v *types.S3Location, value smithyxml func awsRestxml_serializeDocumentScanRange(v *types.ScanRange, value smithyxml.Value) error { defer value.Close() - if v.End != 0 { + if v.End != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -12046,9 +12169,9 @@ func awsRestxml_serializeDocumentScanRange(v *types.ScanRange, value smithyxml.V Attr: rootAttr, } el := value.MemberElement(root) - el.Long(v.End) + el.Long(*v.End) } - if v.Start != 0 { + if v.Start != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -12057,7 +12180,7 @@ func awsRestxml_serializeDocumentScanRange(v *types.ScanRange, value smithyxml.V Attr: rootAttr, } el := value.MemberElement(root) - el.Long(v.Start) + el.Long(*v.Start) } return nil } @@ -12175,7 +12298,7 @@ func awsRestxml_serializeDocumentServerSideEncryptionRule(v *types.ServerSideEnc return err } } - if v.BucketKeyEnabled { + if v.BucketKeyEnabled != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -12184,7 +12307,7 @@ func awsRestxml_serializeDocumentServerSideEncryptionRule(v *types.ServerSideEnc Attr: rootAttr, } el := value.MemberElement(root) - el.Boolean(v.BucketKeyEnabled) + el.Boolean(*v.BucketKeyEnabled) } return nil } @@ -12204,6 +12327,11 @@ func awsRestxml_serializeDocumentServerSideEncryptionRules(v []types.ServerSideE return nil } +func awsRestxml_serializeDocumentSimplePrefix(v *types.SimplePrefix, value smithyxml.Value) error { + defer value.Close() + return nil +} + func awsRestxml_serializeDocumentSourceSelectionCriteria(v *types.SourceSelectionCriteria, value smithyxml.Value) error { defer value.Close() if v.ReplicaModifications != nil { @@ -12443,6 +12571,37 @@ func awsRestxml_serializeDocumentTargetGrants(v []types.TargetGrant, value smith return nil } +func awsRestxml_serializeDocumentTargetObjectKeyFormat(v *types.TargetObjectKeyFormat, value smithyxml.Value) error { + defer value.Close() + if v.PartitionedPrefix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "PartitionedPrefix", + }, + Attr: rootAttr, + } + el := 
value.MemberElement(root) + if err := awsRestxml_serializeDocumentPartitionedPrefix(v.PartitionedPrefix, el); err != nil { + return err + } + } + if v.SimplePrefix != nil { + rootAttr := []smithyxml.Attr{} + root := smithyxml.StartElement{ + Name: smithyxml.Name{ + Local: "SimplePrefix", + }, + Attr: rootAttr, + } + el := value.MemberElement(root) + if err := awsRestxml_serializeDocumentSimplePrefix(v.SimplePrefix, el); err != nil { + return err + } + } + return nil +} + func awsRestxml_serializeDocumentTiering(v *types.Tiering, value smithyxml.Value) error { defer value.Close() if len(v.AccessTier) > 0 { @@ -12456,7 +12615,7 @@ func awsRestxml_serializeDocumentTiering(v *types.Tiering, value smithyxml.Value el := value.MemberElement(root) el.String(string(v.AccessTier)) } - { + if v.Days != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -12465,7 +12624,7 @@ func awsRestxml_serializeDocumentTiering(v *types.Tiering, value smithyxml.Value Attr: rootAttr, } el := value.MemberElement(root) - el.Integer(v.Days) + el.Integer(*v.Days) } return nil } @@ -12566,7 +12725,7 @@ func awsRestxml_serializeDocumentTransition(v *types.Transition, value smithyxml el := value.MemberElement(root) el.String(smithytime.FormatDateTime(*v.Date)) } - if v.Days != 0 { + if v.Days != nil { rootAttr := []smithyxml.Attr{} root := smithyxml.StartElement{ Name: smithyxml.Name{ @@ -12575,7 +12734,7 @@ func awsRestxml_serializeDocumentTransition(v *types.Transition, value smithyxml Attr: rootAttr, } el := value.MemberElement(root) - el.Integer(v.Days) + el.Integer(*v.Days) } if len(v.StorageClass) > 0 { rootAttr := []smithyxml.Attr{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go index 79c5e688..ea3b9c82 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go @@ -86,6 +86,7 @@ const ( BucketLocationConstraintApNortheast2 BucketLocationConstraint = "ap-northeast-2" BucketLocationConstraintApNortheast3 BucketLocationConstraint = "ap-northeast-3" BucketLocationConstraintApSouth1 BucketLocationConstraint = "ap-south-1" + BucketLocationConstraintApSouth2 BucketLocationConstraint = "ap-south-2" BucketLocationConstraintApSoutheast1 BucketLocationConstraint = "ap-southeast-1" BucketLocationConstraintApSoutheast2 BucketLocationConstraint = "ap-southeast-2" BucketLocationConstraintApSoutheast3 BucketLocationConstraint = "ap-southeast-3" @@ -96,6 +97,7 @@ const ( BucketLocationConstraintEuCentral1 BucketLocationConstraint = "eu-central-1" BucketLocationConstraintEuNorth1 BucketLocationConstraint = "eu-north-1" BucketLocationConstraintEuSouth1 BucketLocationConstraint = "eu-south-1" + BucketLocationConstraintEuSouth2 BucketLocationConstraint = "eu-south-2" BucketLocationConstraintEuWest1 BucketLocationConstraint = "eu-west-1" BucketLocationConstraintEuWest2 BucketLocationConstraint = "eu-west-2" BucketLocationConstraintEuWest3 BucketLocationConstraint = "eu-west-3" @@ -108,9 +110,9 @@ const ( BucketLocationConstraintUsWest2 BucketLocationConstraint = "us-west-2" ) -// Values returns all known values for BucketLocationConstraint. Note that this can -// be expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// Values returns all known values for BucketLocationConstraint. 
Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// The ordering of this slice is not guaranteed to be stable across updates. func (BucketLocationConstraint) Values() []BucketLocationConstraint { return []BucketLocationConstraint{ "af-south-1", @@ -119,6 +121,7 @@ func (BucketLocationConstraint) Values() []BucketLocationConstraint { "ap-northeast-2", "ap-northeast-3", "ap-south-1", + "ap-south-2", "ap-southeast-1", "ap-southeast-2", "ap-southeast-3", @@ -129,6 +132,7 @@ func (BucketLocationConstraint) Values() []BucketLocationConstraint { "eu-central-1", "eu-north-1", "eu-south-1", + "eu-south-2", "eu-west-1", "eu-west-2", "eu-west-3", @@ -162,6 +166,22 @@ func (BucketLogsPermission) Values() []BucketLogsPermission { } } +type BucketType string + +// Enum values for BucketType +const ( + BucketTypeDirectory BucketType = "Directory" +) + +// Values returns all known values for BucketType. Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. +func (BucketType) Values() []BucketType { + return []BucketType{ + "Directory", + } +} + type BucketVersioningStatus string // Enum values for BucketVersioningStatus @@ -209,9 +229,9 @@ const ( ChecksumModeEnabled ChecksumMode = "ENABLED" ) -// Values returns all known values for ChecksumMode. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// Values returns all known values for ChecksumMode. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. func (ChecksumMode) Values() []ChecksumMode { return []ChecksumMode{ "ENABLED", @@ -238,6 +258,22 @@ func (CompressionType) Values() []CompressionType { } } +type DataRedundancy string + +// Enum values for DataRedundancy +const ( + DataRedundancySingleAvailabilityZone DataRedundancy = "SingleAvailabilityZone" +) + +// Values returns all known values for DataRedundancy. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (DataRedundancy) Values() []DataRedundancy { + return []DataRedundancy{ + "SingleAvailabilityZone", + } +} + type DeleteMarkerReplicationStatus string // Enum values for DeleteMarkerReplicationStatus @@ -264,9 +300,9 @@ const ( EncodingTypeUrl EncodingType = "url" ) -// Values returns all known values for EncodingType. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// Values returns all known values for EncodingType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. func (EncodingType) Values() []EncodingType { return []EncodingType{ "url", @@ -275,8 +311,39 @@ func (EncodingType) Values() []EncodingType { type Event string -// Values returns all known values for Event. Note that this can be expanded in the -// future, and so it is only as up to date as the client. 
The ordering of this +// Enum values for Event +const ( + EventS3ReducedRedundancyLostObject Event = "s3:ReducedRedundancyLostObject" + EventS3ObjectCreated Event = "s3:ObjectCreated:*" + EventS3ObjectCreatedPut Event = "s3:ObjectCreated:Put" + EventS3ObjectCreatedPost Event = "s3:ObjectCreated:Post" + EventS3ObjectCreatedCopy Event = "s3:ObjectCreated:Copy" + EventS3ObjectCreatedCompleteMultipartUpload Event = "s3:ObjectCreated:CompleteMultipartUpload" + EventS3ObjectRemoved Event = "s3:ObjectRemoved:*" + EventS3ObjectRemovedDelete Event = "s3:ObjectRemoved:Delete" + EventS3ObjectRemovedDeleteMarkerCreated Event = "s3:ObjectRemoved:DeleteMarkerCreated" + EventS3ObjectRestore Event = "s3:ObjectRestore:*" + EventS3ObjectRestorePost Event = "s3:ObjectRestore:Post" + EventS3ObjectRestoreCompleted Event = "s3:ObjectRestore:Completed" + EventS3Replication Event = "s3:Replication:*" + EventS3ReplicationOperationFailedReplication Event = "s3:Replication:OperationFailedReplication" + EventS3ReplicationOperationNotTracked Event = "s3:Replication:OperationNotTracked" + EventS3ReplicationOperationMissedThreshold Event = "s3:Replication:OperationMissedThreshold" + EventS3ReplicationOperationReplicatedAfterThreshold Event = "s3:Replication:OperationReplicatedAfterThreshold" + EventS3ObjectRestoreDelete Event = "s3:ObjectRestore:Delete" + EventS3LifecycleTransition Event = "s3:LifecycleTransition" + EventS3IntelligentTiering Event = "s3:IntelligentTiering" + EventS3ObjectAclPut Event = "s3:ObjectAcl:Put" + EventS3LifecycleExpiration Event = "s3:LifecycleExpiration:*" + EventS3LifecycleExpirationDelete Event = "s3:LifecycleExpiration:Delete" + EventS3LifecycleExpirationDeleteMarkerCreated Event = "s3:LifecycleExpiration:DeleteMarkerCreated" + EventS3ObjectTagging Event = "s3:ObjectTagging:*" + EventS3ObjectTaggingPut Event = "s3:ObjectTagging:Put" + EventS3ObjectTaggingDelete Event = "s3:ObjectTagging:Delete" +) + +// Values returns all known values for Event. Note that this can be expanded in +// the future, and so it is only as up to date as the client. The ordering of this // slice is not guaranteed to be stable across updates. func (Event) Values() []Event { return []Event{ @@ -409,9 +476,10 @@ const ( IntelligentTieringAccessTierDeepArchiveAccess IntelligentTieringAccessTier = "DEEP_ARCHIVE_ACCESS" ) -// Values returns all known values for IntelligentTieringAccessTier. Note that this -// can be expanded in the future, and so it is only as up to date as the client. -// The ordering of this slice is not guaranteed to be stable across updates. +// Values returns all known values for IntelligentTieringAccessTier. Note that +// this can be expanded in the future, and so it is only as up to date as the +// client. The ordering of this slice is not guaranteed to be stable across +// updates. func (IntelligentTieringAccessTier) Values() []IntelligentTieringAccessTier { return []IntelligentTieringAccessTier{ "ARCHIVE_ACCESS", @@ -427,9 +495,9 @@ const ( IntelligentTieringStatusDisabled IntelligentTieringStatus = "Disabled" ) -// Values returns all known values for IntelligentTieringStatus. Note that this can -// be expanded in the future, and so it is only as up to date as the client. The -// ordering of this slice is not guaranteed to be stable across updates. +// Values returns all known values for IntelligentTieringStatus. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// The ordering of this slice is not guaranteed to be stable across updates. 
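Before this update the Event type existed but its values were only listed inside Values(); the added constants give callers named, typo-proof identifiers. A minimal sketch of wiring bucket notifications with them, assuming an initialized *s3.Client named client, a context ctx, and placeholder bucket and queue names (none of these appear in the diff itself):

	_, err := client.PutBucketNotificationConfiguration(ctx, &s3.PutBucketNotificationConfigurationInput{
		Bucket: aws.String("example-bucket"), // placeholder
		NotificationConfiguration: &types.NotificationConfiguration{
			QueueConfigurations: []types.QueueConfiguration{{
				// placeholder ARN for an SQS queue the bucket publishes to
				QueueArn: aws.String("arn:aws:sqs:us-east-1:111122223333:example-queue"),
				Events:   []types.Event{types.EventS3ObjectCreated, types.EventS3ObjectRemovedDelete},
			}},
		},
	})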
func (IntelligentTieringStatus) Values() []IntelligentTieringStatus { return []IntelligentTieringStatus{ "Enabled", @@ -511,6 +579,8 @@ const ( InventoryOptionalFieldIntelligentTieringAccessTier InventoryOptionalField = "IntelligentTieringAccessTier" InventoryOptionalFieldBucketKeyStatus InventoryOptionalField = "BucketKeyStatus" InventoryOptionalFieldChecksumAlgorithm InventoryOptionalField = "ChecksumAlgorithm" + InventoryOptionalFieldObjectAccessControlList InventoryOptionalField = "ObjectAccessControlList" + InventoryOptionalFieldObjectOwner InventoryOptionalField = "ObjectOwner" ) // Values returns all known values for InventoryOptionalField. Note that this can @@ -531,6 +601,8 @@ func (InventoryOptionalField) Values() []InventoryOptionalField { "IntelligentTieringAccessTier", "BucketKeyStatus", "ChecksumAlgorithm", + "ObjectAccessControlList", + "ObjectOwner", } } @@ -552,6 +624,22 @@ func (JSONType) Values() []JSONType { } } +type LocationType string + +// Enum values for LocationType +const ( + LocationTypeAvailabilityZone LocationType = "AvailabilityZone" +) + +// Values returns all known values for LocationType. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (LocationType) Values() []LocationType { + return []LocationType{ + "AvailabilityZone", + } +} + type MetadataDirective string // Enum values for MetadataDirective @@ -596,9 +684,9 @@ const ( MFADeleteDisabled MFADelete = "Disabled" ) -// Values returns all known values for MFADelete. Note that this can be expanded in -// the future, and so it is only as up to date as the client. The ordering of this -// slice is not guaranteed to be stable across updates. +// Values returns all known values for MFADelete. Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. func (MFADelete) Values() []MFADelete { return []MFADelete{ "Enabled", @@ -779,6 +867,8 @@ const ( ObjectStorageClassDeepArchive ObjectStorageClass = "DEEP_ARCHIVE" ObjectStorageClassOutposts ObjectStorageClass = "OUTPOSTS" ObjectStorageClassGlacierIr ObjectStorageClass = "GLACIER_IR" + ObjectStorageClassSnow ObjectStorageClass = "SNOW" + ObjectStorageClassExpressOnezone ObjectStorageClass = "EXPRESS_ONEZONE" ) // Values returns all known values for ObjectStorageClass. Note that this can be @@ -795,6 +885,8 @@ func (ObjectStorageClass) Values() []ObjectStorageClass { "DEEP_ARCHIVE", "OUTPOSTS", "GLACIER_IR", + "SNOW", + "EXPRESS_ONEZONE", } } @@ -814,6 +906,22 @@ func (ObjectVersionStorageClass) Values() []ObjectVersionStorageClass { } } +type OptionalObjectAttributes string + +// Enum values for OptionalObjectAttributes +const ( + OptionalObjectAttributesRestoreStatus OptionalObjectAttributes = "RestoreStatus" +) + +// Values returns all known values for OptionalObjectAttributes. Note that this +// can be expanded in the future, and so it is only as up to date as the client. +// The ordering of this slice is not guaranteed to be stable across updates. 
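OptionalObjectAttributes pairs with a field of the same name on the list-operation inputs: callers opt in to extra per-object data in listings. A hedged sketch, again assuming client, ctx, and a placeholder bucket; the RestoreStatus field on types.Object belongs to the same SDK update but sits outside this excerpt:

	out, err := client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{
		Bucket:                   aws.String("example-bucket"), // placeholder
		OptionalObjectAttributes: []types.OptionalObjectAttributes{types.OptionalObjectAttributesRestoreStatus},
	})
	if err == nil {
		for _, obj := range out.Contents {
			// RestoreStatus stays nil unless the attribute was requested above
			if obj.RestoreStatus != nil && aws.ToBool(obj.RestoreStatus.IsRestoreInProgress) {
				// a restore from an archive tier is still in progress for this key
			}
		}
	}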
+func (OptionalObjectAttributes) Values() []OptionalObjectAttributes { + return []OptionalObjectAttributes{ + "RestoreStatus", + } +} + type OwnerOverride string // Enum values for OwnerOverride @@ -830,6 +938,24 @@ func (OwnerOverride) Values() []OwnerOverride { } } +type PartitionDateSource string + +// Enum values for PartitionDateSource +const ( + PartitionDateSourceEventTime PartitionDateSource = "EventTime" + PartitionDateSourceDeliveryTime PartitionDateSource = "DeliveryTime" +) + +// Values returns all known values for PartitionDateSource. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. +func (PartitionDateSource) Values() []PartitionDateSource { + return []PartitionDateSource{ + "EventTime", + "DeliveryTime", + } +} + type Payer string // Enum values for Payer @@ -838,8 +964,8 @@ const ( PayerBucketOwner Payer = "BucketOwner" ) -// Values returns all known values for Payer. Note that this can be expanded in the -// future, and so it is only as up to date as the client. The ordering of this +// Values returns all known values for Payer. Note that this can be expanded in +// the future, and so it is only as up to date as the client. The ordering of this // slice is not guaranteed to be stable across updates. func (Payer) Values() []Payer { return []Payer{ @@ -934,8 +1060,8 @@ const ( ReplicationRuleStatusDisabled ReplicationRuleStatus = "Disabled" ) -// Values returns all known values for ReplicationRuleStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The +// Values returns all known values for ReplicationRuleStatus. Note that this can +// be expanded in the future, and so it is only as up to date as the client. The // ordering of this slice is not guaranteed to be stable across updates. func (ReplicationRuleStatus) Values() []ReplicationRuleStatus { return []ReplicationRuleStatus{ @@ -948,10 +1074,11 @@ type ReplicationStatus string // Enum values for ReplicationStatus const ( - ReplicationStatusComplete ReplicationStatus = "COMPLETE" - ReplicationStatusPending ReplicationStatus = "PENDING" - ReplicationStatusFailed ReplicationStatus = "FAILED" - ReplicationStatusReplica ReplicationStatus = "REPLICA" + ReplicationStatusComplete ReplicationStatus = "COMPLETE" + ReplicationStatusPending ReplicationStatus = "PENDING" + ReplicationStatusFailed ReplicationStatus = "FAILED" + ReplicationStatusReplica ReplicationStatus = "REPLICA" + ReplicationStatusCompleted ReplicationStatus = "COMPLETED" ) // Values returns all known values for ReplicationStatus. Note that this can be @@ -963,6 +1090,7 @@ func (ReplicationStatus) Values() []ReplicationStatus { "PENDING", "FAILED", "REPLICA", + "COMPLETED", } } @@ -974,8 +1102,8 @@ const ( ReplicationTimeStatusDisabled ReplicationTimeStatus = "Disabled" ) -// Values returns all known values for ReplicationTimeStatus. Note that this can be -// expanded in the future, and so it is only as up to date as the client. The +// Values returns all known values for ReplicationTimeStatus. Note that this can +// be expanded in the future, and so it is only as up to date as the client. The // ordering of this slice is not guaranteed to be stable across updates. 
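PartitionDateSource feeds the PartitionedPrefix and TargetObjectKeyFormat document serializers added earlier in this diff (awsRestxml_serializeDocumentPartitionedPrefix, awsRestxml_serializeDocumentTargetObjectKeyFormat). A minimal sketch of enabling date-partitioned server access logging, assuming client, ctx, and placeholder bucket names:

	_, err := client.PutBucketLogging(ctx, &s3.PutBucketLoggingInput{
		Bucket: aws.String("example-source-bucket"), // placeholder
		BucketLoggingStatus: &types.BucketLoggingStatus{
			LoggingEnabled: &types.LoggingEnabled{
				TargetBucket: aws.String("example-log-bucket"), // placeholder
				TargetPrefix: aws.String("logs/"),
				TargetObjectKeyFormat: &types.TargetObjectKeyFormat{
					// partition log keys by the time of the logged event rather
					// than the delivery time
					PartitionedPrefix: &types.PartitionedPrefix{
						PartitionDateSource: types.PartitionDateSourceEventTime,
					},
				},
			},
		},
	})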
func (ReplicationTimeStatus) Values() []ReplicationTimeStatus { return []ReplicationTimeStatus{ @@ -1007,9 +1135,9 @@ const ( RequestPayerRequester RequestPayer = "requester" ) -// Values returns all known values for RequestPayer. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// Values returns all known values for RequestPayer. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. func (RequestPayer) Values() []RequestPayer { return []RequestPayer{ "requester", @@ -1036,8 +1164,9 @@ type ServerSideEncryption string // Enum values for ServerSideEncryption const ( - ServerSideEncryptionAes256 ServerSideEncryption = "AES256" - ServerSideEncryptionAwsKms ServerSideEncryption = "aws:kms" + ServerSideEncryptionAes256 ServerSideEncryption = "AES256" + ServerSideEncryptionAwsKms ServerSideEncryption = "aws:kms" + ServerSideEncryptionAwsKmsDsse ServerSideEncryption = "aws:kms:dsse" ) // Values returns all known values for ServerSideEncryption. Note that this can be @@ -1047,6 +1176,25 @@ func (ServerSideEncryption) Values() []ServerSideEncryption { return []ServerSideEncryption{ "AES256", "aws:kms", + "aws:kms:dsse", + } +} + +type SessionMode string + +// Enum values for SessionMode +const ( + SessionModeReadOnly SessionMode = "ReadOnly" + SessionModeReadWrite SessionMode = "ReadWrite" +) + +// Values returns all known values for SessionMode. Note that this can be expanded +// in the future, and so it is only as up to date as the client. The ordering of +// this slice is not guaranteed to be stable across updates. +func (SessionMode) Values() []SessionMode { + return []SessionMode{ + "ReadOnly", + "ReadWrite", } } @@ -1058,9 +1206,10 @@ const ( SseKmsEncryptedObjectsStatusDisabled SseKmsEncryptedObjectsStatus = "Disabled" ) -// Values returns all known values for SseKmsEncryptedObjectsStatus. Note that this -// can be expanded in the future, and so it is only as up to date as the client. -// The ordering of this slice is not guaranteed to be stable across updates. +// Values returns all known values for SseKmsEncryptedObjectsStatus. Note that +// this can be expanded in the future, and so it is only as up to date as the +// client. The ordering of this slice is not guaranteed to be stable across +// updates. func (SseKmsEncryptedObjectsStatus) Values() []SseKmsEncryptedObjectsStatus { return []SseKmsEncryptedObjectsStatus{ "Enabled", @@ -1081,11 +1230,13 @@ const ( StorageClassDeepArchive StorageClass = "DEEP_ARCHIVE" StorageClassOutposts StorageClass = "OUTPOSTS" StorageClassGlacierIr StorageClass = "GLACIER_IR" + StorageClassSnow StorageClass = "SNOW" + StorageClassExpressOnezone StorageClass = "EXPRESS_ONEZONE" ) -// Values returns all known values for StorageClass. Note that this can be expanded -// in the future, and so it is only as up to date as the client. The ordering of -// this slice is not guaranteed to be stable across updates. +// Values returns all known values for StorageClass. Note that this can be +// expanded in the future, and so it is only as up to date as the client. The +// ordering of this slice is not guaranteed to be stable across updates. 
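// Editorial note (illustrative sketch, not part of the upstream patch): the
// ServerSideEncryption enum above gains aws:kms:dsse (dual-layer SSE-KMS),
// and SessionMode is new for directory-bucket sessions. A hedged sketch of a
// PutObject using the new encryption value; bucket and key names are
// placeholders:

package main

import (
	"context"
	"log"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	_, err = client.PutObject(ctx, &s3.PutObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("reports/2024-01.csv"),
		Body:   strings.NewReader("id,total\n1,42\n"),
		// New in this SDK revision: dual-layer server-side encryption with KMS.
		ServerSideEncryption: types.ServerSideEncryptionAwsKmsDsse,
	})
	if err != nil {
		log.Fatal(err)
	}
}
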
func (StorageClass) Values() []StorageClass { return []StorageClass{ "STANDARD", @@ -1097,6 +1248,8 @@ func (StorageClass) Values() []StorageClass { "DEEP_ARCHIVE", "OUTPOSTS", "GLACIER_IR", + "SNOW", + "EXPRESS_ONEZONE", } } @@ -1107,8 +1260,8 @@ const ( StorageClassAnalysisSchemaVersionV1 StorageClassAnalysisSchemaVersion = "V_1" ) -// Values returns all known values for StorageClassAnalysisSchemaVersion. Note that -// this can be expanded in the future, and so it is only as up to date as the +// Values returns all known values for StorageClassAnalysisSchemaVersion. Note +// that this can be expanded in the future, and so it is only as up to date as the // client. The ordering of this slice is not guaranteed to be stable across // updates. func (StorageClassAnalysisSchemaVersion) Values() []StorageClassAnalysisSchemaVersion { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go index 18134c52..166484f4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/errors.go @@ -34,11 +34,11 @@ func (e *BucketAlreadyExists) ErrorCode() string { } func (e *BucketAlreadyExists) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// The bucket you tried to create already exists, and you own it. Amazon S3 returns -// this error in all Amazon Web Services Regions except in the North Virginia -// Region. For legacy compatibility, if you re-create an existing bucket that you -// already own in the North Virginia Region, Amazon S3 returns 200 OK and resets -// the bucket access control lists (ACLs). +// The bucket you tried to create already exists, and you own it. Amazon S3 +// returns this error in all Amazon Web Services Regions except in the North +// Virginia Region. For legacy compatibility, if you re-create an existing bucket +// that you already own in the North Virginia Region, Amazon S3 returns 200 OK and +// resets the bucket access control lists (ACLs). type BucketAlreadyOwnedByYou struct { Message *string @@ -64,7 +64,14 @@ func (e *BucketAlreadyOwnedByYou) ErrorCode() string { } func (e *BucketAlreadyOwnedByYou) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } -// Object is archived and inaccessible until restored. +// Object is archived and inaccessible until restored. If the object you are +// retrieving is stored in the S3 Glacier Flexible Retrieval storage class, the S3 +// Glacier Deep Archive storage class, the S3 Intelligent-Tiering Archive Access +// tier, or the S3 Intelligent-Tiering Deep Archive Access tier, before you can +// retrieve the object you must first restore a copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) +// . Otherwise, this operation returns an InvalidObjectState error. For +// information about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) +// in the Amazon S3 User Guide. 
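// Editorial note (illustrative sketch, not part of the upstream patch): the
// expanded InvalidObjectState documentation above suggests the usual handling
// pattern: detect the typed error with errors.As, then issue a RestoreObject
// before retrying the read. A hedged sketch; the *int32 Days field on
// RestoreRequest follows the pointer migration in this vendored revision, and
// the bucket/key names are placeholders:

package main

import (
	"context"
	"errors"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	bucket, key := "example-bucket", "archive/data.bin"
	out, err := client.GetObject(ctx, &s3.GetObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err == nil {
		defer out.Body.Close()
		log.Println("object is readable; no restore needed")
		return
	}
	var ios *types.InvalidObjectState
	if !errors.As(err, &ios) {
		log.Fatal(err)
	}
	// Archived object: request a temporary restored copy, then poll (for
	// example via HeadObject) until the restore completes.
	if _, err := client.RestoreObject(ctx, &s3.RestoreObjectInput{
		Bucket:         aws.String(bucket),
		Key:            aws.String(key),
		RestoreRequest: &types.RestoreRequest{Days: aws.Int32(1)},
	}); err != nil {
		log.Fatal(err)
	}
}
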
type InvalidObjectState struct { Message *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go index bcef62ce..d3f7593f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go @@ -9,23 +9,21 @@ import ( // Specifies the days since the initiation of an incomplete multipart upload that // Amazon S3 will wait before permanently removing all parts of the upload. For -// more information, see Aborting Incomplete Multipart Uploads Using a Bucket -// Lifecycle Policy -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) +// more information, see Aborting Incomplete Multipart Uploads Using a Bucket +// Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) // in the Amazon S3 User Guide. type AbortIncompleteMultipartUpload struct { // Specifies the number of days after which Amazon S3 aborts an incomplete // multipart upload. - DaysAfterInitiation int32 + DaysAfterInitiation *int32 noSmithyDocumentSerde } // Configures the transfer acceleration state for an Amazon S3 bucket. For more -// information, see Amazon S3 Transfer Acceleration -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) in -// the Amazon S3 User Guide. +// information, see Amazon S3 Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) +// in the Amazon S3 User Guide. type AccelerateConfiguration struct { // Specifies the transfer acceleration status of the bucket. @@ -50,8 +48,7 @@ type AccessControlPolicy struct { type AccessControlTranslation struct { // Specifies the replica ownership. For default and valid values, see PUT bucket - // replication - // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) + // replication (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) // in the Amazon S3 API Reference. // // This member is required. @@ -60,9 +57,10 @@ type AccessControlTranslation struct { noSmithyDocumentSerde } -// A conjunction (logical AND) of predicates, which is used in evaluating a metrics -// filter. The operator must have at least two predicates in any combination, and -// an object must match all of the predicates for the filter to apply. +// A conjunction (logical AND) of predicates, which is used in evaluating a +// metrics filter. The operator must have at least two predicates in any +// combination, and an object must match all of the predicates for the filter to +// apply. type AnalyticsAndOperator struct { // The prefix to use when evaluating an AND predicate: The prefix that an object @@ -175,9 +173,7 @@ type AnalyticsS3BucketDestination struct { noSmithyDocumentSerde } -// In terms of implementation, a Bucket is a resource. An Amazon S3 bucket name is -// globally unique, and the namespace is shared by all Amazon Web Services -// accounts. +// In terms of implementation, a Bucket is a resource. type Bucket struct { // Date the bucket was created. This date can change when making changes to your @@ -190,10 +186,24 @@ type Bucket struct { noSmithyDocumentSerde } +// Specifies the information about the bucket that will be created. 
For more +// information about directory buckets, see Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html) +// in the Amazon S3 User Guide. This functionality is only supported by directory +// buckets. +type BucketInfo struct { + + // The number of Availability Zone that's used for redundancy for the bucket. + DataRedundancy DataRedundancy + + // The type of bucket. + Type BucketType + + noSmithyDocumentSerde +} + // Specifies the lifecycle configuration for objects in an Amazon S3 bucket. For -// more information, see Object Lifecycle Management -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) in -// the Amazon S3 User Guide. +// more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) +// in the Amazon S3 User Guide. type BucketLifecycleConfiguration struct { // A lifecycle rule for individual objects in an Amazon S3 bucket. @@ -207,10 +217,9 @@ type BucketLifecycleConfiguration struct { // Container for logging status information. type BucketLoggingStatus struct { - // Describes where logs are stored and the prefix that Amazon S3 assigns to all log - // object keys for a bucket. For more information, see PUT Bucket logging - // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) in - // the Amazon S3 API Reference. + // Describes where logs are stored and the prefix that Amazon S3 assigns to all + // log object keys for a bucket. For more information, see PUT Bucket logging (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) + // in the Amazon S3 API Reference. LoggingEnabled *LoggingEnabled noSmithyDocumentSerde @@ -220,45 +229,49 @@ type BucketLoggingStatus struct { type Checksum struct { // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32 *string // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. 
Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32C *string // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use the API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA1 *string // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA256 *string noSmithyDocumentSerde } -// Container for all (if there are any) keys between Prefix and the next occurrence -// of the string specified by a delimiter. CommonPrefixes lists keys that act like -// subdirectories in the directory specified by Prefix. For example, if the prefix -// is notes/ and the delimiter is a slash (/) as in notes/summer/july, the common -// prefix is notes/summer/. +// Container for all (if there are any) keys between Prefix and the next +// occurrence of the string specified by a delimiter. CommonPrefixes lists keys +// that act like subdirectories in the directory specified by Prefix. For example, +// if the prefix is notes/ and the delimiter is a slash (/) as in +// notes/summer/july, the common prefix is notes/summer/. type CommonPrefix struct { // Container for the specified common prefix. @@ -281,34 +294,38 @@ type CompletedMultipartUpload struct { type CompletedPart struct { // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. 
For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32 *string // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32C *string // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use the API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA1 *string // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. 
Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA256 *string @@ -317,7 +334,15 @@ type CompletedPart struct { // Part number that identifies the part. This is a positive integer between 1 and // 10,000. - PartNumber int32 + // - General purpose buckets - In CompleteMultipartUpload , when a additional + // checksum (including x-amz-checksum-crc32 , x-amz-checksum-crc32c , + // x-amz-checksum-sha1 , or x-amz-checksum-sha256 ) is applied to each part, the + // PartNumber must start at 1 and the part numbers must be consecutive. + // Otherwise, Amazon S3 generates an HTTP 400 Bad Request status code and an + // InvalidPartOrder error code. + // - Directory buckets - In CompleteMultipartUpload , the PartNumber must start + // at 1 and the part numbers must be consecutive. + PartNumber *int32 noSmithyDocumentSerde } @@ -336,15 +361,15 @@ type Condition struct { HttpErrorCodeReturnedEquals *string // The object key name prefix when the redirect is applied. For example, to - // redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html. - // To redirect request for all pages with the prefix docs/, the key prefix will be - // /docs, which identifies all objects in the docs/ folder. Required when the - // parent element Condition is specified and sibling HttpErrorCodeReturnedEquals is - // not specified. If both conditions are specified, both must be true for the + // redirect requests for ExamplePage.html , the key prefix will be ExamplePage.html + // . To redirect request for all pages with the prefix docs/ , the key prefix will + // be /docs , which identifies all objects in the docs/ folder. Required when the + // parent element Condition is specified and sibling HttpErrorCodeReturnedEquals + // is not specified. If both conditions are specified, both must be true for the // redirect to be applied. Replacement must be made for object keys containing // special characters (such as carriage returns) when using XML requests. For more - // information, see XML related object key constraints - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + // information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) + // . KeyPrefixEquals *string noSmithyDocumentSerde @@ -358,34 +383,26 @@ type ContinuationEvent struct { type CopyObjectResult struct { // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumCRC32 *string // The base64-encoded, 32-bit CRC32C checksum of the object. 
This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumCRC32C *string // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumSHA1 *string // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumSHA256 *string @@ -403,34 +420,38 @@ type CopyObjectResult struct { type CopyPartResult struct { // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32 *string // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. 
For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32C *string // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use the API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA1 *string // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA256 *string @@ -444,9 +465,8 @@ type CopyPartResult struct { } // Describes the cross-origin access configuration for objects in an Amazon S3 -// bucket. For more information, see Enabling Cross-Origin Resource Sharing -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon S3 -// User Guide. +// bucket. For more information, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) +// in the Amazon S3 User Guide. type CORSConfiguration struct { // A set of origins and methods (cross-origin access that you want to allow). You @@ -461,8 +481,8 @@ type CORSConfiguration struct { // Specifies a cross-origin access rule for an Amazon S3 bucket. 
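// Editorial note (illustrative sketch, not part of the upstream patch): the
// CORSRule hunk just below migrates MaxAgeSeconds from int32 to *int32, part
// of this revision's broader pointer migration (DaysAfterInitiation, Quiet,
// Days/Years, and others), so callers now set such fields with the aws.Int32
// and aws.Bool helpers. A hedged sketch of writing a CORS configuration;
// origin and bucket names are placeholders:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	_, err = client.PutBucketCors(ctx, &s3.PutBucketCorsInput{
		Bucket: aws.String("example-bucket"),
		CORSConfiguration: &types.CORSConfiguration{
			CORSRules: []types.CORSRule{{
				AllowedMethods: []string{"GET", "PUT"},
				AllowedOrigins: []string{"https://example.com"},
				// *int32 after this change: an unset value is now
				// distinguishable from an explicit zero.
				MaxAgeSeconds: aws.Int32(3600),
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
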
type CORSRule struct { - // An HTTP method that you allow the origin to execute. Valid values are GET, PUT, - // HEAD, POST, and DELETE. + // An HTTP method that you allow the origin to execute. Valid values are GET , PUT + // , HEAD , POST , and DELETE . // // This member is required. AllowedMethods []string @@ -477,16 +497,17 @@ type CORSRule struct { // OPTIONS request, Amazon S3 returns any requested headers that are allowed. AllowedHeaders []string - // One or more headers in the response that you want customers to be able to access - // from their applications (for example, from a JavaScript XMLHttpRequest object). + // One or more headers in the response that you want customers to be able to + // access from their applications (for example, from a JavaScript XMLHttpRequest + // object). ExposeHeaders []string // Unique identifier for the rule. The value cannot be longer than 255 characters. ID *string - // The time in seconds that your browser is to cache the preflight response for the - // specified resource. - MaxAgeSeconds int32 + // The time in seconds that your browser is to cache the preflight response for + // the specified resource. + MaxAgeSeconds *int32 noSmithyDocumentSerde } @@ -494,8 +515,23 @@ type CORSRule struct { // The configuration information for the bucket. type CreateBucketConfiguration struct { - // Specifies the Region where the bucket will be created. If you don't specify a - // Region, the bucket is created in the US East (N. Virginia) Region (us-east-1). + // Specifies the information about the bucket that will be created. This + // functionality is only supported by directory buckets. + Bucket *BucketInfo + + // Specifies the location where the bucket will be created. For directory buckets, + // the location type is Availability Zone. This functionality is only supported by + // directory buckets. + Location *LocationInfo + + // Specifies the Region where the bucket will be created. You might choose a + // Region to optimize latency, minimize costs, or address regulatory requirements. + // For example, if you reside in Europe, you will probably find it advantageous to + // create buckets in the Europe (Ireland) Region. For more information, see + // Accessing a bucket (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + // in the Amazon S3 User Guide. If you don't specify a Region, the bucket is + // created in the US East (N. Virginia) Region (us-east-1) by default. This + // functionality is not supported for directory buckets. LocationConstraint BucketLocationConstraint noSmithyDocumentSerde @@ -508,11 +544,11 @@ type CSVInput struct { // Specifies that CSV field values may contain quoted record delimiters and such // records should be allowed. Default value is FALSE. Setting this value to TRUE // may lower performance. - AllowQuotedRecordDelimiter bool + AllowQuotedRecordDelimiter *bool // A single character used to indicate that a row should be ignored when the // character is present at the start of that row. You can specify any character to - // indicate a comment line. + // indicate a comment line. The default character is # . Default: # Comments *string // A single character used to separate individual fields in a record. You can @@ -520,27 +556,22 @@ type CSVInput struct { FieldDelimiter *string // Describes the first line of input. Valid values are: - // - // * NONE: First line is not - // a header. 
- // - // * IGNORE: First line is a header, but you can't use the header values - // to indicate the column in an expression. You can use column position (such as - // _1, _2, …) to indicate the column (SELECT s._1 FROM OBJECT s). - // - // * Use: First - // line is a header, and you can use the header value to identify a column in an - // expression (SELECT "name" FROM OBJECT). + // - NONE : First line is not a header. + // - IGNORE : First line is a header, but you can't use the header values to + // indicate the column in an expression. You can use column position (such as _1, + // _2, …) to indicate the column ( SELECT s._1 FROM OBJECT s ). + // - Use : First line is a header, and you can use the header value to identify a + // column in an expression ( SELECT "name" FROM OBJECT ). FileHeaderInfo FileHeaderInfo // A single character used for escaping when the field delimiter is part of the - // value. For example, if the value is a, b, Amazon S3 wraps this field value in - // quotation marks, as follows: " a , b ". Type: String Default: " Ancestors: CSV + // value. For example, if the value is a, b , Amazon S3 wraps this field value in + // quotation marks, as follows: " a , b " . Type: String Default: " Ancestors: CSV QuoteCharacter *string // A single character used for escaping the quotation mark character inside an - // already escaped value. For example, the value """ a , b """ is parsed as " a , b - // ". + // already escaped value. For example, the value """ a , b """ is parsed as " a , + // b " . QuoteEscapeCharacter *string // A single character used to separate individual records in the input. Instead of @@ -559,8 +590,8 @@ type CSVOutput struct { FieldDelimiter *string // A single character used for escaping when the field delimiter is part of the - // value. For example, if the value is a, b, Amazon S3 wraps this field value in - // quotation marks, as follows: " a , b ". + // value. For example, if the value is a, b , Amazon S3 wraps this field value in + // quotation marks, as follows: " a , b " . QuoteCharacter *string // The single character used for escaping the quote character inside an already @@ -568,16 +599,12 @@ type CSVOutput struct { QuoteEscapeCharacter *string // Indicates whether to use quotation marks around output fields. - // - // * ALWAYS: Always - // use quotation marks for output fields. - // - // * ASNEEDED: Use quotation marks for - // output fields when needed. + // - ALWAYS : Always use quotation marks for output fields. + // - ASNEEDED : Use quotation marks for output fields when needed. QuoteFields QuoteFields - // A single character used to separate individual records in the output. Instead of - // the default value, you can specify an arbitrary delimiter. + // A single character used to separate individual records in the output. Instead + // of the default value, you can specify an arbitrary delimiter. RecordDelimiter *string noSmithyDocumentSerde @@ -585,26 +612,22 @@ type CSVOutput struct { // The container element for specifying the default Object Lock retention settings // for new objects placed in the specified bucket. -// -// * The DefaultRetention settings -// require both a mode and a period. -// -// * The DefaultRetention period can be either -// Days or Years but you must select one. You cannot specify Days and Years at the -// same time. +// - The DefaultRetention settings require both a mode and a period. +// - The DefaultRetention period can be either Days or Years but you must select +// one. 
You cannot specify Days and Years at the same time. type DefaultRetention struct { // The number of days that you want to specify for the default retention period. - // Must be used with Mode. - Days int32 + // Must be used with Mode . + Days *int32 // The default Object Lock retention mode you want to apply to new objects placed - // in the specified bucket. Must be used with either Days or Years. + // in the specified bucket. Must be used with either Days or Years . Mode ObjectLockRetentionMode // The number of years that you want to specify for the default retention period. - // Must be used with Mode. - Years int32 + // Must be used with Mode . + Years *int32 noSmithyDocumentSerde } @@ -612,14 +635,17 @@ type DefaultRetention struct { // Container for the objects to delete. type Delete struct { - // The objects to delete. + // The object to delete. Directory buckets - For directory buckets, an object + // that's composed entirely of whitespace characters is not supported by the + // DeleteObjects API operation. The request will receive a 400 Bad Request error + // and none of the objects in the request will be deleted. // // This member is required. Objects []ObjectIdentifier // Element to enable quiet mode for the request. When you add this element, you - // must set its value to true. - Quiet bool + // must set its value to true . + Quiet *bool noSmithyDocumentSerde } @@ -627,20 +653,24 @@ type Delete struct { // Information about the deleted object. type DeletedObject struct { - // Specifies whether the versioned object that was permanently deleted was (true) - // or was not (false) a delete marker. In a simple DELETE, this header indicates - // whether (true) or not (false) a delete marker was created. - DeleteMarker bool - - // The version ID of the delete marker created as a result of the DELETE operation. - // If you delete a specific object version, the value returned by this header is - // the version ID of the object version deleted. + // Indicates whether the specified object version that was permanently deleted was + // (true) or was not (false) a delete marker before deletion. In a simple DELETE, + // this header indicates whether (true) or not (false) the current version of the + // object is a delete marker. This functionality is not supported for directory + // buckets. + DeleteMarker *bool + + // The version ID of the delete marker created as a result of the DELETE + // operation. If you delete a specific object version, the value returned by this + // header is the version ID of the object version deleted. This functionality is + // not supported for directory buckets. DeleteMarkerVersionId *string // The name of the deleted object. Key *string - // The version ID of the deleted object. + // The version ID of the deleted object. This functionality is not supported for + // directory buckets. VersionId *string noSmithyDocumentSerde @@ -651,12 +681,12 @@ type DeleteMarkerEntry struct { // Specifies whether the object is (true) or is not (false) the latest version of // an object. - IsLatest bool + IsLatest *bool // The object key. Key *string - // Date and time the object was last modified. + // Date and time when the object was last modified. LastModified *time.Time // The account that created the delete marker.> @@ -671,17 +701,15 @@ type DeleteMarkerEntry struct { // Specifies whether Amazon S3 replicates delete markers. If you specify a Filter // in your replication configuration, you must also include a // DeleteMarkerReplication element. 
If your Filter includes a Tag element, the -// DeleteMarkerReplicationStatus must be set to Disabled, because Amazon S3 does +// DeleteMarkerReplication Status must be set to Disabled, because Amazon S3 does // not support replicating delete markers for tag-based rules. For an example -// configuration, see Basic Rule Configuration -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config). -// For more information about delete marker replication, see Basic Rule -// Configuration -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html). -// If you are using an earlier version of the replication configuration, Amazon S3 -// handles replication of delete markers differently. For more information, see -// Backward Compatibility -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations). +// configuration, see Basic Rule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config) +// . For more information about delete marker replication, see Basic Rule +// Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html) +// . If you are using an earlier version of the replication configuration, Amazon +// S3 handles replication of delete markers differently. For more information, see +// Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations) +// . type DeleteMarkerReplication struct { // Indicates whether to replicate delete markers. Indicates whether to replicate @@ -710,10 +738,10 @@ type Destination struct { // Destination bucket owner account ID. In a cross-account scenario, if you direct // Amazon S3 to change replica ownership to the Amazon Web Services account that - // owns the destination bucket by specifying the AccessControlTranslation property, - // this is the account ID of the destination bucket owner. For more information, - // see Replication Additional Configuration: Changing the Replica Owner - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html) + // owns the destination bucket by specifying the AccessControlTranslation + // property, this is the account ID of the destination bucket owner. For more + // information, see Replication Additional Configuration: Changing the Replica + // Owner (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html) // in the Amazon S3 User Guide. Account *string @@ -721,8 +749,8 @@ type Destination struct { // SourceSelectionCriteria is specified, you must specify this element. EncryptionConfiguration *EncryptionConfiguration - // A container specifying replication metrics-related settings enabling replication - // metrics and events. + // A container specifying replication metrics-related settings enabling + // replication metrics and events. Metrics *Metrics // A container specifying S3 Replication Time Control (S3 RTC), including whether @@ -733,8 +761,7 @@ type Destination struct { // The storage class to use when replicating objects, such as S3 Standard or // reduced redundancy. By default, Amazon S3 uses the storage class of the source // object to create the object replica. 
For valid values, see the StorageClass - // element of the PUT Bucket replication - // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) + // element of the PUT Bucket replication (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) // action in the Amazon S3 API Reference. StorageClass StorageClass @@ -745,20 +772,19 @@ type Destination struct { type Encryption struct { // The server-side encryption algorithm used when storing job results in Amazon S3 - // (for example, AES256, aws:kms). + // (for example, AES256, aws:kms ). // // This member is required. EncryptionType ServerSideEncryption - // If the encryption type is aws:kms, this optional value can be used to specify + // If the encryption type is aws:kms , this optional value can be used to specify // the encryption context for the restore results. KMSContext *string - // If the encryption type is aws:kms, this optional value specifies the ID of the - // symmetric customer managed key to use for encryption of job results. Amazon S3 - // only supports symmetric keys. For more information, see Using symmetric and - // asymmetric keys - // (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // If the encryption type is aws:kms , this optional value specifies the ID of the + // symmetric encryption customer managed key to use for encryption of job results. + // Amazon S3 only supports symmetric encryption KMS keys. For more information, see + // Asymmetric keys in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) // in the Amazon Web Services Key Management Service Developer Guide. KMSKeyId *string @@ -772,9 +798,8 @@ type EncryptionConfiguration struct { // Specifies the ID (Key ARN or Alias ARN) of the customer managed Amazon Web // Services KMS key stored in Amazon Web Services Key Management Service (KMS) for // the destination bucket. Amazon S3 uses this key to encrypt replica objects. - // Amazon S3 only supports symmetric, customer managed KMS keys. For more - // information, see Using symmetric and asymmetric keys - // (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // Amazon S3 only supports symmetric encryption KMS keys. For more information, see + // Asymmetric keys in Amazon Web Services KMS (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) // in the Amazon Web Services Key Management Service Developer Guide. ReplicaKmsKeyID *string @@ -783,7 +808,7 @@ type EncryptionConfiguration struct { // A message that indicates the request is complete and no more messages will be // sent. You should not assume that the request is complete until the client -// receives an EndEvent. +// receives an EndEvent . type EndEvent struct { noSmithyDocumentSerde } @@ -793,934 +818,415 @@ type Error struct { // The error code is a string that uniquely identifies an error condition. It is // meant to be read and understood by programs that detect and handle errors by - // type. Amazon S3 error codes - // - // * Code: AccessDenied - // - // * Description: Access - // Denied - // - // * HTTP Status Code: 403 Forbidden - // - // * SOAP Fault Code Prefix: Client - // - // * - // Code: AccountProblem - // - // * Description: There is a problem with your Amazon Web - // Services account that prevents the action from completing successfully. Contact - // Amazon Web Services Support for further assistance. 
- // - // * HTTP Status Code: 403 - // Forbidden - // - // * SOAP Fault Code Prefix: Client - // - // * Code: AllAccessDisabled - // - // * - // Description: All access to this Amazon S3 resource has been disabled. Contact - // Amazon Web Services Support for further assistance. - // - // * HTTP Status Code: 403 - // Forbidden - // - // * SOAP Fault Code Prefix: Client - // - // * Code: - // AmbiguousGrantByEmailAddress - // - // * Description: The email address you provided is - // associated with more than one account. - // - // * HTTP Status Code: 400 Bad Request - // - // * - // SOAP Fault Code Prefix: Client - // - // * Code: AuthorizationHeaderMalformed - // - // * - // Description: The authorization header you provided is invalid. - // - // * HTTP Status - // Code: 400 Bad Request - // - // * HTTP Status Code: N/A - // - // * Code: BadDigest - // - // * - // Description: The Content-MD5 you specified did not match what we received. - // - // * - // HTTP Status Code: 400 Bad Request - // - // * SOAP Fault Code Prefix: Client - // - // * Code: - // BucketAlreadyExists - // - // * Description: The requested bucket name is not available. - // The bucket namespace is shared by all users of the system. Please select a - // different name and try again. - // - // * HTTP Status Code: 409 Conflict - // - // * SOAP Fault - // Code Prefix: Client - // - // * Code: BucketAlreadyOwnedByYou - // - // * Description: The bucket - // you tried to create already exists, and you own it. Amazon S3 returns this error - // in all Amazon Web Services Regions except in the North Virginia Region. For - // legacy compatibility, if you re-create an existing bucket that you already own - // in the North Virginia Region, Amazon S3 returns 200 OK and resets the bucket - // access control lists (ACLs). - // - // * Code: 409 Conflict (in all Regions except the - // North Virginia Region) - // - // * SOAP Fault Code Prefix: Client - // - // * Code: - // BucketNotEmpty - // - // * Description: The bucket you tried to delete is not empty. - // - // * - // HTTP Status Code: 409 Conflict - // - // * SOAP Fault Code Prefix: Client - // - // * Code: - // CredentialsNotSupported - // - // * Description: This request does not support - // credentials. - // - // * HTTP Status Code: 400 Bad Request - // - // * SOAP Fault Code Prefix: - // Client - // - // * Code: CrossLocationLoggingProhibited - // - // * Description: Cross-location - // logging not allowed. Buckets in one geographic location cannot log information - // to a bucket in another location. - // - // * HTTP Status Code: 403 Forbidden - // - // * SOAP - // Fault Code Prefix: Client - // - // * Code: EntityTooSmall - // - // * Description: Your proposed - // upload is smaller than the minimum allowed object size. - // - // * HTTP Status Code: 400 - // Bad Request - // - // * SOAP Fault Code Prefix: Client - // - // * Code: EntityTooLarge - // - // * - // Description: Your proposed upload exceeds the maximum allowed object size. - // - // * - // HTTP Status Code: 400 Bad Request - // - // * SOAP Fault Code Prefix: Client - // - // * Code: - // ExpiredToken - // - // * Description: The provided token has expired. - // - // * HTTP Status - // Code: 400 Bad Request - // - // * SOAP Fault Code Prefix: Client - // - // * Code: - // IllegalVersioningConfigurationException - // - // * Description: Indicates that the - // versioning configuration specified in the request is invalid. 
- //
- // * HTTP Status
- // Code: 400 Bad Request
- //
- // * SOAP Fault Code Prefix: Client
- //
- // * Code:
- // IncompleteBody
- //
- // * Description: You did not provide the number of bytes specified
- // by the Content-Length HTTP header
- //
- // * HTTP Status Code: 400 Bad Request
- //
- // * SOAP
- // Fault Code Prefix: Client
- //
- // * Code: IncorrectNumberOfFilesInPostRequest
- //
- // *
- // Description: POST requires exactly one file upload per request.
- //
- // * HTTP Status
- // Code: 400 Bad Request
- //
- // * SOAP Fault Code Prefix: Client
- //
- // * Code:
- // InlineDataTooLarge
- //
- // * Description: Inline data exceeds the maximum allowed
- // size.
- //
- // * HTTP Status Code: 400 Bad Request
- //
- // * SOAP Fault Code Prefix: Client
- //
- // *
- // Code: InternalError
- //
- // * Description: We encountered an internal error. Please try
- // again.
- //
- // * HTTP Status Code: 500 Internal Server Error
- //
- // * SOAP Fault Code Prefix:
- // Server
- //
- // * Code: InvalidAccessKeyId
- //
- // * Description: The Amazon Web Services
- // access key ID you provided does not exist in our records.
- //
- // * HTTP Status Code:
- // 403 Forbidden
- //
- // * SOAP Fault Code Prefix: Client
- //
- // * Code:
- // InvalidAddressingHeader
- //
- // * Description: You must specify the Anonymous role.
- //
- // *
- // HTTP Status Code: N/A
- //
- // * SOAP Fault Code Prefix: Client
- //
- // * Code:
- // InvalidArgument
- //
- // * Description: Invalid Argument
- //
- // * HTTP Status Code: 400 Bad
- // Request
- //
- // * SOAP Fault Code Prefix: Client
- //
- // * Code: InvalidBucketName
- //
- // *
- // Description: The specified bucket is not valid.
- //
- // * HTTP Status Code: 400 Bad
- // Request
- //
- // * SOAP Fault Code Prefix: Client
- //
- // * Code: InvalidBucketState
- //
- // *
- // Description: The request is not valid with the current state of the bucket.
- //
- // *
- // HTTP Status Code: 409 Conflict
- //
- // * SOAP Fault Code Prefix: Client
- //
- // * Code:
- // InvalidDigest
- //
- // * Description: The Content-MD5 you specified is not valid.
- //
- // *
- // HTTP Status Code: 400 Bad Request
- //
- // * SOAP Fault Code Prefix: Client
- //
- // * Code:
- // InvalidEncryptionAlgorithmError
- //
- // * Description: The encryption request you
- // specified is not valid. The valid value is AES256.
- //
- // * HTTP Status Code: 400 Bad
- // Request
- //
- // * SOAP Fault Code Prefix: Client
- //
- // * Code: InvalidLocationConstraint
- //
- // *
- // Description: The specified location constraint is not valid. For more
- // information about Regions, see How to Select a Region for Your Buckets
- // (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro).
- //
- // *
- // HTTP Status Code: 400 Bad Request
- //
- // * SOAP Fault Code Prefix: Client
- //
- // * Code:
- // InvalidObjectState
- //
- // * Description: The action is not valid for the current state
- // of the object.
- //
- // * HTTP Status Code: 403 Forbidden
- //
- // * SOAP Fault Code Prefix:
- // Client
- //
- // * Code: InvalidPart
- //
- // * Description: One or more of the specified parts
- // could not be found. The part might not have been uploaded, or the specified
- // entity tag might not have matched the part's entity tag.
- //
- // * HTTP Status Code:
- // 400 Bad Request
- //
- // * SOAP Fault Code Prefix: Client
- //
- // * Code: InvalidPartOrder
- //
- // *
- // Description: The list of parts was not in ascending order. Parts list must be
- // specified in order by part number.
- //
- // * HTTP Status Code: 400 Bad Request
- //
- // * SOAP
- // Fault Code Prefix: Client
- //
- // * Code: InvalidPayer
- //
- // * Description: All access to
- // this object has been disabled. Please contact Amazon Web Services Support for
- // further assistance.
- //
- // * HTTP Status Code: 403 Forbidden
- //
- // * SOAP Fault Code
- // Prefix: Client
- //
- // * Code: InvalidPolicyDocument
- //
- // * Description: The content of the
- // form does not meet the conditions specified in the policy document.
- //
- // * HTTP
- // Status Code: 400 Bad Request
- //
- // * SOAP Fault Code Prefix: Client
- //
- // * Code:
- // InvalidRange
- //
- // * Description: The requested range cannot be satisfied.
- //
- // * HTTP
- // Status Code: 416 Requested Range Not Satisfiable
- //
- // * SOAP Fault Code Prefix:
- // Client
- //
- // * Code: InvalidRequest
- //
- // * Description: Please use AWS4-HMAC-SHA256.
- //
- // *
- // HTTP Status Code: 400 Bad Request
- //
- // * Code: N/A
- //
- // * Code: InvalidRequest
- //
- // *
- // Description: SOAP requests must be made over an HTTPS connection.
- //
- // * HTTP Status
- // Code: 400 Bad Request
- //
- // * SOAP Fault Code Prefix: Client
- //
- // * Code:
- // InvalidRequest
- //
- // * Description: Amazon S3 Transfer Acceleration is not supported
- // for buckets with non-DNS compliant names.
- //
- // * HTTP Status Code: 400 Bad
- // Request
- //
- // * Code: N/A
- //
- // * Code: InvalidRequest
- //
- // * Description: Amazon S3 Transfer
- // Acceleration is not supported for buckets with periods (.) in their names.
- //
- // *
- // HTTP Status Code: 400 Bad Request
- //
- // * Code: N/A
- //
- // * Code: InvalidRequest
- //
- // *
- // Description: Amazon S3 Transfer Accelerate endpoint only supports virtual style
- // requests.
- //
- // * HTTP Status Code: 400 Bad Request
- //
- // * Code: N/A
- //
- // * Code:
- // InvalidRequest
- //
- // * Description: Amazon S3 Transfer Accelerate is not configured
- // on this bucket.
- //
- // * HTTP Status Code: 400 Bad Request
- //
- // * Code: N/A
- //
- // * Code:
- // InvalidRequest
- //
- // * Description: Amazon S3 Transfer Accelerate is disabled on this
- // bucket.
- //
- // * HTTP Status Code: 400 Bad Request
- //
- // * Code: N/A
- //
- // * Code:
- // InvalidRequest
- //
- // * Description: Amazon S3 Transfer Acceleration is not supported
- // on this bucket. Contact Amazon Web Services Support for more information.
- //
- // *
- // HTTP Status Code: 400 Bad Request
- //
- // * Code: N/A
- //
- // * Code: InvalidRequest
- //
- // *
- // Description: Amazon S3 Transfer Acceleration cannot be enabled on this bucket.
- // Contact Amazon Web Services Support for more information.
- //
- // * HTTP Status Code:
- // 400 Bad Request
- //
- // * Code: N/A
- //
- // * Code: InvalidSecurity
- //
- // * Description: The
- // provided security credentials are not valid.
- //
- // * HTTP Status Code: 403
- // Forbidden
- //
- // * SOAP Fault Code Prefix: Client
- //
- // * Code: InvalidSOAPRequest
- //
- // *
- // Description: The SOAP request body is invalid.
- //
- // * HTTP Status Code: 400 Bad
- // Request
- //
- // * SOAP Fault Code Prefix: Client
- //
- // * Code: InvalidStorageClass
- //
- // *
- // Description: The storage class you specified is not valid.
- //
- // * HTTP Status Code:
- // 400 Bad Request
- //
- // * SOAP Fault Code Prefix: Client
- //
- // * Code:
- // InvalidTargetBucketForLogging
- //
- // * Description: The target bucket for logging does
- // not exist, is not owned by you, or does not have the appropriate grants for the
- // log-delivery group.
- //
- // * HTTP Status Code: 400 Bad Request
- //
- // * SOAP Fault Code
- // Prefix: Client
- //
- // * Code: InvalidToken
- //
- // * Description: The provided token is
- // malformed or otherwise invalid.
- //
- // * HTTP Status Code: 400 Bad Request
- //
- // * SOAP
- // Fault Code Prefix: Client
- //
- // * Code: InvalidURI
- //
- // * Description: Couldn't parse the
- // specified URI.
- //
- // * HTTP Status Code: 400 Bad Request
- //
- // * SOAP Fault Code Prefix:
- // Client
- //
- // * Code: KeyTooLongError
- //
- // * Description: Your key is too long.
- //
- // * HTTP
- // Status Code: 400 Bad Request
- //
- // * SOAP Fault Code Prefix: Client
- //
- // * Code:
- // MalformedACLError
- //
- // * Description: The XML you provided was not well-formed or
- // did not validate against our published schema.
- //
- // * HTTP Status Code: 400 Bad
- // Request
- //
- // * SOAP Fault Code Prefix: Client
- //
- // * Code: MalformedPOSTRequest
- //
- // *
- // Description: The body of your POST request is not well-formed
- // multipart/form-data.
- //
- // * HTTP Status Code: 400 Bad Request
- //
- // * SOAP Fault Code
- // Prefix: Client
- //
- // * Code: MalformedXML
- //
- // * Description: This happens when the user
- // sends malformed XML (XML that doesn't conform to the published XSD) for the
- // configuration. The error message is, "The XML you provided was not well-formed
- // or did not validate against our published schema."
- //
- // * HTTP Status Code: 400 Bad
- // Request
- //
- // * SOAP Fault Code Prefix: Client
- //
- // * Code: MaxMessageLengthExceeded
- //
- // *
- // Description: Your request was too big.
- //
- // * HTTP Status Code: 400 Bad Request
- //
- // *
- // SOAP Fault Code Prefix: Client
- //
- // * Code: MaxPostPreDataLengthExceededError
- //
- // *
- // Description: Your POST request fields preceding the upload file were too
- // large.
- //
- // * HTTP Status Code: 400 Bad Request
- //
- // * SOAP Fault Code Prefix: Client
- //
- // *
- // Code: MetadataTooLarge
- //
- // * Description: Your metadata headers exceed the maximum
- // allowed metadata size.
- //
- // * HTTP Status Code: 400 Bad Request
- //
- // * SOAP Fault Code
- // Prefix: Client
- //
- // * Code: MethodNotAllowed
- //
- // * Description: The specified method is
- // not allowed against this resource.
- //
- // * HTTP Status Code: 405 Method Not
- // Allowed
- //
- // * SOAP Fault Code Prefix: Client
- //
- // * Code: MissingAttachment
- //
- // *
- // Description: A SOAP attachment was expected, but none were found.
- //
- // * HTTP Status
- // Code: N/A
- //
- // * SOAP Fault Code Prefix: Client
- //
- // * Code: MissingContentLength
- //
- // *
- // Description: You must provide the Content-Length HTTP header.
- //
- // * HTTP Status
- // Code: 411 Length Required
- //
- // * SOAP Fault Code Prefix: Client
- //
- // * Code:
- // MissingRequestBodyError
- //
- // * Description: This happens when the user sends an
- // empty XML document as a request. The error message is, "Request body is
- // empty."
- //
- // * HTTP Status Code: 400 Bad Request
- //
- // * SOAP Fault Code Prefix:
- // Client
- //
- // * Code: MissingSecurityElement
- //
- // * Description: The SOAP 1.1 request is
- // missing a security element.
- //
- // * HTTP Status Code: 400 Bad Request
- //
- // * SOAP Fault
- // Code Prefix: Client
- //
- // * Code: MissingSecurityHeader
- //
- // * Description: Your request
- // is missing a required header.
- //
- // * HTTP Status Code: 400 Bad Request
- //
- // * SOAP Fault
- // Code Prefix: Client
- //
- // * Code: NoLoggingStatusForKey
- //
- // * Description: There is no
- // such thing as a logging status subresource for a key.
- //
- // * HTTP Status Code: 400
- // Bad Request
- //
- // * SOAP Fault Code Prefix: Client
- //
- // * Code: NoSuchBucket
- //
- // *
- // Description: The specified bucket does not exist.
- //
- // * HTTP Status Code: 404 Not
- // Found
- //
- // * SOAP Fault Code Prefix: Client
- //
- // * Code: NoSuchBucketPolicy
- //
- // *
- // Description: The specified bucket does not have a bucket policy.
- //
- // * HTTP Status
- // Code: 404 Not Found
- //
- // * SOAP Fault Code Prefix: Client
- //
- // * Code: NoSuchKey
- //
- // *
- // Description: The specified key does not exist.
- //
- // * HTTP Status Code: 404 Not
- // Found
- //
- // * SOAP Fault Code Prefix: Client
- //
- // * Code: NoSuchLifecycleConfiguration
- //
- // *
- // Description: The lifecycle configuration does not exist.
- //
- // * HTTP Status Code:
- // 404 Not Found
- //
- // * SOAP Fault Code Prefix: Client
- //
- // * Code: NoSuchUpload
- //
- // *
- // Description: The specified multipart upload does not exist. The upload ID might
- // be invalid, or the multipart upload might have been aborted or completed.
- //
- // *
- // HTTP Status Code: 404 Not Found
- //
- // * SOAP Fault Code Prefix: Client
- //
- // * Code:
- // NoSuchVersion
- //
- // * Description: Indicates that the version ID specified in the
- // request does not match an existing version.
- //
- // * HTTP Status Code: 404 Not
- // Found
- //
- // * SOAP Fault Code Prefix: Client
- //
- // * Code: NotImplemented
- //
- // * Description:
- // A header you provided implies functionality that is not implemented.
- //
- // * HTTP
- // Status Code: 501 Not Implemented
- //
- // * SOAP Fault Code Prefix: Server
- //
- // * Code:
- // NotSignedUp
- //
- // * Description: Your account is not signed up for the Amazon S3
- // service. You must sign up before you can use Amazon S3. You can sign up at the
- // following URL: Amazon S3 (http://aws.amazon.com/s3)
- //
- // * HTTP Status Code: 403
- // Forbidden
- //
- // * SOAP Fault Code Prefix: Client
- //
- // * Code: OperationAborted
- //
- // *
- // Description: A conflicting conditional action is currently in progress against
- // this resource. Try again.
- //
- // * HTTP Status Code: 409 Conflict
- //
- // * SOAP Fault Code
- // Prefix: Client
- //
- // * Code: PermanentRedirect
- //
- // * Description: The bucket you are
- // attempting to access must be addressed using the specified endpoint. Send all
- // future requests to this endpoint.
- //
- // * HTTP Status Code: 301 Moved Permanently
- //
- // *
- // SOAP Fault Code Prefix: Client
- //
- // * Code: PreconditionFailed
- //
- // * Description: At
- // least one of the preconditions you specified did not hold.
- //
- // * HTTP Status Code:
- // 412 Precondition Failed
- //
- // * SOAP Fault Code Prefix: Client
- //
- // * Code: Redirect
- //
- // *
- // Description: Temporary redirect.
- //
- // * HTTP Status Code: 307 Moved Temporarily
- //
- // *
- // SOAP Fault Code Prefix: Client
- //
- // * Code: RestoreAlreadyInProgress
- //
- // * Description:
- // Object restore is already in progress.
- //
- // * HTTP Status Code: 409 Conflict
- //
- // * SOAP
- // Fault Code Prefix: Client
- //
- // * Code: RequestIsNotMultiPartContent
- //
- // * Description:
- // Bucket POST must be of the enclosure-type multipart/form-data.
- //
- // * HTTP Status
- // Code: 400 Bad Request
- //
- // * SOAP Fault Code Prefix: Client
- //
- // * Code:
- // RequestTimeout
- //
- // * Description: Your socket connection to the server was not read
- // from or written to within the timeout period.
- //
- // * HTTP Status Code: 400 Bad
- // Request
- //
- // * SOAP Fault Code Prefix: Client
- //
- // * Code: RequestTimeTooSkewed
- //
- // *
- // Description: The difference between the request time and the server's time is
- // too large.
- //
- // * HTTP Status Code: 403 Forbidden
- //
- // * SOAP Fault Code Prefix:
- // Client
- //
- // * Code: RequestTorrentOfBucketError
- //
- // * Description: Requesting the
- // torrent file of a bucket is not permitted.
- //
- // * HTTP Status Code: 400 Bad
- // Request
- //
- // * SOAP Fault Code Prefix: Client
- //
- // * Code: SignatureDoesNotMatch
- //
- // *
- // Description: The request signature we calculated does not match the signature
- // you provided. Check your Amazon Web Services secret access key and signing
- // method. For more information, see REST Authentication
- // (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) and
- // SOAP Authentication
- // (https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html) for
- // details.
- //
- // * HTTP Status Code: 403 Forbidden
- //
- // * SOAP Fault Code Prefix: Client
- //
- // *
- // Code: ServiceUnavailable
- //
- // * Description: Reduce your request rate.
- //
- // * HTTP
- // Status Code: 503 Service Unavailable
- //
- // * SOAP Fault Code Prefix: Server
- //
- // * Code:
- // SlowDown
- //
- // * Description: Reduce your request rate.
- //
- // * HTTP Status Code: 503 Slow
- // Down
- //
- // * SOAP Fault Code Prefix: Server
- //
- // * Code: TemporaryRedirect
- //
- // *
- // Description: You are being redirected to the bucket while DNS updates.
- //
- // * HTTP
- // Status Code: 307 Moved Temporarily
- //
- // * SOAP Fault Code Prefix: Client
- //
- // * Code:
- // TokenRefreshRequired
- //
- // * Description: The provided token must be refreshed.
- //
- // *
- // HTTP Status Code: 400 Bad Request
- //
- // * SOAP Fault Code Prefix: Client
- //
- // * Code:
- // TooManyBuckets
- //
- // * Description: You have attempted to create more buckets than
- // allowed.
- //
- // * HTTP Status Code: 400 Bad Request
- //
- // * SOAP Fault Code Prefix:
- // Client
- //
- // * Code: UnexpectedContent
- //
- // * Description: This request does not support
- // content.
- //
- // * HTTP Status Code: 400 Bad Request
- //
- // * SOAP Fault Code Prefix:
- // Client
- //
- // * Code: UnresolvableGrantByEmailAddress
- //
- // * Description: The email
- // address you provided does not match any account on record.
- //
- // * HTTP Status Code:
- // 400 Bad Request
- //
- // * SOAP Fault Code Prefix: Client
- //
- // * Code:
- // UserKeyMustBeSpecified
- //
- // * Description: The bucket POST must contain the
- // specified field name. If it is specified, check the order of the fields.
- //
- // * HTTP
- // Status Code: 400 Bad Request
- //
- // * SOAP Fault Code Prefix: Client
+ // type. The following is a list of Amazon S3 error codes. For more information,
+ // see Error responses (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html)
+ // .
+ // - Code: AccessDenied
+ // - Description: Access Denied
+ // - HTTP Status Code: 403 Forbidden
+ // - SOAP Fault Code Prefix: Client
+ // - Code: AccountProblem
+ // - Description: There is a problem with your Amazon Web Services account that
+ // prevents the action from completing successfully. Contact Amazon Web Services
+ // Support for further assistance.
+ // - HTTP Status Code: 403 Forbidden
+ // - SOAP Fault Code Prefix: Client
+ // - Code: AllAccessDisabled
+ // - Description: All access to this Amazon S3 resource has been disabled.
+ // Contact Amazon Web Services Support for further assistance.
+ // - HTTP Status Code: 403 Forbidden
+ // - SOAP Fault Code Prefix: Client
+ // - Code: AmbiguousGrantByEmailAddress
+ // - Description: The email address you provided is associated with more than
+ // one account.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: AuthorizationHeaderMalformed
+ // - Description: The authorization header you provided is invalid.
+ // - HTTP Status Code: 400 Bad Request
+ // - HTTP Status Code: N/A
+ // - Code: BadDigest
+ // - Description: The Content-MD5 you specified did not match what we received.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: BucketAlreadyExists
+ // - Description: The requested bucket name is not available. The bucket
+ // namespace is shared by all users of the system. Please select a different name
+ // and try again.
+ // - HTTP Status Code: 409 Conflict
+ // - SOAP Fault Code Prefix: Client
+ // - Code: BucketAlreadyOwnedByYou
+ // - Description: The bucket you tried to create already exists, and you own it.
+ // Amazon S3 returns this error in all Amazon Web Services Regions except in the
+ // North Virginia Region. For legacy compatibility, if you re-create an existing
+ // bucket that you already own in the North Virginia Region, Amazon S3 returns 200
+ // OK and resets the bucket access control lists (ACLs).
+ // - Code: 409 Conflict (in all Regions except the North Virginia Region)
+ // - SOAP Fault Code Prefix: Client
+ // - Code: BucketNotEmpty
+ // - Description: The bucket you tried to delete is not empty.
+ // - HTTP Status Code: 409 Conflict
+ // - SOAP Fault Code Prefix: Client
+ // - Code: CredentialsNotSupported
+ // - Description: This request does not support credentials.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: CrossLocationLoggingProhibited
+ // - Description: Cross-location logging not allowed. Buckets in one geographic
+ // location cannot log information to a bucket in another location.
+ // - HTTP Status Code: 403 Forbidden
+ // - SOAP Fault Code Prefix: Client
+ // - Code: EntityTooSmall
+ // - Description: Your proposed upload is smaller than the minimum allowed
+ // object size.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: EntityTooLarge
+ // - Description: Your proposed upload exceeds the maximum allowed object size.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: ExpiredToken
+ // - Description: The provided token has expired.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: IllegalVersioningConfigurationException
+ // - Description: Indicates that the versioning configuration specified in the
+ // request is invalid.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: IncompleteBody
+ // - Description: You did not provide the number of bytes specified by the
+ // Content-Length HTTP header
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: IncorrectNumberOfFilesInPostRequest
+ // - Description: POST requires exactly one file upload per request.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InlineDataTooLarge
+ // - Description: Inline data exceeds the maximum allowed size.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InternalError
+ // - Description: We encountered an internal error. Please try again.
+ // - HTTP Status Code: 500 Internal Server Error
+ // - SOAP Fault Code Prefix: Server
+ // - Code: InvalidAccessKeyId
+ // - Description: The Amazon Web Services access key ID you provided does not
+ // exist in our records.
+ // - HTTP Status Code: 403 Forbidden
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InvalidAddressingHeader
+ // - Description: You must specify the Anonymous role.
+ // - HTTP Status Code: N/A
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InvalidArgument
+ // - Description: Invalid Argument
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InvalidBucketName
+ // - Description: The specified bucket is not valid.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InvalidBucketState
+ // - Description: The request is not valid with the current state of the bucket.
+ // - HTTP Status Code: 409 Conflict
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InvalidDigest
+ // - Description: The Content-MD5 you specified is not valid.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InvalidEncryptionAlgorithmError
+ // - Description: The encryption request you specified is not valid. The valid
+ // value is AES256.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InvalidLocationConstraint
+ // - Description: The specified location constraint is not valid. For more
+ // information about Regions, see How to Select a Region for Your Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro)
+ // .
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InvalidObjectState
+ // - Description: The action is not valid for the current state of the object.
+ // - HTTP Status Code: 403 Forbidden
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InvalidPart
+ // - Description: One or more of the specified parts could not be found. The
+ // part might not have been uploaded, or the specified entity tag might not have
+ // matched the part's entity tag.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InvalidPartOrder
+ // - Description: The list of parts was not in ascending order. Parts list must
+ // be specified in order by part number.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InvalidPayer
+ // - Description: All access to this object has been disabled. Please contact
+ // Amazon Web Services Support for further assistance.
+ // - HTTP Status Code: 403 Forbidden
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InvalidPolicyDocument
+ // - Description: The content of the form does not meet the conditions specified
+ // in the policy document.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InvalidRange
+ // - Description: The requested range cannot be satisfied.
+ // - HTTP Status Code: 416 Requested Range Not Satisfiable
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InvalidRequest
+ // - Description: Please use AWS4-HMAC-SHA256 .
+ // - HTTP Status Code: 400 Bad Request
+ // - Code: N/A
+ // - Code: InvalidRequest
+ // - Description: SOAP requests must be made over an HTTPS connection.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InvalidRequest
+ // - Description: Amazon S3 Transfer Acceleration is not supported for buckets
+ // with non-DNS compliant names.
+ // - HTTP Status Code: 400 Bad Request
+ // - Code: N/A
+ // - Code: InvalidRequest
+ // - Description: Amazon S3 Transfer Acceleration is not supported for buckets
+ // with periods (.) in their names.
+ // - HTTP Status Code: 400 Bad Request
+ // - Code: N/A
+ // - Code: InvalidRequest
+ // - Description: Amazon S3 Transfer Accelerate endpoint only supports virtual
+ // style requests.
+ // - HTTP Status Code: 400 Bad Request
+ // - Code: N/A
+ // - Code: InvalidRequest
+ // - Description: Amazon S3 Transfer Accelerate is not configured on this
+ // bucket.
+ // - HTTP Status Code: 400 Bad Request
+ // - Code: N/A
+ // - Code: InvalidRequest
+ // - Description: Amazon S3 Transfer Accelerate is disabled on this bucket.
+ // - HTTP Status Code: 400 Bad Request
+ // - Code: N/A
+ // - Code: InvalidRequest
+ // - Description: Amazon S3 Transfer Acceleration is not supported on this
+ // bucket. Contact Amazon Web Services Support for more information.
+ // - HTTP Status Code: 400 Bad Request
+ // - Code: N/A
+ // - Code: InvalidRequest
+ // - Description: Amazon S3 Transfer Acceleration cannot be enabled on this
+ // bucket. Contact Amazon Web Services Support for more information.
+ // - HTTP Status Code: 400 Bad Request
+ // - Code: N/A
+ // - Code: InvalidSecurity
+ // - Description: The provided security credentials are not valid.
+ // - HTTP Status Code: 403 Forbidden
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InvalidSOAPRequest
+ // - Description: The SOAP request body is invalid.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InvalidStorageClass
+ // - Description: The storage class you specified is not valid.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InvalidTargetBucketForLogging
+ // - Description: The target bucket for logging does not exist, is not owned by
+ // you, or does not have the appropriate grants for the log-delivery group.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InvalidToken
+ // - Description: The provided token is malformed or otherwise invalid.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: InvalidURI
+ // - Description: Couldn't parse the specified URI.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: KeyTooLongError
+ // - Description: Your key is too long.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: MalformedACLError
+ // - Description: The XML you provided was not well-formed or did not validate
+ // against our published schema.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: MalformedPOSTRequest
+ // - Description: The body of your POST request is not well-formed
+ // multipart/form-data.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: MalformedXML
+ // - Description: This happens when the user sends malformed XML (XML that
+ // doesn't conform to the published XSD) for the configuration. The error message
+ // is, "The XML you provided was not well-formed or did not validate against our
+ // published schema."
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: MaxMessageLengthExceeded
+ // - Description: Your request was too big.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: MaxPostPreDataLengthExceededError
+ // - Description: Your POST request fields preceding the upload file were too
+ // large.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: MetadataTooLarge
+ // - Description: Your metadata headers exceed the maximum allowed metadata
+ // size.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: MethodNotAllowed
+ // - Description: The specified method is not allowed against this resource.
+ // - HTTP Status Code: 405 Method Not Allowed
+ // - SOAP Fault Code Prefix: Client
+ // - Code: MissingAttachment
+ // - Description: A SOAP attachment was expected, but none were found.
+ // - HTTP Status Code: N/A
+ // - SOAP Fault Code Prefix: Client
+ // - Code: MissingContentLength
+ // - Description: You must provide the Content-Length HTTP header.
+ // - HTTP Status Code: 411 Length Required
+ // - SOAP Fault Code Prefix: Client
+ // - Code: MissingRequestBodyError
+ // - Description: This happens when the user sends an empty XML document as a
+ // request. The error message is, "Request body is empty."
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: MissingSecurityElement
+ // - Description: The SOAP 1.1 request is missing a security element.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: MissingSecurityHeader
+ // - Description: Your request is missing a required header.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: NoLoggingStatusForKey
+ // - Description: There is no such thing as a logging status subresource for a
+ // key.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: NoSuchBucket
+ // - Description: The specified bucket does not exist.
+ // - HTTP Status Code: 404 Not Found
+ // - SOAP Fault Code Prefix: Client
+ // - Code: NoSuchBucketPolicy
+ // - Description: The specified bucket does not have a bucket policy.
+ // - HTTP Status Code: 404 Not Found
+ // - SOAP Fault Code Prefix: Client
+ // - Code: NoSuchKey
+ // - Description: The specified key does not exist.
+ // - HTTP Status Code: 404 Not Found
+ // - SOAP Fault Code Prefix: Client
+ // - Code: NoSuchLifecycleConfiguration
+ // - Description: The lifecycle configuration does not exist.
+ // - HTTP Status Code: 404 Not Found
+ // - SOAP Fault Code Prefix: Client
+ // - Code: NoSuchUpload
+ // - Description: The specified multipart upload does not exist. The upload ID
+ // might be invalid, or the multipart upload might have been aborted or completed.
+ // - HTTP Status Code: 404 Not Found
+ // - SOAP Fault Code Prefix: Client
+ // - Code: NoSuchVersion
+ // - Description: Indicates that the version ID specified in the request does
+ // not match an existing version.
+ // - HTTP Status Code: 404 Not Found
+ // - SOAP Fault Code Prefix: Client
+ // - Code: NotImplemented
+ // - Description: A header you provided implies functionality that is not
+ // implemented.
+ // - HTTP Status Code: 501 Not Implemented
+ // - SOAP Fault Code Prefix: Server
+ // - Code: NotSignedUp
+ // - Description: Your account is not signed up for the Amazon S3 service. You
+ // must sign up before you can use Amazon S3. You can sign up at the following URL:
+ // Amazon S3 (http://aws.amazon.com/s3)
+ // - HTTP Status Code: 403 Forbidden
+ // - SOAP Fault Code Prefix: Client
+ // - Code: OperationAborted
+ // - Description: A conflicting conditional action is currently in progress
+ // against this resource. Try again.
+ // - HTTP Status Code: 409 Conflict
+ // - SOAP Fault Code Prefix: Client
+ // - Code: PermanentRedirect
+ // - Description: The bucket you are attempting to access must be addressed
+ // using the specified endpoint. Send all future requests to this endpoint.
+ // - HTTP Status Code: 301 Moved Permanently
+ // - SOAP Fault Code Prefix: Client
+ // - Code: PreconditionFailed
+ // - Description: At least one of the preconditions you specified did not hold.
+ // - HTTP Status Code: 412 Precondition Failed
+ // - SOAP Fault Code Prefix: Client
+ // - Code: Redirect
+ // - Description: Temporary redirect.
+ // - HTTP Status Code: 307 Moved Temporarily
+ // - SOAP Fault Code Prefix: Client
+ // - Code: RestoreAlreadyInProgress
+ // - Description: Object restore is already in progress.
+ // - HTTP Status Code: 409 Conflict
+ // - SOAP Fault Code Prefix: Client
+ // - Code: RequestIsNotMultiPartContent
+ // - Description: Bucket POST must be of the enclosure-type multipart/form-data.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: RequestTimeout
+ // - Description: Your socket connection to the server was not read from or
+ // written to within the timeout period.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: RequestTimeTooSkewed
+ // - Description: The difference between the request time and the server's time
+ // is too large.
+ // - HTTP Status Code: 403 Forbidden
+ // - SOAP Fault Code Prefix: Client
+ // - Code: RequestTorrentOfBucketError
+ // - Description: Requesting the torrent file of a bucket is not permitted.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: SignatureDoesNotMatch
+ // - Description: The request signature we calculated does not match the
+ // signature you provided. Check your Amazon Web Services secret access key and
+ // signing method. For more information, see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html)
+ // and SOAP Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html)
+ // for details.
+ // - HTTP Status Code: 403 Forbidden
+ // - SOAP Fault Code Prefix: Client
+ // - Code: ServiceUnavailable
+ // - Description: Service is unable to handle request.
+ // - HTTP Status Code: 503 Service Unavailable
+ // - SOAP Fault Code Prefix: Server
+ // - Code: SlowDown
+ // - Description: Reduce your request rate.
+ // - HTTP Status Code: 503 Slow Down
+ // - SOAP Fault Code Prefix: Server
+ // - Code: TemporaryRedirect
+ // - Description: You are being redirected to the bucket while DNS updates.
+ // - HTTP Status Code: 307 Moved Temporarily
+ // - SOAP Fault Code Prefix: Client
+ // - Code: TokenRefreshRequired
+ // - Description: The provided token must be refreshed.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: TooManyBuckets
+ // - Description: You have attempted to create more buckets than allowed.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: UnexpectedContent
+ // - Description: This request does not support content.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: UnresolvableGrantByEmailAddress
+ // - Description: The email address you provided does not match any account on
+ // record.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
+ // - Code: UserKeyMustBeSpecified
+ // - Description: The bucket POST must contain the specified field name. If it
+ // is specified, check the order of the fields.
+ // - HTTP Status Code: 400 Bad Request
+ // - SOAP Fault Code Prefix: Client
 Code *string

 // The error key.
@@ -1734,7 +1240,8 @@ type Error struct {
 // error message.
 Message *string

- // The version ID of the error.
+ // The version ID of the error. This functionality is not supported for directory
+ // buckets.
 VersionId *string

 noSmithyDocumentSerde
@@ -1745,9 +1252,9 @@ type ErrorDocument struct {

 // The object key name to use when a 4XX class error occurs. Replacement must be
 // made for object keys containing special characters (such as carriage returns)
- // when using XML requests. For more information, see XML related object key
- // constraints
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints).
+ // when using XML requests. For more information, see XML related object key
+ // constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints)
+ // .
 //
 // This member is required.
 Key *string
@@ -1761,12 +1268,11 @@ type EventBridgeConfiguration struct {
 }

 // Optional configuration to replicate existing source bucket objects. For more
-// information, see Replicating Existing Objects
-// (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication)
+// information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication)
 // in the Amazon S3 User Guide.
 type ExistingObjectReplication struct {

- //
+ // Specifies whether Amazon S3 replicates existing source bucket objects.
 //
 // This member is required.
 Status ExistingObjectReplicationStatus
@@ -1781,9 +1287,8 @@ type FilterRule struct {

 // The object key name prefix or suffix identifying one or more objects to which
 // the filtering rule applies. The maximum length is 1,024 characters. Overlapping
 // prefixes and suffixes are not supported. For more information, see Configuring
- // Event Notifications
- // (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the
- // Amazon S3 User Guide.
+ // Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+ // in the Amazon S3 User Guide.
 Name FilterRuleName

 // The value that the filter searches for in object key names.
@@ -1798,10 +1303,10 @@ type GetObjectAttributesParts struct {
 // Indicates whether the returned list of parts is truncated. A value of true
 // indicates that the list was truncated. A list can be truncated if the number of
 // parts exceeds the limit returned in the MaxParts element.
- IsTruncated bool
+ IsTruncated *bool

 // The maximum number of parts allowed in the response.
- MaxParts int32
+ MaxParts *int32

 // When a list is truncated, this element specifies the last part in the list, as
 // well as the value to use for the PartNumberMarker request parameter in a
@@ -1813,10 +1318,17 @@ type GetObjectAttributesParts struct {
 // A container for elements related to a particular part. A response can contain
 // zero or more Parts elements.
+ // - General purpose buckets - For GetObjectAttributes , if a additional checksum
+ // (including x-amz-checksum-crc32 , x-amz-checksum-crc32c , x-amz-checksum-sha1
+ // , or x-amz-checksum-sha256 ) isn't applied to the object specified in the
+ // request, the response doesn't return Part .
+ // - Directory buckets - For GetObjectAttributes , no matter whether a additional
+ // checksum is applied to the object specified in the request, the response returns
+ // Part .
 Parts []ObjectPart

 // The total number of parts.
- TotalPartsCount int32
+ TotalPartsCount *int32

 noSmithyDocumentSerde
}
@@ -1855,32 +1367,19 @@ type Grantee struct {
 // Screen name of the grantee.
 DisplayName *string

- // Email address of the grantee. Using email addresses to specify a grantee is only
- // supported in the following Amazon Web Services Regions:
- //
- // * US East (N.
- // Virginia)
- //
- // * US West (N. California)
- //
- // * US West (Oregon)
- //
- // * Asia Pacific
- // (Singapore)
- //
- // * Asia Pacific (Sydney)
- //
- // * Asia Pacific (Tokyo)
- //
- // * Europe
- // (Ireland)
- //
- // * South America (São Paulo)
- //
- // For a list of all the Amazon S3
- // supported Regions and endpoints, see Regions and Endpoints
- // (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the
- // Amazon Web Services General Reference.
+ // Email address of the grantee. Using email addresses to specify a grantee is
+ // only supported in the following Amazon Web Services Regions:
+ // - US East (N. Virginia)
+ // - US West (N. California)
+ // - US West (Oregon)
+ // - Asia Pacific (Singapore)
+ // - Asia Pacific (Sydney)
+ // - Asia Pacific (Tokyo)
+ // - Europe (Ireland)
+ // - South America (São Paulo)
+ // For a list of all the Amazon S3 supported Regions and endpoints, see Regions
+ // and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+ // in the Amazon Web Services General Reference.
 EmailAddress *string

 // The canonical user ID of the grantee.
@@ -1901,8 +1400,8 @@ type IndexDocument struct {
 // key name images/index.html) The suffix must not be empty and must not include a
 // slash character. Replacement must be made for object keys containing special
 // characters (such as carriage returns) when using XML requests. For more
- // information, see XML related object key constraints
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints).
+ // information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints)
+ // .
 //
 // This member is required.
 Suffix *string
@@ -1913,11 +1412,15 @@ type Initiator struct {

- // Name of the Principal.
+ // Name of the Principal. This functionality is not supported for directory
+ // buckets.
 DisplayName *string

 // If the principal is an Amazon Web Services account, it provides the Canonical
 // User ID. If the principal is an IAM User, it provides a user ARN value.
+ // Directory buckets - If the principal is an Amazon Web Services account, it
+ // provides the Amazon Web Services account ID. If the principal is an IAM User, it
+ // provides a user ARN value.
 ID *string

 noSmithyDocumentSerde
@@ -1942,8 +1445,8 @@ type InputSerialization struct {
 noSmithyDocumentSerde
}

-// A container for specifying S3 Intelligent-Tiering filters. The filters determine
-// the subset of objects to which the rule applies.
+// A container for specifying S3 Intelligent-Tiering filters. The filters
+// determine the subset of objects to which the rule applies.
 type IntelligentTieringAndOperator struct {

 // An object key name prefix that identifies the subset of objects to which the
@@ -1959,8 +1462,8 @@ type IntelligentTieringAndOperator struct {

 // Specifies the S3 Intelligent-Tiering configuration for an Amazon S3 bucket. For
 // information about the S3 Intelligent-Tiering storage class, see Storage class
-// for automatically optimizing frequently and infrequently accessed objects
-// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access).
+// for automatically optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access)
+// .
 type IntelligentTieringConfiguration struct {

 // The ID used to identify the S3 Intelligent-Tiering configuration.
@@ -1978,8 +1481,8 @@ type IntelligentTieringConfiguration struct {
 // This member is required.
 Tierings []Tiering

- // Specifies a bucket filter. The configuration only includes objects that meet the
- // filter's criteria.
+ // Specifies a bucket filter. The configuration only includes objects that meet
+ // the filter's criteria.
 Filter *IntelligentTieringFilter

 noSmithyDocumentSerde
@@ -1989,16 +1492,16 @@ type IntelligentTieringConfiguration struct {
 // configuration applies to.
 type IntelligentTieringFilter struct {

- // A conjunction (logical AND) of predicates, which is used in evaluating a metrics
- // filter. The operator must have at least two predicates, and an object must match
- // all of the predicates in order for the filter to apply.
+ // A conjunction (logical AND) of predicates, which is used in evaluating a
+ // metrics filter. The operator must have at least two predicates, and an object
+ // must match all of the predicates in order for the filter to apply.
 And *IntelligentTieringAndOperator

 // An object key name prefix that identifies the subset of objects to which the
 // rule applies. Replacement must be made for object keys containing special
 // characters (such as carriage returns) when using XML requests. For more
- // information, see XML related object key constraints
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints).
+ // information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints)
+ // .
 Prefix *string

 // A container of a key value name pair.
@@ -2008,8 +1511,7 @@ type IntelligentTieringFilter struct {
 }

 // Specifies the inventory configuration for an Amazon S3 bucket. For more
-// information, see GET Bucket inventory
-// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html)
+// information, see GET Bucket inventory (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html)
 // in the Amazon S3 API Reference.
 type InventoryConfiguration struct {
@@ -2023,27 +1525,27 @@ type InventoryConfiguration struct {
 // This member is required.
 Id *string

- // Object versions to include in the inventory list. If set to All, the list
+ // Object versions to include in the inventory list. If set to All , the list
 // includes all the object versions, which adds the version-related fields
- // VersionId, IsLatest, and DeleteMarker to the list. If set to Current, the list
- // does not contain these version-related fields.
+ // VersionId , IsLatest , and DeleteMarker to the list. If set to Current , the
+ // list does not contain these version-related fields.
 //
 // This member is required.
 IncludedObjectVersions InventoryIncludedObjectVersions

- // Specifies whether the inventory is enabled or disabled. If set to True, an
- // inventory list is generated. If set to False, no inventory list is generated.
+ // Specifies whether the inventory is enabled or disabled. If set to True , an
+ // inventory list is generated. If set to False , no inventory list is generated.
 //
 // This member is required.
- IsEnabled bool
+ IsEnabled *bool

 // Specifies the schedule for generating inventory results.
 //
 // This member is required.
 Schedule *InventorySchedule

- // Specifies an inventory filter. The inventory only includes objects that meet the
- // filter's criteria.
+ // Specifies an inventory filter. The inventory only includes objects that meet
+ // the filter's criteria.
 Filter *InventoryFilter

 // Contains the optional fields that are included in the inventory results.
@@ -2077,8 +1579,8 @@ type InventoryEncryption struct {
 noSmithyDocumentSerde
}

-// Specifies an inventory filter. The inventory only includes objects that meet the
-// filter's criteria.
+// Specifies an inventory filter. The inventory only includes objects that meet
+// the filter's criteria.
 type InventoryFilter struct {

 // The prefix that an object must have to be included in the inventory results.
@@ -2154,9 +1656,8 @@ type JSONOutput struct {
 type LambdaFunctionConfiguration struct {

 // The Amazon S3 bucket event for which to invoke the Lambda function. For more
- // information, see Supported Event Types
- // (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the
- // Amazon S3 User Guide.
+ // information, see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+ // in the Amazon S3 User Guide.
 //
 // This member is required.
 Events []Event
@@ -2168,9 +1669,8 @@ type LambdaFunctionConfiguration struct {
 LambdaFunctionArn *string

 // Specifies object key name filtering rules. For information about key name
- // filtering, see Configuring Event Notifications
- // (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the
- // Amazon S3 User Guide.
+ // filtering, see Configuring event notifications using object key name filtering (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html)
+ // in the Amazon S3 User Guide.
 Filter *NotificationConfigurationFilter

 // An optional unique identifier for configurations in a notification
@@ -2180,27 +1680,31 @@ type LambdaFunctionConfiguration struct {
 noSmithyDocumentSerde
}

-// Container for the expiration for the lifecycle of the object.
+// Container for the expiration for the lifecycle of the object. For more
+// information see, Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html)
+// in the Amazon S3 User Guide.
 type LifecycleExpiration struct {

- // Indicates at what date the object is to be moved or deleted. Should be in GMT
- // ISO 8601 Format.
+ // Indicates at what date the object is to be moved or deleted. The date value
+ // must conform to the ISO 8601 format. The time is always midnight UTC.
 Date *time.Time

 // Indicates the lifetime, in days, of the objects that are subject to the rule.
 // The value must be a non-zero positive integer.
- Days int32
+ Days *int32

 // Indicates whether Amazon S3 will remove a delete marker with no noncurrent
 // versions. If set to true, the delete marker will be expired; if set to false the
 // policy takes no action. This cannot be specified with Days or Date in a
 // Lifecycle Expiration Policy.
- ExpiredObjectDeleteMarker bool
+ ExpiredObjectDeleteMarker *bool

 noSmithyDocumentSerde
}

-// A lifecycle rule for individual objects in an Amazon S3 bucket.
+// A lifecycle rule for individual objects in an Amazon S3 bucket. For more
+// information see, Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html)
+// in the Amazon S3 User Guide.
 type LifecycleRule struct {

 // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule is
@@ -2211,9 +1715,8 @@ type LifecycleRule struct {

 // Specifies the days since the initiation of an incomplete multipart upload that
 // Amazon S3 will wait before permanently removing all parts of the upload. For
- // more information, see Aborting Incomplete Multipart Uploads Using a Bucket
- // Lifecycle Policy
- // (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
+ // more information, see Aborting Incomplete Multipart Uploads Using a Bucket
+ // Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
 // in the Amazon S3 User Guide.
 AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload
@@ -2222,7 +1725,7 @@ type LifecycleRule struct {
 Expiration *LifecycleExpiration

 // The Filter is used to identify objects that a Lifecycle Rule applies to. A
- // Filter must have exactly one of Prefix, Tag, or And specified. Filter is
+ // Filter must have exactly one of Prefix , Tag , or And specified. Filter is
 // required if the LifecycleRule does not contain a Prefix element.
 Filter LifecycleRuleFilter
@@ -2246,8 +1749,8 @@ type LifecycleRule struct {

 // Prefix identifying one or more objects to which the rule applies. This is no
 // longer used; use Filter instead. Replacement must be made for object keys
 // containing special characters (such as carriage returns) when using XML
- // requests. For more information, see XML related object key constraints
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints).
+ // requests. For more information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints)
+ // .
 //
 // Deprecated: This member has been deprecated.
 Prefix *string
@@ -2264,10 +1767,10 @@ type LifecycleRuleAndOperator struct {

 // Minimum object size to which the rule applies.
- ObjectSizeGreaterThan int64
+ ObjectSizeGreaterThan *int64

 // Maximum object size to which the rule applies.
- ObjectSizeLessThan int64
+ ObjectSizeLessThan *int64

 // Prefix identifying one or more objects to which the rule applies.
 Prefix *string
@@ -2280,7 +1783,7 @@ type LifecycleRuleAndOperator struct {
 }

 // The Filter is used to identify objects that a Lifecycle Rule applies to. A
-// Filter must have exactly one of Prefix, Tag, or And specified.
+// Filter must have exactly one of Prefix , Tag , or And specified.
 //
 // The following types satisfy this interface:
 //
@@ -2324,9 +1827,9 @@ func (*LifecycleRuleFilterMemberObjectSizeLessThan) isLifecycleRuleFilter() {}

 // Prefix identifying one or more objects to which the rule applies. Replacement
 // must be made for object keys containing special characters (such as carriage
-// returns) when using XML requests. For more information, see XML related object
-// key constraints
-// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints).
+// returns) when using XML requests. For more information, see XML related object
+// key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints)
+// .
 type LifecycleRuleFilterMemberPrefix struct {
 Value string
@@ -2344,10 +1847,27 @@ type LifecycleRuleFilterMemberTag struct {

 func (*LifecycleRuleFilterMemberTag) isLifecycleRuleFilter() {}

-// Describes where logs are stored and the prefix that Amazon S3 assigns to all log
-// object keys for a bucket. For more information, see PUT Bucket logging
-// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) in
-// the Amazon S3 API Reference.
+// Specifies the location where the bucket will be created. For directory buckets,
+// the location type is Availability Zone. For more information about directory
+// buckets, see Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html)
+// in the Amazon S3 User Guide. This functionality is only supported by directory
+// buckets.
+type LocationInfo struct {
+
+ // The name of the location where the bucket will be created. For directory
+ // buckets, the AZ ID of the Availability Zone where the bucket will be created. An
+ // example AZ ID value is usw2-az2 .
+ Name *string
+
+ // The type of location where the bucket will be created.
+ Type LocationType
+
+ noSmithyDocumentSerde
+}
+
+// Describes where logs are stored and the prefix that Amazon S3 assigns to all
+// log object keys for a bucket. For more information, see PUT Bucket logging (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html)
+// in the Amazon S3 API Reference.
 type LoggingEnabled struct {

 // Specifies the bucket where you want Amazon S3 to store server access logs. You
@@ -2360,37 +1880,39 @@ type LoggingEnabled struct {
 // This member is required.
 TargetBucket *string

- // A prefix for all log object keys. If you store log files from multiple Amazon S3
- // buckets in a single bucket, you can use a prefix to distinguish which log files
- // came from which bucket.
+ // A prefix for all log object keys. If you store log files from multiple Amazon
+ // S3 buckets in a single bucket, you can use a prefix to distinguish which log
+ // files came from which bucket.
 //
 // This member is required.
 TargetPrefix *string

 // Container for granting information. Buckets that use the bucket owner enforced
 // setting for Object Ownership don't support target grants. For more information,
- // see Permissions for server access log delivery
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general)
+ // see Permissions for server access log delivery (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general)
 // in the Amazon S3 User Guide.
 TargetGrants []TargetGrant

+ // Amazon S3 key format for log objects.
+ TargetObjectKeyFormat *TargetObjectKeyFormat
+
 noSmithyDocumentSerde
}

 // A metadata key-value pair to store with an object.
 type MetadataEntry struct {

- // Name of the Object.
+ // Name of the object.
 Name *string

- // Value of the Object.
+ // Value of the object.
 Value *string

 noSmithyDocumentSerde
}

-// A container specifying replication metrics-related settings enabling replication
-// metrics and events.
+// A container specifying replication metrics-related settings enabling
+// replication metrics and events.
 type Metrics struct {

 // Specifies whether the replication metrics are enabled.
@@ -2405,9 +1927,9 @@ type Metrics struct {
 noSmithyDocumentSerde
}

-// A conjunction (logical AND) of predicates, which is used in evaluating a metrics
-// filter. The operator must have at least two predicates, and an object must match
-// all of the predicates in order for the filter to apply.
+// A conjunction (logical AND) of predicates, which is used in evaluating a
+// metrics filter. The operator must have at least two predicates, and an object
+// must match all of the predicates in order for the filter to apply.
 type MetricsAndOperator struct {

 // The access point ARN used when evaluating an AND predicate.
@@ -2426,11 +1948,12 @@ type MetricsAndOperator struct {
 // by the metrics configuration ID) from an Amazon S3 bucket. If you're updating an
 // existing metrics configuration, note that this is a full replacement of the
 // existing metrics configuration. If you don't include the elements you want to
-// keep, they are erased. For more information, see PutBucketMetricsConfiguration
-// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html).
+// keep, they are erased. For more information, see PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html)
+// .
 type MetricsConfiguration struct {

- // The ID used to identify the metrics configuration.
+ // The ID used to identify the metrics configuration. The ID has a 64 character
+ // limit and can only contain letters, numbers, periods, dashes, and underscores.
 //
 // This member is required.
 Id *string
@@ -2446,8 +1969,8 @@ type MetricsConfiguration struct {

 // Specifies a metrics configuration filter. The metrics configuration only
 // includes objects that meet the filter's criteria. A filter must be a prefix, an
 // object tag, an access point ARN, or a conjunction (MetricsAndOperator). For more
-// information, see PutBucketMetricsConfiguration
-// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html).
+// information, see PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html)
+// .
 //
 // The following types satisfy this interface:
 //
@@ -2468,9 +1991,9 @@ type MetricsFilterMemberAccessPointArn struct {

 func (*MetricsFilterMemberAccessPointArn) isMetricsFilter() {}

-// A conjunction (logical AND) of predicates, which is used in evaluating a metrics
-// filter. The operator must have at least two predicates, and an object must match
-// all of the predicates in order for the filter to apply.
+// A conjunction (logical AND) of predicates, which is used in evaluating a
+// metrics filter. The operator must have at least two predicates, and an object
+// must match all of the predicates in order for the filter to apply.
 type MetricsFilterMemberAnd struct {
 Value MetricsAndOperator
@@ -2513,9 +2036,13 @@ type MultipartUpload struct {
 Key *string

 // Specifies the owner of the object that is part of the multipart upload.
+ // Directory buckets - The bucket owner is returned as the object owner for all the
+ // objects.
 Owner *Owner

- // The class of storage used to store the object.
+ // The class of storage used to store the object. Directory buckets - Only the S3
+ // Express One Zone storage class is supported by directory buckets to store
+ // objects.
 StorageClass StorageClass

 // Upload ID that identifies the multipart upload.
@@ -2531,49 +2058,45 @@
 // the object's lifetime.
 type NoncurrentVersionExpiration struct {

- // Specifies how many noncurrent versions Amazon S3 will retain. If there are this
- // many more recent noncurrent versions, Amazon S3 will take the associated action.
- // For more information about noncurrent versions, see Lifecycle configuration
- // elements
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html)
+ // Specifies how many newer noncurrent versions must exist before Amazon S3 can
+ // perform the associated action on a given version. If there are this many more
+ // recent noncurrent versions, Amazon S3 will take the associated action. For more
+ // information about noncurrent versions, see Lifecycle configuration elements (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html)
 // in the Amazon S3 User Guide.
- NewerNoncurrentVersions int32
+ NewerNoncurrentVersions *int32

 // Specifies the number of days an object is noncurrent before Amazon S3 can
 // perform the associated action. The value must be a non-zero positive integer.
 // For information about the noncurrent days calculations, see How Amazon S3
- // Calculates When an Object Became Noncurrent
- // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations)
+ // Calculates When an Object Became Noncurrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations)
 // in the Amazon S3 User Guide.
- NoncurrentDays int32
+ NoncurrentDays *int32

 noSmithyDocumentSerde
}

 // Container for the transition rule that describes when noncurrent objects
-// transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER_IR,
-// GLACIER, or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or
-// versioning is suspended), you can set this action to request that Amazon S3
-// transition noncurrent object versions to the STANDARD_IA, ONEZONE_IA,
-// INTELLIGENT_TIERING, GLACIER_IR, GLACIER, or DEEP_ARCHIVE storage class at a
+// transition to the STANDARD_IA , ONEZONE_IA , INTELLIGENT_TIERING , GLACIER_IR ,
+// GLACIER , or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled
+// (or versioning is suspended), you can set this action to request that Amazon S3
+// transition noncurrent object versions to the STANDARD_IA , ONEZONE_IA ,
+// INTELLIGENT_TIERING , GLACIER_IR , GLACIER , or DEEP_ARCHIVE storage class at a
 // specific period in the object's lifetime.
 type NoncurrentVersionTransition struct {

- // Specifies how many noncurrent versions Amazon S3 will retain. If there are this
- // many more recent noncurrent versions, Amazon S3 will take the associated action.
- // For more information about noncurrent versions, see Lifecycle configuration
- // elements
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html)
+ // Specifies how many newer noncurrent versions must exist before Amazon S3 can
+ // perform the associated action on a given version. If there are this many more
+ // recent noncurrent versions, Amazon S3 will take the associated action. For more
+ // information about noncurrent versions, see Lifecycle configuration elements (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html)
 // in the Amazon S3 User Guide.
- NewerNoncurrentVersions int32
+ NewerNoncurrentVersions *int32

 // Specifies the number of days an object is noncurrent before Amazon S3 can
 // perform the associated action. For information about the noncurrent days
 // calculations, see How Amazon S3 Calculates How Long an Object Has Been
- // Noncurrent
- // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations)
+ // Noncurrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations)
 // in the Amazon S3 User Guide.
- NoncurrentDays int32
+ NoncurrentDays *int32

 // The class of storage used to store the object.
 StorageClass TransitionStorageClass
@@ -2581,8 +2104,8 @@
 noSmithyDocumentSerde
}

-// A container for specifying the notification configuration of the bucket. If this
-// element is empty, notifications are turned off for the bucket.
+// A container for specifying the notification configuration of the bucket. If
+// this element is empty, notifications are turned off for the bucket.
 type NotificationConfiguration struct {

 // Enables delivery of events to Amazon EventBridge.
@@ -2592,21 +2115,20 @@
 // them.
 LambdaFunctionConfigurations []LambdaFunctionConfiguration

- // The Amazon Simple Queue Service queues to publish messages to and the events for
- // which to publish messages.
+ // The Amazon Simple Queue Service queues to publish messages to and the events
+ // for which to publish messages.
 QueueConfigurations []QueueConfiguration

- // The topic to which notifications are sent and the events for which notifications
- // are generated.
+ // The topic to which notifications are sent and the events for which
+ // notifications are generated.
 TopicConfigurations []TopicConfiguration

 noSmithyDocumentSerde
}

 // Specifies object key name filtering rules. For information about key name
-// filtering, see Configuring Event Notifications
-// (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the
-// Amazon S3 User Guide.
+// filtering, see Configuring event notifications using object key name filtering (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html)
+// in the Amazon S3 User Guide.
 type NotificationConfigurationFilter struct {

 // A container for object key name prefix and suffix filtering rules.
@@ -2625,22 +2147,18 @@ type Object struct {

 // contents of an object, not its metadata. The ETag may or may not be an MD5
 // digest of the object data. Whether or not it is depends on how the object was
 // created and how it is encrypted as described below:
- //
- // * Objects created by the
- // PUT Object, POST Object, or Copy operation, or through the Amazon Web Services
- // Management Console, and are encrypted by SSE-S3 or plaintext, have ETags that
- // are an MD5 digest of their object data.
- //
- // * Objects created by the PUT Object,
- // POST Object, or Copy operation, or through the Amazon Web Services Management
- // Console, and are encrypted by SSE-C or SSE-KMS, have ETags that are not an MD5
- // digest of their object data.
- //
- // * If an object is created by either the Multipart
- // Upload or Part Copy operation, the ETag is not an MD5 digest, regardless of the
- // method of encryption. If an object is larger than 16 MB, the Amazon Web Services
- // Management Console will upload or copy that object as a Multipart Upload, and
- // therefore the ETag will not be an MD5 digest.
+ // - Objects created by the PUT Object, POST Object, or Copy operation, or
+ // through the Amazon Web Services Management Console, and are encrypted by SSE-S3
+ // or plaintext, have ETags that are an MD5 digest of their object data.
+ // - Objects created by the PUT Object, POST Object, or Copy operation, or
+ // through the Amazon Web Services Management Console, and are encrypted by SSE-C
+ // or SSE-KMS, have ETags that are not an MD5 digest of their object data.
+ // - If an object is created by either the Multipart Upload or Part Copy
+ // operation, the ETag is not an MD5 digest, regardless of the method of
+ // encryption. If an object is larger than 16 MB, the Amazon Web Services
+ // Management Console will upload or copy that object as a Multipart Upload, and
+ // therefore the ETag will not be an MD5 digest.
+ // Directory buckets - MD5 is not supported by directory buckets.
 ETag *string

 // The name that you assign to an object. You use the object key to retrieve the
@@ -2650,13 +2168,25 @@ type Object struct {

 // Creation date of the object.
 LastModified *time.Time

- // The owner of the object
+ // The owner of the object Directory buckets - The bucket owner is returned as the
+ // object owner.
 Owner *Owner

+ // Specifies the restoration status of an object. Objects in certain storage
+ // classes must be restored before they can be retrieved. For more information
+ // about these storage classes and how to work with archived objects, see Working
+ // with archived objects (https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html)
+ // in the Amazon S3 User Guide. This functionality is not supported for directory
+ // buckets. Only the S3 Express One Zone storage class is supported by directory
+ // buckets to store objects.
+ RestoreStatus *RestoreStatus
+
 // Size in bytes of the object
- Size int64
+ Size *int64

- // The class of storage used to store the object.
+ // The class of storage used to store the object. Directory buckets - Only the S3
+ // Express One Zone storage class is supported by directory buckets to store
+ // objects.
 StorageClass ObjectStorageClass

 noSmithyDocumentSerde
@@ -2667,13 +2197,14 @@ type ObjectIdentifier struct {

 // Key name of the object. Replacement must be made for object keys containing
 // special characters (such as carriage returns) when using XML requests. For more
- // information, see XML related object key constraints
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints).
+ // information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints)
+ // .
 //
 // This member is required.
 Key *string

- // VersionId for the specific version of the object to delete.
+ // Version ID for the specific version of the object to delete. This functionality
+ // is not supported for directory buckets.
 VersionId *string

 noSmithyDocumentSerde
@@ -2687,9 +2218,9 @@ type ObjectLockConfiguration struct {
 ObjectLockEnabled ObjectLockEnabled

 // Specifies the Object Lock rule for the specified object. Enable the this rule
- // when you apply ObjectLockConfiguration to a bucket. Bucket settings require both
- // a mode and a period. The period can be either Days or Years but you must select
- // one. You cannot specify Days and Years at the same time.
+ // when you apply ObjectLockConfiguration to a bucket. Bucket settings require
+ // both a mode and a period. The period can be either Days or Years but you must
+ // select one. You cannot specify Days and Years at the same time.
 Rule *ObjectLockRule

 noSmithyDocumentSerde
@@ -2734,41 +2265,43 @@ type ObjectPart struct {

 // This header can be used as a data integrity check to verify that the data
 // received is the same data that was originally sent. This header specifies the
 // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
- // Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
 // in the Amazon S3 User Guide.
 ChecksumCRC32 *string

 // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
- // present if it was uploaded with the object. With multipart uploads, this may not
- // be a checksum value of the object. For more information about how checksums are
- // calculated with multipart uploads, see Checking object integrity
- // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // present if it was uploaded with the object. When you use an API operation on an
+ // object that was uploaded using multipart uploads, this value may not be a direct
+ // checksum value of the full object. Instead, it's a calculation based on the
+ // checksum values of each individual part.
For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32C *string // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use the API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA1 *string // The base64-encoded, 256-bit SHA-256 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA256 *string - // The part number identifying the part. This value is a positive integer between 1 - // and 10,000. - PartNumber int32 + // The part number identifying the part. This value is a positive integer between + // 1 and 10,000. + PartNumber *int32 // The size of the uploaded part in bytes. - Size int64 + Size *int64 noSmithyDocumentSerde } @@ -2784,19 +2317,26 @@ type ObjectVersion struct { // Specifies whether the object is (true) or is not (false) the latest version of // an object. - IsLatest bool + IsLatest *bool // The object key. Key *string - // Date and time the object was last modified. + // Date and time when the object was last modified. LastModified *time.Time // Specifies the owner of the object. Owner *Owner + // Specifies the restoration status of an object. Objects in certain storage + // classes must be restored before they can be retrieved. For more information + // about these storage classes and how to work with archived objects, see Working + // with archived objects (https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html) + // in the Amazon S3 User Guide. + RestoreStatus *RestoreStatus + // Size in bytes of the object. 
- Size int64 + Size *int64 // The class of storage used to store the object. StorageClass ObjectVersionStorageClass @@ -2831,7 +2371,17 @@ type OutputSerialization struct { // Container for the owner's display name and ID. type Owner struct { - // Container for the display name of the owner. + // Container for the display name of the owner. This value is only supported in + // the following Amazon Web Services Regions: + // - US East (N. Virginia) + // - US West (N. California) + // - US West (Oregon) + // - Asia Pacific (Singapore) + // - Asia Pacific (Sydney) + // - Asia Pacific (Tokyo) + // - Europe (Ireland) + // - South America (São Paulo) + // This functionality is not supported for directory buckets. DisplayName *string // Container for the ID of the owner. @@ -2862,9 +2412,16 @@ type OwnershipControlsRule struct { // BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer // affect permissions. The bucket owner automatically owns and has full control // over every object in the bucket. The bucket only accepts PUT requests that don't - // specify an ACL or bucket owner full control ACLs, such as the - // bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed - // in the XML format. + // specify an ACL or specify bucket owner full control ACLs (such as the predefined + // bucket-owner-full-control canned ACL or a custom ACL in XML format that grants + // the same permissions). By default, ObjectOwnership is set to BucketOwnerEnforced + // and ACLs are disabled. We recommend keeping ACLs disabled, except in uncommon + // use cases where you must control access for each object individually. For more + // information about S3 Object Ownership, see Controlling ownership of objects and + // disabling ACLs for your bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) + // in the Amazon S3 User Guide. This functionality is not supported for directory + // buckets. Directory buckets use the bucket owner enforced setting for S3 Object + // Ownership. // // This member is required. ObjectOwnership ObjectOwnership @@ -2883,32 +2440,32 @@ type Part struct { // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the // base64-encoded, 32-bit CRC32 checksum of the object. For more information, see - // Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumCRC32 *string // The base64-encoded, 32-bit CRC32C checksum of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use an API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. 
For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumCRC32C *string // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be - // present if it was uploaded with the object. With multipart uploads, this may not - // be a checksum value of the object. For more information about how checksums are - // calculated with multipart uploads, see Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // present if it was uploaded with the object. When you use the API operation on an + // object that was uploaded using multipart uploads, this value may not be a direct + // checksum value of the full object. Instead, it's a calculation based on the + // checksum values of each individual part. For more information about how + // checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) // in the Amazon S3 User Guide. ChecksumSHA1 *string // This header can be used as a data integrity check to verify that the data // received is the same data that was originally sent. This header specifies the // base64-encoded, 256-bit SHA-256 digest of the object. For more information, see - // Checking object integrity - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) // in the Amazon S3 User Guide. ChecksumSHA256 *string @@ -2920,10 +2477,23 @@ type Part struct { // Part number identifying the part. This is a positive integer between 1 and // 10,000. - PartNumber int32 + PartNumber *int32 // Size in bytes of the uploaded part data. - Size int64 + Size *int64 + + noSmithyDocumentSerde +} + +// Amazon S3 keys for log objects are partitioned in the following format: +// [DestinationPrefix][SourceAccountId]/[SourceRegion]/[SourceBucket]/[YYYY]/[MM]/[DD]/[YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString] +// PartitionedPrefix defaults to EventTime delivery when server access logs are +// delivered. +type PartitionedPrefix struct { + + // Specifies the partition date source for the partitioned prefix. + // PartitionDateSource can be EventTime or DeliveryTime. + PartitionDateSource PartitionDateSource noSmithyDocumentSerde } @@ -2933,7 +2503,7 @@ type PolicyStatus struct { // The policy status for this bucket. TRUE indicates that this bucket is public. // FALSE indicates that the bucket is not public. - IsPublic bool + IsPublic *bool noSmithyDocumentSerde } @@ -2942,13 +2512,13 @@ type PolicyStatus struct { type Progress struct { // The current number of uncompressed object bytes processed. - BytesProcessed int64 + BytesProcessed *int64 // The current number of bytes of records payload data returned. - BytesReturned int64 + BytesReturned *int64 // The current number of object bytes scanned. - BytesScanned int64 + BytesScanned *int64 noSmithyDocumentSerde } @@ -2965,39 +2535,32 @@ type ProgressEvent struct { // The PublicAccessBlock configuration that you want to apply to this Amazon S3 // bucket. You can enable the configuration options in any combination. 
For more // information about when Amazon S3 considers a bucket or object public, see The -// Meaning of "Public" -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) +// Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) // in the Amazon S3 User Guide. type PublicAccessBlockConfiguration struct { // Specifies whether Amazon S3 should block public access control lists (ACLs) for // this bucket and objects in this bucket. Setting this element to TRUE causes the // following behavior: - // - // * PUT Bucket ACL and PUT Object ACL calls fail if the - // specified ACL is public. - // - // * PUT Object calls fail if the request includes a - // public ACL. - // - // * PUT Bucket calls fail if the request includes a public - // ACL. - // + // - PUT Bucket ACL and PUT Object ACL calls fail if the specified ACL is + // public. + // - PUT Object calls fail if the request includes a public ACL. + // - PUT Bucket calls fail if the request includes a public ACL. // Enabling this setting doesn't affect existing policies or ACLs. - BlockPublicAcls bool + BlockPublicAcls *bool - // Specifies whether Amazon S3 should block public bucket policies for this bucket. - // Setting this element to TRUE causes Amazon S3 to reject calls to PUT Bucket - // policy if the specified bucket policy allows public access. Enabling this + // Specifies whether Amazon S3 should block public bucket policies for this + // bucket. Setting this element to TRUE causes Amazon S3 to reject calls to PUT + // Bucket policy if the specified bucket policy allows public access. Enabling this // setting doesn't affect existing bucket policies. - BlockPublicPolicy bool + BlockPublicPolicy *bool // Specifies whether Amazon S3 should ignore public ACLs for this bucket and // objects in this bucket. Setting this element to TRUE causes Amazon S3 to ignore // all public ACLs on this bucket and objects in this bucket. Enabling this setting // doesn't affect the persistence of any existing ACLs and doesn't prevent new // public ACLs from being set. - IgnorePublicAcls bool + IgnorePublicAcls *bool // Specifies whether Amazon S3 should restrict public bucket policies for this // bucket. Setting this element to TRUE restricts access to this bucket to only @@ -3006,7 +2569,7 @@ type PublicAccessBlockConfiguration struct { // stored bucket policies, except that public and cross-account access within any // public bucket policy, including non-public delegation to specific accounts, is // blocked. - RestrictPublicBuckets bool + RestrictPublicBuckets *bool noSmithyDocumentSerde } @@ -3027,9 +2590,8 @@ type QueueConfiguration struct { QueueArn *string // Specifies object key name filtering rules. For information about key name - // filtering, see Configuring Event Notifications - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the - // Amazon S3 User Guide. + // filtering, see Configuring event notifications using object key name filtering (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html) + // in the Amazon S3 User Guide. Filter *NotificationConfigurationFilter // An optional unique identifier for configurations in a notification @@ -3048,8 +2610,8 @@ type RecordsEvent struct { noSmithyDocumentSerde } -// Specifies how requests are redirected. 
In the event of an error, you can specify -// a different error code to return. +// Specifies how requests are redirected. In the event of an error, you can +// specify a different error code to return. type Redirect struct { // The host name to use in the redirect request. @@ -3065,22 +2627,21 @@ type Redirect struct { // The object key prefix to use in the redirect request. For example, to redirect // requests for all pages with prefix docs/ (objects in the docs/ folder) to - // documents/, you can set a condition block with KeyPrefixEquals set to docs/ and - // in the Redirect set ReplaceKeyPrefixWith to /documents. Not required if one of - // the siblings is present. Can be present only if ReplaceKeyWith is not provided. - // Replacement must be made for object keys containing special characters (such as - // carriage returns) when using XML requests. For more information, see XML - // related object key constraints - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + // documents/ , you can set a condition block with KeyPrefixEquals set to docs/ + // and in the Redirect set ReplaceKeyPrefixWith to /documents . Not required if one + // of the siblings is present. Can be present only if ReplaceKeyWith is not + // provided. Replacement must be made for object keys containing special characters + // (such as carriage returns) when using XML requests. For more information, see + // XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) + // . ReplaceKeyPrefixWith *string // The specific object key to use in the redirect request. For example, redirect - // request to error.html. Not required if one of the siblings is present. Can be + // request to error.html . Not required if one of the siblings is present. Can be // present only if ReplaceKeyPrefixWith is not provided. Replacement must be made // for object keys containing special characters (such as carriage returns) when - // using XML requests. For more information, see XML related object key - // constraints - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + // using XML requests. For more information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) + // . ReplaceKeyWith *string noSmithyDocumentSerde @@ -3104,11 +2665,11 @@ type RedirectAllRequestsTo struct { // A filter that you can specify for selection for modifications on replicas. // Amazon S3 doesn't replicate replica modifications by default. In the latest -// version of replication configuration (when Filter is specified), you can specify -// this element and set the status to Enabled to replicate modifications on -// replicas. If you don't specify the Filter element, Amazon S3 assumes that the -// replication configuration is the earlier version, V1. In the earlier version, -// this element is not allowed. +// version of replication configuration (when Filter is specified), you can +// specify this element and set the status to Enabled to replicate modifications +// on replicas. If you don't specify the Filter element, Amazon S3 assumes that +// the replication configuration is the earlier version, V1. In the earlier +// version, this element is not allowed. type ReplicaModifications struct { // Specifies whether Amazon S3 replicates modifications on replicas. 
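A recurring change across these hunks is that scalar members move from value types to pointers (BlockPublicAcls bool becomes *bool, Priority int32 becomes *int32, and so on), so calling code needs the SDK's pointer helpers on the write side and nil-safe accessors on the read side. A minimal sketch of that adjustment using the v2 aws helper package; the field values are illustrative only:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	// Write side: pointer-typed members are built with aws.Bool, aws.Int32, etc.
	cfg := types.PublicAccessBlockConfiguration{
		BlockPublicAcls:       aws.Bool(true),
		BlockPublicPolicy:     aws.Bool(true),
		IgnorePublicAcls:      aws.Bool(true),
		RestrictPublicBuckets: aws.Bool(true),
	}

	// Read side: aws.ToBool (and aws.ToInt32, aws.ToInt64) treat a nil
	// pointer as the zero value, so checks stay one-liners after the change.
	fmt.Println("ACLs blocked:", aws.ToBool(cfg.BlockPublicAcls))
}
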
@@ -3125,9 +2686,8 @@ type ReplicationConfiguration struct { // The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role // that Amazon S3 assumes when replicating objects. For more information, see How - // to Set Up Replication - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html) in - // the Amazon S3 User Guide. + // to Set Up Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html) + // in the Amazon S3 User Guide. // // This member is required. Role *string @@ -3158,24 +2718,25 @@ type ReplicationRule struct { // Specifies whether Amazon S3 replicates delete markers. If you specify a Filter // in your replication configuration, you must also include a // DeleteMarkerReplication element. If your Filter includes a Tag element, the - // DeleteMarkerReplicationStatus must be set to Disabled, because Amazon S3 does + // DeleteMarkerReplication Status must be set to Disabled, because Amazon S3 does // not support replicating delete markers for tag-based rules. For an example - // configuration, see Basic Rule Configuration - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config). - // For more information about delete marker replication, see Basic Rule - // Configuration - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html). - // If you are using an earlier version of the replication configuration, Amazon S3 - // handles replication of delete markers differently. For more information, see - // Backward Compatibility - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations). + // configuration, see Basic Rule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config) + // . For more information about delete marker replication, see Basic Rule + // Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html) + // . If you are using an earlier version of the replication configuration, Amazon + // S3 handles replication of delete markers differently. For more information, see + // Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations) + // . DeleteMarkerReplication *DeleteMarkerReplication - // + // Optional configuration to replicate existing source bucket objects. For more + // information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication) + // in the Amazon S3 User Guide. ExistingObjectReplication *ExistingObjectReplication // A filter that identifies the subset of objects to which the replication rule - // applies. A Filter must specify exactly one Prefix, Tag, or an And child element. + // applies. A Filter must specify exactly one Prefix , Tag , or an And child + // element. Filter ReplicationRuleFilter // A unique identifier for the rule. The maximum value is 255 characters. @@ -3185,8 +2746,8 @@ type ReplicationRule struct { // rule applies. The maximum prefix length is 1,024 characters. To include all // objects in a bucket, specify an empty string. Replacement must be made for // object keys containing special characters (such as carriage returns) when using - // XML requests. 
For more information, see XML related object key constraints - // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + // XML requests. For more information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) + // . // // Deprecated: This member has been deprecated. Prefix *string @@ -3196,16 +2757,15 @@ type ReplicationRule struct { // according to all replication rules. However, if there are two or more rules with // the same destination bucket, then objects will be replicated according to the // rule with the highest priority. The higher the number, the higher the priority. - // For more information, see Replication - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) in the Amazon - // S3 User Guide. - Priority int32 - - // A container that describes additional filters for identifying the source objects - // that you want to replicate. You can choose to enable or disable the replication - // of these objects. Currently, Amazon S3 supports only the filter that you can - // specify for objects created with server-side encryption using a customer managed - // key stored in Amazon Web Services Key Management Service (SSE-KMS). + // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) + // in the Amazon S3 User Guide. + Priority *int32 + + // A container that describes additional filters for identifying the source + // objects that you want to replicate. You can choose to enable or disable the + // replication of these objects. Currently, Amazon S3 supports only the filter that + // you can specify for objects created with server-side encryption using a customer + // managed key stored in Amazon Web Services Key Management Service (SSE-KMS). SourceSelectionCriteria *SourceSelectionCriteria noSmithyDocumentSerde @@ -3214,12 +2774,10 @@ type ReplicationRule struct { // A container for specifying rule filters. The filters determine the subset of // objects to which the rule applies. This element is required only if you specify // more than one filter. For example: -// -// * If you specify both a Prefix and a Tag -// filter, wrap these filters in an And tag. -// -// * If you specify a filter based on -// multiple tags, wrap the Tag elements in an And tag. +// - If you specify both a Prefix and a Tag filter, wrap these filters in an And +// tag. +// - If you specify a filter based on multiple tags, wrap the Tag elements in an +// And tag. type ReplicationRuleAndOperator struct { // An object key name prefix that identifies the subset of objects to which the @@ -3233,7 +2791,8 @@ type ReplicationRuleAndOperator struct { } // A filter that identifies the subset of objects to which the replication rule -// applies. A Filter must specify exactly one Prefix, Tag, or an And child element. +// applies. A Filter must specify exactly one Prefix , Tag , or an And child +// element. // // The following types satisfy this interface: // @@ -3247,12 +2806,10 @@ type ReplicationRuleFilter interface { // A container for specifying rule filters. The filters determine the subset of // objects to which the rule applies. This element is required only if you specify // more than one filter. For example: -// -// * If you specify both a Prefix and a Tag -// filter, wrap these filters in an And tag. -// -// * If you specify a filter based on -// multiple tags, wrap the Tag elements in an And tag. 
+// - If you specify both a Prefix and a Tag filter, wrap these filters in an And +// tag. +// - If you specify a filter based on multiple tags, wrap the Tag elements in an +// And tag. type ReplicationRuleFilterMemberAnd struct { Value ReplicationRuleAndOperator @@ -3264,8 +2821,8 @@ func (*ReplicationRuleFilterMemberAnd) isReplicationRuleFilter() {} // An object key name prefix that identifies the subset of objects to which the // rule applies. Replacement must be made for object keys containing special // characters (such as carriage returns) when using XML requests. For more -// information, see XML related object key constraints -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). +// information, see XML related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints) +// . type ReplicationRuleFilterMemberPrefix struct { Value string @@ -3274,8 +2831,8 @@ type ReplicationRuleFilterMemberPrefix struct { func (*ReplicationRuleFilterMemberPrefix) isReplicationRuleFilter() {} -// A container for specifying a tag key and value. The rule applies only to objects -// that have the tag in their tag set. +// A container for specifying a tag key and value. The rule applies only to +// objects that have the tag in their tag set. type ReplicationRuleFilterMemberTag struct { Value Tag @@ -3284,9 +2841,10 @@ type ReplicationRuleFilterMemberTag struct { func (*ReplicationRuleFilterMemberTag) isReplicationRuleFilter() {} -// A container specifying S3 Replication Time Control (S3 RTC) related information, -// including whether S3 RTC is enabled and the time when all objects and operations -// on objects must be replicated. Must be specified together with a Metrics block. +// A container specifying S3 Replication Time Control (S3 RTC) related +// information, including whether S3 RTC is enabled and the time when all objects +// and operations on objects must be replicated. Must be specified together with a +// Metrics block. type ReplicationTime struct { // Specifies whether the replication time is enabled. @@ -3304,11 +2862,11 @@ type ReplicationTime struct { } // A container specifying the time value for S3 Replication Time Control (S3 RTC) -// and replication metrics EventThreshold. +// and replication metrics EventThreshold . type ReplicationTimeValue struct { // Contains an integer specifying time in minutes. Valid value: 15 - Minutes int32 + Minutes *int32 noSmithyDocumentSerde } @@ -3329,7 +2887,7 @@ type RequestProgress struct { // Specifies whether periodic QueryProgress frames should be sent. Valid values: // TRUE, FALSE. Default value: FALSE. - Enabled bool + Enabled *bool noSmithyDocumentSerde } @@ -3338,15 +2896,15 @@ type RequestProgress struct { type RestoreRequest struct { // Lifetime of the active copy in days. Do not use with restores that specify - // OutputLocation. The Days element is required for regular restores, and must not + // OutputLocation . The Days element is required for regular restores, and must not // be provided for select requests. - Days int32 + Days *int32 // The optional description for the job. Description *string // S3 Glacier related parameters pertaining to this job. Do not use with restores - // that specify OutputLocation. + // that specify OutputLocation . GlacierJobParameters *GlacierJobParameters // Describes the location where the restore job's output is stored. 
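The Member wrapper types in the preceding hunks follow the smithy union pattern: a consumer type-switches on the concrete member to find out which variant of the filter was set. A minimal sketch of that pattern for ReplicationRuleFilter, with illustrative print statements:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// describeFilter type-switches on the generated Member wrappers, covering
// the three variants the interface documentation lists.
func describeFilter(f types.ReplicationRuleFilter) {
	switch v := f.(type) {
	case *types.ReplicationRuleFilterMemberPrefix:
		fmt.Println("prefix filter:", v.Value)
	case *types.ReplicationRuleFilterMemberTag:
		fmt.Printf("tag filter: %s=%s\n", aws.ToString(v.Value.Key), aws.ToString(v.Value.Value))
	case *types.ReplicationRuleFilterMemberAnd:
		fmt.Printf("and filter: prefix=%q, %d tag(s)\n", aws.ToString(v.Value.Prefix), len(v.Value.Tags))
	default:
		fmt.Println("unknown filter variant")
	}
}

func main() {
	describeFilter(&types.ReplicationRuleFilterMemberPrefix{Value: "docs/"})
}
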
@@ -3364,9 +2922,35 @@ type RestoreRequest struct { noSmithyDocumentSerde } +// Specifies the restoration status of an object. Objects in certain storage +// classes must be restored before they can be retrieved. For more information +// about these storage classes and how to work with archived objects, see Working +// with archived objects (https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html) +// in the Amazon S3 User Guide. This functionality is not supported for directory +// buckets. Only the S3 Express One Zone storage class is supported by directory +// buckets to store objects. +type RestoreStatus struct { + + // Specifies whether the object is currently being restored. If the object + // restoration is in progress, the header returns the value TRUE . For example: + // x-amz-optional-object-attributes: IsRestoreInProgress="true" If the object + // restoration has completed, the header returns the value FALSE . For example: + // x-amz-optional-object-attributes: IsRestoreInProgress="false", + // RestoreExpiryDate="2012-12-21T00:00:00.000Z" If the object hasn't been restored, + // there is no header response. + IsRestoreInProgress *bool + + // Indicates when the restored copy will expire. This value is populated only if + // the object has already been restored. For example: + // x-amz-optional-object-attributes: IsRestoreInProgress="false", + // RestoreExpiryDate="2012-12-21T00:00:00.000Z" + RestoreExpiryDate *time.Time + + noSmithyDocumentSerde +} + // Specifies the redirect behavior and when a redirect is applied. For more -// information about routing rules, see Configuring advanced conditional redirects -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects) +// information about routing rules, see Configuring advanced conditional redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects) // in the Amazon S3 User Guide. type RoutingRule struct { @@ -3441,13 +3025,13 @@ type ScanRange struct { // non-negative integers. The default value is one less than the size of the object // being queried. If only the End parameter is supplied, it is interpreted to mean // scan the last N bytes of the file. For example, 50 means scan the last 50 bytes. - End int64 + End *int64 - // Specifies the start of the byte range. This parameter is optional. Valid values: - // non-negative integers. The default value is 0. If only start is supplied, it - // means scan from that point to the end of the file. For example, 50 means scan - // from byte 50 until the end of the file. - Start int64 + // Specifies the start of the byte range. This parameter is optional. Valid + // values: non-negative integers. The default value is 0. If only start is + // supplied, it means scan from that point to the end of the file. For example, 50 + // means scan from byte 50 until the end of the file. + Start *int64 noSmithyDocumentSerde } @@ -3542,8 +3126,7 @@ type SelectParameters struct { // at configuration, Amazon S3 automatically creates an Amazon Web Services KMS key // in your Amazon Web Services account the first time that you add an object // encrypted with SSE-KMS to a bucket. By default, Amazon S3 uses this KMS key for -// SSE-KMS. For more information, see PUT Bucket encryption -// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html) +// SSE-KMS. 
For more information, see PUT Bucket encryption (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html) // in the Amazon S3 API Reference. type ServerSideEncryptionByDefault struct { @@ -3554,23 +3137,18 @@ type ServerSideEncryptionByDefault struct { // Amazon Web Services Key Management Service (KMS) customer Amazon Web Services // KMS key ID to use for the default encryption. This parameter is allowed if and - // only if SSEAlgorithm is set to aws:kms. You can specify the key ID or the Amazon - // Resource Name (ARN) of the KMS key. However, if you are using encryption with - // cross-account or Amazon Web Services service operations you must use a fully - // qualified KMS key ARN. For more information, see Using encryption for - // cross-account operations - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy). - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: - // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // Amazon - // S3 only supports symmetric KMS keys and not asymmetric KMS keys. For more - // information, see Using symmetric and asymmetric keys - // (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // only if SSEAlgorithm is set to aws:kms . You can specify the key ID, key alias, + // or the Amazon Resource Name (ARN) of the KMS key. + // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // - Key ARN: + // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // - Key Alias: alias/alias-name + // If you use a key ID, you can run into a LogDestination undeliverable error when + // creating a VPC flow log. If you are using encryption with cross-account or + // Amazon Web Services service operations you must use a fully qualified KMS key + // ARN. For more information, see Using encryption for cross-account operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy) + // . Amazon S3 only supports symmetric encryption KMS keys. For more information, + // see Asymmetric keys in Amazon Web Services KMS (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) // in the Amazon Web Services Key Management Service Developer Guide. KMSMasterKeyID *string @@ -3601,28 +3179,70 @@ type ServerSideEncryptionRule struct { // encryption using KMS (SSE-KMS) for new objects in the bucket. Existing objects // are not affected. Setting the BucketKeyEnabled element to true causes Amazon S3 // to use an S3 Bucket Key. By default, S3 Bucket Key is not enabled. For more - // information, see Amazon S3 Bucket Keys - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) in the Amazon - // S3 User Guide. - BucketKeyEnabled bool + // information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) + // in the Amazon S3 User Guide. + BucketKeyEnabled *bool + + noSmithyDocumentSerde +} + +// The established temporary security credentials of the session. Directory +// buckets - These session credentials are only supported for the authentication +// and authorization of Zonal endpoint APIs on directory buckets. +type SessionCredentials struct { + + // A unique identifier that's associated with a secret access key. 
The access key + // ID and the secret access key are used together to sign programmatic Amazon Web + // Services requests cryptographically. + // + // This member is required. + AccessKeyId *string + + // Temporary security credentials expire after a specified interval. After + // temporary credentials expire, any calls that you make with those credentials + // will fail. So you must generate a new set of temporary credentials. Temporary + // credentials cannot be extended or refreshed beyond the original specified + // interval. + // + // This member is required. + Expiration *time.Time + + // A key that's used with the access key ID to cryptographically sign programmatic + // Amazon Web Services requests. Signing a request identifies the sender and + // prevents the request from being altered. + // + // This member is required. + SecretAccessKey *string + + // A part of the temporary security credentials. The session token is used to + // validate the temporary security credentials. + // + // This member is required. + SessionToken *string noSmithyDocumentSerde } -// A container that describes additional filters for identifying the source objects -// that you want to replicate. You can choose to enable or disable the replication -// of these objects. Currently, Amazon S3 supports only the filter that you can -// specify for objects created with server-side encryption using a customer managed -// key stored in Amazon Web Services Key Management Service (SSE-KMS). +// To use simple format for S3 keys for log objects, set SimplePrefix to an empty +// object. [DestinationPrefix][YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString] +type SimplePrefix struct { + noSmithyDocumentSerde +} + +// A container that describes additional filters for identifying the source +// objects that you want to replicate. You can choose to enable or disable the +// replication of these objects. Currently, Amazon S3 supports only the filter that +// you can specify for objects created with server-side encryption using a customer +// managed key stored in Amazon Web Services Key Management Service (SSE-KMS). type SourceSelectionCriteria struct { // A filter that you can specify for selections for modifications on replicas. // Amazon S3 doesn't replicate replica modifications by default. In the latest - // version of replication configuration (when Filter is specified), you can specify - // this element and set the status to Enabled to replicate modifications on - // replicas. If you don't specify the Filter element, Amazon S3 assumes that the - // replication configuration is the earlier version, V1. In the earlier version, - // this element is not allowed + // version of replication configuration (when Filter is specified), you can + // specify this element and set the status to Enabled to replicate modifications + // on replicas. If you don't specify the Filter element, Amazon S3 assumes that + // the replication configuration is the earlier version, V1. In the earlier + // version, this element is not allowed ReplicaModifications *ReplicaModifications // A container for filter information for the selection of Amazon S3 objects @@ -3636,9 +3256,8 @@ type SourceSelectionCriteria struct { // Specifies the use of SSE-KMS to encrypt delivered inventory reports. type SSEKMS struct { - // Specifies the ID of the Amazon Web Services Key Management Service (Amazon Web - // Services KMS) symmetric customer managed key to use for encrypting inventory - // reports. 
+ // Specifies the ID of the Key Management Service (KMS) symmetric encryption + // customer managed key to use for encrypting inventory reports. // // This member is required. KeyId *string @@ -3669,13 +3288,13 @@ type SSES3 struct { type Stats struct { // The total number of uncompressed object bytes processed. - BytesProcessed int64 + BytesProcessed *int64 // The total number of bytes of records payload data returned. - BytesReturned int64 + BytesReturned *int64 // The total number of object bytes scanned. - BytesScanned int64 + BytesScanned *int64 noSmithyDocumentSerde } @@ -3693,15 +3312,15 @@ type StatsEvent struct { // analyze the tradeoffs between different storage classes for an Amazon S3 bucket. type StorageClassAnalysis struct { - // Specifies how data related to the storage class analysis for an Amazon S3 bucket - // should be exported. + // Specifies how data related to the storage class analysis for an Amazon S3 + // bucket should be exported. DataExport *StorageClassAnalysisDataExport noSmithyDocumentSerde } -// Container for data related to the storage class analysis for an Amazon S3 bucket -// for export. +// Container for data related to the storage class analysis for an Amazon S3 +// bucket for export. type StorageClassAnalysisDataExport struct { // The place to store the data for an analysis. @@ -3709,7 +3328,7 @@ type StorageClassAnalysisDataExport struct { // This member is required. Destination *AnalyticsExportDestination - // The version of the output schema to use when exporting data. Must be V_1. + // The version of the output schema to use when exporting data. Must be V_1 . // // This member is required. OutputSchemaVersion StorageClassAnalysisSchemaVersion @@ -3746,8 +3365,7 @@ type Tagging struct { // Container for granting information. Buckets that use the bucket owner enforced // setting for Object Ownership don't support target grants. For more information, -// see Permissions server access log delivery -// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general) +// see Permissions server access log delivery (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general) // in the Amazon S3 User Guide. type TargetGrant struct { @@ -3760,14 +3378,27 @@ type TargetGrant struct { noSmithyDocumentSerde } +// Amazon S3 key format for log objects. Only one format, PartitionedPrefix or +// SimplePrefix, is allowed. +type TargetObjectKeyFormat struct { + + // Partitioned S3 key for log objects. + PartitionedPrefix *PartitionedPrefix + + // To use the simple format for S3 keys for log objects. To specify SimplePrefix + // format, set SimplePrefix to {}. + SimplePrefix *SimplePrefix + + noSmithyDocumentSerde +} + // The S3 Intelligent-Tiering storage class is designed to optimize storage costs // by automatically moving data to the most cost-effective storage access tier, // without additional operational overhead. type Tiering struct { // S3 Intelligent-Tiering access tier. See Storage class for automatically - // optimizing frequently and infrequently accessed objects - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) + // optimizing frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) // for a list of access tiers in the S3 Intelligent-Tiering storage class. // // This member is required. 
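The new TargetObjectKeyFormat type pairs with the SimplePrefix type above and the PartitionedPrefix type introduced earlier in this file; only one of the two may be set. A minimal sketch of constructing both forms, assuming the generated enum constant types.PartitionDateSourceEventTime (the diff names the PartitionDateSource type but not its constants):

package main

import (
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	// Partitioned log-object keys, dated by event time.
	partitioned := types.TargetObjectKeyFormat{
		PartitionedPrefix: &types.PartitionedPrefix{
			PartitionDateSource: types.PartitionDateSourceEventTime,
		},
	}

	// The flat, pre-existing layout: SimplePrefix is an empty marker struct,
	// matching the "set SimplePrefix to {}" wording in the comment above.
	simple := types.TargetObjectKeyFormat{
		SimplePrefix: &types.SimplePrefix{},
	}

	_, _ = partitioned, simple
}
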
@@ -3780,7 +3411,7 @@ type Tiering struct { // days). // // This member is required. - Days int32 + Days *int32 noSmithyDocumentSerde } @@ -3791,9 +3422,8 @@ type Tiering struct { type TopicConfiguration struct { // The Amazon S3 bucket event about which to send notifications. For more - // information, see Supported Event Types - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the - // Amazon S3 User Guide. + // information, see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon S3 User Guide. // // This member is required. Events []Event @@ -3805,9 +3435,8 @@ type TopicConfiguration struct { TopicArn *string // Specifies object key name filtering rules. For information about key name - // filtering, see Configuring Event Notifications - // (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) in the - // Amazon S3 User Guide. + // filtering, see Configuring event notifications using object key name filtering (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html) + // in the Amazon S3 User Guide. Filter *NotificationConfigurationFilter // An optional unique identifier for configurations in a notification @@ -3819,18 +3448,17 @@ type TopicConfiguration struct { // Specifies when an object transitions to a specified storage class. For more // information about Amazon S3 lifecycle configuration rules, see Transitioning -// Objects Using Amazon S3 Lifecycle -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html) +// Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html) // in the Amazon S3 User Guide. type Transition struct { - // Indicates when objects are transitioned to the specified storage class. The date - // value must be in ISO 8601 format. The time is always midnight UTC. + // Indicates when objects are transitioned to the specified storage class. The + // date value must be in ISO 8601 format. The time is always midnight UTC. Date *time.Time - // Indicates the number of days after creation when objects are transitioned to the - // specified storage class. The value must be a positive integer. - Days int32 + // Indicates the number of days after creation when objects are transitioned to + // the specified storage class. The value must be a positive integer. + Days *int32 // The storage class to which you want the object to transition. StorageClass TransitionStorageClass @@ -3838,9 +3466,8 @@ type Transition struct { noSmithyDocumentSerde } -// Describes the versioning state of an Amazon S3 bucket. For more information, see -// PUT Bucket versioning -// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html) +// Describes the versioning state of an Amazon S3 bucket. For more information, +// see PUT Bucket versioning (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html) // in the Amazon S3 API Reference. 
type VersioningConfiguration struct { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/validators.go index ccd845a7..e954b302 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/validators.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/validators.go @@ -110,6 +110,26 @@ func (m *validateOpCreateMultipartUpload) HandleInitialize(ctx context.Context, return next.HandleInitialize(ctx, in) } +type validateOpCreateSession struct { +} + +func (*validateOpCreateSession) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpCreateSession) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*CreateSessionInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpCreateSessionInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpDeleteBucketAnalyticsConfiguration struct { } @@ -1870,6 +1890,10 @@ func addOpCreateMultipartUploadValidationMiddleware(stack *middleware.Stack) err return stack.Initialize.Add(&validateOpCreateMultipartUpload{}, middleware.After) } +func addOpCreateSessionValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpCreateSession{}, middleware.After) +} + func addOpDeleteBucketAnalyticsConfigurationValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpDeleteBucketAnalyticsConfiguration{}, middleware.After) } @@ -2699,6 +2723,9 @@ func validateInventoryConfiguration(v *types.InventoryConfiguration) error { invalidParams.AddNested("Destination", err.(smithy.InvalidParamsError)) } } + if v.IsEnabled == nil { + invalidParams.Add(smithy.NewErrParamRequired("IsEnabled")) + } if v.Filter != nil { if err := validateInventoryFilter(v.Filter); err != nil { invalidParams.AddNested("Filter", err.(smithy.InvalidParamsError)) @@ -3735,6 +3762,9 @@ func validateTiering(v *types.Tiering) error { return nil } invalidParams := smithy.InvalidParamsError{Context: "Tiering"} + if v.Days == nil { + invalidParams.Add(smithy.NewErrParamRequired("Days")) + } if len(v.AccessTier) == 0 { invalidParams.Add(smithy.NewErrParamRequired("AccessTier")) } @@ -3925,6 +3955,21 @@ func validateOpCreateMultipartUploadInput(v *CreateMultipartUploadInput) error { } } +func validateOpCreateSessionInput(v *CreateSessionInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "CreateSessionInput"} + if v.Bucket == nil { + invalidParams.Add(smithy.NewErrParamRequired("Bucket")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpDeleteBucketAnalyticsConfigurationInput(v *DeleteBucketAnalyticsConfigurationInput) error { if v == nil { return nil @@ -5444,6 +5489,9 @@ func validateOpUploadPartCopyInput(v *UploadPartCopyInput) error { if v.Key == nil { invalidParams.Add(smithy.NewErrParamRequired("Key")) } + if v.PartNumber == nil { + invalidParams.Add(smithy.NewErrParamRequired("PartNumber")) + } if v.UploadId == nil { invalidParams.Add(smithy.NewErrParamRequired("UploadId")) } @@ -5465,6 +5513,9 @@ func validateOpUploadPartInput(v *UploadPartInput) error { if v.Key == nil { invalidParams.Add(smithy.NewErrParamRequired("Key")) } + if v.PartNumber 
== nil { + invalidParams.Add(smithy.NewErrParamRequired("PartNumber")) + } if v.UploadId == nil { invalidParams.Add(smithy.NewErrParamRequired("UploadId")) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/auth/bearer/token.go b/vendor/github.com/aws/aws-sdk-go/aws/auth/bearer/token.go new file mode 100644 index 00000000..dd950a28 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/auth/bearer/token.go @@ -0,0 +1,50 @@ +package bearer + +import ( + "github.com/aws/aws-sdk-go/aws" + "time" +) + +// Token provides a type wrapping a bearer token and expiration metadata. +type Token struct { + Value string + + CanExpire bool + Expires time.Time +} + +// Expired returns if the token's Expires time is before or equal to the time +// provided. If CanExpire is false, Expired will always return false. +func (t Token) Expired(now time.Time) bool { + if !t.CanExpire { + return false + } + now = now.Round(0) + return now.Equal(t.Expires) || now.After(t.Expires) +} + +// TokenProvider provides interface for retrieving bearer tokens. +type TokenProvider interface { + RetrieveBearerToken(aws.Context) (Token, error) +} + +// TokenProviderFunc provides a helper utility to wrap a function as a type +// that implements the TokenProvider interface. +type TokenProviderFunc func(aws.Context) (Token, error) + +// RetrieveBearerToken calls the wrapped function, returning the Token or +// error. +func (fn TokenProviderFunc) RetrieveBearerToken(ctx aws.Context) (Token, error) { + return fn(ctx) +} + +// StaticTokenProvider provides a utility for wrapping a static bearer token +// value within an implementation of a token provider. +type StaticTokenProvider struct { + Token Token +} + +// RetrieveBearerToken returns the static token specified. +func (s StaticTokenProvider) RetrieveBearerToken(aws.Context) (Token, error) { + return s.Token, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go index 74f35ccf..b147f103 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go @@ -10,12 +10,13 @@ import ( // A Config provides configuration to a service client instance. type Config struct { - Config *aws.Config - Handlers request.Handlers - PartitionID string - Endpoint string - SigningRegion string - SigningName string + Config *aws.Config + Handlers request.Handlers + PartitionID string + Endpoint string + SigningRegion string + SigningName string + ResolvedRegion string // States that the signing name did not come from a modeled source but // was derived based on other data. Used by service client constructors diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go index 0c48f72e..a7530ebb 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go @@ -2,13 +2,14 @@ package metadata // ClientInfo wraps immutable data from the client.Client structure. 
type ClientInfo struct { - ServiceName string - ServiceID string - APIVersion string - PartitionID string - Endpoint string - SigningName string - SigningRegion string - JSONVersion string - TargetPrefix string + ServiceName string + ServiceID string + APIVersion string + PartitionID string + Endpoint string + SigningName string + SigningRegion string + JSONVersion string + TargetPrefix string + ResolvedRegion string } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go index 39fa6d5f..c483e0cb 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -20,16 +20,16 @@ type RequestRetryer interface{} // A Config provides service configuration for service clients. By default, // all clients will use the defaults.DefaultConfig structure. // -// // Create Session with MaxRetries configuration to be shared by multiple -// // service clients. -// sess := session.Must(session.NewSession(&aws.Config{ -// MaxRetries: aws.Int(3), -// })) +// // Create Session with MaxRetries configuration to be shared by multiple +// // service clients. +// sess := session.Must(session.NewSession(&aws.Config{ +// MaxRetries: aws.Int(3), +// })) // -// // Create S3 service client with a specific Region. -// svc := s3.New(sess, &aws.Config{ -// Region: aws.String("us-west-2"), -// }) +// // Create S3 service client with a specific Region. +// svc := s3.New(sess, &aws.Config{ +// Region: aws.String("us-west-2"), +// }) type Config struct { // Enables verbose error printing of all credential chain errors. // Should be used when wanting to see all errors while attempting to @@ -170,6 +170,9 @@ type Config struct { // // For example S3's X-Amz-Meta prefixed header will be unmarshaled to lower case // Metadata member's map keys. The value of the header in the map is unaffected. + // + // The AWS SDK for Go v2, uses lower case header maps by default. The v1 + // SDK provides this opt-in for this option, for backwards compatibility. LowerCaseHeaderMaps *bool // Set this to `true` to disable the EC2Metadata client from overriding the @@ -189,6 +192,23 @@ type Config struct { // EC2MetadataDisableTimeoutOverride *bool + // Set this to `false` to disable EC2Metadata client from falling back to IMDSv1. + // By default, EC2 role credentials will fall back to IMDSv1 as needed for backwards compatibility. + // You can disable this behavior by explicitly setting this flag to `false`. When false, the EC2Metadata + // client will return any errors encountered from attempting to fetch a token instead of silently + // using the insecure data flow of IMDSv1. + // + // Example: + // sess := session.Must(session.NewSession(aws.NewConfig() + // .WithEC2MetadataEnableFallback(false))) + // + // svc := s3.New(sess) + // + // See [configuring IMDS] for more information. + // + // [configuring IMDS]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html + EC2MetadataEnableFallback *bool + // Instructs the endpoint to be generated for a service client to // be the dual stack endpoint. The dual stack endpoint will support // both IPv4 and IPv6 addressing. @@ -208,8 +228,19 @@ type Config struct { // svc := s3.New(sess, &aws.Config{ // UseDualStack: aws.Bool(true), // }) + // + // Deprecated: This option will continue to function for S3 and S3 Control for backwards compatibility. 
+ // UseDualStackEndpoint should be used to enable usage of a service's dual-stack endpoint for all service clients + // moving forward. For S3 and S3 Control, when UseDualStackEndpoint is set to a non-zero value it takes higher + // precedence than this option. UseDualStack *bool + // Sets the resolver to resolve a dual-stack endpoint for the service. + UseDualStackEndpoint endpoints.DualStackEndpointState + + // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. + UseFIPSEndpoint endpoints.FIPSEndpointState + // SleepDelay is an override for the func the SDK will call when sleeping // during the lifecycle of a request. Specifically this will be used for // request delays. This value should only be used for testing. To adjust @@ -269,16 +300,16 @@ type Config struct { // NewConfig returns a new Config pointer that can be chained with builder // methods to set multiple configuration values inline without using pointers. // -// // Create Session with MaxRetries configuration to be shared by multiple -// // service clients. -// sess := session.Must(session.NewSession(aws.NewConfig(). -// WithMaxRetries(3), -// )) +// // Create Session with MaxRetries configuration to be shared by multiple +// // service clients. +// sess := session.Must(session.NewSession(aws.NewConfig(). +// WithMaxRetries(3), +// )) // -// // Create S3 service client with a specific Region. -// svc := s3.New(sess, aws.NewConfig(). -// WithRegion("us-west-2"), -// ) +// // Create S3 service client with a specific Region. +// svc := s3.New(sess, aws.NewConfig(). +// WithRegion("us-west-2"), +// ) func NewConfig() *Config { return &Config{} } @@ -411,6 +442,17 @@ func (c *Config) WithUseDualStack(enable bool) *Config { return c } +// WithUseFIPSEndpoint sets a config UseFIPSEndpoint value returning a Config +// pointer for chaining. +func (c *Config) WithUseFIPSEndpoint(enable bool) *Config { + if enable { + c.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled + } else { + c.UseFIPSEndpoint = endpoints.FIPSEndpointStateDisabled + } + return c +} + // WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value // returning a Config pointer for chaining. func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { @@ -418,6 +460,13 @@ func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { return c } +// WithEC2MetadataEnableFallback sets a config EC2MetadataEnableFallback value +// returning a Config pointer for chaining. +func (c *Config) WithEC2MetadataEnableFallback(v bool) *Config { + c.EC2MetadataEnableFallback = &v + return c +} + // WithSleepDelay overrides the function used to sleep while waiting for the // next retry. Defaults to time.Sleep.
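The new endpoint and metadata knobs compose with the existing builder helpers. A minimal sketch of opting a session into FIPS endpoints, dual-stack resolution, and IMDSv2-only metadata access; the region is a placeholder, and UseDualStackEndpoint is set directly because this patch adds no With* helper for it:

    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/endpoints"
    	"github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
    	cfg := aws.NewConfig().
    		WithRegion("us-west-2").             // placeholder region
    		WithUseFIPSEndpoint(true).           // resolve {service}-fips.{region} endpoints
    		WithEC2MetadataEnableFallback(false) // surface token errors instead of IMDSv1 fallback

    	// No With* helper is added for dual-stack here; set the field directly.
    	cfg.UseDualStackEndpoint = endpoints.DualStackEndpointStateEnabled

    	sess := session.Must(session.NewSession(cfg))
    	fmt.Println(aws.StringValue(sess.Config.Region))
    }

Note that with the variant-based endpoint model below, combining FIPS and dual-stack only resolves for services that actually model both variants; other services will return a resolution error rather than guess a hostname.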
func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { @@ -554,10 +603,18 @@ func mergeInConfig(dst *Config, other *Config) { dst.UseDualStack = other.UseDualStack } + if other.UseDualStackEndpoint != endpoints.DualStackEndpointStateUnset { + dst.UseDualStackEndpoint = other.UseDualStackEndpoint + } + if other.EC2MetadataDisableTimeoutOverride != nil { dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride } + if other.EC2MetadataEnableFallback != nil { + dst.EC2MetadataEnableFallback = other.EC2MetadataEnableFallback + } + if other.SleepDelay != nil { dst.SleepDelay = other.SleepDelay } @@ -589,6 +646,14 @@ func mergeInConfig(dst *Config, other *Config) { if other.LowerCaseHeaderMaps != nil { dst.LowerCaseHeaderMaps = other.LowerCaseHeaderMaps } + + if other.UseDualStackEndpoint != endpoints.DualStackEndpointStateUnset { + dst.UseDualStackEndpoint = other.UseDualStackEndpoint + } + + if other.UseFIPSEndpoint != endpoints.FIPSEndpointStateUnset { + dst.UseFIPSEndpoint = other.UseFIPSEndpoint + } } // Copy will return a shallow copy of the Config object. If any additional diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/awsinternal.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/awsinternal.go new file mode 100644 index 00000000..140242dd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/awsinternal.go @@ -0,0 +1,4 @@ +// DO NOT EDIT +package corehandlers + +const isAwsInternal = "" \ No newline at end of file diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go index ab69c7a6..ac842c55 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go @@ -35,3 +35,13 @@ var AddHostExecEnvUserAgentHander = request.NamedHandler{ request.AddToUserAgent(r, execEnvUAKey+"/"+v) }, } + +var AddAwsInternal = request.NamedHandler{ + Name: "core.AddAwsInternal", + Fn: func(r *request.Request) { + if len(isAwsInternal) == 0 { + return + } + request.AddToUserAgent(r, isAwsInternal) + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go index 785f30d8..329f788a 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go @@ -31,6 +31,8 @@ package endpointcreds import ( "encoding/json" + "fmt" + "strings" "time" "github.com/aws/aws-sdk-go/aws" @@ -69,7 +71,37 @@ type Provider struct { // Optional authorization token value if set will be used as the value of // the Authorization header of the endpoint credential request. 
+ // + // When constructed from environment, the provider will use the value of + // AWS_CONTAINER_AUTHORIZATION_TOKEN environment variable as the token + // + // Will be overridden if AuthorizationTokenProvider is configured AuthorizationToken string + + // Optional auth provider func to dynamically load the auth token from a file + // every time a credential is retrieved + // + // When constructed from environment, the provider will read and use the content + // of the file pointed to by AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE environment variable + // as the auth token every time credentials are retrieved + // + // Will override AuthorizationToken if configured + AuthorizationTokenProvider AuthTokenProvider +} + +// AuthTokenProvider defines an interface to dynamically load a value to be passed +// for the Authorization header of a credentials request. +type AuthTokenProvider interface { + GetToken() (string, error) +} + +// TokenProviderFunc is a func type implementing AuthTokenProvider interface +// and enables customizing token provider behavior +type TokenProviderFunc func() (string, error) + +// GetToken func retrieves auth token according to TokenProviderFunc implementation +func (p TokenProviderFunc) GetToken() (string, error) { + return p() } // NewProviderClient returns a credentials Provider for retrieving AWS credentials @@ -164,7 +196,20 @@ func (p *Provider) getCredentials(ctx aws.Context) (*getCredentialsOutput, error req := p.Client.NewRequest(op, nil, out) req.SetContext(ctx) req.HTTPRequest.Header.Set("Accept", "application/json") - if authToken := p.AuthorizationToken; len(authToken) != 0 { + + authToken := p.AuthorizationToken + var err error + if p.AuthorizationTokenProvider != nil { + authToken, err = p.AuthorizationTokenProvider.GetToken() + if err != nil { + return nil, fmt.Errorf("get authorization token: %v", err) + } + } + + if strings.ContainsAny(authToken, "\r\n") { + return nil, fmt.Errorf("authorization token contains invalid newline sequence") + } + if len(authToken) != 0 { req.HTTPRequest.Header.Set("Authorization", authToken) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go index e6248360..18694f07 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go @@ -226,12 +226,24 @@ func NewCredentialsCommand(command *exec.Cmd, options ...func(*ProcessProvider)) return credentials.NewCredentials(p) } -type credentialProcessResponse struct { - Version int - AccessKeyID string `json:"AccessKeyId"` +// A CredentialProcessResponse is the AWS credentials format that must be +// returned when executing an external credential_process. +type CredentialProcessResponse struct { + // As of this writing, the Version key must be set to 1. This might + // increment over time as the structure evolves. + Version int + + // The access key ID that identifies the temporary security credentials. + AccessKeyID string `json:"AccessKeyId"` + + // The secret access key that can be used to sign requests. SecretAccessKey string - SessionToken string - Expiration *time.Time + + // The token that users must pass to the service API to use the temporary credentials. + SessionToken string + + // The date on which the current credentials expire. + Expiration *time.Time } // Retrieve executes the 'credential_process' and returns the credentials.
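Since CredentialProcessResponse is now exported and documents the wire shape, a hypothetical external credential_process can be sketched as a tiny Go program that prints that JSON to stdout; all credential values below are placeholders:

    package main

    import (
    	"encoding/json"
    	"os"
    	"time"
    )

    func main() {
    	// Shape matches CredentialProcessResponse: Version must be 1, and
    	// Expiration is an RFC3339 timestamp parsed into a *time.Time.
    	out := map[string]interface{}{
    		"Version":         1,
    		"AccessKeyId":     "EXAMPLEKEYID",
    		"SecretAccessKey": "EXAMPLESECRET",
    		"SessionToken":    "EXAMPLETOKEN",
    		"Expiration":      time.Now().Add(15 * time.Minute).UTC().Format(time.RFC3339),
    	}
    	if err := json.NewEncoder(os.Stdout).Encode(out); err != nil {
    		os.Exit(1)
    	}
    }

Pointing a profile at such a helper (credential_process = /path/to/helper in ~/.aws/config) is what causes the SDK to invoke it and unmarshal its output into this type.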
@@ -242,7 +254,7 @@ func (p *ProcessProvider) Retrieve() (credentials.Value, error) { } // Serialize and validate response - resp := &credentialProcessResponse{} + resp := &CredentialProcessResponse{} if err = json.Unmarshal(out, resp); err != nil { return credentials.Value{ProviderName: ProviderName}, awserr.New( ErrCodeProcessProviderParse, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go index 6eda2a55..4138e725 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go @@ -4,13 +4,13 @@ import ( "crypto/sha1" "encoding/hex" "encoding/json" - "fmt" "io/ioutil" "path/filepath" "strings" "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/auth/bearer" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/credentials" @@ -55,6 +55,19 @@ type Provider struct { // The URL that points to the organization's AWS Single Sign-On (AWS SSO) user portal. StartURL string + + // The filepath the cached token will be retrieved from. If unset Provider will + // use the startURL to determine the filepath at. + // + // ~/.aws/sso/cache/<sha1-hex-of-startURL>.json + // + // If custom cached token filepath is used, the Provider's startUrl + // parameter will be ignored. + CachedTokenFilepath string + + // Used by the SSOCredentialProvider if a token configuration + // profile is used in the shared config + TokenProvider bearer.TokenProvider } // NewCredentials returns a new AWS Single Sign-On (AWS SSO) credential provider. The ConfigProvider is expected to be configured @@ -89,13 +102,31 @@ func (p *Provider) Retrieve() (credentials.Value, error) { // RetrieveWithContext retrieves temporary AWS credentials from the configured Amazon Single Sign-On (AWS SSO) user portal // by exchanging the accessToken present in ~/.aws/sso/cache.
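A minimal sketch of wiring the new CachedTokenFilepath override through the package's existing NewCredentials constructor; the account ID, role name, start URL, and cache path are placeholders:

    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws/credentials/ssocreds"
    	"github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
    	sess := session.Must(session.NewSession())

    	creds := ssocreds.NewCredentials(
    		sess, "123456789012", "ReadOnlyRole",
    		"https://example.awsapps.com/start",
    		func(p *ssocreds.Provider) {
    			// With an explicit cache file set, the start URL is no
    			// longer used to derive the cache path.
    			p.CachedTokenFilepath = "/tmp/my-sso-token.json"
    		},
    	)

    	if v, err := creds.Get(); err == nil {
    		fmt.Println("retrieved via", v.ProviderName)
    	}
    }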
func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { - tokenFile, err := loadTokenFile(p.StartURL) - if err != nil { - return credentials.Value{}, err + var accessToken *string + if p.TokenProvider != nil { + token, err := p.TokenProvider.RetrieveBearerToken(ctx) + if err != nil { + return credentials.Value{}, err + } + accessToken = &token.Value + } else { + if p.CachedTokenFilepath == "" { + cachedTokenFilePath, err := getCachedFilePath(p.StartURL) + if err != nil { + return credentials.Value{}, err + } + p.CachedTokenFilepath = cachedTokenFilePath + } + + tokenFile, err := loadTokenFile(p.CachedTokenFilepath) + if err != nil { + return credentials.Value{}, err + } + accessToken = &tokenFile.AccessToken } output, err := p.Client.GetRoleCredentialsWithContext(ctx, &sso.GetRoleCredentialsInput{ - AccessToken: &tokenFile.AccessToken, + AccessToken: accessToken, AccountId: &p.AccountID, RoleName: &p.RoleName, }) @@ -114,32 +145,13 @@ func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Val }, nil } -func getCacheFileName(url string) (string, error) { +func getCachedFilePath(startUrl string) (string, error) { hash := sha1.New() - _, err := hash.Write([]byte(url)) + _, err := hash.Write([]byte(startUrl)) if err != nil { return "", err } - return strings.ToLower(hex.EncodeToString(hash.Sum(nil))) + ".json", nil -} - -type rfc3339 time.Time - -func (r *rfc3339) UnmarshalJSON(bytes []byte) error { - var value string - - if err := json.Unmarshal(bytes, &value); err != nil { - return err - } - - parse, err := time.Parse(time.RFC3339, value) - if err != nil { - return fmt.Errorf("expected RFC3339 timestamp: %v", err) - } - - *r = rfc3339(parse) - - return nil + return filepath.Join(defaultCacheLocation(), strings.ToLower(hex.EncodeToString(hash.Sum(nil)))+".json"), nil } type token struct { @@ -153,13 +165,8 @@ func (t token) Expired() bool { return nowTime().Round(0).After(time.Time(t.ExpiresAt)) } -func loadTokenFile(startURL string) (t token, err error) { - key, err := getCacheFileName(startURL) - if err != nil { - return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err) - } - - fileBytes, err := ioutil.ReadFile(filepath.Join(defaultCacheLocation(), key)) +func loadTokenFile(cachedTokenPath string) (t token, err error) { + fileBytes, err := ioutil.ReadFile(cachedTokenPath) if err != nil { return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/sso_cached_token.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/sso_cached_token.go new file mode 100644 index 00000000..f6fa8845 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/sso_cached_token.go @@ -0,0 +1,237 @@ +package ssocreds + +import ( + "crypto/sha1" + "encoding/hex" + "encoding/json" + "fmt" + "github.com/aws/aws-sdk-go/internal/shareddefaults" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "time" +) + +var resolvedOsUserHomeDir = shareddefaults.UserHomeDir + +// StandardCachedTokenFilepath returns the filepath for the cached SSO token file, or +// an error if unable to derive the path. The key will be used to compute a SHA1 +// value that is hex encoded.
+// +// Derives the filepath using the Key as: +// +// ~/.aws/sso/cache/<sha1-hex-of-key>.json +func StandardCachedTokenFilepath(key string) (string, error) { + homeDir := resolvedOsUserHomeDir() + if len(homeDir) == 0 { + return "", fmt.Errorf("unable to get USER's home directory for cached token") + } + hash := sha1.New() + if _, err := hash.Write([]byte(key)); err != nil { + return "", fmt.Errorf("unable to compute cached token filepath key SHA1 hash, %v", err) + } + + cacheFilename := strings.ToLower(hex.EncodeToString(hash.Sum(nil))) + ".json" + + return filepath.Join(homeDir, ".aws", "sso", "cache", cacheFilename), nil +} + +type tokenKnownFields struct { + AccessToken string `json:"accessToken,omitempty"` + ExpiresAt *rfc3339 `json:"expiresAt,omitempty"` + + RefreshToken string `json:"refreshToken,omitempty"` + ClientID string `json:"clientId,omitempty"` + ClientSecret string `json:"clientSecret,omitempty"` +} + +type cachedToken struct { + tokenKnownFields + UnknownFields map[string]interface{} `json:"-"` +} + +// MarshalJSON provides custom marshalling because the standard library Go marshaller ignores unknown/unspecified fields +// when marshalling from a struct: https://pkg.go.dev/encoding/json#Marshal +// This function adds some extra validation to the known fields and captures unknown fields. +func (t cachedToken) MarshalJSON() ([]byte, error) { + fields := map[string]interface{}{} + + setTokenFieldString(fields, "accessToken", t.AccessToken) + setTokenFieldRFC3339(fields, "expiresAt", t.ExpiresAt) + + setTokenFieldString(fields, "refreshToken", t.RefreshToken) + setTokenFieldString(fields, "clientId", t.ClientID) + setTokenFieldString(fields, "clientSecret", t.ClientSecret) + + for k, v := range t.UnknownFields { + if _, ok := fields[k]; ok { + return nil, fmt.Errorf("unknown token field %v, duplicates known field", k) + } + fields[k] = v + } + + return json.Marshal(fields) +} + +func setTokenFieldString(fields map[string]interface{}, key, value string) { + if value == "" { + return + } + fields[key] = value +} +func setTokenFieldRFC3339(fields map[string]interface{}, key string, value *rfc3339) { + if value == nil { + return + } + fields[key] = value +} + +// UnmarshalJSON provides custom unmarshalling because the standard library Go unmarshaller ignores unknown/unspecified +// fields when unmarshalling from a struct: https://pkg.go.dev/encoding/json#Unmarshal +// This function adds some extra validation to the known fields and captures unknown fields.
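The marshal/unmarshal pair above implements a round trip that preserves JSON keys the struct does not model, so rewriting the cache file cannot drop fields written by other tools. The same pattern in isolation, with hypothetical type and field names (not SDK API):

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    type record struct {
    	Name    string
    	Unknown map[string]interface{}
    }

    func (r *record) UnmarshalJSON(b []byte) error {
    	var fields map[string]interface{}
    	if err := json.Unmarshal(b, &fields); err != nil {
    		return err
    	}
    	r.Unknown = map[string]interface{}{}
    	for k, v := range fields {
    		if k == "name" {
    			s, ok := v.(string)
    			if !ok {
    				return fmt.Errorf("expect string for name, got %T", v)
    			}
    			r.Name = s
    			continue
    		}
    		r.Unknown[k] = v // keep everything we do not model
    	}
    	return nil
    }

    func (r record) MarshalJSON() ([]byte, error) {
    	fields := map[string]interface{}{"name": r.Name}
    	for k, v := range r.Unknown {
    		if _, ok := fields[k]; ok {
    			return nil, fmt.Errorf("unknown field %v duplicates known field", k)
    		}
    		fields[k] = v
    	}
    	return json.Marshal(fields)
    }

    func main() {
    	var r record
    	_ = json.Unmarshal([]byte(`{"name":"a","extra":42}`), &r)
    	out, _ := json.Marshal(r)
    	fmt.Println(string(out)) // {"extra":42,"name":"a"}: "extra" survives the round trip
    }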
+func (t *cachedToken) UnmarshalJSON(b []byte) error { + var fields map[string]interface{} + if err := json.Unmarshal(b, &fields); err != nil { + return err + } + + t.UnknownFields = map[string]interface{}{} + + for k, v := range fields { + var err error + switch k { + case "accessToken": + err = getTokenFieldString(v, &t.AccessToken) + case "expiresAt": + err = getTokenFieldRFC3339(v, &t.ExpiresAt) + case "refreshToken": + err = getTokenFieldString(v, &t.RefreshToken) + case "clientId": + err = getTokenFieldString(v, &t.ClientID) + case "clientSecret": + err = getTokenFieldString(v, &t.ClientSecret) + default: + t.UnknownFields[k] = v + } + + if err != nil { + return fmt.Errorf("field %q, %v", k, err) + } + } + + return nil +} + +func getTokenFieldString(v interface{}, value *string) error { + var ok bool + *value, ok = v.(string) + if !ok { + return fmt.Errorf("expect value to be string, got %T", v) + } + return nil +} + +func getTokenFieldRFC3339(v interface{}, value **rfc3339) error { + var stringValue string + if err := getTokenFieldString(v, &stringValue); err != nil { + return err + } + + timeValue, err := parseRFC3339(stringValue) + if err != nil { + return err + } + + *value = &timeValue + return nil +} + +func loadCachedToken(filename string) (cachedToken, error) { + fileBytes, err := ioutil.ReadFile(filename) + if err != nil { + return cachedToken{}, fmt.Errorf("failed to read cached SSO token file, %v", err) + } + + var t cachedToken + if err := json.Unmarshal(fileBytes, &t); err != nil { + return cachedToken{}, fmt.Errorf("failed to parse cached SSO token file, %v", err) + } + + if len(t.AccessToken) == 0 || t.ExpiresAt == nil || time.Time(*t.ExpiresAt).IsZero() { + return cachedToken{}, fmt.Errorf( + "cached SSO token must contain accessToken and expiresAt fields") + } + + return t, nil +} + +func storeCachedToken(filename string, t cachedToken, fileMode os.FileMode) (err error) { + tmpFilename := filename + ".tmp-" + strconv.FormatInt(nowTime().UnixNano(), 10) + if err := writeCacheFile(tmpFilename, fileMode, t); err != nil { + return err + } + + if err := os.Rename(tmpFilename, filename); err != nil { + return fmt.Errorf("failed to replace old cached SSO token file, %v", err) + } + + return nil +} + +func writeCacheFile(filename string, fileMode os.FileMode, t cachedToken) (err error) { + var f *os.File + f, err = os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_RDWR, fileMode) + if err != nil { + return fmt.Errorf("failed to create cached SSO token file %v", err) + } + + defer func() { + closeErr := f.Close() + if err == nil && closeErr != nil { + err = fmt.Errorf("failed to close cached SSO token file, %v", closeErr) + } + }() + + encoder := json.NewEncoder(f) + + if err = encoder.Encode(t); err != nil { + return fmt.Errorf("failed to serialize cached SSO token, %v", err) + } + + return nil +} + +type rfc3339 time.Time + +// UnmarshalJSON decodes rfc3339 from JSON format +func (r *rfc3339) UnmarshalJSON(bytes []byte) error { + var value string + var err error + + if err = json.Unmarshal(bytes, &value); err != nil { + return err + } + + *r, err = parseRFC3339(value) + return err +} + +func parseRFC3339(v string) (rfc3339, error) { + parsed, err := time.Parse(time.RFC3339, v) + if err != nil { + return rfc3339{}, fmt.Errorf("expected RFC3339 timestamp: %v", err) + } + + return rfc3339(parsed), nil +} + +// MarshalJSON encodes rfc3339 to JSON format time +func (r *rfc3339) MarshalJSON() ([]byte, error) { + value := time.Time(*r).Format(time.RFC3339) + + // Use JSON unmarshal 
to unescape the quoted value making use of JSON's + // quoting rules. + return json.Marshal(value) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/token_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/token_provider.go new file mode 100644 index 00000000..3388b78b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/token_provider.go @@ -0,0 +1,148 @@ +package ssocreds + +import ( + "fmt" + "os" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/auth/bearer" + "github.com/aws/aws-sdk-go/service/ssooidc" +) + +// CreateTokenAPIClient provides the interface for the SSOTokenProvider's API +// client for calling CreateToken operation to refresh the SSO token. +type CreateTokenAPIClient interface { + CreateToken(input *ssooidc.CreateTokenInput) (*ssooidc.CreateTokenOutput, error) +} + +// SSOTokenProviderOptions provides the options for configuring the +// SSOTokenProvider. +type SSOTokenProviderOptions struct { + // Client that can be overridden + Client CreateTokenAPIClient + + // The path the file containing the cached SSO token will be read from. + // Initialized from the NewSSOTokenProvider's cachedTokenFilepath parameter. + CachedTokenFilepath string +} + +// SSOTokenProvider provides a utility for refreshing SSO AccessTokens for +// Bearer Authentication. The SSOTokenProvider can only be used to refresh +// already cached SSO Tokens. This utility cannot perform the initial SSO +// create token. +// +// The initial SSO create token should be performed with the AWS CLI before the +// Go application using the SSOTokenProvider will need to retrieve the SSO +// token. If the AWS CLI has not created the token cache file, this provider +// will return an error when attempting to retrieve the cached token. +// +// This provider will attempt to refresh the cached SSO token periodically if +// needed when RetrieveBearerToken is called. +// +// A utility such as the AWS CLI must be used to initially create the SSO +// session and cached token file. +// https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html +type SSOTokenProvider struct { + options SSOTokenProviderOptions +} + +// NewSSOTokenProvider returns an initialized SSOTokenProvider that will +// periodically refresh the cached SSO token stored in the cachedTokenFilepath. +// The cachedTokenFilepath file's content will be rewritten by the token +// provider when the token is refreshed. +// +// The client must be configured for the AWS region the SSO token was created for. +func NewSSOTokenProvider(client CreateTokenAPIClient, cachedTokenFilepath string, optFns ...func(o *SSOTokenProviderOptions)) *SSOTokenProvider { + options := SSOTokenProviderOptions{ + Client: client, + CachedTokenFilepath: cachedTokenFilepath, + } + for _, fn := range optFns { + fn(&options) + } + + provider := &SSOTokenProvider{ + options: options, + } + + return provider +} + +// RetrieveBearerToken returns the SSO token stored in the cachedTokenFilepath +// the SSOTokenProvider was created with. If the token has expired +// RetrieveBearerToken will attempt to refresh it. If the token cannot be +// refreshed or is not present an error will be returned. +// +// A utility such as the AWS CLI must be used to initially create the SSO +// session and cached token file. 
https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html +func (p *SSOTokenProvider) RetrieveBearerToken(ctx aws.Context) (bearer.Token, error) { + cachedToken, err := loadCachedToken(p.options.CachedTokenFilepath) + if err != nil { + return bearer.Token{}, err + } + + if cachedToken.ExpiresAt != nil && nowTime().After(time.Time(*cachedToken.ExpiresAt)) { + cachedToken, err = p.refreshToken(cachedToken) + if err != nil { + return bearer.Token{}, fmt.Errorf("refresh cached SSO token failed, %v", err) + } + } + + expiresAt := toTime((*time.Time)(cachedToken.ExpiresAt)) + return bearer.Token{ + Value: cachedToken.AccessToken, + CanExpire: !expiresAt.IsZero(), + Expires: expiresAt, + }, nil +} + +func (p *SSOTokenProvider) refreshToken(token cachedToken) (cachedToken, error) { + if token.ClientSecret == "" || token.ClientID == "" || token.RefreshToken == "" { + return cachedToken{}, fmt.Errorf("cached SSO token is expired, or not present, and cannot be refreshed") + } + + createResult, err := p.options.Client.CreateToken(&ssooidc.CreateTokenInput{ + ClientId: &token.ClientID, + ClientSecret: &token.ClientSecret, + RefreshToken: &token.RefreshToken, + GrantType: aws.String("refresh_token"), + }) + if err != nil { + return cachedToken{}, fmt.Errorf("unable to refresh SSO token, %v", err) + } + if createResult.ExpiresIn == nil { + return cachedToken{}, fmt.Errorf("missing required field ExpiresIn") + } + if createResult.AccessToken == nil { + return cachedToken{}, fmt.Errorf("missing required field AccessToken") + } + if createResult.RefreshToken == nil { + return cachedToken{}, fmt.Errorf("missing required field RefreshToken") + } + + expiresAt := nowTime().Add(time.Duration(*createResult.ExpiresIn) * time.Second) + + token.AccessToken = *createResult.AccessToken + token.ExpiresAt = (*rfc3339)(&expiresAt) + token.RefreshToken = *createResult.RefreshToken + + fileInfo, err := os.Stat(p.options.CachedTokenFilepath) + if err != nil { + return cachedToken{}, fmt.Errorf("failed to stat cached SSO token file %v", err) + } + + if err = storeCachedToken(p.options.CachedTokenFilepath, token, fileInfo.Mode()); err != nil { + return cachedToken{}, fmt.Errorf("unable to cache refreshed SSO token, %v", err) + } + + return token, nil +} + +func toTime(p *time.Time) (v time.Time) { + if p == nil { + return v + } + + return *p +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go index 260a37cb..86db488d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go @@ -9,7 +9,7 @@ to refresh the credentials will be synchronized. But, the SDK is unable to ensure synchronous usage of the AssumeRoleProvider if the value is shared between multiple Credentials, Sessions or service clients. -Assume Role +# Assume Role To assume an IAM role using STS with the SDK you can create a new Credentials with the SDKs's stscreds package. @@ -27,7 +27,7 @@ with the SDKs's stscreds package. // from assumed role. svc := s3.New(sess, &aws.Config{Credentials: creds}) -Assume Role with static MFA Token +# Assume Role with static MFA Token To assume an IAM role with a MFA token you can either specify a MFA token code directly or provide a function to prompt the user each time the credentials @@ -49,7 +49,7 @@ credentials. // from assumed role. 
svc := s3.New(sess, &aws.Config{Credentials: creds}) -Assume Role with MFA Token Provider +# Assume Role with MFA Token Provider To assume an IAM role with MFA for longer running tasks where the credentials may need to be refreshed setting the TokenProvider field of AssumeRoleProvider @@ -74,7 +74,6 @@ single Credentials with an AssumeRoleProvider can be shared safely. // Create service client value configured for credentials // from assumed role. svc := s3.New(sess, &aws.Config{Credentials: creds}) - */ package stscreds @@ -199,6 +198,10 @@ type AssumeRoleProvider struct { // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). SerialNumber *string + // The SourceIdentity which is used to identify a persistent identity through the whole session. + // For more details see https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html + SourceIdentity *string + // The value provided by the MFA device, if the trust policy of the role being // assumed requires MFA (that is, if the policy includes a condition that tests // for MFA). If the role being assumed requires MFA and if the TokenCode value @@ -320,6 +323,7 @@ func (p *AssumeRoleProvider) RetrieveWithContext(ctx credentials.Context) (crede Tags: p.Tags, PolicyArns: p.PolicyArns, TransitiveTagKeys: p.TransitiveTagKeys, + SourceIdentity: p.SourceIdentity, } if p.Policy != nil { input.Policy = p.Policy diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go index cefe2a76..19ad619a 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go @@ -28,7 +28,7 @@ const ( // compare test values. var now = time.Now -// TokenFetcher shuold return WebIdentity token bytes or an error +// TokenFetcher should return WebIdentity token bytes or an error type TokenFetcher interface { FetchToken(credentials.Context) ([]byte, error) } @@ -50,6 +50,8 @@ func (f FetchTokenPath) FetchToken(ctx credentials.Context) ([]byte, error) { // an OIDC token. type WebIdentityRoleProvider struct { credentials.Expiry + + // The policy ARNs to use with the web identity assumed role. PolicyArns []*sts.PolicyDescriptorType // Duration the STS credentials will be valid for. Truncated to seconds. @@ -74,6 +76,9 @@ type WebIdentityRoleProvider struct { // NewWebIdentityCredentials will return a new set of credentials with a given // configuration, role arn, and token file path. +// +// Deprecated: Use NewWebIdentityRoleProviderWithOptions for flexible +// functional options, and wrap with credentials.NewCredentials helper. func NewWebIdentityCredentials(c client.ConfigProvider, roleARN, roleSessionName, path string) *credentials.Credentials { svc := sts.New(c) p := NewWebIdentityRoleProvider(svc, roleARN, roleSessionName, path) @@ -82,19 +87,42 @@ func NewWebIdentityCredentials(c client.ConfigProvider, roleARN, roleSessionName // NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the // provided stsiface.STSAPI +// +// Deprecated: Use NewWebIdentityRoleProviderWithOptions for flexible +// functional options.
func NewWebIdentityRoleProvider(svc stsiface.STSAPI, roleARN, roleSessionName, path string) *WebIdentityRoleProvider { - return NewWebIdentityRoleProviderWithToken(svc, roleARN, roleSessionName, FetchTokenPath(path)) + return NewWebIdentityRoleProviderWithOptions(svc, roleARN, roleSessionName, FetchTokenPath(path)) } // NewWebIdentityRoleProviderWithToken will return a new WebIdentityRoleProvider with the // provided stsiface.STSAPI and a TokenFetcher +// +// Deprecated: Use NewWebIdentityRoleProviderWithOptions for flexible +// functional options. func NewWebIdentityRoleProviderWithToken(svc stsiface.STSAPI, roleARN, roleSessionName string, tokenFetcher TokenFetcher) *WebIdentityRoleProvider { - return &WebIdentityRoleProvider{ + return NewWebIdentityRoleProviderWithOptions(svc, roleARN, roleSessionName, tokenFetcher) +} + +// NewWebIdentityRoleProviderWithOptions will return an initialized +// WebIdentityRoleProvider with the provided stsiface.STSAPI, role ARN, and a +// TokenFetcher. Additional options can be provided as functional options. +// +// TokenFetcher is the implementation that will retrieve the JWT token used +// to assume the role. Use the provided FetchTokenPath implementation to +// retrieve the JWT token using a file system path. +func NewWebIdentityRoleProviderWithOptions(svc stsiface.STSAPI, roleARN, roleSessionName string, tokenFetcher TokenFetcher, optFns ...func(*WebIdentityRoleProvider)) *WebIdentityRoleProvider { + p := WebIdentityRoleProvider{ client: svc, tokenFetcher: tokenFetcher, roleARN: roleARN, roleSessionName: roleSessionName, } + + for _, fn := range optFns { + fn(&p) + } + + return &p } // Retrieve will attempt to assume a role from a token which is located at @@ -104,9 +132,9 @@ func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) { return p.RetrieveWithContext(aws.BackgroundContext()) } -// RetrieveWithContext will attempt to assume a role from a token which is located at -// 'WebIdentityTokenFilePath' specified destination and if that is empty an -// error will be returned. +// RetrieveWithContext will attempt to assume a role from a token which is +// located at 'WebIdentityTokenFilePath' specified destination and if that is +// empty an error will be returned.
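A short sketch of the new options-based constructor; the role ARN, session name, and token path are placeholders, and Duration is the provider field documented above:

    package main

    import (
    	"fmt"
    	"time"

    	"github.com/aws/aws-sdk-go/aws/credentials"
    	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/sts"
    )

    func main() {
    	sess := session.Must(session.NewSession())

    	p := stscreds.NewWebIdentityRoleProviderWithOptions(
    		sts.New(sess),
    		"arn:aws:iam::123456789012:role/example-role",
    		"example-session",
    		stscreds.FetchTokenPath("/var/run/secrets/oidc/token"),
    		func(p *stscreds.WebIdentityRoleProvider) {
    			p.Duration = 30 * time.Minute // session lifetime, truncated to seconds
    		},
    	)

    	creds := credentials.NewCredentials(p)
    	if _, err := creds.Get(); err != nil {
    		fmt.Println("assume role failed:", err)
    	}
    }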
func (p *WebIdentityRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { b, err := p.tokenFetcher.FetchToken(ctx) if err != nil { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go index 23bb639e..1ba80b57 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go @@ -9,6 +9,7 @@ package defaults import ( "fmt" + "io/ioutil" "net" "net/http" "net/url" @@ -74,6 +75,7 @@ func Handlers() request.Handlers { handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler) handlers.Validate.AfterEachFn = request.HandlerListStopOnError handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler) + handlers.Build.PushBackNamed(corehandlers.AddAwsInternal) handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander) handlers.Build.AfterEachFn = request.HandlerListStopOnError handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) @@ -114,9 +116,31 @@ func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Pro const ( httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN" + httpProviderAuthFileEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE" httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI" ) +// direct representation of the IPv4 address for the ECS container +// "169.254.170.2" +var ecsContainerIPv4 net.IP = []byte{ + 169, 254, 170, 2, +} + +// direct representation of the IPv4 address for the EKS container +// "169.254.170.23" +var eksContainerIPv4 net.IP = []byte{ + 169, 254, 170, 23, +} + +// direct representation of the IPv6 address for the EKS container +// "fd00:ec2::23" +var eksContainerIPv6 net.IP = []byte{ + 0xFD, 0, 0xE, 0xC2, + 0, 0, 0, 0, + 0, 0, 0, 0, + 0, 0, 0, 0x23, +} + // RemoteCredProvider returns a credentials provider for the default remote // endpoints such as EC2 or ECS Roles. 
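The same TokenProviderFunc hook that this defaults.go change wires up from AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE can also be attached by hand. A sketch with a placeholder endpoint and token path; trimming the file content is a local choice here, since the provider now rejects tokens containing newline characters:

    package main

    import (
    	"fmt"
    	"io/ioutil"
    	"strings"

    	"github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
    	"github.com/aws/aws-sdk-go/aws/defaults"
    )

    func main() {
    	def := defaults.Get()

    	// The endpoint host must be loopback or one of the allowed
    	// ECS/EKS container addresses when plain HTTP is used.
    	p := endpointcreds.NewProviderClient(*def.Config, def.Handlers,
    		"http://169.254.170.2/v2/credentials",
    		func(p *endpointcreds.Provider) {
    			p.AuthorizationTokenProvider = endpointcreds.TokenProviderFunc(func() (string, error) {
    				b, err := ioutil.ReadFile("/var/run/secrets/creds-token")
    				if err != nil {
    					return "", fmt.Errorf("read token: %v", err)
    				}
    				return strings.TrimSpace(string(b)), nil
    			})
    		})

    	if _, err := p.Retrieve(); err != nil {
    		fmt.Println("retrieve failed:", err)
    	}
    }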
func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { @@ -134,19 +158,22 @@ func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.P var lookupHostFn = net.LookupHost -func isLoopbackHost(host string) (bool, error) { - ip := net.ParseIP(host) - if ip != nil { - return ip.IsLoopback(), nil +// isAllowedHost allows host to be loopback or known ECS/EKS container IPs +// +// host can either be an IP address OR an unresolved hostname - resolution will +// be automatically performed in the latter case +func isAllowedHost(host string) (bool, error) { + if ip := net.ParseIP(host); ip != nil { + return isIPAllowed(ip), nil } - // Host is not an ip, perform lookup addrs, err := lookupHostFn(host) if err != nil { return false, err } + for _, addr := range addrs { - if !net.ParseIP(addr).IsLoopback() { + if ip := net.ParseIP(addr); ip == nil || !isIPAllowed(ip) { return false, nil } } @@ -154,6 +181,13 @@ func isLoopbackHost(host string) (bool, error) { return true, nil } +func isIPAllowed(ip net.IP) bool { + return ip.IsLoopback() || + ip.Equal(ecsContainerIPv4) || + ip.Equal(eksContainerIPv4) || + ip.Equal(eksContainerIPv6) +} + func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { var errMsg string @@ -164,10 +198,12 @@ func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) host := aws.URLHostname(parsed) if len(host) == 0 { errMsg = "unable to parse host from local HTTP cred provider URL" - } else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil { - errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, loopbackErr) - } else if !isLoopback { - errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback hosts are allowed.", host) + } else if parsed.Scheme == "http" { + if isAllowedHost, allowHostErr := isAllowedHost(host); allowHostErr != nil { + errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, allowHostErr) + } else if !isAllowedHost { + errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback/ecs/eks hosts are allowed.", host) + } } } @@ -189,6 +225,15 @@ func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) crede func(p *endpointcreds.Provider) { p.ExpiryWindow = 5 * time.Minute p.AuthorizationToken = os.Getenv(httpProviderAuthorizationEnvVar) + if authFilePath := os.Getenv(httpProviderAuthFileEnvVar); authFilePath != "" { + p.AuthorizationTokenProvider = endpointcreds.TokenProviderFunc(func() (string, error) { + if contents, err := ioutil.ReadFile(authFilePath); err != nil { + return "", fmt.Errorf("failed to read authorization token from %v: %v", authFilePath, err) + } else { + return string(contents), nil + } + }) + } }, ) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go index df63bade..f4cc8751 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go @@ -57,13 +57,13 @@ type EC2Metadata struct { // New creates a new instance of the EC2Metadata client with a session. // This client is safe to use across multiple goroutines. // -// // Example: -// // Create a EC2Metadata client from just a session. 
-// svc := ec2metadata.New(mySession) // -// // Create a EC2Metadata client with additional configuration -// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody)) +// // Create a EC2Metadata client from just a session. +// svc := ec2metadata.New(mySession) +// +// // Create a EC2Metadata client with additional configuration +// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody)) func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata { c := p.ClientConfig(ServiceName, cfgs...) return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go index 4b29f190..f1f9ba4e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go @@ -1,6 +1,8 @@ package ec2metadata import ( + "fmt" + "github.com/aws/aws-sdk-go/aws" "net/http" "sync/atomic" "time" @@ -33,11 +35,15 @@ func newTokenProvider(c *EC2Metadata, duration time.Duration) *tokenProvider { return &tokenProvider{client: c, configuredTTL: duration} } +// check if fallback is enabled +func (t *tokenProvider) fallbackEnabled() bool { + return t.client.Config.EC2MetadataEnableFallback == nil || *t.client.Config.EC2MetadataEnableFallback +} + // fetchTokenHandler fetches token for EC2Metadata service client by default. func (t *tokenProvider) fetchTokenHandler(r *request.Request) { - // short-circuits to insecure data flow if tokenProvider is disabled. - if v := atomic.LoadUint32(&t.disabled); v == 1 { + if v := atomic.LoadUint32(&t.disabled); v == 1 && t.fallbackEnabled() { return } @@ -49,23 +55,23 @@ func (t *tokenProvider) fetchTokenHandler(r *request.Request) { output, err := t.client.getToken(r.Context(), t.configuredTTL) if err != nil { + // only attempt fallback to insecure data flow if IMDSv1 is enabled + if !t.fallbackEnabled() { + r.Error = awserr.New("EC2MetadataError", "failed to get IMDSv2 token and fallback to IMDSv1 is disabled", err) + return + } - // change the disabled flag on token provider to true, - // when error is request timeout error. + // change the disabled flag on token provider to true and fallback if requestFailureError, ok := err.(awserr.RequestFailure); ok { switch requestFailureError.StatusCode() { case http.StatusForbidden, http.StatusNotFound, http.StatusMethodNotAllowed: atomic.StoreUint32(&t.disabled, 1) + if t.client.Config.LogLevel.Matches(aws.LogDebugWithDeprecated) { + t.client.Config.Logger.Log(fmt.Sprintf("WARN: failed to get session token, falling back to IMDSv1: %v", requestFailureError)) + } case http.StatusBadRequest: r.Error = requestFailureError } - - // Check if request timed out while waiting for response - if e, ok := requestFailureError.OrigErr().(awserr.Error); ok { - if e.Code() == request.ErrCodeRequestError { - atomic.StoreUint32(&t.disabled, 1) - } - } } return } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go index b98ea869..cad3b9a4 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go @@ -31,12 +31,12 @@ func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) { // allow you to get a list of the partitions in the order the endpoints // will be resolved in. 
// -// resolver, err := endpoints.DecodeModel(reader) +// resolver, err := endpoints.DecodeModel(reader) // -// partitions := resolver.(endpoints.EnumPartitions).Partitions() -// for _, p := range partitions { -// // ... inspect partitions -// } +// partitions := resolver.(endpoints.EnumPartitions).Partitions() +// for _, p := range partitions { +// // ... inspect partitions +// } func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) { var opts DecodeModelOptions opts.Set(optFns...) @@ -81,7 +81,6 @@ func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resol // Customization for i := 0; i < len(ps); i++ { p := &ps[i] - custAddS3DualStack(p) custRegionalS3(p) custRmIotDataService(p) custFixAppAutoscalingChina(p) @@ -91,15 +90,6 @@ func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resol return ps, nil } -func custAddS3DualStack(p *partition) { - if !(p.ID == "aws" || p.ID == "aws-cn" || p.ID == "aws-us-gov") { - return - } - - custAddDualstack(p, "s3") - custAddDualstack(p, "s3-control") -} - func custRegionalS3(p *partition) { if p.ID != "aws" { return @@ -110,35 +100,28 @@ func custRegionalS3(p *partition) { return } + const awsGlobal = "aws-global" + const usEast1 = "us-east-1" + // If global endpoint already exists no customization needed. - if _, ok := service.Endpoints["aws-global"]; ok { + if _, ok := service.Endpoints[endpointKey{Region: awsGlobal}]; ok { return } - service.PartitionEndpoint = "aws-global" - service.Endpoints["us-east-1"] = endpoint{} - service.Endpoints["aws-global"] = endpoint{ + service.PartitionEndpoint = awsGlobal + if _, ok := service.Endpoints[endpointKey{Region: usEast1}]; !ok { + service.Endpoints[endpointKey{Region: usEast1}] = endpoint{} + } + service.Endpoints[endpointKey{Region: awsGlobal}] = endpoint{ Hostname: "s3.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: usEast1, }, } p.Services["s3"] = service } -func custAddDualstack(p *partition, svcName string) { - s, ok := p.Services[svcName] - if !ok { - return - } - - s.Defaults.HasDualStack = boxedTrue - s.Defaults.DualStackHostname = "{service}.dualstack.{region}.{dnsSuffix}" - - p.Services[svcName] = s -} - func custRmIotDataService(p *partition) { delete(p.Services, "data.iot") } @@ -155,12 +138,13 @@ func custFixAppAutoscalingChina(p *partition) { } const expectHostname = `autoscaling.{region}.amazonaws.com` - if e, a := s.Defaults.Hostname, expectHostname; e != a { + serviceDefault := s.Defaults[defaultKey{}] + if e, a := expectHostname, serviceDefault.Hostname; e != a { fmt.Printf("custFixAppAutoscalingChina: ignoring customization, expected %s, got %s\n", e, a) return } - - s.Defaults.Hostname = expectHostname + ".cn" + serviceDefault.Hostname = expectHostname + ".cn" + s.Defaults[defaultKey{}] = serviceDefault p.Services[serviceName] = s } @@ -175,18 +159,25 @@ func custFixAppAutoscalingUsGov(p *partition) { return } - if a := s.Defaults.CredentialScope.Service; a != "" { + serviceDefault := s.Defaults[defaultKey{}] + if a := serviceDefault.CredentialScope.Service; a != "" { fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty credential scope service, got %s\n", a) return } - if a := s.Defaults.Hostname; a != "" { + if a := serviceDefault.Hostname; a != "" { fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty hostname, got %s\n", a) return } - s.Defaults.CredentialScope.Service = "application-autoscaling" - s.Defaults.Hostname = 
"autoscaling.{region}.amazonaws.com" + serviceDefault.CredentialScope.Service = "application-autoscaling" + serviceDefault.Hostname = "autoscaling.{region}.amazonaws.com" + + if s.Defaults == nil { + s.Defaults = make(endpointDefaults) + } + + s.Defaults[defaultKey{}] = serviceDefault p.Services[serviceName] = s } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 553d611b..f6bb14d0 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -13,6 +13,8 @@ const ( AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition. AwsIsoPartitionID = "aws-iso" // AWS ISO (US) partition. AwsIsoBPartitionID = "aws-iso-b" // AWS ISOB (US) partition. + AwsIsoEPartitionID = "aws-iso-e" // AWS ISOE (Europe) partition. + AwsIsoFPartitionID = "aws-iso-f" // AWS ISOF partition. ) // AWS Standard partition's regions. @@ -23,15 +25,23 @@ const ( ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul). ApNortheast3RegionID = "ap-northeast-3" // Asia Pacific (Osaka). ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai). + ApSouth2RegionID = "ap-south-2" // Asia Pacific (Hyderabad). ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore). ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). + ApSoutheast3RegionID = "ap-southeast-3" // Asia Pacific (Jakarta). + ApSoutheast4RegionID = "ap-southeast-4" // Asia Pacific (Melbourne). CaCentral1RegionID = "ca-central-1" // Canada (Central). + CaWest1RegionID = "ca-west-1" // Canada West (Calgary). EuCentral1RegionID = "eu-central-1" // Europe (Frankfurt). + EuCentral2RegionID = "eu-central-2" // Europe (Zurich). EuNorth1RegionID = "eu-north-1" // Europe (Stockholm). EuSouth1RegionID = "eu-south-1" // Europe (Milan). + EuSouth2RegionID = "eu-south-2" // Europe (Spain). EuWest1RegionID = "eu-west-1" // Europe (Ireland). EuWest2RegionID = "eu-west-2" // Europe (London). EuWest3RegionID = "eu-west-3" // Europe (Paris). + IlCentral1RegionID = "il-central-1" // Israel (Tel Aviv). + MeCentral1RegionID = "me-central-1" // Middle East (UAE). MeSouth1RegionID = "me-south-1" // Middle East (Bahrain). SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). UsEast1RegionID = "us-east-1" // US East (N. Virginia). @@ -55,6 +65,7 @@ const ( // AWS ISO (US) partition's regions. const ( UsIsoEast1RegionID = "us-iso-east-1" // US ISO East. + UsIsoWest1RegionID = "us-iso-west-1" // US ISO WEST. ) // AWS ISOB (US) partition's regions. @@ -62,8 +73,14 @@ const ( UsIsobEast1RegionID = "us-isob-east-1" // US ISOB East (Ohio). ) +// AWS ISOE (Europe) partition's regions. +const () + +// AWS ISOF partition's regions. +const () + // DefaultResolver returns an Endpoint resolver that will be able -// to resolve endpoints for: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US). +// to resolve endpoints for: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), AWS ISOB (US), AWS ISOE (Europe), and AWS ISOF. // // Use DefaultPartitions() to get the list of the default partitions. func DefaultResolver() Resolver { @@ -71,7 +88,7 @@ func DefaultResolver() Resolver { } // DefaultPartitions returns a list of the partitions the SDK is bundled -// with. The available partitions are: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), and AWS ISOB (US). +// with. 
The available partitions are: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), AWS ISOB (US), AWS ISOE (Europe), and AWS ISOF. // // partitions := endpoints.DefaultPartitions // for _, p := range partitions { @@ -87,6 +104,8 @@ var defaultPartitions = partitions{ awsusgovPartition, awsisoPartition, awsisobPartition, + awsisoePartition, + awsisofPartition, } // AwsPartition returns the Resolver for AWS Standard. @@ -100,14 +119,40 @@ var awsPartition = partition{ DNSSuffix: "amazonaws.com", RegionRegex: regionRegex{ Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$") + reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$") return reg }(), }, - Defaults: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: dualStackVariant, + }: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, }, Regions: regions{ "af-south-1": region{ @@ -128,24 +173,42 @@ var awsPartition = partition{ "ap-south-1": region{ Description: "Asia Pacific (Mumbai)", }, + "ap-south-2": region{ + Description: "Asia Pacific (Hyderabad)", + }, "ap-southeast-1": region{ Description: "Asia Pacific (Singapore)", }, "ap-southeast-2": region{ Description: "Asia Pacific (Sydney)", }, + "ap-southeast-3": region{ + Description: "Asia Pacific (Jakarta)", + }, + "ap-southeast-4": region{ + Description: "Asia Pacific (Melbourne)", + }, "ca-central-1": region{ Description: "Canada (Central)", }, + "ca-west-1": region{ + Description: "Canada West (Calgary)", + }, "eu-central-1": region{ Description: "Europe (Frankfurt)", }, + "eu-central-2": region{ + Description: "Europe (Zurich)", + }, "eu-north-1": region{ Description: "Europe (Stockholm)", }, "eu-south-1": region{ Description: "Europe (Milan)", }, + "eu-south-2": region{ + Description: "Europe (Spain)", + }, "eu-west-1": region{ Description: "Europe (Ireland)", }, @@ -155,6 +218,12 @@ var awsPartition = partition{ "eu-west-3": region{ Description: "Europe (Paris)", }, + "il-central-1": region{ + Description: "Israel (Tel Aviv)", + }, + "me-central-1": region{ + Description: "Middle East (UAE)", + }, "me-south-1": region{ Description: "Middle East (Bahrain)", }, @@ -176,10013 +245,40552 @@ var awsPartition = partition{ }, Services: services{ "a4b": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, }, }, "access-analyzer": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - 
"eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "access-analyzer-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ Hostname: "access-analyzer-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-1": endpoint{ + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ Hostname: "access-analyzer-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ Hostname: "access-analyzer-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ Hostname: "access-analyzer-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ Hostname: "access-analyzer-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "access-analyzer-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "access-analyzer-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "access-analyzer-fips.us-west-1.amazonaws.com", + }, + 
endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "access-analyzer-fips.us-west-2.amazonaws.com", + }, + }, + }, + "account": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "account.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, "acm": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "ca-central-1-fips": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ Hostname: "acm-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "acm-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-fips.us-east-1.amazonaws.com", }, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - 
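
Note: the new account service is partition-global, with IsRegionalized set to boxedFalse, so under the resolver's default non-strict matching every aws-partition region falls through to the single aws-global endpoint, and CredentialScope pins signing to us-east-1. A sketch:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/endpoints"
    )

    func main() {
        // Non-strict matching (the default) maps any region in the aws
        // partition onto the service's single partition endpoint.
        ep, err := endpoints.DefaultResolver().EndpointFor("account", "eu-west-1")
        if err != nil {
            panic(err)
        }
        fmt.Println(ep.URL)           // https://account.us-east-1.amazonaws.com
        fmt.Println(ep.SigningRegion) // us-east-1, from CredentialScope
    }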
"us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ Hostname: "acm-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-fips.us-east-2.amazonaws.com", }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ Hostname: "acm-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ Hostname: "acm-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-fips.us-west-2.amazonaws.com", }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ Hostname: "acm-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, }, }, }, "acm-pca": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + 
endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ Hostname: "acm-pca-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-1": endpoint{ + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ Hostname: "acm-pca-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ Hostname: "acm-pca-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ Hostname: "acm-pca-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ Hostname: "acm-pca-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + }, + }, + "agreement-marketplace": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, }, }, "airflow": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: 
"sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, "amplify": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, "amplifybackend": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "amplifyuibuilder": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + 
endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "aoss": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, "api.detective": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.detective-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "api.detective-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + 
}: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.detective-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ Hostname: "api.detective-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.detective-fips.us-east-2.amazonaws.com", }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ Hostname: "api.detective-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.detective-fips.us-west-1.amazonaws.com", }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ Hostname: "api.detective-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.detective-fips.us-west-2.amazonaws.com", }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ Hostname: "api.detective-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, }, }, }, "api.ecr": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ Hostname: "api.ecr.af-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "af-south-1", }, }, - "ap-east-1": endpoint{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{ Hostname: "api.ecr.ap-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-east-1", }, }, - "ap-northeast-1": endpoint{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ Hostname: "api.ecr.ap-northeast-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-northeast-1", }, }, - "ap-northeast-2": endpoint{ + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ Hostname: "api.ecr.ap-northeast-2.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-northeast-2", }, }, - "ap-northeast-3": endpoint{ + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ Hostname: "api.ecr.ap-northeast-3.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-northeast-3", }, }, - "ap-south-1": endpoint{ + endpointKey{ + Region: "ap-south-1", + }: endpoint{ Hostname: "api.ecr.ap-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-south-1", }, }, - "ap-southeast-1": endpoint{ + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "api.ecr.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ Hostname: "api.ecr.ap-southeast-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-southeast-1", }, }, - "ap-southeast-2": endpoint{ + endpointKey{ + 
Region: "ap-southeast-2", + }: endpoint{ Hostname: "api.ecr.ap-southeast-2.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-southeast-2", }, }, - "ca-central-1": endpoint{ + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "api.ecr.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "api.ecr.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ Hostname: "api.ecr.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, }, - "eu-central-1": endpoint{ + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "api.ecr.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, + endpointKey{ + Region: "dkr-us-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-east-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-west-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ Hostname: "api.ecr.eu-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-central-1", }, }, - "eu-north-1": endpoint{ + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "api.ecr.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ Hostname: "api.ecr.eu-north-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-north-1", }, }, - "eu-south-1": endpoint{ + endpointKey{ + Region: "eu-south-1", + }: endpoint{ Hostname: "api.ecr.eu-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-south-1", }, }, - "eu-west-1": endpoint{ + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "api.ecr.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ Hostname: "api.ecr.eu-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-1", }, }, - "eu-west-2": endpoint{ + endpointKey{ + Region: "eu-west-2", 
+ }: endpoint{ Hostname: "api.ecr.eu-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-2", }, }, - "eu-west-3": endpoint{ + endpointKey{ + Region: "eu-west-3", + }: endpoint{ Hostname: "api.ecr.eu-west-3.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-3", }, }, - "fips-dkr-us-east-1": endpoint{ + endpointKey{ + Region: "fips-dkr-us-east-1", + }: endpoint{ Hostname: "ecr-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-dkr-us-east-2": endpoint{ + endpointKey{ + Region: "fips-dkr-us-east-2", + }: endpoint{ Hostname: "ecr-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-dkr-us-west-1": endpoint{ + endpointKey{ + Region: "fips-dkr-us-west-1", + }: endpoint{ Hostname: "ecr-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-dkr-us-west-2": endpoint{ + endpointKey{ + Region: "fips-dkr-us-west-2", + }: endpoint{ Hostname: "ecr-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, }, - "fips-us-east-1": endpoint{ + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ Hostname: "ecr-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ Hostname: "ecr-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ Hostname: "ecr-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ Hostname: "ecr-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "api.ecr.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "api.ecr.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, }, - "me-south-1": endpoint{ + endpointKey{ + Region: "me-south-1", + }: endpoint{ Hostname: "api.ecr.me-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "me-south-1", }, }, - "sa-east-1": endpoint{ + endpointKey{ + Region: "sa-east-1", + }: endpoint{ Hostname: "api.ecr.sa-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "sa-east-1", }, }, - "us-east-1": endpoint{ + endpointKey{ + Region: "us-east-1", + }: endpoint{ Hostname: "api.ecr.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, }, - "us-east-2": endpoint{ + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ Hostname: "api.ecr.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, }, - "us-west-1": endpoint{ + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-east-2.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ Hostname: "api.ecr.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, }, - "us-west-2": endpoint{ + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ Hostname: "api.ecr.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "api.ecr-public": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "api.ecr-public.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "api.ecr-public.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "api.elastic-inference": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ Hostname: "api.elastic-inference.ap-northeast-1.amazonaws.com", }, - "ap-northeast-2": endpoint{ + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ Hostname: "api.elastic-inference.ap-northeast-2.amazonaws.com", }, - "eu-west-1": endpoint{ + endpointKey{ + Region: "eu-west-1", + }: endpoint{ Hostname: "api.elastic-inference.eu-west-1.amazonaws.com", }, - "us-east-1": endpoint{ + endpointKey{ + Region: "us-east-1", + }: endpoint{ Hostname: "api.elastic-inference.us-east-1.amazonaws.com", }, - "us-east-2": endpoint{ + endpointKey{ + Region: "us-east-2", + }: endpoint{ Hostname: "api.elastic-inference.us-east-2.amazonaws.com", }, - "us-west-2": endpoint{ + endpointKey{ + Region: "us-west-2", + }: endpoint{ Hostname: "api.elastic-inference.us-west-2.amazonaws.com", }, }, }, "api.fleethub.iot": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "fips-ca-central-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.fleethub.iot-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ Hostname: "api.fleethub.iot-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - 
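
Note: the api.ecr hunk above pairs per-region hostnames and credential scopes with a service-level FIPS default of ecr-fips.{region}.{dnsSuffix}; the docker-specific dkr-* and the fips-* pseudo-regions remain only as deprecated aliases. Resolving through the variant option picks the concrete FIPS row for the region; a sketch:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/endpoints"
    )

    func main() {
        ep, err := endpoints.AwsPartition().EndpointFor("api.ecr", "us-east-1",
            func(o *endpoints.Options) {
                o.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled
            })
        if err != nil {
            panic(err)
        }
        // The fipsVariant row for us-east-1 supplies the hostname directly.
        fmt.Println(ep.URL)           // https://ecr-fips.us-east-1.amazonaws.com
        fmt.Println(ep.SigningRegion) // us-east-1
    }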
"fips-us-east-1": endpoint{ + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ Hostname: "api.fleethub.iot-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ Hostname: "api.fleethub.iot-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ Hostname: "api.fleethub.iot-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, }, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "api.mediatailor": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "api.pricing": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "pricing", + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.fleethub.iot-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.fleethub.iot-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.fleethub.iot-fips.us-west-2.amazonaws.com", }, - }, - Endpoints: endpoints{ - "ap-south-1": endpoint{}, - "us-east-1": endpoint{}, }, }, - "api.sagemaker": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com", + "api.iotdeviceadvisor": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "api.iotdeviceadvisor.ap-northeast-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "ap-northeast-1", }, }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "api-fips.sagemaker.us-east-2.amazonaws.com", + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "api.iotdeviceadvisor.eu-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "eu-west-1", }, }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "api-fips.sagemaker.us-west-1.amazonaws.com", + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "api.iotdeviceadvisor.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "us-east-1", }, }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "api-fips.sagemaker.us-west-2.amazonaws.com", + endpointKey{ + 
Region: "us-west-2", + }: endpoint{ + Hostname: "api.iotdeviceadvisor.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, }, }, }, - "apigateway": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "app-integrations": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "appflow": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "application-autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "appmesh": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "apprunner": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "appstream2": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - CredentialScope: credentialScope{ - Service: "appstream", - }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - 
"eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "fips": endpoint{ - Hostname: "appstream2-fips.us-west-2.amazonaws.com", + "api.iotwireless": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "api.iotwireless.ap-northeast-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "ap-northeast-1", }, }, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "appsync": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "aps": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "athena": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "athena-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "api.iotwireless.ap-southeast-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "ap-southeast-2", }, }, - "fips-us-east-2": endpoint{ - Hostname: "athena-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "api.iotwireless.eu-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "eu-central-1", }, }, - "fips-us-west-1": endpoint{ - Hostname: "athena-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "api.iotwireless.eu-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "eu-west-1", }, }, - "fips-us-west-2": endpoint{ - Hostname: "athena-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "api.iotwireless.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "api.iotwireless.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "api.iotwireless.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "autoscaling": service{ - Defaults: endpoint{ - Protocols: 
[]string{"http", "https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "api.mediatailor": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, - "autoscaling-plans": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "api.pricing": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "pricing", + }, + }, }, - }, - "backup": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, }, }, - "batch": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - 
"ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "fips.batch.us-east-1.amazonaws.com", + "api.sagemaker": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "api-fips.sagemaker.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "fips.batch.us-east-2.amazonaws.com", + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api-fips.sagemaker.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "api-fips.sagemaker.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "fips.batch.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api-fips.sagemaker.us-west-1.amazonaws.com", }, - "fips-us-west-2": endpoint{ - Hostname: "fips.batch.us-west-2.amazonaws.com", + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "api-fips.sagemaker.us-west-1.amazonaws.com", CredentialScope: 
credentialScope{ - Region: "us-west-2", + Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "braket": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "budgets": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "budgets.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api-fips.sagemaker.us-west-2.amazonaws.com", }, - }, - }, - "ce": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "ce.us-east-1.amazonaws.com", + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "api-fips.sagemaker.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + }, + }, + "api.tunneling.iot": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com", }, - }, - }, - "chime": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "chime.us-east-1.amazonaws.com", - Protocols: []string{"https"}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - }, - }, - "cloud9": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "clouddirectory": 
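
Note: api.sagemaker and api.tunneling.iot now express their FIPS defaults as hostname templates (api-fips.sagemaker.{region}.{dnsSuffix} and api.tunneling.iot-fips.{region}.{dnsSuffix}); the resolver substitutes {service}, {region}, and {dnsSuffix} whenever no concrete endpoint row overrides the template. The substitution below is illustrative only, a stand-in for the SDK's internal expansion rather than its actual code:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        // Stand-in for the resolver's internal template expansion.
        tmpl := "api-fips.sagemaker.{region}.{dnsSuffix}"
        host := strings.NewReplacer(
            "{region}", "eu-west-1",
            "{dnsSuffix}", "amazonaws.com",
        ).Replace(tmpl)
        fmt.Println(host) // api-fips.sagemaker.eu-west-1.amazonaws.com
    }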
service{ - - Endpoints: endpoints{ - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cloudformation": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "cloudformation-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "cloudformation-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "cloudformation-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "cloudformation-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com", }, }, }, - "cloudfront": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "cloudfront.amazonaws.com", - Protocols: []string{"http", "https"}, + "apigateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: 
endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apigateway-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apigateway-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "apigateway-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - }, - }, - "cloudhsm": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cloudhsmv2": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "cloudhsm", - }, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cloudsearch": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cloudtrail": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "cloudtrail-fips.us-east-1.amazonaws.com", + endpointKey{ 
+ Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "apigateway-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "apigateway-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "cloudtrail-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "apigateway-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "cloudtrail-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "apigateway-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "cloudtrail-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "apigateway-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apigateway-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apigateway-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apigateway-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apigateway-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "codeartifact": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "app-integrations": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "appconfig": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + 
Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "appconfigdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, - "codebuild": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": 
-				"ap-northeast-3": endpoint{},
-				"ap-south-1": endpoint{},
-				"ap-southeast-1": endpoint{},
-				"ap-southeast-2": endpoint{},
-				"ca-central-1": endpoint{},
-				"eu-central-1": endpoint{},
-				"eu-north-1": endpoint{},
-				"eu-south-1": endpoint{},
-				"eu-west-1": endpoint{},
-				"eu-west-2": endpoint{},
-				"eu-west-3": endpoint{},
-				"me-south-1": endpoint{},
-				"sa-east-1": endpoint{},
-				"us-east-1": endpoint{},
-				"us-east-1-fips": endpoint{
-					Hostname: "codebuild-fips.us-east-1.amazonaws.com",
+		"appflow": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "fips-us-east-1",
+				}: endpoint{
+					Hostname: "appflow-fips.us-east-1.amazonaws.com",
 					CredentialScope: credentialScope{
 						Region: "us-east-1",
 					},
+					Deprecated: boxedTrue,
 				},
-				"us-east-2": endpoint{},
-				"us-east-2-fips": endpoint{
-					Hostname: "codebuild-fips.us-east-2.amazonaws.com",
+				endpointKey{
+					Region: "fips-us-east-2",
+				}: endpoint{
+					Hostname: "appflow-fips.us-east-2.amazonaws.com",
 					CredentialScope: credentialScope{
 						Region: "us-east-2",
 					},
+					Deprecated: boxedTrue,
 				},
-				"us-west-1": endpoint{},
-				"us-west-1-fips": endpoint{
-					Hostname: "codebuild-fips.us-west-1.amazonaws.com",
+				endpointKey{
+					Region: "fips-us-west-1",
+				}: endpoint{
+					Hostname: "appflow-fips.us-west-1.amazonaws.com",
 					CredentialScope: credentialScope{
 						Region: "us-west-1",
 					},
+					Deprecated: boxedTrue,
 				},
-				"us-west-2": endpoint{},
-				"us-west-2-fips": endpoint{
-					Hostname: "codebuild-fips.us-west-2.amazonaws.com",
+				endpointKey{
+					Region: "fips-us-west-2",
+				}: endpoint{
+					Hostname: "appflow-fips.us-west-2.amazonaws.com",
 					CredentialScope: credentialScope{
 						Region: "us-west-2",
 					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "appflow-fips.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "appflow-fips.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "appflow-fips.us-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "appflow-fips.us-west-2.amazonaws.com",
 				},
 			},
 		},
-		"codecommit": service{
-
-			Endpoints: endpoints{
-				"af-south-1": endpoint{},
-				"ap-east-1": endpoint{},
-				"ap-northeast-1": endpoint{},
-				"ap-northeast-2": endpoint{},
-				"ap-northeast-3": endpoint{},
-				"ap-south-1": endpoint{},
-				"ap-southeast-1": endpoint{},
-				"ap-southeast-2": endpoint{},
-				"ca-central-1": endpoint{},
-				"eu-central-1": endpoint{},
-				"eu-north-1": endpoint{},
-				"eu-south-1": endpoint{},
-				"eu-west-1": endpoint{},
-				"eu-west-2": endpoint{},
"eu-west-3": endpoint{}, - "fips": endpoint{ - Hostname: "codecommit-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, + "application-autoscaling": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "applicationinsights": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + 
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
 			},
 		},
-		"codedeploy": service{
-
-			Endpoints: endpoints{
-				"af-south-1": endpoint{},
-				"ap-east-1": endpoint{},
-				"ap-northeast-1": endpoint{},
-				"ap-northeast-2": endpoint{},
-				"ap-northeast-3": endpoint{},
-				"ap-south-1": endpoint{},
-				"ap-southeast-1": endpoint{},
-				"ap-southeast-2": endpoint{},
-				"ca-central-1": endpoint{},
-				"eu-central-1": endpoint{},
-				"eu-north-1": endpoint{},
-				"eu-south-1": endpoint{},
-				"eu-west-1": endpoint{},
-				"eu-west-2": endpoint{},
-				"eu-west-3": endpoint{},
-				"me-south-1": endpoint{},
-				"sa-east-1": endpoint{},
-				"us-east-1": endpoint{},
-				"us-east-1-fips": endpoint{
-					Hostname: "codedeploy-fips.us-east-1.amazonaws.com",
+		"appmesh": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "af-south-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "appmesh.af-south-1.api.aws",
+				},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "appmesh.ap-east-1.api.aws",
+				},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "appmesh.ap-northeast-1.api.aws",
+				},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "appmesh.ap-northeast-2.api.aws",
+				},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "appmesh.ap-northeast-3.api.aws",
+				},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "appmesh.ap-south-1.api.aws",
+				},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "appmesh.ap-southeast-1.api.aws",
+				},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "appmesh.ap-southeast-2.api.aws",
+				},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "appmesh.ap-southeast-3.api.aws",
+				},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "appmesh.ca-central-1.api.aws",
+				},
+				endpointKey{
+					Region: "ca-central-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "appmesh-fips.ca-central-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ca-central-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname: "appmesh-fips.ca-central-1.api.aws",
+				},
+				endpointKey{
+					Region: "ca-central-1-fips",
+				}: endpoint{
+					Hostname: "appmesh-fips.ca-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-central-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "appmesh.eu-central-1.api.aws",
+				},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "appmesh.eu-north-1.api.aws",
+				},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "appmesh.eu-south-1.api.aws",
+				},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "appmesh.eu-west-1.api.aws",
+				},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "appmesh.eu-west-2.api.aws",
+				},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "appmesh.eu-west-3.api.aws",
+				},
+				endpointKey{
+					Region: "il-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "il-central-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "appmesh.il-central-1.api.aws",
+				},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-south-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "appmesh.me-south-1.api.aws",
+				},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "appmesh.sa-east-1.api.aws",
+				},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "appmesh.us-east-1.api.aws",
+				},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "appmesh-fips.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname: "appmesh-fips.us-east-1.api.aws",
+				},
+				endpointKey{
+					Region: "us-east-1-fips",
+				}: endpoint{
+					Hostname: "appmesh-fips.us-east-1.amazonaws.com",
 					CredentialScope: credentialScope{
 						Region: "us-east-1",
 					},
-				},
-				"us-east-2": endpoint{},
-				"us-east-2-fips": endpoint{
-					Hostname: "codedeploy-fips.us-east-2.amazonaws.com",
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "appmesh.us-east-2.api.aws",
+				},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "appmesh-fips.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname: "appmesh-fips.us-east-2.api.aws",
+				},
+				endpointKey{
+					Region: "us-east-2-fips",
+				}: endpoint{
+					Hostname: "appmesh-fips.us-east-2.amazonaws.com",
 					CredentialScope: credentialScope{
 						Region: "us-east-2",
 					},
-				},
-				"us-west-1": endpoint{},
-				"us-west-1-fips": endpoint{
-					Hostname: "codedeploy-fips.us-west-1.amazonaws.com",
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "appmesh.us-west-1.api.aws",
+				},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "appmesh-fips.us-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname: "appmesh-fips.us-west-1.api.aws",
+				},
+				endpointKey{
+					Region: "us-west-1-fips",
+				}: endpoint{
"appmesh-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "codedeploy-fips.us-west-2.amazonaws.com", + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appmesh-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "appmesh-fips.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "appmesh-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, }, }, }, - "codeguru-reviewer": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "codepipeline": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "codepipeline-fips.ca-central-1.amazonaws.com", + "apprunner": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "apprunner-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ca-central-1", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-1": endpoint{ - Hostname: "codepipeline-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "apprunner-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "codepipeline-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "apprunner-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apprunner-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apprunner-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + 
Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apprunner-fips.us-west-2.amazonaws.com", }, - "fips-us-west-1": endpoint{ - Hostname: "codepipeline-fips.us-west-1.amazonaws.com", + }, + }, + "appstream2": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, CredentialScope: credentialScope{ - Region: "us-west-1", + Service: "appstream", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "appstream2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appstream2-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "appstream2-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appstream2-fips.us-west-2.amazonaws.com", }, - "fips-us-west-2": endpoint{ - Hostname: "codepipeline-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "appstream2-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "codestar": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "appsync": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + 
Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, - "codestar-connections": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "aps": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "arc-zonal-shift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + 
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "il-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
 			},
 		},
-		"cognito-identity": service{
-
-			Endpoints: endpoints{
-				"ap-northeast-1": endpoint{},
-				"ap-northeast-2": endpoint{},
-				"ap-south-1": endpoint{},
-				"ap-southeast-1": endpoint{},
-				"ap-southeast-2": endpoint{},
-				"ca-central-1": endpoint{},
-				"eu-central-1": endpoint{},
-				"eu-north-1": endpoint{},
-				"eu-west-1": endpoint{},
-				"eu-west-2": endpoint{},
-				"eu-west-3": endpoint{},
-				"fips-us-east-1": endpoint{
-					Hostname: "cognito-identity-fips.us-east-1.amazonaws.com",
+		"athena": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "af-south-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "athena.af-south-1.api.aws",
+				},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "athena.ap-east-1.api.aws",
+				},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "athena.ap-northeast-1.api.aws",
+				},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "athena.ap-northeast-2.api.aws",
+				},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "athena.ap-northeast-3.api.aws",
+				},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "athena.ap-south-1.api.aws",
+				},
+				endpointKey{
+					Region: "ap-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-2",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "athena.ap-south-2.api.aws",
+				},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "athena.ap-southeast-1.api.aws",
+				},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "athena.ap-southeast-2.api.aws",
+				},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "athena.ap-southeast-3.api.aws",
+				},
+				endpointKey{
+					Region: "ap-southeast-4",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-4",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "athena.ap-southeast-4.api.aws",
+				},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "athena.ca-central-1.api.aws",
+				},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "athena.eu-central-1.api.aws",
+				},
+				endpointKey{
+					Region: "eu-central-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-2",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "athena.eu-central-2.api.aws",
+				},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "athena.eu-north-1.api.aws",
+				},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "athena.eu-south-1.api.aws",
+				},
+				endpointKey{
+					Region: "eu-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-2",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "athena.eu-south-2.api.aws",
+				},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "athena.eu-west-1.api.aws",
+				},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "athena.eu-west-2.api.aws",
+				},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "athena.eu-west-3.api.aws",
+				},
+				endpointKey{
+					Region: "fips-us-east-1",
+				}: endpoint{
+					Hostname: "athena-fips.us-east-1.amazonaws.com",
 					CredentialScope: credentialScope{
 						Region: "us-east-1",
 					},
+					Deprecated: boxedTrue,
 				},
-				"fips-us-east-2": endpoint{
-					Hostname: "cognito-identity-fips.us-east-2.amazonaws.com",
+				endpointKey{
+					Region: "fips-us-east-2",
+				}: endpoint{
+					Hostname: "athena-fips.us-east-2.amazonaws.com",
 					CredentialScope: credentialScope{
 						Region: "us-east-2",
 					},
+					Deprecated: boxedTrue,
 				},
-				"fips-us-west-2": endpoint{
-					Hostname: "cognito-identity-fips.us-west-2.amazonaws.com",
+				endpointKey{
+					Region: "fips-us-west-1",
+				}: endpoint{
+					Hostname: "athena-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-2",
+				}: endpoint{
+					Hostname: "athena-fips.us-west-2.amazonaws.com",
 					CredentialScope: credentialScope{
 						Region: "us-west-2",
 					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "il-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "il-central-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "athena.il-central-1.api.aws",
+				},
+				endpointKey{
+					Region: "me-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-central-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "athena.me-central-1.api.aws",
+				},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-south-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "athena.me-south-1.api.aws",
+				},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "athena.sa-east-1.api.aws",
+				},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "athena.us-east-1.api.aws",
+				},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "athena-fips.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname: "athena-fips.us-east-1.api.aws",
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
"us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "athena-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "athena-fips.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "athena-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "athena-fips.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "athena-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "athena-fips.us-west-2.api.aws", + }, + }, + }, + "auditmanager": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, - "cognito-idp": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "cognito-idp-fips.us-east-1.amazonaws.com", + "autoscaling": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + 
+				endpointKey{
+					Region: "ca-central-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "autoscaling-fips.ca-central-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "autoscaling-fips.ca-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "fips-ca-central-1",
+				}: endpoint{
+					Hostname: "autoscaling-fips.ca-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-central-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-ca-west-1",
+				}: endpoint{
+					Hostname: "autoscaling-fips.ca-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-1",
+				}: endpoint{
+					Hostname: "autoscaling-fips.us-east-1.amazonaws.com",
 					CredentialScope: credentialScope{
 						Region: "us-east-1",
 					},
+					Deprecated: boxedTrue,
 				},
-				"fips-us-east-2": endpoint{
-					Hostname: "cognito-idp-fips.us-east-2.amazonaws.com",
+				endpointKey{
+					Region: "fips-us-east-2",
+				}: endpoint{
+					Hostname: "autoscaling-fips.us-east-2.amazonaws.com",
 					CredentialScope: credentialScope{
 						Region: "us-east-2",
 					},
+					Deprecated: boxedTrue,
 				},
-				"fips-us-west-1": endpoint{
-					Hostname: "cognito-idp-fips.us-west-1.amazonaws.com",
+				endpointKey{
+					Region: "fips-us-west-1",
+				}: endpoint{
+					Hostname: "autoscaling-fips.us-west-1.amazonaws.com",
 					CredentialScope: credentialScope{
 						Region: "us-west-1",
 					},
+					Deprecated: boxedTrue,
 				},
-				"fips-us-west-2": endpoint{
-					Hostname: "cognito-idp-fips.us-west-2.amazonaws.com",
+				endpointKey{
+					Region: "fips-us-west-2",
+				}: endpoint{
+					Hostname: "autoscaling-fips.us-west-2.amazonaws.com",
 					CredentialScope: credentialScope{
 						Region: "us-west-2",
 					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "il-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "autoscaling-fips.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "autoscaling-fips.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "autoscaling-fips.us-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "autoscaling-fips.us-west-2.amazonaws.com",
 				},
-				"me-south-1": endpoint{},
-				"sa-east-1": endpoint{},
-				"us-east-1": endpoint{},
-				"us-east-2": endpoint{},
-				"us-west-1": endpoint{},
-				"us-west-2": endpoint{},
 			},
 		},
-		"cognito-sync": service{
-
-			Endpoints: endpoints{
-				"ap-northeast-1": endpoint{},
-				"ap-northeast-2": endpoint{},
"ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "autoscaling-plans": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, - "comprehend": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "comprehend-fips.us-east-1.amazonaws.com", + "backup": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: 
"sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "backup-gateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "backupstorage": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "batch": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.batch.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: 
"ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "fips.batch.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "comprehend-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "fips.batch.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "comprehend-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "fips.batch.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "fips.batch.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.batch.us-east-1.amazonaws.com", }, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "comprehendmedical": service{ - - Endpoints: endpoints{ - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "comprehendmedical-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.batch.us-east-2.amazonaws.com", }, - "fips-us-east-2": endpoint{ - Hostname: "comprehendmedical-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.batch.us-west-1.amazonaws.com", }, - "fips-us-west-2": endpoint{ - Hostname: "comprehendmedical-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + 
Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.batch.us-west-2.amazonaws.com", }, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, }, }, - "config": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "config-fips.us-east-1.amazonaws.com", + "bedrock": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "bedrock-ap-northeast-1", + }: endpoint{ + Hostname: "bedrock.ap-northeast-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "ap-northeast-1", }, }, - "fips-us-east-2": endpoint{ - Hostname: "config-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "bedrock-ap-southeast-1", + }: endpoint{ + Hostname: "bedrock.ap-southeast-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "ap-southeast-1", }, }, - "fips-us-west-1": endpoint{ - Hostname: "config-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "bedrock-eu-central-1", + }: endpoint{ + Hostname: "bedrock.eu-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "eu-central-1", }, }, - "fips-us-west-2": endpoint{ - Hostname: "config-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "bedrock-fips-us-east-1", + }: endpoint{ + Hostname: "bedrock-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "us-east-1", }, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "connect": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "contact-lens": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "cur": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "data.jobs.iot": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "data.jobs.iot-fips.ca-central-1.amazonaws.com", + endpointKey{ + Region: "bedrock-fips-us-west-2", + }: endpoint{ + Hostname: "bedrock-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "ca-central-1", + Region: "us-west-2", }, }, - "fips-us-east-1": 
endpoint{ - Hostname: "data.jobs.iot-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "bedrock-runtime-ap-northeast-1", + }: endpoint{ + Hostname: "bedrock-runtime.ap-northeast-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "ap-northeast-1", }, }, - "fips-us-east-2": endpoint{ - Hostname: "data.jobs.iot-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "bedrock-runtime-ap-southeast-1", + }: endpoint{ + Hostname: "bedrock-runtime.ap-southeast-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "ap-southeast-1", }, }, - "fips-us-west-1": endpoint{ - Hostname: "data.jobs.iot-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "bedrock-runtime-eu-central-1", + }: endpoint{ + Hostname: "bedrock-runtime.eu-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "eu-central-1", }, }, - "fips-us-west-2": endpoint{ - Hostname: "data.jobs.iot-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "bedrock-runtime-fips-us-east-1", + }: endpoint{ + Hostname: "bedrock-runtime-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "us-east-1", }, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "data.mediastore": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "dataexchange": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "datapipeline": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "datasync": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "datasync-fips.ca-central-1.amazonaws.com", + endpointKey{ + Region: "bedrock-runtime-fips-us-west-2", + }: endpoint{ + Hostname: "bedrock-runtime-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "ca-central-1", + Region: "us-west-2", }, }, - "fips-us-east-1": endpoint{ - Hostname: "datasync-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "bedrock-runtime-us-east-1", + }: endpoint{ + Hostname: "bedrock-runtime.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, }, - "fips-us-east-2": endpoint{ - Hostname: "datasync-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "bedrock-runtime-us-west-2", + }: endpoint{ + Hostname: 
"bedrock-runtime.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "us-west-2", }, }, - "fips-us-west-1": endpoint{ - Hostname: "datasync-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "bedrock-us-east-1", + }: endpoint{ + Hostname: "bedrock.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "us-east-1", }, }, - "fips-us-west-2": endpoint{ - Hostname: "datasync-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "bedrock-us-west-2", + }: endpoint{ + Hostname: "bedrock.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, - "dax": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "billingconductor": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "billingconductor.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, }, }, - "devicefarm": service{ - - Endpoints: endpoints{ - "us-west-2": endpoint{}, + "braket": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, - "directconnect": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "directconnect-fips.us-east-1.amazonaws.com", + "budgets": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "budgets.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, }, - "fips-us-east-2": endpoint{ - Hostname: "directconnect-fips.us-east-2.amazonaws.com", + }, + }, + "cases": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + + Deprecated: boxedTrue, + }, + 
endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{}, + }, + }, + "cassandra": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "cassandra-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "directconnect-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "cassandra-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "us-west-2", }, - }, - "fips-us-west-2": endpoint{ - Hostname: "directconnect-fips.us-west-2.amazonaws.com", + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cassandra-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cassandra-fips.us-west-2.amazonaws.com", + }, + }, + }, + "catalog.marketplace": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "ce": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "ce.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "us-east-1", }, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "discovery": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "chime": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, }, - }, - "dms": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - 
"ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "dms-fips": endpoint{ - Hostname: "dms-fips.us-west-1.amazonaws.com", + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "chime.us-east-1.amazonaws.com", + Protocols: []string{"https"}, CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "us-east-1", }, }, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "docdb": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{ - Hostname: "rds.ap-northeast-1.amazonaws.com", + "cleanrooms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "cloud9": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "cloudcontrolapi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + 
endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "cloudcontrolapi-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-northeast-1", + Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - "ap-northeast-2": endpoint{ - Hostname: "rds.ap-northeast-2.amazonaws.com", + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "cloudcontrolapi-fips.ca-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-northeast-2", + Region: "ca-west-1", }, + Deprecated: boxedTrue, }, - "ap-south-1": endpoint{ - Hostname: "rds.ap-south-1.amazonaws.com", + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-south-1", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "ap-southeast-1": endpoint{ - Hostname: "rds.ap-southeast-1.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-southeast-1", + Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "ap-southeast-2": endpoint{ - Hostname: "rds.ap-southeast-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-southeast-2", + Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "ca-central-1": endpoint{ - Hostname: "rds.ca-central-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "ca-central-1", + Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + 
Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-west-2.amazonaws.com", }, - "eu-central-1": endpoint{ - Hostname: "rds.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, + }, + }, + "clouddirectory": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "cloudformation": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudformation-fips.us-east-1.amazonaws.com", }, - "eu-west-1": endpoint{ - Hostname: "rds.eu-west-1.amazonaws.com", + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "cloudformation-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-west-1", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "eu-west-2": endpoint{ - Hostname: "rds.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudformation-fips.us-east-2.amazonaws.com", }, - "eu-west-3": endpoint{ - Hostname: "rds.eu-west-3.amazonaws.com", + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "cloudformation-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-west-3", + Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "sa-east-1": endpoint{ - Hostname: "rds.sa-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: 
"sa-east-1", - }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudformation-fips.us-west-1.amazonaws.com", }, - "us-east-1": endpoint{ - Hostname: "rds.us-east-1.amazonaws.com", + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "cloudformation-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "us-east-2": endpoint{ - Hostname: "rds.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudformation-fips.us-west-2.amazonaws.com", }, - "us-west-2": endpoint{ - Hostname: "rds.us-west-2.amazonaws.com", + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "cloudformation-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, }, }, }, - "ds": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "ds-fips.ca-central-1.amazonaws.com", + "cloudfront": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "cloudfront.amazonaws.com", + Protocols: []string{"http", "https"}, CredentialScope: credentialScope{ - Region: "ca-central-1", + Region: "us-east-1", }, }, - "fips-us-east-1": endpoint{ - Hostname: "ds-fips.us-east-1.amazonaws.com", + }, + }, + "cloudhsm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "cloudhsmv2": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "cloudhsm", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: 
endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "cloudsearch": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "cloudtrail": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "cloudtrail-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "ds-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "cloudtrail-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "ds-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "cloudtrail-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "ds-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "cloudtrail-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: 
endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudtrail-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudtrail-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudtrail-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudtrail-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "ca-central-1-fips": endpoint{ - Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, + "cloudtrail-data": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "codeartifact": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + 
}, + }, + "codebuild": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codebuild-fips.us-east-1.amazonaws.com", }, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "local": endpoint{ - Hostname: "localhost:8000", - Protocols: []string{"http"}, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "codebuild-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "dynamodb-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codebuild-fips.us-east-2.amazonaws.com", }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "dynamodb-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "codebuild-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "dynamodb-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codebuild-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "codebuild-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "dynamodb-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"codebuild-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "codebuild-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, }, }, }, - "ebs": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "ebs-fips.ca-central-1.amazonaws.com", + "codecatalyst": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "codecatalyst.global.api.aws", + }, + }, + }, + "codecommit": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codecommit-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "codecommit-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, - }, - "fips-us-east-1": endpoint{ - Hostname: "ebs-fips.us-east-1.amazonaws.com", + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "codecommit-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codecommit-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "codecommit-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "ebs-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + 
Hostname: "codecommit-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "codecommit-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "ebs-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codecommit-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "codecommit-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "ebs-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codecommit-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "codecommit-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "ec2": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "ec2-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, + "codedeploy": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + 
endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codedeploy-fips.us-east-1.amazonaws.com", }, - "fips-us-east-1": endpoint{ - Hostname: "ec2-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "codedeploy-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "ec2-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codedeploy-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "codedeploy-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "ec2-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codedeploy-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "codedeploy-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "ec2-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codedeploy-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "codedeploy-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "ecs": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "ecs-fips.us-east-1.amazonaws.com", + "codeguru-reviewer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "codepipeline": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + 
Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codepipeline-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "codepipeline-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "codepipeline-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "ecs-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "codepipeline-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "ecs-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "codepipeline-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "ecs-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "codepipeline-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codepipeline-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codepipeline-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codepipeline-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codepipeline-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "eks": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, 
- }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "fips.eks.us-east-1.amazonaws.com", + "codestar": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "codestar-connections": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "codestar-notifications": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "cognito-identity": service{ + Endpoints: serviceEndpoints{ + 
endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "cognito-identity-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "fips.eks.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "cognito-identity-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "fips.eks.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "cognito-identity-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "fips.eks.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "cognito-identity-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-identity-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-identity-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-identity-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-identity-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "elasticache": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips": 
endpoint{ - Hostname: "elasticache-fips.us-west-1.amazonaws.com", + "cognito-idp": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "cognito-idp-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "cognito-idp-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "cognito-idp-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "cognito-idp-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-idp-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-idp-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-idp-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-idp-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "elasticbeanstalk": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "elasticbeanstalk-fips.us-east-1.amazonaws.com", + "cognito-sync": service{ + Endpoints: 
serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "comprehend": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "comprehend-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "elasticbeanstalk-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "comprehend-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "elasticbeanstalk-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "comprehend-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "us-west-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "elasticbeanstalk-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "comprehend-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "comprehend-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "comprehend-fips.us-west-2.amazonaws.com", + }, + }, + }, + "comprehendmedical": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "comprehendmedical-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "comprehendmedical-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + 
Region: "fips-us-west-2", + }: endpoint{ + Hostname: "comprehendmedical-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "comprehendmedical-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "comprehendmedical-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "comprehendmedical-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "elasticfilesystem": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-af-south-1": endpoint{ - Hostname: "elasticfilesystem-fips.af-south-1.amazonaws.com", + "compute-optimizer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "compute-optimizer.af-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "af-south-1", }, }, - "fips-ap-east-1": endpoint{ - Hostname: "elasticfilesystem-fips.ap-east-1.amazonaws.com", + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "compute-optimizer.ap-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-east-1", }, }, - "fips-ap-northeast-1": endpoint{ - Hostname: "elasticfilesystem-fips.ap-northeast-1.amazonaws.com", + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "compute-optimizer.ap-northeast-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-northeast-1", }, }, - "fips-ap-northeast-2": endpoint{ - Hostname: "elasticfilesystem-fips.ap-northeast-2.amazonaws.com", + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "compute-optimizer.ap-northeast-2.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-northeast-2", }, }, - "fips-ap-northeast-3": endpoint{ - Hostname: "elasticfilesystem-fips.ap-northeast-3.amazonaws.com", + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "compute-optimizer.ap-northeast-3.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-northeast-3", }, }, - "fips-ap-south-1": endpoint{ - Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com", + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "compute-optimizer.ap-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-south-1", }, }, - "fips-ap-southeast-1": endpoint{ - Hostname: "elasticfilesystem-fips.ap-southeast-1.amazonaws.com", + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "compute-optimizer.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: 
"compute-optimizer.ap-southeast-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-southeast-1", }, }, - "fips-ap-southeast-2": endpoint{ - Hostname: "elasticfilesystem-fips.ap-southeast-2.amazonaws.com", + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "compute-optimizer.ap-southeast-2.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-southeast-2", }, }, - "fips-ca-central-1": endpoint{ - Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com", + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "compute-optimizer.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "compute-optimizer.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "compute-optimizer.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, }, - "fips-eu-central-1": endpoint{ - Hostname: "elasticfilesystem-fips.eu-central-1.amazonaws.com", + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "compute-optimizer.eu-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-central-1", }, }, - "fips-eu-north-1": endpoint{ - Hostname: "elasticfilesystem-fips.eu-north-1.amazonaws.com", + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "compute-optimizer.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "compute-optimizer.eu-north-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-north-1", }, }, - "fips-eu-south-1": endpoint{ - Hostname: "elasticfilesystem-fips.eu-south-1.amazonaws.com", + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "compute-optimizer.eu-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-south-1", }, }, - "fips-eu-west-1": endpoint{ - Hostname: "elasticfilesystem-fips.eu-west-1.amazonaws.com", + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "compute-optimizer.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "compute-optimizer.eu-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-1", }, }, - "fips-eu-west-2": endpoint{ - Hostname: "elasticfilesystem-fips.eu-west-2.amazonaws.com", + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "compute-optimizer.eu-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-2", }, }, - "fips-eu-west-3": endpoint{ - Hostname: "elasticfilesystem-fips.eu-west-3.amazonaws.com", + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "compute-optimizer.eu-west-3.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-3", }, }, - "fips-me-south-1": endpoint{ - Hostname: "elasticfilesystem-fips.me-south-1.amazonaws.com", + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "compute-optimizer.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "compute-optimizer.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, + endpointKey{ + Region: "me-south-1", + 
}: endpoint{ + Hostname: "compute-optimizer.me-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "me-south-1", }, }, - "fips-sa-east-1": endpoint{ - Hostname: "elasticfilesystem-fips.sa-east-1.amazonaws.com", + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "compute-optimizer.sa-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "sa-east-1", }, }, - "fips-us-east-1": endpoint{ - Hostname: "elasticfilesystem-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "compute-optimizer.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, }, - "fips-us-east-2": endpoint{ - Hostname: "elasticfilesystem-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "compute-optimizer.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, }, - "fips-us-west-1": endpoint{ - Hostname: "elasticfilesystem-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "compute-optimizer.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, }, - "fips-us-west-2": endpoint{ - Hostname: "elasticfilesystem-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "compute-optimizer.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "elasticloadbalancing": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "elasticloadbalancing-fips.us-east-1.amazonaws.com", + "config": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: 
"fips-us-east-1", + }: endpoint{ + Hostname: "config-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "elasticloadbalancing-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "config-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "elasticloadbalancing-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "config-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "elasticloadbalancing-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "config-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "config-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "config-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "config-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "config-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "elasticmapreduce": service{ - Defaults: endpoint{ - SSLCommonName: "{region}.{service}.{dnsSuffix}", - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{ - SSLCommonName: "{service}.{region}.{dnsSuffix}", - }, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "elasticmapreduce-fips.ca-central-1.amazonaws.com", + "connect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: 
"connect-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ca-central-1", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-1": endpoint{ - Hostname: "elasticmapreduce-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "connect-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "us-west-2", }, - }, - "fips-us-east-2": endpoint{ - Hostname: "elasticmapreduce-fips.us-east-2.amazonaws.com", + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "connect-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "connect-fips.us-west-2.amazonaws.com", + }, + }, + }, + "connect-campaigns": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "connect-campaigns-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "elasticmapreduce-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "connect-campaigns-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "us-west-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "elasticmapreduce-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "connect-campaigns-fips.us-east-1.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - SSLCommonName: "{service}.{region}.{dnsSuffix}", + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "connect-campaigns-fips.us-west-2.amazonaws.com", }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "elastictranscoder": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "email": service{ - - Endpoints: endpoints{ - "ap-south-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "emr-containers": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: 
"emr-containers-fips.ca-central-1.amazonaws.com", + "contact-lens": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "controltower": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "controltower-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "controltower-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, - }, - "fips-us-east-1": endpoint{ - Hostname: "emr-containers-fips.us-east-1.amazonaws.com", + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "controltower-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "controltower-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "emr-containers-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "controltower-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "controltower-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "emr-containers-fips.us-west-1.amazonaws.com", + 
endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "controltower-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "controltower-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "emr-containers-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "controltower-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "controltower-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "entitlement.marketplace": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "aws-marketplace", + "cost-optimization-hub": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "cost-optimization-hub.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, }, }, - Endpoints: endpoints{ - "us-east-1": endpoint{}, + }, + "cur": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, }, }, - "es": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips": endpoint{ - Hostname: "es-fips.us-west-1.amazonaws.com", + "data-ats.iot": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, CredentialScope: credentialScope{ - Region: "us-west-1", - }, + Service: "iotdata", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iot-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "data.iot-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Service: "iotdata", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "data.iot-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Service: 
"iotdata", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "data.iot-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Service: "iotdata", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "data.iot-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Service: "iotdata", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "data.iot-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Service: "iotdata", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iot-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iot-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iot-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iot-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "events": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "events-fips.us-east-1.amazonaws.com", + "data.jobs.iot": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.jobs.iot-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "data.jobs.iot-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-east-1.amazonaws.com", 
CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "events-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "events-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "events-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "finspace": service{ - - Endpoints: endpoints{ - "ca-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "finspace-api": service{ - - Endpoints: endpoints{ - "ca-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, }, }, - "firehose": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "firehose-fips.us-east-1.amazonaws.com", + "data.mediastore": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + 
"databrew": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "databrew-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "firehose-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "databrew-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "firehose-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "databrew-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "firehose-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "databrew-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "databrew-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "databrew-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "databrew-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "databrew-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "fms": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-af-south-1": endpoint{ - Hostname: "fms-fips.af-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "af-south-1", 
- }, + "dataexchange": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "datapipeline": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "datasync": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.ca-central-1.amazonaws.com", }, - "fips-ap-east-1": endpoint{ - Hostname: "fms-fips.ap-east-1.amazonaws.com", + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "datasync-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-east-1", + Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - "fips-ap-northeast-1": endpoint{ - Hostname: "fms-fips.ap-northeast-1.amazonaws.com", + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "datasync-fips.ca-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-northeast-1", + Region: "ca-west-1", }, + Deprecated: boxedTrue, }, - "fips-ap-northeast-2": endpoint{ - Hostname: "fms-fips.ap-northeast-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "datasync-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-northeast-2", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-ap-south-1": 
endpoint{ - Hostname: "fms-fips.ap-south-1.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "datasync-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-south-1", + Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-ap-southeast-1": endpoint{ - Hostname: "fms-fips.ap-southeast-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "datasync-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-southeast-1", + Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-ap-southeast-2": endpoint{ - Hostname: "fms-fips.ap-southeast-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "datasync-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-southeast-2", + Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.us-east-1.amazonaws.com", }, - "fips-ca-central-1": endpoint{ - Hostname: "fms-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.us-east-2.amazonaws.com", }, - "fips-eu-central-1": endpoint{ - Hostname: "fms-fips.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.us-west-1.amazonaws.com", }, - "fips-eu-south-1": endpoint{ - Hostname: "fms-fips.eu-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-1", - }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.us-west-2.amazonaws.com", }, - "fips-eu-west-1": endpoint{ - Hostname: "fms-fips.eu-west-1.amazonaws.com", + }, + }, + "datazone": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "datazone.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "datazone.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "datazone.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "datazone.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "datazone.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "datazone.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "datazone.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "datazone.ap-southeast-1.api.aws", + }, + 
endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "datazone.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "datazone.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "datazone.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "datazone.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datazone-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "datazone.ca-west-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "datazone.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "datazone.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "datazone.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "datazone.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "datazone.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "datazone.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "datazone.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "datazone.eu-west-3.api.aws", + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "datazone.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "datazone.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "datazone.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "datazone.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "datazone.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datazone-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "datazone.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datazone-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "datazone.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "datazone.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datazone-fips.us-west-2.amazonaws.com", + }, + }, + }, + "dax": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "devicefarm": service{ + Endpoints: 
serviceEndpoints{ + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "devops-guru": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "devops-guru-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "devops-guru-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-west-1", + Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - "fips-eu-west-2": endpoint{ - Hostname: "fms-fips.eu-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "devops-guru-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-west-2", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-eu-west-3": endpoint{ - Hostname: "fms-fips.eu-west-3.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "devops-guru-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-west-3", + Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-me-south-1": endpoint{ - Hostname: "fms-fips.me-south-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "devops-guru-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "me-south-1", + Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-sa-east-1": endpoint{ - Hostname: "fms-fips.sa-east-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "devops-guru-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "sa-east-1", + Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "devops-guru-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "devops-guru-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "devops-guru-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "devops-guru-fips.us-west-2.amazonaws.com", }, - "fips-us-east-1": endpoint{ - Hostname: "fms-fips.us-east-1.amazonaws.com", + }, + }, + "directconnect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + 
endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "directconnect-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "fms-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "directconnect-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "fms-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "directconnect-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "fms-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "directconnect-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "forecast": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - 
"eu-west-1": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "forecast-fips.us-east-1.amazonaws.com", + "discovery": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "dlm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "dms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "dms", + }: endpoint{ CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "forecast-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "dms", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "us-west-1", }, + Deprecated: boxedTrue, 
}, - "fips-us-west-2": endpoint{ - Hostname: "forecast-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "dms-fips", + }: endpoint{ + Hostname: "dms-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "us-west-1", }, - }, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "forecastquery": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "forecastquery-fips.us-east-1.amazonaws.com", + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "dms-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "forecastquery-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "dms-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "forecastquery-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "dms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "dms-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, }, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "frauddetector": service{ - - Endpoints: endpoints{ - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, }, }, - "fsx": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - 
"ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-prod-ca-central-1": endpoint{ - Hostname: "fsx-fips.ca-central-1.amazonaws.com", + "docdb": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "rds.ap-northeast-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ca-central-1", + Region: "ap-northeast-1", }, }, - "fips-prod-us-east-1": endpoint{ - Hostname: "fsx-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "rds.ap-northeast-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "ap-northeast-2", }, }, - "fips-prod-us-east-2": endpoint{ - Hostname: "fsx-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "rds.ap-south-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "ap-south-1", }, }, - "fips-prod-us-west-1": endpoint{ - Hostname: "fsx-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "rds.ap-southeast-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "ap-southeast-1", }, }, - "fips-prod-us-west-2": endpoint{ - Hostname: "fsx-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "rds.ap-southeast-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "ap-southeast-2", }, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "gamelift": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "glacier": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "glacier-fips.ca-central-1.amazonaws.com", + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "rds.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, }, - "fips-us-east-1": endpoint{ - Hostname: "glacier-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: 
"eu-central-1", + }: endpoint{ + Hostname: "rds.eu-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "eu-central-1", }, }, - "fips-us-east-2": endpoint{ - Hostname: "glacier-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "rds.eu-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "eu-west-1", }, }, - "fips-us-west-1": endpoint{ - Hostname: "glacier-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "rds.eu-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "eu-west-2", }, }, - "fips-us-west-2": endpoint{ - Hostname: "glacier-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "rds.eu-west-3.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "eu-west-3", }, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "glue": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "glue-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "rds.sa-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "sa-east-1", }, }, - "fips-us-east-2": endpoint{ - Hostname: "glue-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "rds.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "us-east-1", }, }, - "fips-us-west-1": endpoint{ - Hostname: "glue-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "rds.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "us-east-2", }, }, - "fips-us-west-2": endpoint{ - Hostname: "glue-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "rds.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "greengrass": service{ - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, }, }, - "groundstation": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: 
"groundstation-fips.us-east-1.amazonaws.com", + "drs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "drs-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "groundstation-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "drs-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "groundstation-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "drs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "drs-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "drs-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "drs-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "drs-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "drs-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, }, }, - "guardduty": service{ - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": 
endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "guardduty-fips.us-east-1.amazonaws.com", + "ds": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ds-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ds-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "ds-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "ds-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ds-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "guardduty-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "ds-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "guardduty-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "ds-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "guardduty-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "ds-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + 
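// ---- editor's note: illustrative sketch, not part of the vendored diff ----
// The docdb entries earlier in this hunk show a second mechanism: a service
// can ride on another service's hostnames (here rds.<region>.amazonaws.com)
// while CredentialScope pins the signing region. Resolving makes that
// visible; field names below are the real aws-sdk-go v1 ResolvedEndpoint
// fields:
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	ep, err := endpoints.DefaultResolver().EndpointFor("docdb", "eu-west-1")
	if err != nil {
		panic(err)
	}
	// expected: https://rds.eu-west-1.amazonaws.com signed for eu-west-1
	fmt.Println(ep.URL, ep.SigningRegion)
}
// ---- end editor's note; generated diff continues below ----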
endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ds-fips.us-east-1.amazonaws.com", }, - }, - }, - "health": service{ - - Endpoints: endpoints{ - "fips-us-east-2": endpoint{ - Hostname: "health-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ds-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ds-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ds-fips.us-west-2.amazonaws.com", }, }, }, - "healthlake": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "honeycode": service{ - - Endpoints: endpoints{ - "us-west-2": endpoint{}, + "dynamodb": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, }, - }, - "iam": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "iam.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", }, - "iam-fips": endpoint{ - Hostname: "iam-fips.amazonaws.com", + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "ca-central-1", }, - }, - }, - }, - "identity-chime": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "identity-chime-fips.us-east-1.amazonaws.com", + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dynamodb-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "dynamodb-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + 
endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "local", + }: endpoint{ + Hostname: "localhost:8000", + Protocols: []string{"http"}, CredentialScope: credentialScope{ Region: "us-east-1", }, }, - }, - }, - "identitystore": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "importexport": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "importexport.amazonaws.com", - SignatureVersions: []string{"v2", "v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - Service: "IngestionService", - }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dynamodb-fips.us-east-1.amazonaws.com", }, - }, - }, - "inspector": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "inspector-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "dynamodb-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "inspector-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dynamodb-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "dynamodb-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "inspector-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dynamodb-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "dynamodb-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "inspector-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dynamodb-fips.us-west-2.amazonaws.com", + }, + 
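// ---- editor's note: illustrative sketch, not part of the vendored diff ----
// Note the special "local" entry in the dynamodb block just above: it maps to
// DynamoDB Local on localhost:8000 over plain HTTP, with credentials scoped
// to us-east-1. It resolves like any other region key:
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	ep, err := endpoints.DefaultResolver().EndpointFor("dynamodb", "local")
	if err != nil {
		panic(err)
	}
	// expected: http://localhost:8000 with SigningRegion us-east-1
	fmt.Println(ep.URL, ep.SigningRegion)
}
// ---- end editor's note; generated diff continues below ----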
endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "dynamodb-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, }, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "iot": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "iot-fips.ca-central-1.amazonaws.com", + "ebs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ebs-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ebs-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "ebs-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ - Service: "execute-api", + Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-1": endpoint{ - Hostname: "iot-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "ebs-fips.ca-west-1.amazonaws.com", CredentialScope: credentialScope{ - Service: "execute-api", + Region: "ca-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "iot-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ebs-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Service: "execute-api", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "iot-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "ebs-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Service: "execute-api", + Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ 
- Hostname: "iot-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "ebs-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ - Service: "execute-api", + Region: "us-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "ebs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ebs-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ebs-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ebs-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ebs-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "iotanalytics": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, }, }, - "iotevents": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "ec2": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, }, - }, - "ioteventsdata": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{ - Hostname: "data.iotevents.ap-northeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-1", - }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "ec2.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"ec2-fips.ca-central-1.amazonaws.com", }, - "ap-northeast-2": endpoint{ - Hostname: "data.iotevents.ap-northeast-2.amazonaws.com", + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "ec2.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "ec2-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-northeast-2", + Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - "ap-southeast-1": endpoint{ - Hostname: "data.iotevents.ap-southeast-1.amazonaws.com", + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ec2-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-southeast-1", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "ap-southeast-2": endpoint{ - Hostname: "data.iotevents.ap-southeast-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "ec2-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-southeast-2", + Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "eu-central-1": endpoint{ - Hostname: "data.iotevents.eu-central-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "ec2-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-central-1", + Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "eu-west-1": endpoint{ - Hostname: "data.iotevents.eu-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "ec2-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-west-1", + Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "ec2.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "ec2.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ec2-fips.us-east-1.amazonaws.com", }, - "eu-west-2": endpoint{ - Hostname: "data.iotevents.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "ec2.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ec2-fips.us-east-2.amazonaws.com", }, - "us-east-1": endpoint{ - Hostname: "data.iotevents.us-east-1.amazonaws.com", + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ec2-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + 
Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "ec2.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ec2-fips.us-west-2.amazonaws.com", + }, + }, + }, + "ecs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ecs-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "us-east-2": endpoint{ - Hostname: "data.iotevents.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "ecs-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "us-west-2": endpoint{ - Hostname: "data.iotevents.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "ecs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "ecs-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecs-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecs-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecs-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecs-fips.us-west-2.amazonaws.com", }, }, }, - "iotsecuredtunneling": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - 
"ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, + "edge.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "eks": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, }, - "fips-us-east-1": endpoint{ - Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com", + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.eks.{region}.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "fips.eks.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "fips.eks.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "fips.eks.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "fips.eks.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + 
Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.eks.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.eks.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.eks.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.eks.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "iotthingsgraph": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "iotthingsgraph", + "eks-auth": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "eks-auth.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "eks-auth.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "eks-auth.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "eks-auth.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "eks-auth.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "eks-auth.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "eks-auth.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "eks-auth.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "eks-auth.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "eks-auth.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "eks-auth.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "eks-auth.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "eks-auth.ca-west-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "eks-auth.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "eks-auth.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "eks-auth.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "eks-auth.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "eks-auth.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "eks-auth.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: 
endpoint{ + Hostname: "eks-auth.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "eks-auth.eu-west-3.api.aws", + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "eks-auth.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "eks-auth.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "eks-auth.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "eks-auth.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "eks-auth.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "eks-auth.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "eks-auth.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "eks-auth.us-west-2.api.aws", }, }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, }, - "iotwireless": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{ - Hostname: "api.iotwireless.ap-northeast-1.amazonaws.com", + "elasticache": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "elasticache-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-northeast-1", + Region: "us-west-1", }, - }, - "ap-southeast-2": endpoint{ - Hostname: "api.iotwireless.ap-southeast-2.amazonaws.com", + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticache-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "elasticache-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-southeast-2", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - 
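// ---- editor's note: illustrative sketch, not part of the vendored diff ----
// The eks-auth block above is one of the newer services whose defaults move
// off amazonaws.com: defaultKey{} sets DNSSuffix "api.aws", the fipsVariant
// default is the template "{service}-fips.{region}.{dnsSuffix}", and each
// region entry pins an explicit eks-auth.<region>.api.aws hostname. A plain
// resolution picks up the api.aws host:
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	ep, err := endpoints.DefaultResolver().EndpointFor("eks-auth", "us-east-1")
	if err != nil {
		panic(err)
	}
	fmt.Println(ep.URL) // expected: https://eks-auth.us-east-1.api.aws
}
// ---- end editor's note; generated diff continues below ----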
"eu-west-1": endpoint{ - Hostname: "api.iotwireless.eu-west-1.amazonaws.com", + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticache-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "elasticache-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-west-1", + Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "us-east-1": endpoint{ - Hostname: "api.iotwireless.us-east-1.amazonaws.com", + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticache-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "elasticache-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "us-west-2": endpoint{ - Hostname: "api.iotwireless.us-west-2.amazonaws.com", + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticache-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "elasticache-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, }, }, }, - "ivs": service{ - - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "kafka": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "kafkaconnect": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "kinesis": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "kinesis-fips.us-east-1.amazonaws.com", + "elasticbeanstalk": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + 
Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "elasticbeanstalk-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "kinesis-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "elasticbeanstalk-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "kinesis-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "elasticbeanstalk-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "kinesis-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "elasticbeanstalk-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticbeanstalk-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticbeanstalk-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticbeanstalk-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticbeanstalk-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "kinesisanalytics": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - 
"kinesisvideo": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "kms": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "lakeformation": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "lakeformation-fips.us-east-1.amazonaws.com", + "elasticfilesystem": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.af-south-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-east-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-northeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-northeast-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-northeast-3.amazonaws.com", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-south-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", 
+ Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-3.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-4.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-central-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-north-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-south-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-south-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-3.amazonaws.com", + }, + endpointKey{ + Region: "fips-af-south-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.af-south-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "af-south-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "lakeformation-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-ap-east-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "ap-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "lakeformation-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-ap-northeast-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-northeast-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "ap-northeast-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "lakeformation-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-ap-northeast-2", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-northeast-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: 
"ap-northeast-2", }, + Deprecated: boxedTrue, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "lambda": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "lambda-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "fips-ap-northeast-3", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-northeast-3.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "ap-northeast-3", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "lambda-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-ap-south-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "ap-south-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "lambda-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-ap-south-2", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-south-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "ap-south-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "lambda-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-ap-southeast-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "ap-southeast-1", }, + Deprecated: boxedTrue, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "license-manager": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "license-manager-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "fips-ap-southeast-2", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "ap-southeast-2", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "license-manager-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-ap-southeast-3", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-3.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "ap-southeast-3", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "license-manager-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-ap-southeast-4", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-4.amazonaws.com", 
CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "ap-southeast-4", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "license-manager-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "lightsail": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "logs": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "logs-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "fips-eu-central-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "eu-central-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "logs-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-eu-central-2", + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-central-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "eu-central-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "logs-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-eu-north-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-north-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "eu-north-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "logs-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-eu-south-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-south-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "eu-south-1", }, + Deprecated: boxedTrue, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "lookoutequipment": service{ - - Endpoints: endpoints{ - "ap-northeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - }, - }, - "lookoutvision": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "machinelearning": service{ - - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - }, - }, - 
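// Reviewer aside: the legacy "fips-<region>" pseudo-regions are kept above
// but now carry Deprecated: boxedTrue (boxedBool being the generator's
// tri-state bool, with boxedBoolUnset as its zero value). A sketch of
// surfacing that flag when the old names are still used, assuming the
// LogDeprecated and Logger options of aws-sdk-go v1's endpoints.Options:
//
//	// imports: fmt, github.com/aws/aws-sdk-go/aws, github.com/aws/aws-sdk-go/aws/endpoints
//	ep, err := endpoints.DefaultResolver().EndpointFor(
//		"elasticfilesystem", "fips-us-east-1",
//		func(o *endpoints.Options) {
//			o.LogDeprecated = true            // report use of Deprecated entries
//			o.Logger = aws.NewDefaultLogger() // where the report is written
//		})
//	if err != nil {
//		panic(err)
//	}
//	fmt.Println(ep.URL) // same FIPS hostname as before, but flagged as deprecated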
"macie": service{ - - Endpoints: endpoints{ - "fips-us-east-1": endpoint{ - Hostname: "macie-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "fips-eu-south-2", + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-south-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "eu-south-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "macie-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-eu-west-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "eu-west-1", }, + Deprecated: boxedTrue, }, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "macie2": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "macie2-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "fips-eu-west-2", + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "eu-west-2", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "macie2-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-eu-west-3", + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-3.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "eu-west-3", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "macie2-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-il-central-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.il-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "il-central-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "macie2-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-me-central-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.me-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "me-central-1", }, + Deprecated: boxedTrue, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "managedblockchain": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - }, - }, - "marketplacecommerceanalytics": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "mediaconnect": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "mediaconvert": 
service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "mediaconvert-fips.ca-central-1.amazonaws.com", + endpointKey{ + Region: "fips-me-south-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.me-south-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ca-central-1", + Region: "me-south-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-1": endpoint{ - Hostname: "mediaconvert-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "fips-sa-east-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "mediaconvert-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "mediaconvert-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "mediaconvert-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.il-central-1.amazonaws.com", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.me-central-1.amazonaws.com", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.me-south-1.amazonaws.com", }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "medialive": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "medialive-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.sa-east-1.amazonaws.com", }, - "fips-us-east-2": 
endpoint{ - Hostname: "medialive-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-east-1.amazonaws.com", }, - "fips-us-west-2": endpoint{ - Hostname: "medialive-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-west-2.amazonaws.com", }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "mediapackage": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "mediapackage-vod": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "mediastore": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "elasticloadbalancing": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, }, - }, - "messaging-chime": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "messaging-chime-fips.us-east-1.amazonaws.com", + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: 
"ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "elasticloadbalancing-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - }, - }, - "metering.marketplace": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "aws-marketplace", - }, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "mgh": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "mobileanalytics": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "models-v2-lex": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "models.lex": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "lex", - }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "models-fips.lex.us-east-1.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "elasticloadbalancing-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "models-fips.lex.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "elasticloadbalancing-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "elasticloadbalancing-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: 
"sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticloadbalancing-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticloadbalancing-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticloadbalancing-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticloadbalancing-fips.us-west-2.amazonaws.com", }, }, }, - "monitoring": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "monitoring-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, + "elasticmapreduce": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SSLCommonName: "{region}.{service}.{dnsSuffix}", + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce-fips.ca-central-1.amazonaws.com", }, - "fips-us-east-2": endpoint{ - Hostname: "monitoring-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + SSLCommonName: "{service}.{region}.{dnsSuffix}", }, - "fips-us-west-1": endpoint{ - Hostname: "monitoring-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ 
+ Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "elasticmapreduce-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "monitoring-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "elasticmapreduce-fips.ca-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "ca-west-1", }, + Deprecated: boxedTrue, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "mq": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "mq-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "elasticmapreduce-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "mq-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "elasticmapreduce-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "mq-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "elasticmapreduce-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "mq-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "elasticmapreduce-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce-fips.us-east-1.amazonaws.com", + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce-fips.us-west-2.amazonaws.com", 
}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "mturk-requester": service{ - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "sandbox": endpoint{ - Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com", - }, - "us-east-1": endpoint{}, + "elastictranscoder": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, - "neptune": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{ - Hostname: "rds.ap-east-1.amazonaws.com", + "email": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "email-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-east-1", + Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - "ap-northeast-1": endpoint{ - Hostname: "rds.ap-northeast-1.amazonaws.com", + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "email-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-northeast-1", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "ap-northeast-2": endpoint{ - Hostname: "rds.ap-northeast-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "email-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-northeast-2", + Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "ap-south-1": endpoint{ - Hostname: "rds.ap-south-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "email-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-south-1", + Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "ap-southeast-1": endpoint{ - Hostname: "rds.ap-southeast-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "email-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-southeast-1", + Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, 
+ endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.us-west-2.amazonaws.com", }, - "ap-southeast-2": endpoint{ - Hostname: "rds.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, + }, + }, + "emr-containers": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-containers-fips.ca-central-1.amazonaws.com", }, - "ca-central-1": endpoint{ - Hostname: "rds.ca-central-1.amazonaws.com", + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "emr-containers-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - "eu-central-1": endpoint{ - Hostname: "rds.eu-central-1.amazonaws.com", + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "emr-containers-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-central-1", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "eu-north-1": endpoint{ - Hostname: "rds.eu-north-1.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "emr-containers-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-north-1", + Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "eu-west-1": endpoint{ - Hostname: "rds.eu-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "emr-containers-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-west-1", + Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "eu-west-2": endpoint{ - Hostname: "rds.eu-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "emr-containers-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: 
"eu-west-2", + Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-containers-fips.us-east-1.amazonaws.com", }, - "eu-west-3": endpoint{ - Hostname: "rds.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-containers-fips.us-east-2.amazonaws.com", }, - "me-south-1": endpoint{ - Hostname: "rds.me-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "me-south-1", - }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-containers-fips.us-west-1.amazonaws.com", }, - "sa-east-1": endpoint{ - Hostname: "rds.sa-east-1.amazonaws.com", + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-containers-fips.us-west-2.amazonaws.com", + }, + }, + }, + "emr-serverless": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-serverless-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "emr-serverless-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "sa-east-1", + Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - "us-east-1": endpoint{ - Hostname: "rds.us-east-1.amazonaws.com", + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "emr-serverless-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "us-east-2": endpoint{ - Hostname: "rds.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "emr-serverless-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "us-west-1": endpoint{ - Hostname: "rds.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "emr-serverless-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "us-west-2": 
endpoint{ - Hostname: "rds.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "emr-serverless-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-serverless-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-serverless-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-serverless-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-serverless-fips.us-west-2.amazonaws.com", }, }, }, - "network-firewall": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "network-firewall-fips.ca-central-1.amazonaws.com", + "entitlement.marketplace": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ CredentialScope: credentialScope{ - Region: "ca-central-1", + Service: "aws-marketplace", }, }, - "fips-us-east-1": endpoint{ - Hostname: "network-firewall-fips.us-east-1.amazonaws.com", + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "es": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-south-2.api.aws", + }, + 
endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ca-west-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-west-3.api.aws", + }, + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "es-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.sa-east-1.api.aws", + }, + endpointKey{ + Region: 
"us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "es-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "es-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "network-firewall-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "es-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "es-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "es-fips.us-west-1.amazonaws.com", }, - "fips-us-west-1": endpoint{ - Hostname: "network-firewall-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "es-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "network-firewall-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "es-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "es-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "oidc": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{ - Hostname: "oidc.ap-northeast-1.amazonaws.com", + "events": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: 
"eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "events-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-northeast-1", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "ap-northeast-2": endpoint{ - Hostname: "oidc.ap-northeast-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "events-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-northeast-2", + Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "ap-south-1": endpoint{ - Hostname: "oidc.ap-south-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "events-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-south-1", + Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "ap-southeast-1": endpoint{ - Hostname: "oidc.ap-southeast-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "events-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-southeast-1", + Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "events-fips.us-east-1.amazonaws.com", }, - "ap-southeast-2": endpoint{ - Hostname: "oidc.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "events-fips.us-east-2.amazonaws.com", }, - "ca-central-1": endpoint{ - Hostname: "oidc.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "events-fips.us-west-1.amazonaws.com", }, - "eu-central-1": endpoint{ - Hostname: "oidc.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "events-fips.us-west-2.amazonaws.com", }, - "eu-north-1": endpoint{ - Hostname: "oidc.eu-north-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-north-1", - }, + }, + }, + "evidently": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "evidently.ap-northeast-1.amazonaws.com", }, - "eu-west-1": endpoint{ - Hostname: "oidc.eu-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "evidently.ap-southeast-1.amazonaws.com", }, - "eu-west-2": endpoint{ - Hostname: "oidc.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: 
"evidently.ap-southeast-2.amazonaws.com", }, - "eu-west-3": endpoint{ - Hostname: "oidc.eu-west-3.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "evidently.eu-central-1.amazonaws.com", }, - "us-east-1": endpoint{ - Hostname: "oidc.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "evidently.eu-north-1.amazonaws.com", }, - "us-east-2": endpoint{ - Hostname: "oidc.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "evidently.eu-west-1.amazonaws.com", }, - "us-west-2": endpoint{ - Hostname: "oidc.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "evidently.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "evidently.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "evidently.us-west-2.amazonaws.com", }, }, }, - "opsworks": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "opsworks-cm": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "finspace": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, - "organizations": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "organizations.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "fips-aws-global": endpoint{ - Hostname: "organizations-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, + "finspace-api": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, - "outposts": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": 
endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "outposts-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "fips-us-east-1": endpoint{ - Hostname: "outposts-fips.us-east-1.amazonaws.com", + "firehose": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "firehose-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "outposts-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "firehose-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "outposts-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "firehose-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "outposts-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "firehose-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "firehose-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "firehose-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + 
}: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "firehose-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "firehose-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "personalize": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "fms": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, }, - }, - "pinpoint": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "mobiletargeting", - }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "pinpoint-fips.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.af-south-1.amazonaws.com", }, - "fips-us-west-2": endpoint{ - Hostname: "pinpoint-fips.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.ap-east-1.amazonaws.com", }, - "us-east-1": endpoint{ - Hostname: "pinpoint.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.ap-northeast-1.amazonaws.com", }, - "us-west-2": endpoint{ - Hostname: "pinpoint.us-west-2.amazonaws.com", + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.ap-northeast-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.ap-south-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.ap-southeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.ap-southeast-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + 
endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.eu-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.eu-south-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.eu-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.eu-west-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.eu-west-3.amazonaws.com", + }, + endpointKey{ + Region: "fips-af-south-1", + }: endpoint{ + Hostname: "fms-fips.af-south-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "af-south-1", }, + Deprecated: boxedTrue, }, - }, - }, - "polly": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "polly-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "fips-ap-east-1", + }: endpoint{ + Hostname: "fms-fips.ap-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "ap-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "polly-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-ap-northeast-1", + }: endpoint{ + Hostname: "fms-fips.ap-northeast-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "ap-northeast-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "polly-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-ap-northeast-2", + }: endpoint{ + Hostname: "fms-fips.ap-northeast-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "ap-northeast-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "polly-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-ap-south-1", + }: endpoint{ + Hostname: "fms-fips.ap-south-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "ap-south-1", }, + Deprecated: boxedTrue, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "portal.sso": service{ - - Endpoints: endpoints{ - "ap-southeast-1": endpoint{ - Hostname: "portal.sso.ap-southeast-1.amazonaws.com", + endpointKey{ + Region: "fips-ap-southeast-1", + }: endpoint{ + 
Hostname: "fms-fips.ap-southeast-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-southeast-1", }, + Deprecated: boxedTrue, }, - "ap-southeast-2": endpoint{ - Hostname: "portal.sso.ap-southeast-2.amazonaws.com", + endpointKey{ + Region: "fips-ap-southeast-2", + }: endpoint{ + Hostname: "fms-fips.ap-southeast-2.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-southeast-2", }, + Deprecated: boxedTrue, }, - "ca-central-1": endpoint{ - Hostname: "portal.sso.ca-central-1.amazonaws.com", + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "fms-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - "eu-central-1": endpoint{ - Hostname: "portal.sso.eu-central-1.amazonaws.com", + endpointKey{ + Region: "fips-eu-central-1", + }: endpoint{ + Hostname: "fms-fips.eu-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-central-1", }, + Deprecated: boxedTrue, }, - "eu-west-1": endpoint{ - Hostname: "portal.sso.eu-west-1.amazonaws.com", + endpointKey{ + Region: "fips-eu-south-1", + }: endpoint{ + Hostname: "fms-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-1", + }: endpoint{ + Hostname: "fms-fips.eu-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-1", }, + Deprecated: boxedTrue, }, - "eu-west-2": endpoint{ - Hostname: "portal.sso.eu-west-2.amazonaws.com", + endpointKey{ + Region: "fips-eu-west-2", + }: endpoint{ + Hostname: "fms-fips.eu-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "eu-west-2", }, + Deprecated: boxedTrue, }, - "us-east-1": endpoint{ - Hostname: "portal.sso.us-east-1.amazonaws.com", + endpointKey{ + Region: "fips-eu-west-3", + }: endpoint{ + Hostname: "fms-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-me-south-1", + }: endpoint{ + Hostname: "fms-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-sa-east-1", + }: endpoint{ + Hostname: "fms-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "fms-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "us-east-2": endpoint{ - Hostname: "portal.sso.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "fms-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "us-west-2": endpoint{ - Hostname: "portal.sso.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "fms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "fms-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + 
Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.me-south-1.amazonaws.com", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.sa-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.us-west-2.amazonaws.com", }, }, }, - "profile": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "projects.iot1click": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "qldb": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "qldb-fips.us-east-1.amazonaws.com", + "forecast": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "forecast-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "qldb-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "forecast-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "qldb-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "forecast-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "forecast-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "forecast-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: 
endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "forecast-fips.us-west-2.amazonaws.com", }, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, }, }, - "ram": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "ram-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "fips-us-east-1": endpoint{ - Hostname: "ram-fips.us-east-1.amazonaws.com", + "forecastquery": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "forecastquery-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "ram-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "forecastquery-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "ram-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "ram-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "forecastquery-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "forecastquery-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "forecastquery-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "forecastquery-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "rds": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": 
endpoint{}, - "me-south-1": endpoint{}, - "rds-fips.ca-central-1": endpoint{ - Hostname: "rds-fips.ca-central-1.amazonaws.com", + "frauddetector": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "fsx": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "fsx-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - "rds-fips.us-east-1": endpoint{ - Hostname: "rds-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "fips-prod-ca-central-1", + }: endpoint{ + Hostname: "fsx-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-prod-us-east-1", + }: endpoint{ + Hostname: "fsx-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "rds-fips.us-east-2": endpoint{ - Hostname: "rds-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-prod-us-east-2", + }: endpoint{ + Hostname: "fsx-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "rds-fips.us-west-1": endpoint{ - Hostname: "rds-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-prod-us-west-1", + }: endpoint{ + Hostname: "fsx-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "rds-fips.us-west-2": endpoint{ - Hostname: "rds-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-prod-us-west-2", + }: endpoint{ + Hostname: "fsx-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - SSLCommonName: "{service}.{dnsSuffix}", - }, - "us-east-2": endpoint{}, - "us-west-1": 
endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "redshift": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "redshift-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - }, - "fips-us-east-1": endpoint{ - Hostname: "redshift-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "fsx-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "redshift-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "fsx-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "redshift-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "fsx-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "redshift-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "fsx-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, - }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "rekognition": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "rekognition-fips.ca-central-1": endpoint{ - Hostname: "rekognition-fips.ca-central-1.amazonaws.com", + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "prod-ca-central-1", + }: endpoint{ CredentialScope: credentialScope{ Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - "rekognition-fips.us-east-1": endpoint{ - Hostname: "rekognition-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "prod-ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - "rekognition-fips.us-east-2": endpoint{ - Hostname: "rekognition-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "prod-us-east-1", + }: endpoint{ CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "rekognition-fips.us-west-1": endpoint{ - Hostname: "rekognition-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "prod-us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"fsx-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "rekognition-fips.us-west-2": endpoint{ - Hostname: "rekognition-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "prod-us-east-2", + }: endpoint{ CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "resource-groups": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "resource-groups-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "prod-us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "resource-groups-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "prod-us-west-1", + }: endpoint{ CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "resource-groups-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "prod-us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "resource-groups-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "prod-us-west-2", + }: endpoint{ CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "robomaker": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "route53": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "route53.amazonaws.com", + endpointKey{ + Region: "prod-us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-east-1.amazonaws.com", }, - "fips-aws-global": endpoint{ - Hostname: "route53-fips.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"fsx-fips.us-east-2.amazonaws.com", }, - }, - }, - "route53-recovery-control-config": service{ - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "route53-recovery-control-config.us-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-2", - }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-west-2.amazonaws.com", }, }, }, - "route53domains": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "route53resolver": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "gamelift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "geo": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, - "runtime-v2-lex": service{ - - 
Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "glacier": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, }, - }, - "runtime.lex": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "lex", - }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "runtime-fips.lex.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glacier-fips.ca-central-1.amazonaws.com", }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "runtime-fips.lex.us-west-2.amazonaws.com", + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "glacier-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - }, - }, - "runtime.sagemaker": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "runtime-fips.sagemaker.us-east-1.amazonaws.com", + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "glacier-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "runtime-fips.sagemaker.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "glacier-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "us-west-1": 
endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "runtime-fips.sagemaker.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "glacier-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "runtime-fips.sagemaker.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "glacier-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glacier-fips.us-east-1.amazonaws.com", }, - }, - }, - "s3": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - - HasDualStack: boxedTrue, - DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", - }, - Endpoints: endpoints{ - "accesspoint-af-south-1": endpoint{ - Hostname: "s3-accesspoint.af-south-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - }, - "accesspoint-ap-east-1": endpoint{ - Hostname: "s3-accesspoint.ap-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - }, - "accesspoint-ap-northeast-1": endpoint{ - Hostname: "s3-accesspoint.ap-northeast-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - }, - "accesspoint-ap-northeast-2": endpoint{ - Hostname: "s3-accesspoint.ap-northeast-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - }, - "accesspoint-ap-northeast-3": endpoint{ - Hostname: "s3-accesspoint.ap-northeast-3.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - }, - "accesspoint-ap-south-1": endpoint{ - Hostname: "s3-accesspoint.ap-south-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - }, - "accesspoint-ap-southeast-1": endpoint{ - Hostname: "s3-accesspoint.ap-southeast-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - }, - "accesspoint-ap-southeast-2": endpoint{ - Hostname: "s3-accesspoint.ap-southeast-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - }, - "accesspoint-ca-central-1": endpoint{ - Hostname: "s3-accesspoint.ca-central-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - }, - "accesspoint-eu-central-1": endpoint{ - Hostname: "s3-accesspoint.eu-central-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - }, - "accesspoint-eu-north-1": endpoint{ - Hostname: "s3-accesspoint.eu-north-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - }, - "accesspoint-eu-south-1": endpoint{ - Hostname: "s3-accesspoint.eu-south-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - }, - "accesspoint-eu-west-1": endpoint{ - Hostname: "s3-accesspoint.eu-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - }, - "accesspoint-eu-west-2": endpoint{ - Hostname: "s3-accesspoint.eu-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - }, - "accesspoint-eu-west-3": endpoint{ - Hostname: "s3-accesspoint.eu-west-3.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - }, - "accesspoint-me-south-1": endpoint{ - Hostname: "s3-accesspoint.me-south-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - }, - "accesspoint-sa-east-1": endpoint{ - Hostname: "s3-accesspoint.sa-east-1.amazonaws.com", - 
SignatureVersions: []string{"s3v4"}, - }, - "accesspoint-us-east-1": endpoint{ - Hostname: "s3-accesspoint.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - }, - "accesspoint-us-east-2": endpoint{ - Hostname: "s3-accesspoint.us-east-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - }, - "accesspoint-us-west-1": endpoint{ - Hostname: "s3-accesspoint.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - }, - "accesspoint-us-west-2": endpoint{ - Hostname: "s3-accesspoint.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - }, - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{ - Hostname: "s3.ap-northeast-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glacier-fips.us-east-2.amazonaws.com", }, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{ - Hostname: "s3.ap-southeast-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glacier-fips.us-west-1.amazonaws.com", }, - "ap-southeast-2": endpoint{ - Hostname: "s3.ap-southeast-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glacier-fips.us-west-2.amazonaws.com", }, - "aws-global": endpoint{ - Hostname: "s3.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, + }, + }, + "glue": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "glue-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{ - Hostname: "s3.eu-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-accesspoint-ca-central-1": endpoint{ - Hostname: 
"s3-accesspoint-fips.ca-central-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - }, - "fips-accesspoint-us-east-1": endpoint{ - Hostname: "s3-accesspoint-fips.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - }, - "fips-accesspoint-us-east-2": endpoint{ - Hostname: "s3-accesspoint-fips.us-east-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - }, - "fips-accesspoint-us-west-1": endpoint{ - Hostname: "s3-accesspoint-fips.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - }, - "fips-accesspoint-us-west-2": endpoint{ - Hostname: "s3-accesspoint-fips.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "glue-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, }, - "me-south-1": endpoint{}, - "s3-external-1": endpoint{ - Hostname: "s3-external-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "glue-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "sa-east-1": endpoint{ - Hostname: "s3.sa-east-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "glue-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glue-fips.us-east-1.amazonaws.com", }, - "us-east-1": endpoint{ - Hostname: "s3.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glue-fips.us-east-2.amazonaws.com", }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{ - Hostname: "s3.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glue-fips.us-west-1.amazonaws.com", }, - "us-west-2": endpoint{ - Hostname: "s3.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glue-fips.us-west-2.amazonaws.com", }, }, }, - "s3-control": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, - - HasDualStack: boxedTrue, - DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{ - Hostname: "s3-control.ap-northeast-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + "grafana": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "grafana.ap-northeast-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-northeast-1", }, }, - "ap-northeast-2": endpoint{ - Hostname: "s3-control.ap-northeast-2.amazonaws.com", 
- SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "grafana.ap-northeast-2.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-northeast-2", }, }, - "ap-northeast-3": endpoint{ - Hostname: "s3-control.ap-northeast-3.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "grafana.ap-southeast-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-northeast-3", + Region: "ap-southeast-1", }, }, - "ap-south-1": endpoint{ - Hostname: "s3-control.ap-south-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "grafana.ap-southeast-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-south-1", + Region: "ap-southeast-2", }, }, - "ap-southeast-1": endpoint{ - Hostname: "s3-control.ap-southeast-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "grafana.eu-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-southeast-1", + Region: "eu-central-1", }, }, - "ap-southeast-2": endpoint{ - Hostname: "s3-control.ap-southeast-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "grafana.eu-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-southeast-2", + Region: "eu-west-1", }, }, - "ca-central-1": endpoint{ - Hostname: "s3-control.ca-central-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "grafana.eu-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "ca-central-1", + Region: "eu-west-2", }, }, - "ca-central-1-fips": endpoint{ - Hostname: "s3-control-fips.ca-central-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "grafana.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ca-central-1", + Region: "us-east-1", }, }, - "eu-central-1": endpoint{ - Hostname: "s3-control.eu-central-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "grafana.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-central-1", + Region: "us-east-2", }, }, - "eu-north-1": endpoint{ - Hostname: "s3-control.eu-north-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "grafana.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-north-1", + Region: "us-west-2", }, }, - "eu-west-1": endpoint{ - Hostname: "s3-control.eu-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-west-1", - }, - }, - "eu-west-2": endpoint{ - Hostname: "s3-control.eu-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, - }, - "eu-west-3": endpoint{ - Hostname: "s3-control.eu-west-3.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "eu-west-3", - }, - }, - "sa-east-1": endpoint{ - Hostname: "s3-control.sa-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "sa-east-1", - }, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: 
endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, }, - "us-east-1": endpoint{ - Hostname: "s3-control.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "greengrass-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "greengrass-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - "us-east-1-fips": endpoint{ - Hostname: "s3-control-fips.us-east-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "greengrass-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "us-east-2": endpoint{ - Hostname: "s3-control.us-east-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "greengrass-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "us-east-2-fips": endpoint{ - Hostname: "s3-control-fips.us-east-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "greengrass-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "greengrass-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "greengrass-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "greengrass-fips.us-west-2.amazonaws.com", }, - "us-west-1": endpoint{ - Hostname: "s3-control.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + }, + }, + "groundstation": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "groundstation-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "us-west-1-fips": endpoint{ - Hostname: 
"s3-control-fips.us-west-1.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "groundstation-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "us-west-2": endpoint{ - Hostname: "s3-control.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "groundstation-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "groundstation-fips.us-east-1.amazonaws.com", }, - "us-west-2-fips": endpoint{ - Hostname: "s3-control-fips.us-west-2.amazonaws.com", - SignatureVersions: []string{"s3v4"}, - CredentialScope: credentialScope{ - Region: "us-west-2", - }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "groundstation-fips.us-east-2.amazonaws.com", }, - }, - }, - "savingsplans": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "savingsplans.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "groundstation-fips.us-west-2.amazonaws.com", }, }, }, - "schemas": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "sdb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"v2"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - Hostname: "sdb.amazonaws.com", + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, }, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, - }, - "secretsmanager": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "secretsmanager-fips.us-east-1.amazonaws.com", + Endpoints: 
serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "guardduty-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "guardduty-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "guardduty-fips.us-east-2.amazonaws.com", }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "secretsmanager-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "guardduty-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "guardduty-fips.us-west-1.amazonaws.com", }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "secretsmanager-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "guardduty-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "secretsmanager-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "guardduty-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "guardduty-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, }, }, }, - "securityhub": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - 
"ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "securityhub-fips.us-east-1.amazonaws.com", + "health": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SSLCommonName: "health.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "global.health.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, }, - "fips-us-east-2": endpoint{ - Hostname: "securityhub-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "health-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "securityhub-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "us-east-2", + }: endpoint{ CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "securityhub-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "health-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "serverlessrepo": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{ - Protocols: []string{"https"}, - }, - "ap-northeast-1": endpoint{ - Protocols: []string{"https"}, - }, - "ap-northeast-2": endpoint{ - Protocols: []string{"https"}, - }, - "ap-south-1": endpoint{ - Protocols: []string{"https"}, - }, - "ap-southeast-1": endpoint{ - Protocols: []string{"https"}, - }, - "ap-southeast-2": endpoint{ - Protocols: []string{"https"}, - }, - "ca-central-1": endpoint{ - Protocols: []string{"https"}, - }, - "eu-central-1": endpoint{ - Protocols: []string{"https"}, - }, - "eu-north-1": endpoint{ - Protocols: []string{"https"}, - }, - "eu-west-1": endpoint{ - Protocols: []string{"https"}, - }, - "eu-west-2": endpoint{ - Protocols: []string{"https"}, - }, - "eu-west-3": endpoint{ - Protocols: []string{"https"}, - }, - "me-south-1": endpoint{ - Protocols: []string{"https"}, - }, - "sa-east-1": endpoint{ - Protocols: []string{"https"}, - }, - "us-east-1": endpoint{ - Protocols: []string{"https"}, - }, - "us-east-2": endpoint{ - Protocols: []string{"https"}, - }, - "us-west-1": endpoint{ - Protocols: []string{"https"}, - }, - "us-west-2": endpoint{ + "healthlake": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ Protocols: []string{"https"}, }, }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, }, - "servicecatalog": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": 
endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "servicecatalog-fips.us-east-1.amazonaws.com", + "honeycode": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "iam.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "servicecatalog-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "servicecatalog-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "aws-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iam-fips.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "us-east-1", }, }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "servicecatalog-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "aws-global-fips", + }: endpoint{ + Hostname: "iam-fips.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - }, - }, - "servicecatalog-appregistry": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "servicecatalog-appregistry-fips.ca-central-1.amazonaws.com", + endpointKey{ + Region: "iam", + }: endpoint{ CredentialScope: credentialScope{ - Region: "ca-central-1", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-1": endpoint{ - Hostname: "servicecatalog-appregistry-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "iam", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iam-fips.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "servicecatalog-appregistry-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "iam-fips", + }: endpoint{ + Hostname: "iam-fips.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "servicecatalog-appregistry-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, + }, + }, + "identity-chime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "identity-chime-fips.us-east-1.amazonaws.com", }, - 
"fips-us-west-2": endpoint{ - Hostname: "servicecatalog-appregistry-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "identity-chime-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "servicediscovery": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "servicediscovery-fips": endpoint{ - Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com", + "identitystore": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "importexport": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "importexport.amazonaws.com", + SignatureVersions: []string{"v2", "v4"}, CredentialScope: credentialScope{ - Region: "ca-central-1", + Region: "us-east-1", + Service: "IngestionService", }, }, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "servicequotas": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - 
"eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "session.qldb": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "session.qldb-fips.us-east-1.amazonaws.com", + "ingest.timestream": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "ingest-fips-us-east-1", + }: endpoint{ + Hostname: "ingest.timestream-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "session.qldb-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "ingest-fips-us-east-2", + }: endpoint{ + Hostname: "ingest.timestream-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "session.qldb-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "ingest-fips-us-west-2", + }: endpoint{ + Hostname: "ingest.timestream-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, }, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "shield": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - Defaults: endpoint{ - SSLCommonName: "shield.us-east-1.amazonaws.com", - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "shield.us-east-1.amazonaws.com", + endpointKey{ + Region: "ingest-us-east-1", + }: endpoint{ CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-aws-global": endpoint{ - Hostname: "shield-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "ingest-us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ingest.timestream-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - }, - }, - "sms": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "sms-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "ingest-us-east-2", + }: endpoint{ CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "sms-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "ingest-us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"ingest.timestream-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "sms-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "ingest-us-west-2", + }: endpoint{ CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "us-west-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "sms-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "ingest-us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ingest.timestream-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, - "snowball": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ap-northeast-1": endpoint{ - Hostname: "snowball-fips.ap-northeast-1.amazonaws.com", + "inspector": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "inspector-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-northeast-1", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-ap-northeast-2": endpoint{ - Hostname: "snowball-fips.ap-northeast-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "inspector-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-northeast-2", + Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-ap-northeast-3": endpoint{ - Hostname: "snowball-fips.ap-northeast-3.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "inspector-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-northeast-3", + Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-ap-south-1": endpoint{ - Hostname: "snowball-fips.ap-south-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "inspector-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-south-1", + Region: "us-west-2", }, + Deprecated: boxedTrue, }, - "fips-ap-southeast-1": endpoint{ - Hostname: "snowball-fips.ap-southeast-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-1", - }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: 
"us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector-fips.us-east-1.amazonaws.com", }, - "fips-ap-southeast-2": endpoint{ - Hostname: "snowball-fips.ap-southeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-southeast-2", - }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector-fips.us-east-2.amazonaws.com", }, - "fips-ca-central-1": endpoint{ - Hostname: "snowball-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector-fips.us-west-1.amazonaws.com", }, - "fips-eu-central-1": endpoint{ - Hostname: "snowball-fips.eu-central-1.amazonaws.com", + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector-fips.us-west-2.amazonaws.com", + }, + }, + }, + "inspector2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "inspector2-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-central-1", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-eu-west-1": endpoint{ - Hostname: "snowball-fips.eu-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "inspector2-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-west-1", + Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-eu-west-2": endpoint{ - Hostname: "snowball-fips.eu-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "inspector2-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-west-2", + Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-eu-west-3": endpoint{ - Hostname: "snowball-fips.eu-west-3.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "inspector2-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-west-3", + Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"inspector2-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector2-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector2-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector2-fips.us-west-2.amazonaws.com", + }, + }, + }, + "internetmonitor": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "internetmonitor.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "internetmonitor.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "internetmonitor.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "internetmonitor.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "internetmonitor.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "internetmonitor.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "internetmonitor.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "internetmonitor.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "internetmonitor.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "internetmonitor.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "internetmonitor.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "internetmonitor.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "internetmonitor-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "internetmonitor.ca-west-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "internetmonitor.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "internetmonitor.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "internetmonitor.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "internetmonitor.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "internetmonitor.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "internetmonitor.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "internetmonitor.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "internetmonitor.eu-west-3.api.aws", + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "internetmonitor.il-central-1.api.aws", + }, + endpointKey{ + Region: 
"me-central-1", + }: endpoint{ + Hostname: "internetmonitor.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "internetmonitor.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "internetmonitor.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "internetmonitor.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "internetmonitor-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "internetmonitor.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "internetmonitor-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "internetmonitor.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "internetmonitor-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "internetmonitor.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "internetmonitor-fips.us-west-2.amazonaws.com", }, - "fips-sa-east-1": endpoint{ - Hostname: "snowball-fips.sa-east-1.amazonaws.com", + }, + }, + "iot": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iot-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "iot-fips.ca-central-1.amazonaws.com", + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "iot-fips.us-east-1.amazonaws.com", + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "iot-fips.us-east-2.amazonaws.com", + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "iot-fips.us-west-1.amazonaws.com", + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "iot-fips.us-west-2.amazonaws.com", + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iot-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iot-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, 
+ endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iot-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iot-fips.us-west-2.amazonaws.com", + }, + }, + }, + "iotanalytics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "iotevents": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotevents-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "iotevents-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "sa-east-1", + Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-1": endpoint{ - Hostname: "snowball-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "iotevents-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "snowball-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "iotevents-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "snowball-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "fips-us-west-2": endpoint{ - Hostname: "snowball-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "iotevents-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotevents-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotevents-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotevents-fips.us-west-2.amazonaws.com", }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "sns": service{ - Defaults: endpoint{ - 
Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "sns-fips.us-east-1.amazonaws.com", + "ioteventsdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "data.iotevents.ap-northeast-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "ap-northeast-1", }, }, - "fips-us-east-2": endpoint{ - Hostname: "sns-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "data.iotevents.ap-northeast-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "ap-northeast-2", }, }, - "fips-us-west-1": endpoint{ - Hostname: "sns-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "data.iotevents.ap-south-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "ap-south-1", }, }, - "fips-us-west-2": endpoint{ - Hostname: "sns-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "data.iotevents.ap-southeast-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "ap-southeast-1", }, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "sqs": service{ - Defaults: endpoint{ - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "sqs-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "data.iotevents.ap-southeast-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "ap-southeast-2", }, }, - "fips-us-east-2": endpoint{ - Hostname: "sqs-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "data.iotevents.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "ca-central-1", }, }, - "fips-us-west-1": endpoint{ - Hostname: "sqs-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iotevents-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "ca-central-1", }, }, - "fips-us-west-2": endpoint{ - Hostname: "sqs-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "data.iotevents.eu-central-1.amazonaws.com", CredentialScope: 
credentialScope{ - Region: "us-west-2", + Region: "eu-central-1", }, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - SSLCommonName: "queue.{dnsSuffix}", + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "data.iotevents.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "ssm": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-ca-central-1": endpoint{ - Hostname: "ssm-fips.ca-central-1.amazonaws.com", + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "data.iotevents.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "data.iotevents-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-1": endpoint{ - Hostname: "ssm-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "data.iotevents-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "ssm-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "data.iotevents-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "ssm-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "data.iotevents-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "us-west-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "ssm-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "data.iotevents.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "us-east-1", }, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "ssm-incidents": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "states": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - 
"fips-us-east-1": endpoint{ - Hostname: "states-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iotevents-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, }, - "fips-us-east-2": endpoint{ - Hostname: "states-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "data.iotevents.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, }, - "fips-us-west-1": endpoint{ - Hostname: "states-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iotevents-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "us-east-2", }, }, - "fips-us-west-2": endpoint{ - Hostname: "states-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "data.iotevents.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "storagegateway": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips": endpoint{ - Hostname: "storagegateway-fips.ca-central-1.amazonaws.com", + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iotevents-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "ca-central-1", + Region: "us-west-2", }, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "streams.dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "ca-central-1-fips": endpoint{ - Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, + "iotfleetwise": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "iotroborunner": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "iotsecuredtunneling": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + 
Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com", }, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "local": endpoint{ - Hostname: "localhost:8000", - Protocols: []string{"http"}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "dynamodb-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "dynamodb-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "dynamodb-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "dynamodb-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com", }, }, }, - "sts": service{ - PartitionEndpoint: "aws-global", - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": 
endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "aws-global": endpoint{ - Hostname: "sts.amazonaws.com", + "iotsitewise": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotsitewise-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "iotsitewise-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "sts-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "iotsitewise-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "sts-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "iotsitewise-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "us-west-1": endpoint{}, - "us-west-1-fips": endpoint{ - Hostname: "sts-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - }, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "sts-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "iotsitewise-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotsitewise-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotsitewise-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotsitewise-fips.us-west-2.amazonaws.com", }, }, }, - "support": service{ - PartitionEndpoint: "aws-global", - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "support.us-east-1.amazonaws.com", + "iotthingsgraph": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "iotthingsgraph", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: 
"eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "iottwinmaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "api-ap-northeast-1", + }: endpoint{ + Hostname: "api.iottwinmaker.ap-northeast-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "ap-northeast-1", }, }, - }, - }, - "swf": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "swf-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "api-ap-northeast-2", + }: endpoint{ + Hostname: "api.iottwinmaker.ap-northeast-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "ap-northeast-2", }, }, - "fips-us-east-2": endpoint{ - Hostname: "swf-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "api-ap-south-1", + }: endpoint{ + Hostname: "api.iottwinmaker.ap-south-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "ap-south-1", }, }, - "fips-us-west-1": endpoint{ - Hostname: "swf-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "api-ap-southeast-1", + }: endpoint{ + Hostname: "api.iottwinmaker.ap-southeast-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "ap-southeast-1", }, }, - "fips-us-west-2": endpoint{ - Hostname: "swf-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "api-ap-southeast-2", + }: endpoint{ + Hostname: "api.iottwinmaker.ap-southeast-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "ap-southeast-2", }, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "tagging": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "transcribe": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - 
"eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "fips.transcribe.us-east-1.amazonaws.com", + endpointKey{ + Region: "api-eu-central-1", + }: endpoint{ + Hostname: "api.iottwinmaker.eu-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "eu-central-1", }, }, - "fips-us-east-2": endpoint{ - Hostname: "fips.transcribe.us-east-2.amazonaws.com", + endpointKey{ + Region: "api-eu-west-1", + }: endpoint{ + Hostname: "api.iottwinmaker.eu-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "eu-west-1", }, }, - "fips-us-west-1": endpoint{ - Hostname: "fips.transcribe.us-west-1.amazonaws.com", + endpointKey{ + Region: "api-us-east-1", + }: endpoint{ + Hostname: "api.iottwinmaker.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "us-east-1", }, }, - "fips-us-west-2": endpoint{ - Hostname: "fips.transcribe.us-west-2.amazonaws.com", + endpointKey{ + Region: "api-us-west-2", + }: endpoint{ + Hostname: "api.iottwinmaker.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "transcribestreaming": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "transcribestreaming-fips-ca-central-1": endpoint{ - Hostname: "transcribestreaming-fips.ca-central-1.amazonaws.com", + endpointKey{ + Region: "data-ap-northeast-1", + }: endpoint{ + Hostname: "data.iottwinmaker.ap-northeast-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ca-central-1", + Region: "ap-northeast-1", }, }, - "transcribestreaming-fips-us-east-1": endpoint{ - Hostname: "transcribestreaming-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "data-ap-northeast-2", + }: endpoint{ + Hostname: "data.iottwinmaker.ap-northeast-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "ap-northeast-2", }, }, - "transcribestreaming-fips-us-east-2": endpoint{ - Hostname: "transcribestreaming-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "data-ap-south-1", + }: endpoint{ + Hostname: "data.iottwinmaker.ap-south-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "ap-south-1", }, }, - "transcribestreaming-fips-us-west-2": endpoint{ - Hostname: "transcribestreaming-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "data-ap-southeast-1", + }: endpoint{ + Hostname: "data.iottwinmaker.ap-southeast-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "ap-southeast-1", }, }, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "transfer": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - 
"fips-ca-central-1": endpoint{ - Hostname: "transfer-fips.ca-central-1.amazonaws.com", + endpointKey{ + Region: "data-ap-southeast-2", + }: endpoint{ + Hostname: "data.iottwinmaker.ap-southeast-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "ca-central-1", + Region: "ap-southeast-2", }, }, - "fips-us-east-1": endpoint{ - Hostname: "transfer-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "data-eu-central-1", + }: endpoint{ + Hostname: "data.iottwinmaker.eu-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "eu-central-1", }, }, - "fips-us-east-2": endpoint{ - Hostname: "transfer-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "data-eu-west-1", + }: endpoint{ + Hostname: "data.iottwinmaker.eu-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "eu-west-1", }, }, - "fips-us-west-1": endpoint{ - Hostname: "transfer-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "data-us-east-1", + }: endpoint{ + Hostname: "data.iottwinmaker.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "us-east-1", }, }, - "fips-us-west-2": endpoint{ - Hostname: "transfer-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "data-us-west-2", + }: endpoint{ + Hostname: "data.iottwinmaker.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "translate": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "us-east-1": endpoint{}, - "us-east-1-fips": endpoint{ - Hostname: "translate-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "fips-api-us-east-1", + }: endpoint{ + Hostname: "api.iottwinmaker-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "translate-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-api-us-west-2", + }: endpoint{ + Hostname: "api.iottwinmaker-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "us-west-2", }, }, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "translate-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-data-us-east-1", + }: endpoint{ + Hostname: "data.iottwinmaker-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "fips-data-us-west-2", + }: endpoint{ + Hostname: "data.iottwinmaker-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, }, - }, - }, - "waf": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-fips": endpoint{ - Hostname: "waf-fips.amazonaws.com", + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: 
"iottwinmaker-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "aws-global": endpoint{ - Hostname: "waf.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "iottwinmaker-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iottwinmaker-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iottwinmaker-fips.us-west-2.amazonaws.com", }, }, }, - "waf-regional": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{ - Hostname: "waf-regional.af-south-1.amazonaws.com", + "iotwireless": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "api.iotwireless.ap-northeast-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "af-south-1", + Region: "ap-northeast-1", }, }, - "ap-east-1": endpoint{ - Hostname: "waf-regional.ap-east-1.amazonaws.com", + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "api.iotwireless.ap-southeast-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-east-1", + Region: "ap-southeast-2", }, }, - "ap-northeast-1": endpoint{ - Hostname: "waf-regional.ap-northeast-1.amazonaws.com", + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "api.iotwireless.eu-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-northeast-1", + Region: "eu-west-1", }, }, - "ap-northeast-2": endpoint{ - Hostname: "waf-regional.ap-northeast-2.amazonaws.com", + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "api.iotwireless.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-northeast-2", + Region: "us-east-1", }, }, - "ap-northeast-3": endpoint{ - Hostname: "waf-regional.ap-northeast-3.amazonaws.com", + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "api.iotwireless.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-northeast-3", + Region: "us-west-2", }, }, - "ap-south-1": endpoint{ - Hostname: "waf-regional.ap-south-1.amazonaws.com", + }, + }, + "ivs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "ivschat": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "ivsrealtime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ 
+ Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "kafka": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "kafka-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-south-1", + Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - "ap-southeast-1": endpoint{ - Hostname: "waf-regional.ap-southeast-1.amazonaws.com", + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "kafka-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-southeast-1", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "ap-southeast-2": endpoint{ - Hostname: "waf-regional.ap-southeast-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "kafka-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "ap-southeast-2", + Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "ca-central-1": endpoint{ - Hostname: "waf-regional.ca-central-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "kafka-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "ca-central-1", + Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "eu-central-1": endpoint{ - Hostname: "waf-regional.eu-central-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "kafka-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-central-1", + Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + 
endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka-fips.us-west-2.amazonaws.com", }, - "eu-north-1": endpoint{ - Hostname: "waf-regional.eu-north-1.amazonaws.com", + }, + }, + "kafkaconnect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "kendra": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "kendra-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-north-1", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "eu-south-1": endpoint{ - Hostname: "waf-regional.eu-south-1.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "kendra-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-south-1", + Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "eu-west-1": endpoint{ - Hostname: "waf-regional.eu-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "kendra-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-west-1", + Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-fips.us-west-2.amazonaws.com", + }, + }, + }, + "kendra-ranking": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + 
Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "kendra-ranking.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "kendra-ranking.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "kendra-ranking.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "kendra-ranking.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "kendra-ranking.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "kendra-ranking.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "kendra-ranking.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "kendra-ranking.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "kendra-ranking.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "kendra-ranking.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "kendra-ranking.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "kendra-ranking.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-ranking-fips.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "kendra-ranking.ca-west-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "kendra-ranking.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "kendra-ranking.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "kendra-ranking.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "kendra-ranking.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "kendra-ranking.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "kendra-ranking.eu-west-3.api.aws", + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "kendra-ranking.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "kendra-ranking.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "kendra-ranking.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "kendra-ranking.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "kendra-ranking.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-ranking-fips.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "kendra-ranking.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-ranking-fips.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "kendra-ranking.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "kendra-ranking.us-west-2.api.aws", + }, + endpointKey{ + 
Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-ranking-fips.us-west-2.api.aws", }, - "eu-west-2": endpoint{ - Hostname: "waf-regional.eu-west-2.amazonaws.com", + }, + }, + "kinesis": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "kinesis-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-west-2", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "eu-west-3": endpoint{ - Hostname: "waf-regional.eu-west-3.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "kinesis-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-west-3", + Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-af-south-1": endpoint{ - Hostname: "waf-regional-fips.af-south-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "kinesis-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "kinesis-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesis-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesis-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesis-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesis-fips.us-west-2.amazonaws.com", + }, + }, + }, + "kinesisanalytics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + 
Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "kinesisvideo": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "kms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ProdFips", + }: endpoint{ + Hostname: "kms-fips.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.af-south-1.amazonaws.com", + }, + endpointKey{ + Region: "af-south-1-fips", + }: endpoint{ + Hostname: "kms-fips.af-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "af-south-1", }, + Deprecated: boxedTrue, }, - "fips-ap-east-1": endpoint{ - Hostname: "waf-regional-fips.ap-east-1.amazonaws.com", + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-east-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-east-1-fips", + }: endpoint{ + Hostname: 
"kms-fips.ap-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-east-1", }, + Deprecated: boxedTrue, }, - "fips-ap-northeast-1": endpoint{ - Hostname: "waf-regional-fips.ap-northeast-1.amazonaws.com", + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-northeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-1-fips", + }: endpoint{ + Hostname: "kms-fips.ap-northeast-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-northeast-1", }, + Deprecated: boxedTrue, }, - "fips-ap-northeast-2": endpoint{ - Hostname: "waf-regional-fips.ap-northeast-2.amazonaws.com", + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-northeast-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-2-fips", + }: endpoint{ + Hostname: "kms-fips.ap-northeast-2.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-northeast-2", }, + Deprecated: boxedTrue, }, - "fips-ap-northeast-3": endpoint{ - Hostname: "waf-regional-fips.ap-northeast-3.amazonaws.com", + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-northeast-3.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-3-fips", + }: endpoint{ + Hostname: "kms-fips.ap-northeast-3.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-northeast-3", }, + Deprecated: boxedTrue, }, - "fips-ap-south-1": endpoint{ - Hostname: "waf-regional-fips.ap-south-1.amazonaws.com", + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-south-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-south-1-fips", + }: endpoint{ + Hostname: "kms-fips.ap-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-south-1", }, + Deprecated: boxedTrue, }, - "fips-ap-southeast-1": endpoint{ - Hostname: "waf-regional-fips.ap-southeast-1.amazonaws.com", + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-south-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-south-2-fips", + }: endpoint{ + Hostname: "kms-fips.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-southeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-1-fips", + }: endpoint{ + Hostname: "kms-fips.ap-southeast-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-southeast-1", }, + Deprecated: boxedTrue, }, - "fips-ap-southeast-2": endpoint{ - Hostname: "waf-regional-fips.ap-southeast-2.amazonaws.com", + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-southeast-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-2-fips", + }: endpoint{ + Hostname: "kms-fips.ap-southeast-2.amazonaws.com", CredentialScope: credentialScope{ Region: "ap-southeast-2", }, - }, - "fips-ca-central-1": endpoint{ - Hostname: 
"waf-regional-fips.ca-central-1.amazonaws.com", + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-southeast-3.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-3-fips", + }: endpoint{ + Hostname: "kms-fips.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-southeast-4.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-4-fips", + }: endpoint{ + Hostname: "kms-fips.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "kms-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ Region: "ca-central-1", }, + Deprecated: boxedTrue, }, - "fips-eu-central-1": endpoint{ - Hostname: "waf-regional-fips.eu-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-central-1", - }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ca-west-1.amazonaws.com", }, - "fips-eu-north-1": endpoint{ - Hostname: "waf-regional-fips.eu-north-1.amazonaws.com", + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "kms-fips.ca-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-north-1", + Region: "ca-west-1", }, + Deprecated: boxedTrue, }, - "fips-eu-south-1": endpoint{ - Hostname: "waf-regional-fips.eu-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-south-1", - }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-central-1.amazonaws.com", }, - "fips-eu-west-1": endpoint{ - Hostname: "waf-regional-fips.eu-west-1.amazonaws.com", + endpointKey{ + Region: "eu-central-1-fips", + }: endpoint{ + Hostname: "kms-fips.eu-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-west-1", + Region: "eu-central-1", }, + Deprecated: boxedTrue, }, - "fips-eu-west-2": endpoint{ - Hostname: "waf-regional-fips.eu-west-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "eu-west-2", - }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-central-2.amazonaws.com", }, - "fips-eu-west-3": endpoint{ - Hostname: "waf-regional-fips.eu-west-3.amazonaws.com", + endpointKey{ + Region: "eu-central-2-fips", + }: endpoint{ + Hostname: "kms-fips.eu-central-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "eu-west-3", + Region: "eu-central-2", }, + Deprecated: boxedTrue, }, - "fips-me-south-1": endpoint{ - Hostname: "waf-regional-fips.me-south-1.amazonaws.com", + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-north-1.amazonaws.com", + }, + 
endpointKey{ + Region: "eu-north-1-fips", + }: endpoint{ + Hostname: "kms-fips.eu-north-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "me-south-1", + Region: "eu-north-1", }, + Deprecated: boxedTrue, }, - "fips-sa-east-1": endpoint{ - Hostname: "waf-regional-fips.sa-east-1.amazonaws.com", + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-south-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-south-1-fips", + }: endpoint{ + Hostname: "kms-fips.eu-south-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "sa-east-1", + Region: "eu-south-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-1": endpoint{ - Hostname: "waf-regional-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-south-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-south-2-fips", + }: endpoint{ + Hostname: "kms-fips.eu-south-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "eu-south-2", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "waf-regional-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-1-fips", + }: endpoint{ + Hostname: "kms-fips.eu-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-2", + Region: "eu-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "waf-regional-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-west-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-2-fips", + }: endpoint{ + Hostname: "kms-fips.eu-west-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-1", + Region: "eu-west-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "waf-regional-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-west-3.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-3-fips", + }: endpoint{ + Hostname: "kms-fips.eu-west-3.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "eu-west-3", }, - }, - "me-south-1": endpoint{ - Hostname: "waf-regional.me-south-1.amazonaws.com", + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.il-central-1.amazonaws.com", + }, + endpointKey{ + Region: "il-central-1-fips", + }: endpoint{ + Hostname: "kms-fips.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.me-central-1.amazonaws.com", + }, + endpointKey{ + Region: "me-central-1-fips", + }: endpoint{ + Hostname: "kms-fips.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + Deprecated: 
boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.me-south-1.amazonaws.com", + }, + endpointKey{ + Region: "me-south-1-fips", + }: endpoint{ + Hostname: "kms-fips.me-south-1.amazonaws.com", CredentialScope: credentialScope{ Region: "me-south-1", }, + Deprecated: boxedTrue, }, - "sa-east-1": endpoint{ - Hostname: "waf-regional.sa-east-1.amazonaws.com", + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.sa-east-1.amazonaws.com", + }, + endpointKey{ + Region: "sa-east-1-fips", + }: endpoint{ + Hostname: "kms-fips.sa-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "sa-east-1", }, + Deprecated: boxedTrue, }, - "us-east-1": endpoint{ - Hostname: "waf-regional.us-east-1.amazonaws.com", + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "kms-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "us-east-2": endpoint{ - Hostname: "waf-regional.us-east-2.amazonaws.com", + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "kms-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "us-west-1": endpoint{ - Hostname: "waf-regional.us-west-1.amazonaws.com", + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "kms-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "us-west-2": endpoint{ - Hostname: "waf-regional.us-west-2.amazonaws.com", + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "kms-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, }, }, }, - "workdocs": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "workdocs-fips.us-east-1.amazonaws.com", + "lakeformation": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", 
+ }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "lakeformation-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "workdocs-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "lakeformation-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "workmail": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "workspaces": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "workspaces-fips.us-east-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "lakeformation-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "workspaces-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "lakeformation-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-west-2.amazonaws.com", }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "xray": service{ - - Endpoints: endpoints{ - "af-south-1": endpoint{}, - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-northeast-3": 
endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-south-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips-us-east-1": endpoint{ - Hostname: "xray-fips.us-east-1.amazonaws.com", + "lambda": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ca-west-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + 
endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-west-3.api.aws", + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "lambda-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-east-2": endpoint{ - Hostname: "xray-fips.us-east-2.amazonaws.com", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "lambda-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, + Deprecated: boxedTrue, }, - "fips-us-west-1": endpoint{ - Hostname: "xray-fips.us-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "lambda-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-west-2": endpoint{ - Hostname: "xray-fips.us-west-2.amazonaws.com", + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "lambda-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lambda-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lambda-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"lambda-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lambda-fips.us-west-2.amazonaws.com", }, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - }, -} - -// AwsCnPartition returns the Resolver for AWS China. -func AwsCnPartition() Partition { - return awscnPartition.Partition() -} - -var awscnPartition = partition{ - ID: "aws-cn", - Name: "AWS China", - DNSSuffix: "amazonaws.com.cn", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - Regions: regions{ - "cn-north-1": region{ - Description: "China (Beijing)", - }, - "cn-northwest-1": region{ - Description: "China (Ningxia)", - }, - }, - Services: services{ - "access-analyzer": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, }, }, - "acm": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "license-manager": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "license-manager-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "license-manager-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "license-manager-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "license-manager-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", 
+ }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-fips.us-west-2.amazonaws.com", + }, }, }, - "api.ecr": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{ - Hostname: "api.ecr.cn-north-1.amazonaws.com.cn", + "license-manager-linux-subscriptions": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "cn-north-1", + Region: "us-east-1", }, + Deprecated: boxedTrue, }, - "cn-northwest-1": endpoint{ - Hostname: "api.ecr.cn-northwest-1.amazonaws.com.cn", + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "cn-northwest-1", + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: 
endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-west-2.amazonaws.com", + }, + }, + }, + "license-manager-user-subscriptions": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "license-manager-user-subscriptions-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "license-manager-user-subscriptions-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "license-manager-user-subscriptions-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "license-manager-user-subscriptions-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"license-manager-user-subscriptions-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-user-subscriptions-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-user-subscriptions-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-user-subscriptions-fips.us-west-2.amazonaws.com", }, }, }, - "api.sagemaker": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "lightsail": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, - "apigateway": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "logs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "logs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "logs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: 
"logs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "logs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs-fips.us-west-2.amazonaws.com", + }, }, }, - "application-autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "lookoutequipment": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "lookoutmetrics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, - "appsync": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "lookoutvision": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "m2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + 
endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{}, }, }, - "athena": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "machinelearning": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, }, }, - "autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "macie2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "macie2-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "macie2-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "macie2-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "macie2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + 
}: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "macie2-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "macie2-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "macie2-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "macie2-fips.us-west-2.amazonaws.com", + }, }, }, - "autoscaling-plans": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "managedblockchain": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "managedblockchain-query": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, }, }, - "backup": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "marketplacecommerceanalytics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "media-pipelines-chime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "media-pipelines-chime-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "media-pipelines-chime-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "media-pipelines-chime-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "media-pipelines-chime-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, }, }, - "batch": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "mediaconnect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: 
"ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, - "budgets": service{ - PartitionEndpoint: "aws-cn-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-cn-global": endpoint{ - Hostname: "budgets.amazonaws.com.cn", + "mediaconvert": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mediaconvert-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "mediaconvert-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "cn-northwest-1", + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "mediaconvert-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "mediaconvert-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "mediaconvert-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "mediaconvert-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mediaconvert-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mediaconvert-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + 
endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mediaconvert-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mediaconvert-fips.us-west-2.amazonaws.com", }, }, }, - "ce": service{ - PartitionEndpoint: "aws-cn-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-cn-global": endpoint{ - Hostname: "ce.cn-northwest-1.amazonaws.com.cn", + "medialive": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "medialive-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "cn-northwest-1", + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "medialive-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "medialive-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "medialive-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "medialive-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "medialive-fips.us-west-2.amazonaws.com", }, }, }, - "cloudformation": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "mediapackage": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: 
"us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, }, }, - "cloudfront": service{ - PartitionEndpoint: "aws-cn-global", + "mediapackage-vod": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "mediapackagev2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "mediastore": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "meetings-chime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", 
+ }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "meetings-chime-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "meetings-chime-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "meetings-chime-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "meetings-chime-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "memory-db": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "memory-db-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "messaging-chime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "messaging-chime-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "messaging-chime-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "metering.marketplace": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + 
endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "metrics.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "mgh": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "mgn": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ 
+ Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "mgn-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "mgn-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "mgn-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "mgn-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mgn-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mgn-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mgn-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mgn-fips.us-west-2.amazonaws.com", + }, + }, + }, + "migrationhub-orchestrator": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "migrationhub-strategy": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "mobileanalytics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "models-v2-lex": 
service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "models.lex": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "models-fips.lex.{region}.{dnsSuffix}", + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "models-fips.lex.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "models-fips.lex.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "models-fips.lex.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "models-fips.lex.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "monitoring": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, 
+ endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "monitoring-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "monitoring-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "monitoring-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "monitoring-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "monitoring-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "monitoring-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "monitoring-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "monitoring-fips.us-west-2.amazonaws.com", + }, + }, + }, + "mq": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "mq-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "mq-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "mq-fips.us-west-1.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "mq-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mq-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mq-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mq-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mq-fips.us-west-2.amazonaws.com", + }, + }, + }, + "mturk-requester": service{ + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "sandbox", + }: endpoint{ + Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "neptune": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "rds.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "rds.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "rds.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "rds.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "rds.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "rds.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "rds.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "rds.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "rds.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "rds.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "rds.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: 
"rds.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "rds.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "rds.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "rds.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "rds.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "rds.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "rds.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "network-firewall": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "network-firewall-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "network-firewall-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "network-firewall-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "network-firewall-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "network-firewall-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "network-firewall-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + 
Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "network-firewall-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "network-firewall-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "network-firewall-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "network-firewall-fips.us-west-2.amazonaws.com", + }, + }, + }, + "networkmanager": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "networkmanager.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "aws-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "networkmanager-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "fips-aws-global", + }: endpoint{ + Hostname: "networkmanager-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "nimble": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "oam": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: 
"eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "oidc": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "oidc.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "oidc.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "oidc.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "oidc.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "oidc.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "oidc.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "oidc.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "oidc.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "oidc.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "oidc.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "oidc.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "oidc.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "oidc.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "oidc.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "oidc.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "oidc.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "oidc.eu-west-3.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "oidc.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "oidc.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "oidc.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "oidc.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "oidc.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "oidc.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "oidc.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "oidc.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "omics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "omics.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "omics.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "omics.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "omics.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "omics-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "omics-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "omics.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "omics.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "omics-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "omics.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "omics-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "opsworks": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: 
"ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "opsworks-cm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "organizations": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "organizations.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "aws-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "organizations-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "fips-aws-global", + }: endpoint{ + Hostname: "organizations-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "osis": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "outposts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: 
"ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "outposts-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "outposts-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "outposts-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "outposts-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "outposts-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "outposts-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "outposts-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "outposts-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "outposts-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "outposts-fips.us-west-2.amazonaws.com", + }, + }, + }, + "participant.connect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "participant.connect-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "participant.connect-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: 
endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "participant.connect-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "participant.connect-fips.us-west-2.amazonaws.com", + }, + }, + }, + "personalize": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "pi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "pinpoint": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "mobiletargeting", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "pinpoint.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"pinpoint-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "pinpoint-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "pinpoint-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "pinpoint-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "pinpoint-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "pinpoint.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pinpoint-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "pinpoint.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pinpoint-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "pinpoint.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pinpoint-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "pipes": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ 
+ Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "polly": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "polly-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "polly-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "polly-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "polly-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "polly-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "polly-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "polly-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "polly-fips.us-west-2.amazonaws.com", + }, + }, + }, + "portal.sso": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "portal.sso.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "portal.sso.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "portal.sso.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "portal.sso.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: 
"ap-northeast-3", + }: endpoint{ + Hostname: "portal.sso.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "portal.sso.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "portal.sso.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "portal.sso.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "portal.sso.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "portal.sso.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "portal.sso.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "portal.sso.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "portal.sso.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "portal.sso.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "portal.sso.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "portal.sso.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "portal.sso.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "portal.sso.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "portal.sso.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "portal.sso.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "portal.sso.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "portal.sso.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "portal.sso.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "portal.sso.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + 
Region: "us-west-2", + }: endpoint{ + Hostname: "portal.sso.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "profile": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "profile-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "profile-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "profile-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "profile-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "profile-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "profile-fips.us-west-2.amazonaws.com", + }, + }, + }, + "projects.iot1click": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "proton": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "qbusiness": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "qbusiness.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "qbusiness.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: 
"qbusiness.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "qbusiness.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "qbusiness.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "qbusiness.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "qbusiness.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "qbusiness.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "qbusiness.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "qbusiness.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "qbusiness.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "qbusiness.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "qbusiness.ca-west-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "qbusiness.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "qbusiness.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "qbusiness.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "qbusiness.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "qbusiness.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "qbusiness.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "qbusiness.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "qbusiness.eu-west-3.api.aws", + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "qbusiness.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "qbusiness.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "qbusiness.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "qbusiness.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "qbusiness.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "qbusiness.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "qbusiness.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "qbusiness.us-west-2.api.aws", + }, + }, + }, + "qldb": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "qldb-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "qldb-fips.ca-central-1.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "qldb-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "qldb-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "qldb-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "qldb-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "qldb-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "qldb-fips.us-west-2.amazonaws.com", + }, + }, + }, + "quicksight": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "ram": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ 
+ Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "ram-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "ram-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ram-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "ram-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "ram-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "ram-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram-fips.us-west-2.amazonaws.com", + }, + }, + }, + "rbin": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: 
"eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "rbin-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "rbin-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "rbin-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "rbin-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "rbin-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "rbin-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-west-2.amazonaws.com", + }, + }, + }, + "rds": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "rds-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "rds-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "rds-fips.ca-central-1", + }: endpoint{ + Hostname: "rds-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds-fips.ca-west-1", + }: endpoint{ + Hostname: "rds-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds-fips.us-east-1", + }: endpoint{ + Hostname: "rds-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds-fips.us-east-2", + }: endpoint{ + Hostname: "rds-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds-fips.us-west-1", + }: endpoint{ + Hostname: "rds-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds-fips.us-west-2", + }: endpoint{ + Hostname: "rds-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.ca-central-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.ca-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-east-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + 
Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-west-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + SSLCommonName: "{service}.{dnsSuffix}", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-east-1.amazonaws.com", + SSLCommonName: "{service}.{dnsSuffix}", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "rds-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "rds-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "rds-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "rds-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "rds-data": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "rds-data-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: 
"fips-us-east-2", + }: endpoint{ + Hostname: "rds-data-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "rds-data-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "rds-data-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-data-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-data-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-data-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-data-fips.us-west-2.amazonaws.com", + }, + }, + }, + "redshift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "redshift-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "redshift-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "redshift-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: 
"fips-us-east-2", + }: endpoint{ + Hostname: "redshift-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "redshift-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "redshift-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-fips.us-west-2.amazonaws.com", + }, + }, + }, + "redshift-serverless": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "rekognition": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "rekognition-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: 
"rekognition-fips.ca-central-1", + }: endpoint{ + Hostname: "rekognition-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition-fips.us-east-1", + }: endpoint{ + Hostname: "rekognition-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition-fips.us-east-2", + }: endpoint{ + Hostname: "rekognition-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition-fips.us-west-1", + }: endpoint{ + Hostname: "rekognition-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition-fips.us-west-2", + }: endpoint{ + Hostname: "rekognition-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.ca-central-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.us-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.us-east-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.us-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.us-west-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "rekognition-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + 
+				endpointKey{
+					Region: "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "rekognition-fips.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2-fips",
+				}: endpoint{
+					Hostname: "rekognition-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "rekognition-fips.us-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1-fips",
+				}: endpoint{
+					Hostname: "rekognition-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "rekognition-fips.us-west-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2-fips",
+				}: endpoint{
+					Hostname: "rekognition-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+			},
+		},
+		"resiliencehub": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+			},
+		},
+		"resource-explorer-2": service{
+			Defaults: endpointDefaults{
+				defaultKey{}: endpoint{
+					DNSSuffix: "api.aws",
+				},
+				defaultKey{
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "{service}-fips.{region}.{dnsSuffix}",
+					DNSSuffix: "api.aws",
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{
+					Hostname: "resource-explorer-2.af-south-1.api.aws",
+				},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{
+					Hostname: "resource-explorer-2.ap-east-1.api.aws",
+				},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{
+					Hostname: "resource-explorer-2.ap-northeast-1.api.aws",
+				},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{
+					Hostname: "resource-explorer-2.ap-northeast-2.api.aws",
+				},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{
+					Hostname: "resource-explorer-2.ap-northeast-3.api.aws",
+				},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{
+					Hostname: "resource-explorer-2.ap-south-1.api.aws",
+				},
+				endpointKey{
+					Region: "ap-south-2",
+				}: endpoint{
+					Hostname: "resource-explorer-2.ap-south-2.api.aws",
+				},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{
+					Hostname: "resource-explorer-2.ap-southeast-1.api.aws",
+				},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{
+					Hostname: "resource-explorer-2.ap-southeast-2.api.aws",
+				},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{
+					Hostname: "resource-explorer-2.ap-southeast-3.api.aws",
+				},
+				endpointKey{
+					Region: "ap-southeast-4",
+				}: endpoint{
+					Hostname: "resource-explorer-2.ap-southeast-4.api.aws",
+				},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{
+					Hostname: "resource-explorer-2.ca-central-1.api.aws",
+				},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{
+					Hostname: "resource-explorer-2.eu-central-1.api.aws",
+				},
+				endpointKey{
+					Region: "eu-central-2",
+				}: endpoint{
+					Hostname: "resource-explorer-2.eu-central-2.api.aws",
+				},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{
+					Hostname: "resource-explorer-2.eu-north-1.api.aws",
+				},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{
+					Hostname: "resource-explorer-2.eu-south-1.api.aws",
+				},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{
+					Hostname: "resource-explorer-2.eu-west-1.api.aws",
+				},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{
+					Hostname: "resource-explorer-2.eu-west-2.api.aws",
+				},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{
+					Hostname: "resource-explorer-2.eu-west-3.api.aws",
+				},
+				endpointKey{
+					Region: "il-central-1",
+				}: endpoint{
+					Hostname: "resource-explorer-2.il-central-1.api.aws",
+				},
+				endpointKey{
+					Region: "me-central-1",
+				}: endpoint{
+					Hostname: "resource-explorer-2.me-central-1.api.aws",
+				},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{
+					Hostname: "resource-explorer-2.me-south-1.api.aws",
+				},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{
+					Hostname: "resource-explorer-2.sa-east-1.api.aws",
+				},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{
+					Hostname: "resource-explorer-2.us-east-1.api.aws",
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{
+					Hostname: "resource-explorer-2.us-east-2.api.aws",
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{
+					Hostname: "resource-explorer-2.us-west-1.api.aws",
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{
+					Hostname: "resource-explorer-2.us-west-2.api.aws",
+				},
+			},
+		},
+		"resource-groups": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-4",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "fips-us-east-1",
+				}: endpoint{
+					Hostname: "resource-groups-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-2",
+				}: endpoint{
+					Hostname: "resource-groups-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-1",
+				}: endpoint{
+					Hostname: "resource-groups-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-2",
+				}: endpoint{
+					Hostname: "resource-groups-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "il-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "resource-groups-fips.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "resource-groups-fips.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "resource-groups-fips.us-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "resource-groups-fips.us-west-2.amazonaws.com",
+				},
+			},
+		},
+		"robomaker": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+			},
+		},
+		"rolesanywhere": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-4",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "fips-us-east-1",
+				}: endpoint{
+ Hostname: "rolesanywhere-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "rolesanywhere-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "rolesanywhere-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "rolesanywhere-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rolesanywhere-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rolesanywhere-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rolesanywhere-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rolesanywhere-fips.us-west-2.amazonaws.com", + }, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "route53.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "aws-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "route53-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "fips-aws-global", + }: endpoint{ + Hostname: "route53-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "route53-recovery-control-config": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "route53-recovery-control-config.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "route53domains": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "route53resolver": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: 
endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "rum": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "runtime-v2-lex": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "runtime.lex": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "runtime-fips.lex.{region}.{dnsSuffix}", + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "runtime-fips.lex.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "runtime-fips.lex.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + 
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "runtime-fips.lex.us-west-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2-fips",
+				}: endpoint{
+					Hostname: "runtime-fips.lex.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+			},
+		},
+		"runtime.sagemaker": service{
+			Defaults: endpointDefaults{
+				defaultKey{}: endpoint{},
+				defaultKey{
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "runtime-fips.sagemaker.{region}.{dnsSuffix}",
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-4",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "il-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "runtime-fips.sagemaker.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-1-fips",
+				}: endpoint{
+					Hostname: "runtime-fips.sagemaker.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "runtime-fips.sagemaker.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2-fips",
+				}: endpoint{
+					Hostname: "runtime-fips.sagemaker.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "runtime-fips.sagemaker.us-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1-fips",
+				}: endpoint{
+					Hostname: "runtime-fips.sagemaker.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "runtime-fips.sagemaker.us-west-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2-fips",
+				}: endpoint{
+					Hostname: "runtime-fips.sagemaker.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+			},
+		},
+		"s3": service{
+			PartitionEndpoint: "aws-global",
+			IsRegionalized: boxedTrue,
+			Defaults: endpointDefaults{
+				defaultKey{}: endpoint{
+					Protocols: []string{"http", "https"},
+					SignatureVersions: []string{"s3v4"},
+				},
+				defaultKey{
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "{service}.dualstack.{region}.{dnsSuffix}",
+					DNSSuffix: "amazonaws.com",
+					Protocols: []string{"http", "https"},
+					SignatureVersions: []string{"s3v4"},
+				},
+				defaultKey{
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname: "{service}-fips.dualstack.{region}.{dnsSuffix}",
+					DNSSuffix: "amazonaws.com",
+					Protocols: []string{"http", "https"},
+					SignatureVersions: []string{"s3v4"},
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "af-south-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.af-south-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.ap-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{
+					Hostname: "s3.ap-northeast-1.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+				endpointKey{
+					Region: "ap-northeast-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.ap-northeast-1.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.ap-northeast-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.ap-northeast-3.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.ap-south-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ap-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-2",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.ap-south-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{
+					Hostname: "s3.ap-southeast-1.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+				endpointKey{
+					Region: "ap-southeast-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.ap-southeast-1.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{
+					Hostname: "s3.ap-southeast-2.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+				endpointKey{
+					Region: "ap-southeast-2",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.ap-southeast-2.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.ap-southeast-3.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ap-southeast-4",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-4",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.ap-southeast-4.amazonaws.com",
+				},
+				endpointKey{
+					Region: "aws-global",
+				}: endpoint{
+					Hostname: "s3.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.ca-central-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ca-central-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "s3-fips.ca-central-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ca-central-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname: "s3-fips.dualstack.ca-central-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.ca-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ca-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "s3-fips.ca-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ca-west-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname: "s3-fips.dualstack.ca-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.eu-central-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "eu-central-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-2",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.eu-central-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.eu-north-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.eu-south-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "eu-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-2",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.eu-south-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{
+					Hostname: "s3.eu-west-1.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+				endpointKey{
+					Region: "eu-west-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.eu-west-1.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.eu-west-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.eu-west-3.amazonaws.com",
+				},
+				endpointKey{
+					Region: "fips-ca-central-1",
+				}: endpoint{
+					Hostname: "s3-fips.ca-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-central-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-ca-west-1",
+				}: endpoint{
+					Hostname: "s3-fips.ca-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-1",
+				}: endpoint{
+					Hostname: "s3-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-2",
+				}: endpoint{
+					Hostname: "s3-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-1",
+				}: endpoint{
+					Hostname: "s3-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-2",
+				}: endpoint{
+					Hostname: "s3-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "il-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "il-central-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.il-central-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "me-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-central-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.me-central-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-south-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.me-south-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "s3-external-1",
+				}: endpoint{
+					Hostname: "s3-external-1.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{
+					Hostname: "s3.sa-east-1.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+				endpointKey{
+					Region: "sa-east-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.sa-east-1.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{
+					Hostname: "s3.us-east-1.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.us-east-1.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "s3-fips.us-east-1.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname: "s3-fips.dualstack.us-east-1.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "s3-fips.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname: "s3-fips.dualstack.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{
+					Hostname: "s3.us-west-1.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.us-west-1.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "s3-fips.us-west-1.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname: "s3-fips.dualstack.us-west-1.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{
+					Hostname: "s3.us-west-2.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.us-west-2.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "s3-fips.us-west-2.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname: "s3-fips.dualstack.us-west-2.amazonaws.com",
+					SignatureVersions: []string{"s3", "s3v4"},
+				},
+			},
+		},
+		"s3-control": service{
+			Defaults: endpointDefaults{
+				defaultKey{}: endpoint{
+					Protocols: []string{"https"},
+					SignatureVersions: []string{"s3v4"},
+				},
+				defaultKey{
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "{service}.dualstack.{region}.{dnsSuffix}",
+					DNSSuffix: "amazonaws.com",
+					Protocols: []string{"https"},
+					SignatureVersions: []string{"s3v4"},
+				},
+				defaultKey{
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname: "{service}-fips.dualstack.{region}.{dnsSuffix}",
+					DNSSuffix: "amazonaws.com",
+					Protocols: []string{"https"},
+					SignatureVersions: []string{"s3v4"},
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{
+					Hostname: "s3-control.ap-northeast-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "ap-northeast-1",
+					},
+				},
+				endpointKey{
+					Region: "ap-northeast-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3-control.dualstack.ap-northeast-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "ap-northeast-1",
+					},
+				},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{
+					Hostname: "s3-control.ap-northeast-2.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "ap-northeast-2",
+					},
+				},
+				endpointKey{
+					Region: "ap-northeast-2",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3-control.dualstack.ap-northeast-2.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "ap-northeast-2",
+					},
+				},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{
+					Hostname: "s3-control.ap-northeast-3.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "ap-northeast-3",
+					},
+				},
+				endpointKey{
+					Region: "ap-northeast-3",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3-control.dualstack.ap-northeast-3.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "ap-northeast-3",
+					},
+				},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{
+					Hostname: "s3-control.ap-south-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "ap-south-1",
+					},
+				},
+				endpointKey{
+					Region: "ap-south-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3-control.dualstack.ap-south-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "ap-south-1",
+					},
+				},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{
+					Hostname: "s3-control.ap-southeast-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "ap-southeast-1",
+					},
+				},
+				endpointKey{
+					Region: "ap-southeast-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3-control.dualstack.ap-southeast-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "ap-southeast-1",
+					},
+				},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{
+					Hostname: "s3-control.ap-southeast-2.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "ap-southeast-2",
+					},
+				},
+				endpointKey{
+					Region: "ap-southeast-2",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3-control.dualstack.ap-southeast-2.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "ap-southeast-2",
+					},
+				},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{
+					Hostname: "s3-control.ca-central-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "ca-central-1",
+					},
+				},
+				endpointKey{
+					Region: "ca-central-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3-control.dualstack.ca-central-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "ca-central-1",
+					},
+				},
+				endpointKey{
+					Region: "ca-central-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "s3-control-fips.ca-central-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "ca-central-1",
+					},
+				},
+				endpointKey{
+					Region: "ca-central-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname: "s3-control-fips.dualstack.ca-central-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "ca-central-1",
+					},
+				},
+				endpointKey{
+					Region: "ca-central-1-fips",
+				}: endpoint{
+					Hostname: "s3-control-fips.ca-central-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "ca-central-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{
+					Hostname: "s3-control.eu-central-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "eu-central-1",
+					},
+				},
+				endpointKey{
+					Region: "eu-central-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3-control.dualstack.eu-central-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "eu-central-1",
+					},
+				},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{
+					Hostname: "s3-control.eu-north-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "eu-north-1",
+					},
+				},
+				endpointKey{
+					Region: "eu-north-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3-control.dualstack.eu-north-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "eu-north-1",
+					},
+				},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{
+					Hostname: "s3-control.eu-west-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "eu-west-1",
+					},
+				},
+				endpointKey{
+					Region: "eu-west-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3-control.dualstack.eu-west-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "eu-west-1",
+					},
+				},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{
+					Hostname: "s3-control.eu-west-2.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "eu-west-2",
+					},
+				},
+				endpointKey{
+					Region: "eu-west-2",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3-control.dualstack.eu-west-2.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "eu-west-2",
+					},
+				},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{
+					Hostname: "s3-control.eu-west-3.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "eu-west-3",
+					},
+				},
+				endpointKey{
+					Region: "eu-west-3",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3-control.dualstack.eu-west-3.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "eu-west-3",
+					},
+				},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{
+					Hostname: "s3-control.sa-east-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "sa-east-1",
+					},
+				},
+				endpointKey{
+					Region: "sa-east-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3-control.dualstack.sa-east-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "sa-east-1",
+					},
+				},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{
+					Hostname: "s3-control.us-east-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3-control.dualstack.us-east-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "s3-control-fips.us-east-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname: "s3-control-fips.dualstack.us-east-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+				endpointKey{
+					Region: "us-east-1-fips",
+				}: endpoint{
+					Hostname: "s3-control-fips.us-east-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{
+					Hostname: "s3-control.us-east-2.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+				},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3-control.dualstack.us-east-2.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+				},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "s3-control-fips.us-east-2.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+				},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname: "s3-control-fips.dualstack.us-east-2.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+				},
+				endpointKey{
+					Region: "us-east-2-fips",
+				}: endpoint{
+					Hostname: "s3-control-fips.us-east-2.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{
+					Hostname: "s3-control.us-west-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+				},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3-control.dualstack.us-west-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+				},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "s3-control-fips.us-west-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+				},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname: "s3-control-fips.dualstack.us-west-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+				},
+				endpointKey{
+					Region: "us-west-1-fips",
+				}: endpoint{
+					Hostname: "s3-control-fips.us-west-1.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{
+					Hostname: "s3-control.us-west-2.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+				},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3-control.dualstack.us-west-2.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+				},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "s3-control-fips.us-west-2.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+				},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname: "s3-control-fips.dualstack.us-west-2.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+				},
+				endpointKey{
+					Region: "us-west-2-fips",
+				}: endpoint{
+					Hostname: "s3-control-fips.us-west-2.amazonaws.com",
+					SignatureVersions: []string{"s3v4"},
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+			},
+		},
+		"s3-outposts": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "af-south-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+					Variant: fipsVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "fips-ca-central-1",
+				}: endpoint{
+
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-1",
+				}: endpoint{
+
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-2",
+				}: endpoint{
+
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-1",
+				}: endpoint{
+
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-2",
+				}: endpoint{
+
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "il-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "il-central-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-south-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{},
+			},
+		},
+		"sagemaker-geospatial": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+			},
+		},
+		"savingsplans": service{
+			PartitionEndpoint: "aws-global",
+			IsRegionalized: boxedFalse,
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "aws-global",
+				}: endpoint{
+					Hostname: "savingsplans.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+				},
+			},
+		},
+		"scheduler": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-4",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+			},
+		},
+		"schemas": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+			},
+		},
+		"sdb": service{
+			Defaults: endpointDefaults{
+				defaultKey{}: endpoint{
+					Protocols: []string{"http", "https"},
+					SignatureVersions: []string{"v2"},
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{
+					Hostname: "sdb.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+			},
+		},
+		"secretsmanager": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "af-south-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-2",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-4",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-4",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+					Variant: fipsVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1-fips",
+				}: endpoint{
+
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+					Variant: fipsVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1-fips",
+				}: endpoint{
+
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-2",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-2",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "il-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "il-central-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "me-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-central-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-south-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1-fips",
+				}: endpoint{
+
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2-fips",
+				}: endpoint{
+
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1-fips",
+				}: endpoint{
+
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2-fips",
+				}: endpoint{
+
+					Deprecated: boxedTrue,
+				},
+			},
+		},
+		"securityhub": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-4",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "fips-us-east-1",
+				}: endpoint{
+					Hostname: "securityhub-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-2",
+				}: endpoint{
+					Hostname: "securityhub-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-1",
+				}: endpoint{
+					Hostname: "securityhub-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-2",
+				}: endpoint{
+					Hostname: "securityhub-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "il-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "securityhub-fips.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "securityhub-fips.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "securityhub-fips.us-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "securityhub-fips.us-west-2.amazonaws.com",
+				},
+			},
+		},
+		"securitylake": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{},
+				endpointKey{
+					Region: "fips-us-east-1",
+				}: endpoint{
+					Hostname: "securitylake-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-2",
+				}: endpoint{
+					Hostname: "securitylake-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-1",
+				}: endpoint{
+					Hostname: "securitylake-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-2",
+				}: endpoint{
+					Hostname: "securitylake-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "securitylake-fips.us-east-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "securitylake-fips.us-east-2.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "securitylake-fips.us-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{},
+				endpointKey{
+					Region: "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "securitylake-fips.us-west-2.amazonaws.com",
+				},
+			},
+		},
+		"serverlessrepo": service{
+			Defaults: endpointDefaults{
+				defaultKey{}: endpoint{
+					Protocols: []string{"https"},
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{
+					Protocols: []string{"https"},
+				},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{
+					Protocols: []string{"https"},
+				},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{
+					Protocols: []string{"https"},
+				},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{
+					Protocols: []string{"https"},
+				},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{
+					Protocols: []string{"https"},
+				},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{
+					Protocols: []string{"https"},
+				},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{
+					Protocols: []string{"https"},
+				},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{
+					Protocols: []string{"https"},
+				},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{
+					Protocols: []string{"https"},
+				},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{
+					Protocols: []string{"https"},
+				},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{
+					Protocols: []string{"https"},
+				},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{
+					Protocols: []string{"https"},
+				},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{
+					Protocols: []string{"https"},
+				},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{
+					Protocols: []string{"https"},
+				},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{
+					Protocols: []string{"https"},
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{
+					Protocols: []string{"https"},
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{
+					Protocols: []string{"https"},
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{
+					Protocols: []string{"https"},
+				},
+			},
+		},
+		"servicecatalog": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: 
"af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "servicecatalog-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "servicecatalog-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "servicecatalog-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "servicecatalog-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "servicecatalog-appregistry": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: 
"ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-appregistry-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "servicecatalog-appregistry-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "servicecatalog-appregistry-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "servicecatalog-appregistry-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "servicecatalog-appregistry-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "servicecatalog-appregistry-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-appregistry-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-appregistry-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-appregistry-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-appregistry-fips.us-west-2.amazonaws.com", + }, + }, + }, + "servicediscovery": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-east-1.api.aws", + }, + endpointKey{ + Region: 
"ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-south-2.api.aws", + }, + endpointKey{ + 
Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-west-3.api.aws", + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "servicediscovery-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "servicediscovery-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "servicediscovery-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + 
Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "servicediscovery-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "servicequotas": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "session.qldb": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "session.qldb-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "session.qldb-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "session.qldb-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", 
+ }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "session.qldb-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "session.qldb-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "session.qldb-fips.us-west-2.amazonaws.com", + }, + }, + }, + "shield": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SSLCommonName: "shield.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "shield.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "aws-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "shield-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "fips-aws-global", + }: endpoint{ + Hostname: "shield-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "signer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "signer-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "signer-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "signer-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "signer-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-verification-us-east-1", + }: endpoint{ + Hostname: "verification.signer-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "fips-verification-us-east-2", + }: endpoint{ + Hostname: "verification.signer-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + 
endpointKey{ + Region: "fips-verification-us-west-1", + }: endpoint{ + Hostname: "verification.signer-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "fips-verification-us-west-2", + }: endpoint{ + Hostname: "verification.signer-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "signer-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "signer-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "signer-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "signer-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "verification-af-south-1", + }: endpoint{ + Hostname: "verification.signer.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "verification-ap-east-1", + }: endpoint{ + Hostname: "verification.signer.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "verification-ap-northeast-1", + }: endpoint{ + Hostname: "verification.signer.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "verification-ap-northeast-2", + }: endpoint{ + Hostname: "verification.signer.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "verification-ap-south-1", + }: endpoint{ + Hostname: "verification.signer.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "verification-ap-southeast-1", + }: endpoint{ + Hostname: "verification.signer.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "verification-ap-southeast-2", + }: endpoint{ + Hostname: "verification.signer.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "verification-ca-central-1", + }: endpoint{ + Hostname: "verification.signer.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "verification-eu-central-1", + }: endpoint{ + Hostname: "verification.signer.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "verification-eu-north-1", + }: endpoint{ + Hostname: "verification.signer.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "verification-eu-south-1", + }: endpoint{ + Hostname: "verification.signer.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "verification-eu-west-1", + }: 
endpoint{ + Hostname: "verification.signer.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "verification-eu-west-2", + }: endpoint{ + Hostname: "verification.signer.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "verification-eu-west-3", + }: endpoint{ + Hostname: "verification.signer.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "verification-me-south-1", + }: endpoint{ + Hostname: "verification.signer.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "verification-sa-east-1", + }: endpoint{ + Hostname: "verification.signer.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "verification-us-east-1", + }: endpoint{ + Hostname: "verification.signer.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "verification-us-east-2", + }: endpoint{ + Hostname: "verification.signer.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "verification-us-west-1", + }: endpoint{ + Hostname: "verification.signer.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "verification-us-west-2", + }: endpoint{ + Hostname: "verification.signer.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "simspaceweaver": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "sms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "sms-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-fips.us-west-2.amazonaws.com", + }, + }, + }, + "sms-voice": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-voice-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "sms-voice-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + 
endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "sms-voice-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "sms-voice-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-voice-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-voice-fips.us-west-2.amazonaws.com", + }, + }, + }, + "snowball": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.ap-northeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.ap-northeast-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.ap-northeast-3.amazonaws.com", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.ap-south-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.ap-southeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.ap-southeast-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.eu-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.eu-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.eu-west-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.eu-west-3.amazonaws.com", + }, + endpointKey{ + Region: "fips-ap-northeast-1", + }: endpoint{ + Hostname: "snowball-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + Deprecated: boxedTrue, + }, + 
endpointKey{ + Region: "fips-ap-northeast-2", + }: endpoint{ + Hostname: "snowball-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-3", + }: endpoint{ + Hostname: "snowball-fips.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-south-1", + }: endpoint{ + Hostname: "snowball-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-1", + }: endpoint{ + Hostname: "snowball-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-2", + }: endpoint{ + Hostname: "snowball-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "snowball-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-central-1", + }: endpoint{ + Hostname: "snowball-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-1", + }: endpoint{ + Hostname: "snowball-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-2", + }: endpoint{ + Hostname: "snowball-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-3", + }: endpoint{ + Hostname: "snowball-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-sa-east-1", + }: endpoint{ + Hostname: "snowball-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "snowball-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "snowball-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "snowball-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "snowball-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.sa-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"snowball-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.us-west-2.amazonaws.com", + }, + }, + }, + "sns": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sns-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "sns-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "sns-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "sns-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "sns-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "sns-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sns-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: 
"us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sns-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sns-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sns-fips.us-west-2.amazonaws.com", + }, + }, + }, + "sqs": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "sqs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "sqs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "sqs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "sqs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + SSLCommonName: "queue.{dnsSuffix}", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sqs-fips.us-east-1.amazonaws.com", + SSLCommonName: "queue.{dnsSuffix}", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sqs-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ 
+ Hostname: "sqs-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sqs-fips.us-west-2.amazonaws.com", + }, + }, + }, + "ssm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "ssm-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "ssm-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ssm-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "ssm-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "ssm-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "ssm-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"ssm-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-fips.us-west-2.amazonaws.com", + }, + }, + }, + "ssm-contacts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ssm-contacts-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "ssm-contacts-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "ssm-contacts-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "ssm-contacts-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-contacts-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-contacts-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-contacts-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-contacts-fips.us-west-2.amazonaws.com", + }, + }, + }, + "ssm-incidents": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-incidents-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", 
+ }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "ssm-incidents-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ssm-incidents-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "ssm-incidents-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "ssm-incidents-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "ssm-incidents-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-incidents-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-incidents-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-incidents-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-incidents-fips.us-west-2.amazonaws.com", + }, + }, + }, + "ssm-sap": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-sap-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "ssm-sap-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ssm-sap-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: 
endpoint{ + Hostname: "ssm-sap-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "ssm-sap-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "ssm-sap-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-sap-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-sap-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-sap-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-sap-fips.us-west-2.amazonaws.com", + }, + }, + }, + "sso": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "states": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: 
endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "states-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "states-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "states-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "states-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "states-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "states-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "states-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "states-fips.us-west-2.amazonaws.com", + }, + }, + }, + "storagegateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "storagegateway-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: 
boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "storagegateway-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "storagegateway-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "storagegateway-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "storagegateway-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "streams.dynamodb": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + 
endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "local", + }: endpoint{ + Hostname: "localhost:8000", + Protocols: []string{"http"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "sts": service{ + PartitionEndpoint: "aws-global", + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "sts.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sts-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "sts-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sts-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "sts-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sts-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: 
"sts-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sts-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "sts-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "support": service{ + PartitionEndpoint: "aws-global", + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "support.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "supportapp": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "swf": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "swf-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "swf-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "swf-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "swf-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "swf-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + 
endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "swf-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "swf-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "swf-fips.us-west-2.amazonaws.com", + }, + }, + }, + "synthetics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "synthetics-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "synthetics-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "synthetics-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "synthetics-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "synthetics-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "synthetics-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "synthetics-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"synthetics-fips.us-west-2.amazonaws.com", + }, + }, + }, + "tagging": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "textract": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "textract-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "textract-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "textract-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "textract-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "textract-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "textract-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: 
"us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "textract-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "textract-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "textract-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "textract-fips.us-west-2.amazonaws.com", + }, + }, + }, + "thinclient": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "tnb": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "transcribe": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "fips.transcribe.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "fips.transcribe.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "fips.transcribe.us-east-2.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "fips.transcribe.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "fips.transcribe.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.us-west-2.amazonaws.com", + }, + }, + }, + "transcribestreaming": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "transcribestreaming-ca-central-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transcribestreaming-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-fips-ca-central-1", + }: endpoint{ + Hostname: "transcribestreaming-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-fips-us-east-1", + }: endpoint{ + Hostname: "transcribestreaming-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-fips-us-east-2", + }: endpoint{ + Hostname: "transcribestreaming-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-fips-us-west-2", + }: endpoint{ + Hostname: "transcribestreaming-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-us-east-1", + }: 
endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transcribestreaming-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-us-east-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transcribestreaming-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-us-west-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transcribestreaming-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "transfer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transfer-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "transfer-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "transfer-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "transfer-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "transfer-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + 
endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "transfer-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transfer-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transfer-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transfer-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transfer-fips.us-west-2.amazonaws.com", + }, + }, + }, + "translate": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "translate-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "translate-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "translate-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "translate-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "translate-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "translate-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "verifiedpermissions": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + 
Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "voice-chime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "voice-chime-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "voice-chime-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "voice-chime-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "voice-chime-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "voice-chime-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "voice-chime-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "voiceid": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "voiceid-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + 
endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "voiceid-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "voiceid-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "voiceid-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "voiceid-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "voiceid-fips.us-west-2.amazonaws.com", + }, + }, + }, + "vpc-lattice": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "waf": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "aws", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "aws-fips", + }: endpoint{ + Hostname: "waf-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "waf.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "aws-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "aws-global-fips", + }: endpoint{ + Hostname: "waf-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "waf-regional": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "waf-regional.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "af-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "waf-regional.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, 
+ endpointKey{ + Region: "ap-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "waf-regional.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "waf-regional.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "waf-regional.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + endpointKey{ + Region: "ap-northeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "waf-regional.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "waf-regional.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, + endpointKey{ + Region: "ap-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "waf-regional.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "waf-regional.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "waf-regional.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ap-southeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "waf-regional.ap-southeast-4.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "ap-southeast-4", + }, + }, + endpointKey{ + Region: "ap-southeast-4", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "waf-regional.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "waf-regional.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "waf-regional.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, + endpointKey{ + Region: "eu-central-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "waf-regional.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "eu-north-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "waf-regional.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "eu-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "waf-regional.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, + endpointKey{ + Region: "eu-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "waf-regional.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "waf-regional.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "waf-regional.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + 
Region: "eu-west-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "fips-af-south-1", + }: endpoint{ + Hostname: "waf-regional-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-east-1", + }: endpoint{ + Hostname: "waf-regional-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-1", + }: endpoint{ + Hostname: "waf-regional-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-2", + }: endpoint{ + Hostname: "waf-regional-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-3", + }: endpoint{ + Hostname: "waf-regional-fips.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-south-1", + }: endpoint{ + Hostname: "waf-regional-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-south-2", + }: endpoint{ + Hostname: "waf-regional-fips.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-1", + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-2", + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-3", + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-4", + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "waf-regional-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-central-1", + }: endpoint{ + Hostname: "waf-regional-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-central-2", + }: endpoint{ + Hostname: "waf-regional-fips.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-north-1", + }: endpoint{ + Hostname: "waf-regional-fips.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-south-1", + }: endpoint{ + Hostname: 
"waf-regional-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-south-2", + }: endpoint{ + Hostname: "waf-regional-fips.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-1", + }: endpoint{ + Hostname: "waf-regional-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-2", + }: endpoint{ + Hostname: "waf-regional-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-3", + }: endpoint{ + Hostname: "waf-regional-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-il-central-1", + }: endpoint{ + Hostname: "waf-regional-fips.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-me-central-1", + }: endpoint{ + Hostname: "waf-regional-fips.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-me-south-1", + }: endpoint{ + Hostname: "waf-regional-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-sa-east-1", + }: endpoint{ + Hostname: "waf-regional-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "waf-regional-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "waf-regional-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "waf-regional-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "waf-regional-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "waf-regional.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "il-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "waf-regional.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: 
"waf-regional.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "me-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "waf-regional.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "waf-regional.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "waf-regional.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "waf-regional.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "waf-regional.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "wafv2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "wafv2.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "af-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "wafv2.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "ap-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "wafv2.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "wafv2.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "wafv2.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + endpointKey{ + Region: "ap-northeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "wafv2.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "wafv2.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, + endpointKey{ + Region: "ap-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "wafv2.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "wafv2.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "wafv2.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ap-southeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "wafv2.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, + endpointKey{ + Region: "ap-southeast-4", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "wafv2.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "wafv2.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + Variant: 
fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "wafv2.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, + endpointKey{ + Region: "eu-central-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "wafv2.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "eu-north-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "wafv2.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "eu-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "wafv2.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, + endpointKey{ + Region: "eu-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "wafv2.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "wafv2.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "wafv2.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "eu-west-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "fips-af-south-1", + }: endpoint{ + Hostname: "wafv2-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-east-1", + }: endpoint{ + Hostname: "wafv2-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-1", + }: endpoint{ + Hostname: "wafv2-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-2", + }: endpoint{ + Hostname: "wafv2-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + Deprecated: 
boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-3", + }: endpoint{ + Hostname: "wafv2-fips.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-south-1", + }: endpoint{ + Hostname: "wafv2-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-south-2", + }: endpoint{ + Hostname: "wafv2-fips.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-1", + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-2", + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-3", + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-4", + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "wafv2-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-central-1", + }: endpoint{ + Hostname: "wafv2-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-central-2", + }: endpoint{ + Hostname: "wafv2-fips.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-north-1", + }: endpoint{ + Hostname: "wafv2-fips.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-south-1", + }: endpoint{ + Hostname: "wafv2-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-south-2", + }: endpoint{ + Hostname: "wafv2-fips.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-1", + }: endpoint{ + Hostname: "wafv2-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-2", + }: endpoint{ + Hostname: "wafv2-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-3", + }: endpoint{ + Hostname: "wafv2-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-il-central-1", + }: endpoint{ + Hostname: "wafv2-fips.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + 
Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-me-central-1", + }: endpoint{ + Hostname: "wafv2-fips.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-me-south-1", + }: endpoint{ + Hostname: "wafv2-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-sa-east-1", + }: endpoint{ + Hostname: "wafv2-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "wafv2-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "wafv2-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "wafv2-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "wafv2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "wafv2.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "il-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "wafv2.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "wafv2.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "me-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "wafv2.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "wafv2.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "wafv2.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ 
+ Hostname: "wafv2-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "wafv2.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "wafv2.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "wellarchitected": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "wisdom": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ui-ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ui-ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ui-ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ui-ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ui-ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ui-eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "ui-eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "ui-us-east-1", + }: endpoint{}, + endpointKey{ + Region: "ui-us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{}, + }, + }, + "workdocs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: 
"ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "workdocs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "workdocs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "workdocs-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "workdocs-fips.us-west-2.amazonaws.com", + }, + }, + }, + "workmail": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "workspaces": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "workspaces-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "workspaces-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "workspaces-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "workspaces-fips.us-west-2.amazonaws.com", + }, + }, + }, + "workspaces-web": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "xray": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: 
"af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "xray-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "xray-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "xray-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "xray-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "xray-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "xray-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "xray-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "xray-fips.us-west-2.amazonaws.com", + }, + }, + }, + }, +} + +// AwsCnPartition returns the Resolver for AWS China. 
+func AwsCnPartition() Partition { + return awscnPartition.Partition() +} + +var awscnPartition = partition{ + ID: "aws-cn", + Name: "AWS China", + DNSSuffix: "amazonaws.com.cn", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: dualStackVariant, + }: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + DNSSuffix: "api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + Regions: regions{ + "cn-north-1": region{ + Description: "China (Beijing)", + }, + "cn-northwest-1": region{ + Description: "China (Ningxia)", + }, + }, + Services: services{ + "access-analyzer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "account": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-cn-global", + }: endpoint{ + Hostname: "account.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "acm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "airflow": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "api.ecr": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "api.ecr.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "api.ecr.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "api.pricing": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "pricing", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "api.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "api.tunneling.iot": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "apigateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "appconfig": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, 
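A minimal usage sketch of how the AwsCnPartition accessor defined above exposes this endpoint table to callers. It assumes the EndpointFor method and the UseDualStackEndpointOption helper from the aws/endpoints package, as shipped in SDK generations that carry the fipsVariant/dualStackVariant keys used throughout this file:

    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws/endpoints"
    )

    func main() {
    	p := endpoints.AwsCnPartition()

    	// Default variant: matches the plain endpointKey{Region: ...} rows.
    	ep, err := p.EndpointFor("s3", "cn-north-1")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(ep.URL) // https://s3.cn-north-1.amazonaws.com.cn

    	// Dual-stack variant: selects the dualStackVariant row for the same
    	// region (s3.dualstack.cn-north-1.amazonaws.com.cn further below).
    	ds, err := p.EndpointFor("s3", "cn-north-1", endpoints.UseDualStackEndpointOption)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(ds.URL)
    }

The deprecated fips-<region> pseudo-region keys (Deprecated: boxedTrue) added in this hunk remain resolvable when passed as the region string, but the intended replacement is the real region name combined with the FIPS variant option.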
+ endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "appconfigdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "applicationinsights": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "appmesh": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "appsync": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "arc-zonal-shift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "athena": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "autoscaling": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "autoscaling-plans": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "backup": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "backupstorage": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "batch": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "budgets": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-cn-global", + }: endpoint{ + Hostname: "budgets.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "cassandra": service{ + Endpoints: 
serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "ce": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-cn-global", + }: endpoint{ + Hostname: "ce.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "cloudcontrolapi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "cloudformation": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "cloudfront": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-cn-global", + }: endpoint{ + Hostname: "cloudfront.cn-northwest-1.amazonaws.com.cn", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "cloudtrail": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "codebuild": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "codecommit": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "codedeploy": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "codepipeline": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "cognito-identity": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, + "compute-optimizer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "compute-optimizer.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "compute-optimizer.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "config": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "cur": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "data-ats.iot": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "iotdata", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "data.ats.iot.cn-north-1.amazonaws.com.cn", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "data.jobs.iot": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: 
"cn-northwest-1", + }: endpoint{}, + }, + }, + "databrew": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "datasync": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "datazone": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.amazonwebservices.com.cn", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.amazonwebservices.com.cn", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "datazone.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "datazone.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "dax": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "directconnect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "dlm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "dms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "docdb": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "rds.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "ds": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "ebs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "ecs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "eks": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "eks-auth": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.amazonwebservices.com.cn", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + 
DNSSuffix: "api.amazonwebservices.com.cn", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "eks-auth.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "eks-auth.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "elasticache": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "elasticbeanstalk": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "elasticfilesystem": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.cn-north-1.amazonaws.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.cn-northwest-1.amazonaws.com.cn", + }, + endpointKey{ + Region: "fips-cn-north-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-cn-northwest-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "elasticloadbalancing": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "elasticmapreduce": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "elasticmapreduce.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "elasticmapreduce.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "emr-containers": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "emr-serverless": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "es": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "events": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: 
"cn-northwest-1", + }: endpoint{}, + }, + }, + "firehose": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "firehose.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "firehose.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "fms": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "fsx": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "gamelift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "glacier": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "glue": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "health": service{ + PartitionEndpoint: "aws-cn-global", IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-cn-global": endpoint{ - Hostname: "cloudfront.cn-northwest-1.amazonaws.com.cn", + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SSLCommonName: "health.cn-northwest-1.amazonaws.com.cn", + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-cn-global", + }: endpoint{ + Hostname: "global.health.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-cn-global", + }: endpoint{ + Hostname: "iam.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "identitystore": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "internetmonitor": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.amazonwebservices.com.cn", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.amazonwebservices.com.cn", + }, + }, + Endpoints: 
serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "internetmonitor.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "internetmonitor.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "iot": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "iotanalytics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, + "iotevents": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, + "ioteventsdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "data.iotevents.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "iotsecuredtunneling": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "iotsitewise": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, + "iottwinmaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "api-cn-north-1", + }: endpoint{ + Hostname: "api.iottwinmaker.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "data-cn-north-1", + }: endpoint{ + Hostname: "data.iottwinmaker.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "kafka": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "kendra-ranking": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.amazonwebservices.com.cn", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.amazonwebservices.com.cn", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "kendra-ranking.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "kendra-ranking.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "kinesis": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "kinesisanalytics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "kinesisvideo": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, + "kms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "lakeformation": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "lambda": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: 
"cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "license-manager": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "license-manager-linux-subscriptions": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "logs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "mediaconvert": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "memory-db": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "metrics.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "monitoring": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "mq": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "neptune": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "rds.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "rds.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "oam": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "oidc": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "oidc.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "oidc.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "organizations": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-cn-global", + }: endpoint{ + Hostname: "organizations.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "personalize": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, + "pi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + 
Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "pipes": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "polly": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "portal.sso": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "portal.sso.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "portal.sso.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "qbusiness": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.amazonwebservices.com.cn", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.amazonwebservices.com.cn", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "qbusiness.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "qbusiness.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "quicksight": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, + "ram": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "rbin": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "rds": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "redshift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "redshift-serverless": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, + "resource-explorer-2": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.amazonwebservices.com.cn", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.amazonwebservices.com.cn", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "resource-explorer-2.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "resource-explorer-2.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "resource-groups": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "rolesanywhere": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-cn-global", + }: endpoint{ + 
Hostname: "route53.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "route53resolver": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "runtime.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "s3": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + defaultKey{ + Variant: dualStackVariant, + }: endpoint{ + Hostname: "{service}.dualstack.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com.cn", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.cn-north-1.amazonaws.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.cn-northwest-1.amazonaws.com.cn", + }, + }, + }, + "s3-control": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + defaultKey{ + Variant: dualStackVariant, + }: endpoint{ + Hostname: "{service}.dualstack.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "s3-control.cn-north-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.cn-north-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "s3-control.cn-northwest-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.cn-northwest-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, CredentialScope: credentialScope{ Region: "cn-northwest-1", }, }, }, }, - "cloudtrail": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "savingsplans": service{ + IsRegionalized: boxedTrue, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "savingsplans.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "savingsplans.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, }, }, - "codebuild": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "schemas": service{ + Endpoints: 
serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, - "codecommit": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "secretsmanager": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{}, }, }, - "codedeploy": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "securityhub": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, - "cognito-identity": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "serverlessrepo": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Protocols: []string{"https"}, + }, }, }, - "config": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "servicecatalog": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, - "cur": service{ - - Endpoints: endpoints{ - "cn-northwest-1": endpoint{}, + "servicediscovery": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.cn-northwest-1.api.amazonwebservices.com.cn", + }, }, }, - "data.jobs.iot": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "servicequotas": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "signer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "verification-cn-north-1", + }: endpoint{ + Hostname: "verification.signer.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "verification-cn-northwest-1", + }: endpoint{ + Hostname: "verification.signer.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, }, }, - "dax": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "sms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, + "snowball": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: 
"cn-north-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.cn-north-1.amazonaws.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.cn-northwest-1.amazonaws.com.cn", + }, + endpointKey{ + Region: "fips-cn-north-1", + }: endpoint{ + Hostname: "snowball-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-cn-northwest-1", + }: endpoint{ + Hostname: "snowball-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "sns": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "ssm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "sso": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "states": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "states.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "states.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "storagegateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "sts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "support": service{ + PartitionEndpoint: "aws-cn-global", + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-cn-global", + }: endpoint{ + Hostname: "support.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "swf": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "synthetics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: 
endpoint{}, + }, + }, + "tagging": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "transcribe": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "cn.transcribe.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "cn.transcribe.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "transcribestreaming": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "transfer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "waf-regional": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "waf-regional.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-north-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "waf-regional.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + endpointKey{ + Region: "fips-cn-north-1", + }: endpoint{ + Hostname: "waf-regional-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-cn-northwest-1", + }: endpoint{ + Hostname: "waf-regional-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "wafv2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "wafv2.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-north-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "wafv2.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + endpointKey{ + Region: "fips-cn-north-1", + }: endpoint{ + Hostname: "wafv2-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-cn-northwest-1", + }: endpoint{ + Hostname: 
"wafv2-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "workspaces": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, }, }, - "directconnect": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, + "xray": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + }, +} + +// AwsUsGovPartition returns the Resolver for AWS GovCloud (US). +func AwsUsGovPartition() Partition { + return awsusgovPartition.Partition() +} + +var awsusgovPartition = partition{ + ID: "aws-us-gov", + Name: "AWS GovCloud (US)", + DNSSuffix: "amazonaws.com", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: dualStackVariant, + }: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + Regions: regions{ + "us-gov-east-1": region{ + Description: "AWS GovCloud (US-East)", }, - "dms": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, + "us-gov-west-1": region{ + Description: "AWS GovCloud (US-West)", }, - "docdb": service{ - - Endpoints: endpoints{ - "cn-northwest-1": endpoint{ - Hostname: "rds.cn-northwest-1.amazonaws.com.cn", + }, + Services: services{ + "access-analyzer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "access-analyzer.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "cn-northwest-1", + Region: "us-gov-east-1", }, }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "access-analyzer.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "access-analyzer.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "access-analyzer.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "access-analyzer.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "access-analyzer.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, 
- "ds": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "ebs": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "acm": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "acm.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "acm.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, - "ec2": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "acm-pca": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + }, }, - }, - "ecs": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "acm-pca.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "acm-pca.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca.us-gov-west-1.amazonaws.com", + }, }, }, - "eks": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, + "api.detective": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.detective-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "api.detective-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.detective-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "api.detective-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, - "elasticache": 
service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "api.ecr": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.{region}.{dnsSuffix}", + }, }, - }, - "elasticbeanstalk": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "dkr-us-gov-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-gov-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-dkr-us-gov-east-1", + }: endpoint{ + Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-dkr-us-gov-west-1", + }: endpoint{ + Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "api.ecr.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "api.ecr.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, - "elasticfilesystem": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - "fips-cn-north-1": endpoint{ - Hostname: "elasticfilesystem-fips.cn-north-1.amazonaws.com.cn", + "api.sagemaker": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "api-fips.sagemaker.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"api-fips.sagemaker.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "api-fips.sagemaker.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "cn-north-1", + Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - "fips-cn-northwest-1": endpoint{ - Hostname: "elasticfilesystem-fips.cn-northwest-1.amazonaws.com.cn", + endpointKey{ + Region: "us-gov-west-1-fips-secondary", + }: endpoint{ + Hostname: "api.sagemaker.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "cn-northwest-1", + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1-secondary", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1-secondary", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.sagemaker.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, }, }, - "elasticloadbalancing": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, + "api.tunneling.iot": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.{region}.{dnsSuffix}", + }, }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com", + }, }, }, - "elasticmapreduce": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "apigateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "appconfig": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "appconfig.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "appconfig.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appconfig.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"appconfig.us-gov-west-1.amazonaws.com", + }, }, }, - "emr-containers": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "appconfigdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "appconfigdata.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "appconfigdata.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appconfigdata.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appconfigdata.us-gov-west-1.amazonaws.com", + }, }, }, - "es": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "application-autoscaling": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, + }, }, - }, - "events": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "application-autoscaling.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "application-autoscaling.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "application-autoscaling.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "firehose": service{ + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "application-autoscaling.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "application-autoscaling.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "application-autoscaling.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Deprecated: boxedTrue, + }, }, }, - "fms": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "applicationinsights": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "applicationinsights.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "applicationinsights.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, - "fsx": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + 
"appstream2": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "appstream", + }, + }, }, - }, - "gamelift": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "appstream2-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appstream2-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "appstream2-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appstream2-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "appstream2-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, - "glacier": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "arc-zonal-shift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, }, }, - "glue": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "athena": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "athena-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "athena-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "athena-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "athena-fips.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.us-gov-west-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "athena-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "athena-fips.us-gov-west-1.api.aws", + }, }, }, - "greengrass": service{ - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "autoscaling": service{ + Defaults: endpointDefaults{ + defaultKey{}: 
endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, }, }, - "guardduty": service{ - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"https"}, + "autoscaling-plans": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, }, }, - "health": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "backup": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "backup-gateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "backupstorage": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, }, }, - "iam": service{ - PartitionEndpoint: "aws-cn-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-cn-global": endpoint{ - Hostname: "iam.cn-north-1.amazonaws.com.cn", + "batch": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "batch.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "batch.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "cn-north-1", + Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - }, - }, - "iot": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "batch.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "batch.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "batch.us-gov-west-1.amazonaws.com", }, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "iotanalytics": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, }, }, - "iotevents": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "bedrock": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, }, }, - "ioteventsdata": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{ - Hostname: "data.iotevents.cn-north-1.amazonaws.com.cn", + "cassandra": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: 
endpoint{ + Hostname: "cassandra.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "cn-north-1", + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cassandra.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "cassandra.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "cassandra.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cassandra.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "cassandra.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, }, }, - "iotsecuredtunneling": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "cloudcontrolapi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-gov-west-1.amazonaws.com", + }, }, }, - "kafka": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "clouddirectory": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "clouddirectory.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "clouddirectory.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, - "kinesis": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "cloudformation": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "cloudformation.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudformation.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "cloudformation.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + 
Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "cloudformation.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudformation.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "cloudformation.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, - "kinesisanalytics": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "cloudhsm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, }, }, - "kms": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "cloudhsmv2": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "cloudhsm", + }, + }, }, - }, - "lakeformation": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, }, }, - "lambda": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "cloudtrail": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudtrail.us-gov-west-1.amazonaws.com", + }, }, - }, - "license-manager": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "cloudtrail.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "cloudtrail.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudtrail.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudtrail.us-gov-west-1.amazonaws.com", + }, }, }, - "logs": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "codebuild": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codebuild-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "codebuild-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codebuild-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: 
"us-gov-west-1-fips", + }: endpoint{ + Hostname: "codebuild-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, - "mediaconvert": service{ - - Endpoints: endpoints{ - "cn-northwest-1": endpoint{ - Hostname: "subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn", + "codecommit": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "codecommit-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "cn-northwest-1", + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codecommit-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "codecommit-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codecommit-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "codecommit-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, }, }, - "monitoring": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "mq": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "neptune": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{ - Hostname: "rds.cn-north-1.amazonaws.com.cn", + "codedeploy": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codedeploy-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "codedeploy-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "cn-north-1", + Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "cn-northwest-1": endpoint{ - Hostname: "rds.cn-northwest-1.amazonaws.com.cn", + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codedeploy-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "codedeploy-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "cn-northwest-1", + Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, }, }, - "organizations": service{ - PartitionEndpoint: "aws-cn-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-cn-global": endpoint{ - Hostname: "organizations.cn-northwest-1.amazonaws.com.cn", + "codepipeline": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "codepipeline-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "cn-northwest-1", + Region: "us-gov-east-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: 
"codepipeline-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codepipeline-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codepipeline-fips.us-gov-west-1.amazonaws.com", }, }, }, - "personalize": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - }, - }, - "polly": service{ - - Endpoints: endpoints{ - "cn-northwest-1": endpoint{}, - }, - }, - "ram": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "rds": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "redshift": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "codestar-connections": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, }, }, - "resource-groups": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "cognito-identity": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "cognito-identity-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-identity-fips.us-gov-west-1.amazonaws.com", + }, }, }, - "route53": service{ - PartitionEndpoint: "aws-cn-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-cn-global": endpoint{ - Hostname: "route53.amazonaws.com.cn", + "cognito-idp": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "cognito-idp-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "cn-northwest-1", + Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-idp-fips.us-gov-west-1.amazonaws.com", }, }, }, - "route53resolver": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, + "comprehend": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "comprehend-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "comprehend-fips.us-gov-west-1.amazonaws.com", + }, }, }, - "runtime.sagemaker": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "comprehendmedical": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: 
"comprehendmedical-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "comprehendmedical-fips.us-gov-west-1.amazonaws.com", + }, }, }, - "s3": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, - - HasDualStack: boxedTrue, - DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", - }, - Endpoints: endpoints{ - "accesspoint-cn-north-1": endpoint{ - Hostname: "s3-accesspoint.cn-north-1.amazonaws.com.cn", - SignatureVersions: []string{"s3v4"}, + "compute-optimizer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "compute-optimizer-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, }, - "accesspoint-cn-northwest-1": endpoint{ - Hostname: "s3-accesspoint.cn-northwest-1.amazonaws.com.cn", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "compute-optimizer-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, }, - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, }, }, - "s3-control": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, - - HasDualStack: boxedTrue, - DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{ - Hostname: "s3-control.cn-north-1.amazonaws.com.cn", - SignatureVersions: []string{"s3v4"}, + "config": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "config.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "config.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "cn-north-1", + Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "cn-northwest-1": endpoint{ - Hostname: "s3-control.cn-northwest-1.amazonaws.com.cn", - SignatureVersions: []string{"s3v4"}, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "config.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "cn-northwest-1", + Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "config.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "config.us-gov-west-1.amazonaws.com", }, }, }, - "secretsmanager": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "connect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "connect.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "connect.us-gov-west-1.amazonaws.com", + }, }, }, - 
"securityhub": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "controltower": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, }, }, - "serverlessrepo": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{ - Protocols: []string{"https"}, - }, - "cn-northwest-1": endpoint{ + "data-ats.iot": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "iotdata", + }, }, }, - }, - "servicecatalog": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "data.iot-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Service: "iotdata", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "data.iot-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Service: "iotdata", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iot-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iot-fips.us-gov-west-1.amazonaws.com", + }, }, }, - "servicediscovery": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "data.jobs.iot": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-gov-west-1.amazonaws.com", + }, }, }, - "sms": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "databrew": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "databrew.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "databrew.us-gov-west-1.amazonaws.com", + }, }, }, - "snowball": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - "fips-cn-north-1": endpoint{ - Hostname: "snowball-fips.cn-north-1.amazonaws.com.cn", + "datasync": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: 
"fips-us-gov-east-1", + }: endpoint{ + Hostname: "datasync-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "cn-north-1", + Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-cn-northwest-1": endpoint{ - Hostname: "snowball-fips.cn-northwest-1.amazonaws.com.cn", + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "datasync-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "cn-northwest-1", + Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.us-gov-west-1.amazonaws.com", }, }, }, - "sns": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "sqs": service{ - Defaults: endpoint{ - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, + "datazone": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "datazone.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "datazone.us-gov-west-1.api.aws", + }, }, }, - "ssm": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "directconnect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "directconnect.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "directconnect.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, - "states": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "dlm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dlm.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "dlm.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dlm.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "dlm.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, }, }, - "storagegateway": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "dms": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + 
Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.{region}.{dnsSuffix}", + }, }, - }, - "streams.dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "dynamodb", + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "dms", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dms", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dms-fips", + }: endpoint{ + Hostname: "dms.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "dms.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "dms.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, }, }, - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "sts": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, }, - "support": service{ - PartitionEndpoint: "aws-cn-global", - - Endpoints: endpoints{ - "aws-cn-global": endpoint{ - Hostname: "support.cn-north-1.amazonaws.com.cn", + "docdb": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "cn-north-1", + Region: "us-gov-west-1", }, }, }, }, - "swf": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "tagging": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "transcribe": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "cn-north-1": endpoint{ - Hostname: "cn.transcribe.cn-north-1.amazonaws.com.cn", + "drs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "drs-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "cn-north-1", + Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "cn-northwest-1": endpoint{ - Hostname: "cn.transcribe.cn-northwest-1.amazonaws.com.cn", + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "drs-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "cn-northwest-1", + Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "drs-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ 
+ Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "drs-fips.us-gov-west-1.amazonaws.com", }, }, }, - "transcribestreaming": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "transfer": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, - }, - }, - "waf-regional": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{ - Hostname: "waf-regional.cn-north-1.amazonaws.com.cn", + "ds": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "ds-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "cn-north-1", + Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "cn-northwest-1": endpoint{ - Hostname: "waf-regional.cn-northwest-1.amazonaws.com.cn", + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "ds-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "cn-northwest-1", + Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - "fips-cn-north-1": endpoint{ - Hostname: "waf-regional-fips.cn-north-1.amazonaws.com.cn", + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ds-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ds-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "dynamodb": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "dynamodb.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dynamodb.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "dynamodb.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "cn-north-1", + Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-cn-northwest-1": endpoint{ - Hostname: "waf-regional-fips.cn-northwest-1.amazonaws.com.cn", + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "dynamodb.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "cn-northwest-1", + Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, }, }, - "workspaces": service{ - - Endpoints: endpoints{ - "cn-northwest-1": endpoint{}, - }, - }, - "xray": service{ - - Endpoints: endpoints{ - "cn-north-1": endpoint{}, - "cn-northwest-1": endpoint{}, + "ebs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, }, }, - }, -} - -// AwsUsGovPartition returns the Resolver for AWS GovCloud (US). 
-func AwsUsGovPartition() Partition { - return awsusgovPartition.Partition() -} - -var awsusgovPartition = partition{ - ID: "aws-us-gov", - Name: "AWS GovCloud (US)", - DNSSuffix: "amazonaws.com", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - Regions: regions{ - "us-gov-east-1": region{ - Description: "AWS GovCloud (US-East)", - }, - "us-gov-west-1": region{ - Description: "AWS GovCloud (US-West)", - }, - }, - Services: services{ - "access-analyzer": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "access-analyzer.us-gov-east-1.amazonaws.com", + "ec2": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "ec2.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "ec2.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, }, - "us-gov-west-1": endpoint{ - Hostname: "access-analyzer.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "ec2.us-gov-east-1.api.aws", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "ec2.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "ec2.us-gov-west-1.api.aws", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, }, }, }, - "acm": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "acm.us-gov-east-1.amazonaws.com", + "ecs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "ecs-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "us-gov-west-1": endpoint{ - Hostname: "acm.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "ecs-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecs-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecs-fips.us-gov-west-1.amazonaws.com", }, }, }, - "acm-pca": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, + "eks": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "eks.{region}.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, }, - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "acm-pca.us-gov-east-1.amazonaws.com", + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "eks.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: 
"us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ - Hostname: "acm-pca.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "eks.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "eks.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "eks.us-gov-west-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, - "api.detective": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, + "eks-auth": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-east-1-fips": endpoint{ - Hostname: "api.detective-fips.us-gov-east-1.amazonaws.com", + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "eks-auth.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "eks-auth.us-gov-west-1.api.aws", + }, + }, + }, + "elasticache": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticache.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "elasticache.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-gov-east-1", + Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticache.us-gov-west-1.amazonaws.com", }, - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "api.detective-fips.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "elasticache.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, }, }, - "api.ecr": service{ - - Endpoints: endpoints{ - "fips-dkr-us-gov-east-1": endpoint{ - Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", + "elasticbeanstalk": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, }, - "fips-dkr-us-gov-west-1": endpoint{ - Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-gov-west-1", + Region: "us-gov-east-1", }, }, - "fips-us-gov-east-1": endpoint{ - Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + 
Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ - Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, }, - "us-gov-east-1": endpoint{ - Hostname: "api.ecr.us-gov-east-1.amazonaws.com", + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-gov-east-1", + Region: "us-gov-west-1", }, }, - "us-gov-west-1": endpoint{ - Hostname: "api.ecr.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, }, }, - "api.sagemaker": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "api-fips.sagemaker.us-gov-west-1.amazonaws.com", + "elasticfilesystem": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-gov-west-1", + Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "us-gov-west-1-fips-secondary": endpoint{ - Hostname: "api.sagemaker.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - }, - }, - "apigateway": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "application-autoscaling": service{ - Defaults: endpoint{ - Hostname: "autoscaling.{region}.amazonaws.com", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "application-autoscaling", - }, - }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Protocols: []string{"http", "https"}, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-gov-east-1.amazonaws.com", }, - "us-gov-west-1": endpoint{ - Protocols: []string{"http", "https"}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-gov-west-1.amazonaws.com", }, }, }, - "appstream2": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - CredentialScope: credentialScope{ - Service: "appstream", + "elasticloadbalancing": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticloadbalancing.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "elasticloadbalancing.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, }, - }, - Endpoints: endpoints{ - "fips": endpoint{ - Hostname: "appstream2-fips.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "elasticloadbalancing.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: 
"us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticloadbalancing.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticloadbalancing.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, }, - "us-gov-west-1": endpoint{}, }, }, - "athena": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "athena-fips.us-gov-east-1.amazonaws.com", + "elasticmapreduce": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "elasticmapreduce.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ - Hostname: "athena-fips.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "elasticmapreduce.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-gov-east-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "autoscaling": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Protocols: []string{"http", "https"}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Protocols: []string{"https"}, }, - "us-gov-west-1": endpoint{ - Protocols: []string{"http", "https"}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-gov-west-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-gov-west-1.amazonaws.com", + Protocols: []string{"https"}, }, }, }, - "autoscaling-plans": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Protocols: []string{"http", "https"}, + "email": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "email-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, }, - "us-gov-west-1": endpoint{ - Protocols: []string{"http", "https"}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.us-gov-west-1.amazonaws.com", }, }, }, - "backup": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "emr-containers": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, }, }, 
- "batch": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "batch.us-gov-east-1.amazonaws.com", + "es": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "es-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "es-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "es-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.us-gov-west-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "es-fips.us-gov-west-1.amazonaws.com", }, - "fips-us-gov-west-1": endpoint{ - Hostname: "batch.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "es-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "clouddirectory": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, }, }, - "cloudformation": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "cloudformation.us-gov-east-1.amazonaws.com", + "events": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "events.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "us-gov-west-1": endpoint{ - Hostname: "cloudformation.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "events.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - }, - }, - "cloudhsm": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - }, - }, - "cloudhsmv2": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "cloudhsm", + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "events.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "events.us-gov-west-1.amazonaws.com", }, - }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, - "cloudtrail": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "cloudtrail.us-gov-east-1.amazonaws.com", + "firehose": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "firehose-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "us-gov-west-1": endpoint{ - Hostname: "cloudtrail.us-gov-west-1.amazonaws.com", + endpointKey{ 
+ Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "firehose-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "firehose-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "firehose-fips.us-gov-west-1.amazonaws.com", }, }, }, - "codebuild": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-east-1-fips": endpoint{ - Hostname: "codebuild-fips.us-gov-east-1.amazonaws.com", + "fms": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "fms-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "codebuild-fips.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "fms-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.us-gov-west-1.amazonaws.com", }, }, }, - "codecommit": service{ - - Endpoints: endpoints{ - "fips": endpoint{ - Hostname: "codecommit-fips.us-gov-west-1.amazonaws.com", + "fsx": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-prod-us-gov-east-1", + }: endpoint{ + Hostname: "fsx-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-prod-us-gov-west-1", + }: endpoint{ + Hostname: "fsx-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "codedeploy": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-east-1-fips": endpoint{ - Hostname: "codedeploy-fips.us-gov-east-1.amazonaws.com", + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "fsx-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "codedeploy-fips.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "fsx-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - }, - }, - "codepipeline": service{ - - Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ - Hostname: "codepipeline-fips.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "prod-us-gov-east-1", + }: endpoint{ CredentialScope: credentialScope{ - Region: "us-gov-west-1", + Region: "us-gov-east-1", }, + 
Deprecated: boxedTrue, }, - "us-gov-west-1": endpoint{}, - }, - }, - "cognito-identity": service{ - - Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ - Hostname: "cognito-identity-fips.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "prod-us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-gov-west-1", + Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "us-gov-west-1": endpoint{}, - }, - }, - "cognito-idp": service{ - - Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ - Hostname: "cognito-idp-fips.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "prod-us-gov-west-1", + }: endpoint{ CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - "us-gov-west-1": endpoint{}, - }, - }, - "comprehend": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ - Hostname: "comprehend-fips.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "prod-us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-gov-west-1.amazonaws.com", }, - "us-gov-west-1": endpoint{}, }, }, - "comprehendmedical": service{ - - Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ - Hostname: "comprehendmedical-fips.us-gov-west-1.amazonaws.com", + "geo": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "geo-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "geo-fips.us-gov-west-1.amazonaws.com", }, - "us-gov-west-1": endpoint{}, }, }, - "config": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "config.us-gov-east-1.amazonaws.com", + "glacier": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "glacier.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ - Hostname: "config.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "glacier.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glacier.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glacier.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, }, - "us-gov-east-1": endpoint{}, - 
"us-gov-west-1": endpoint{}, - }, - }, - "connect": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, }, }, - "data.jobs.iot": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "data.jobs.iot-fips.us-gov-east-1.amazonaws.com", + "glue": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "glue-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ - Hostname: "data.jobs.iot-fips.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "glue-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "glue.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glue-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "glue-fips.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "glue.us-gov-west-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glue-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "glue-fips.us-gov-west-1.api.aws", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, - "datasync": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "datasync-fips.us-gov-east-1.amazonaws.com", + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "dataplane-us-gov-east-1", + }: endpoint{ + Hostname: "greengrass-ats.iot.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, }, - "fips-us-gov-west-1": endpoint{ - Hostname: "datasync-fips.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "dataplane-us-gov-west-1", + }: endpoint{ + Hostname: "greengrass-ats.iot.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "directconnect": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "directconnect.us-gov-east-1.amazonaws.com", + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "greengrass.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "us-gov-west-1": endpoint{ - Hostname: "directconnect.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "greengrass.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - }, - }, - "dms": service{ - - Endpoints: endpoints{ - "dms-fips": endpoint{ - Hostname: "dms.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: 
"us-gov-west-1", - }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "greengrass.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "greengrass.us-gov-west-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, - "docdb": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{ - Hostname: "rds.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "guardduty.{region}.{dnsSuffix}", + Protocols: []string{"https"}, }, }, - }, - "ds": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "ds-fips.us-gov-east-1.amazonaws.com", + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "guardduty.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "guardduty.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ - Hostname: "ds-fips.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "guardduty.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "guardduty.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, - "dynamodb": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-east-1-fips": endpoint{ - Hostname: "dynamodb.us-gov-east-1.amazonaws.com", + "health": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SSLCommonName: "health.us-gov-west-1.amazonaws.com", + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-us-gov-global", + }: endpoint{ + Hostname: "global.health.us-gov.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-gov-east-1", + Region: "us-gov-west-1", }, }, - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "health-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - }, - }, - "ebs": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "ec2": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "ec2.us-gov-east-1.amazonaws.com", + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ CredentialScope: credentialScope{ - Region: "us-gov-east-1", + Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - "us-gov-west-1": endpoint{ - Hostname: "ec2.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: 
endpoint{ + Hostname: "health-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, }, }, - "ecs": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "ecs-fips.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "ecs-fips.us-gov-west-1.amazonaws.com", + "iam": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-us-gov-global", + }: endpoint{ + Hostname: "iam.us-gov.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "eks": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "eks.us-gov-east-1.amazonaws.com", + endpointKey{ + Region: "aws-us-gov-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iam.us-gov.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-gov-east-1", + Region: "us-gov-west-1", }, }, - "fips-us-gov-west-1": endpoint{ - Hostname: "eks.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "aws-us-gov-global-fips", + }: endpoint{ + Hostname: "iam.us-gov.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "elasticache": service{ - - Endpoints: endpoints{ - "fips": endpoint{ - Hostname: "elasticache.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "iam-govcloud", + }: endpoint{ CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "elasticbeanstalk": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com", + endpointKey{ + Region: "iam-govcloud", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iam.us-gov.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-gov-east-1", + Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - "us-gov-west-1": endpoint{ - Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "iam-govcloud-fips", + }: endpoint{ + Hostname: "iam.us-gov.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, }, }, - "elasticfilesystem": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "elasticfilesystem-fips.us-gov-east-1.amazonaws.com", + "identitystore": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "identitystore.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "identitystore.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ - Hostname: "elasticfilesystem-fips.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "identitystore.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: 
endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "identitystore.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "identitystore.us-gov-west-1.amazonaws.com", + }, + }, + }, + "ingest.timestream": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ingest.timestream.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "ingest.timestream.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, - "elasticloadbalancing": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "elasticloadbalancing.us-gov-east-1.amazonaws.com", + "inspector": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "inspector-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ - Hostname: "elasticloadbalancing.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "inspector-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{ - Protocols: []string{"http", "https"}, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector-fips.us-gov-west-1.amazonaws.com", }, }, }, - "elasticmapreduce": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "elasticmapreduce.us-gov-east-1.amazonaws.com", + "inspector2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "inspector2-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ - Hostname: "elasticmapreduce.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "inspector2-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{ - Protocols: []string{"https"}, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector2-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector2-fips.us-gov-west-1.amazonaws.com", }, }, }, - "email": service{ - - Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ - Hostname: "email-fips.us-gov-west-1.amazonaws.com", - CredentialScope: 
credentialScope{ - Region: "us-gov-west-1", - }, + "internetmonitor": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "internetmonitor.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "internetmonitor.us-gov-west-1.api.aws", }, - "us-gov-west-1": endpoint{}, }, }, - "es": service{ + "iot": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "iot-fips.us-gov-east-1.amazonaws.com", - Endpoints: endpoints{ - "fips": endpoint{ - Hostname: "es-fips.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "iot-fips.us-gov-west-1.amazonaws.com", + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iot-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iot-fips.us-gov-west-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, - "events": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "events.us-gov-east-1.amazonaws.com", + "iotevents": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "iotevents-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-gov-east-1", + Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - "us-gov-west-1": endpoint{ - Hostname: "events.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotevents-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "ioteventsdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "data.iotevents-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - }, - }, - "firehose": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "firehose-fips.us-gov-east-1.amazonaws.com", + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "data.iotevents.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-gov-east-1", + Region: "us-gov-west-1", }, }, - "fips-us-gov-west-1": endpoint{ - Hostname: "firehose-fips.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iotevents-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, - "fms": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "fms-fips.us-gov-east-1.amazonaws.com", + "iotsecuredtunneling": service{ + Defaults: endpointDefaults{ + 
defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ - Hostname: "fms-fips.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, - "fsx": service{ - - Endpoints: endpoints{ - "fips-prod-us-gov-east-1": endpoint{ - Hostname: "fsx-fips.us-gov-east-1.amazonaws.com", + "iotsitewise": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "iotsitewise-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-gov-east-1", + Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - "fips-prod-us-gov-west-1": endpoint{ - Hostname: "fsx-fips.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotsitewise-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "iottwinmaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "api-us-gov-west-1", + }: endpoint{ + Hostname: "api.iottwinmaker.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "glacier": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "glacier.us-gov-east-1.amazonaws.com", + endpointKey{ + Region: "data-us-gov-west-1", + }: endpoint{ + Hostname: "data.iottwinmaker.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-gov-east-1", + Region: "us-gov-west-1", }, }, - "us-gov-west-1": endpoint{ - Hostname: "glacier.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, + endpointKey{ + Region: "fips-api-us-gov-west-1", + }: endpoint{ + Hostname: "api.iottwinmaker-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, }, - }, - }, - "glue": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "glue-fips.us-gov-east-1.amazonaws.com", + endpointKey{ + Region: "fips-data-us-gov-west-1", + }: endpoint{ + Hostname: "data.iottwinmaker-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-gov-east-1", + Region: "us-gov-west-1", }, }, - "fips-us-gov-west-1": endpoint{ - Hostname: "glue-fips.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "iottwinmaker-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + 
Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iottwinmaker-fips.us-gov-west-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, - "greengrass": service{ - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "dataplane-us-gov-east-1": endpoint{ - Hostname: "greengrass-ats.iot.us-gov-east-1.amazonaws.com", + "kafka": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "kafka.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, }, - "dataplane-us-gov-west-1": endpoint{ - Hostname: "greengrass-ats.iot.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, - "fips-us-gov-east-1": endpoint{ - Hostname: "greengrass-fips.us-gov-east-1.amazonaws.com", + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, }, - "us-gov-east-1": endpoint{ - Hostname: "greengrass.us-gov-east-1.amazonaws.com", + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "kafka.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "us-gov-west-1": endpoint{ - Hostname: "greengrass.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "kafka.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, }, - }, - }, - "guardduty": service{ - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-east-1-fips": endpoint{ - Hostname: "guardduty.us-gov-east-1.amazonaws.com", + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-gov-east-1", + Region: "us-gov-west-1", }, }, - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "guardduty.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "kafka.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, }, }, - "health": service{ - - Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ - Hostname: "health-fips.us-gov-west-1.amazonaws.com", + "kendra": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "kendra-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-fips.us-gov-west-1.amazonaws.com", }, }, }, - "iam": service{ - PartitionEndpoint: "aws-us-gov-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-us-gov-global": endpoint{ - Hostname: "iam.us-gov.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, + "kendra-ranking": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", }, - 
"iam-govcloud-fips": endpoint{ - Hostname: "iam.us-gov.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "kendra-ranking.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "kendra-ranking.us-gov-west-1.api.aws", }, }, }, - "inspector": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "inspector-fips.us-gov-east-1.amazonaws.com", + "kinesis": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "kinesis.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ - Hostname: "inspector-fips.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "kinesis.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "iot": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, - }, - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "iot-fips.us-gov-east-1.amazonaws.com", + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "kinesis.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ - Service: "execute-api", + Region: "us-gov-east-1", }, }, - "fips-us-gov-west-1": endpoint{ - Hostname: "iot-fips.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesis.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ - Service: "execute-api", + Region: "us-gov-east-1", }, }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "iotsecuredtunneling": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com", + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "kinesis.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-gov-east-1", + Region: "us-gov-west-1", }, }, - "fips-us-gov-west-1": endpoint{ - Hostname: "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesis.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, - "kafka": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "kinesisanalytics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, }, }, - "kinesis": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "kinesis.us-gov-east-1.amazonaws.com", + "kms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ProdFips", + }: endpoint{ + Hostname: "kms-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-gov-east-1", + Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - "us-gov-west-1": 
endpoint{ - Hostname: "kinesis.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "kms-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-gov-west-1", + Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - }, - }, - "kinesisanalytics": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "kms": service{ - - Endpoints: endpoints{ - "ProdFips": endpoint{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ Hostname: "kms-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "lakeformation": service{ - - Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "lakeformation-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "lakeformation-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lakeformation.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lakeformation.us-gov-west-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-gov-west-1.api.aws", }, - "us-gov-west-1": endpoint{}, }, }, "lambda": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "lambda-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "lambda-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: 
"us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lambda-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.us-gov-west-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lambda-fips.us-gov-west-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "license-manager": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "license-manager-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "license-manager-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-fips.us-gov-east-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "license-manager-linux-subscriptions": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, }, }, "logs": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "logs.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "logs.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs.us-gov-west-1.amazonaws.com", }, }, }, - "mediaconvert": service{ + "m2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ - Endpoints: endpoints{ - "us-gov-west-1": endpoint{ + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{}, + }, + }, + "managedblockchain": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "mediaconvert": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: 
"fips-us-gov-west-1", + }: endpoint{ + Hostname: "mediaconvert.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ Hostname: "mediaconvert.us-gov-west-1.amazonaws.com", + }, + }, + }, + "meetings-chime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "meetings-chime-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "meetings-chime-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "meetings-chime-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "meetings-chime-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "metering.marketplace": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "metrics.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "mgn": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "mgn-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "mgn-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - }, - }, - "metering.marketplace": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "aws-marketplace", + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mgn-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mgn-fips.us-gov-west-1.amazonaws.com", }, - }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "models.lex": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "lex", + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "models-fips.lex.{region}.{dnsSuffix}", + CredentialScope: credentialScope{ + Service: "lex", + }, }, }, - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + 
Variant: fipsVariant, + }: endpoint{ + Hostname: "models-fips.lex.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ Hostname: "models-fips.lex.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, }, }, "monitoring": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "monitoring.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "monitoring.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "monitoring.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "monitoring.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "monitoring.us-gov-west-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "mq": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "mq-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "mq-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mq-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mq-fips.us-gov-west-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "neptune": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ Hostname: "rds.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, }, - "us-gov-west-1": endpoint{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ Hostname: "rds.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", @@ -10191,28 +40799,90 @@ var awsusgovPartition = partition{ }, }, "network-firewall": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "network-firewall-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "network-firewall-fips.us-gov-west-1.amazonaws.com", CredentialScope: 
credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "network-firewall-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "network-firewall-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "networkmanager": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-us-gov-global", + }: endpoint{ + Hostname: "networkmanager.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "aws-us-gov-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "networkmanager.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "fips-aws-us-gov-global", + }: endpoint{ + Hostname: "networkmanager.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "oidc": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "oidc.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ Hostname: "oidc.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", @@ -10223,118 +40893,396 @@ var awsusgovPartition = partition{ "organizations": service{ PartitionEndpoint: "aws-us-gov-global", IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-us-gov-global": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-us-gov-global", + }: endpoint{ + Hostname: "organizations.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "aws-us-gov-global", + Variant: fipsVariant, + }: endpoint{ Hostname: "organizations.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, }, - "fips-aws-us-gov-global": endpoint{ + endpointKey{ + Region: "fips-aws-us-gov-global", + }: endpoint{ Hostname: "organizations.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, }, }, "outposts": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "outposts.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "outposts.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "outposts.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + 
Hostname: "outposts.us-gov-west-1.amazonaws.com", + }, + }, + }, + "participant.connect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "participant.connect.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "participant.connect.us-gov-west-1.amazonaws.com", + }, + }, + }, + "pi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, }, }, "pinpoint": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "mobiletargeting", + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "mobiletargeting", + }, }, }, - Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "pinpoint-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - "us-gov-west-1": endpoint{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ Hostname: "pinpoint.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pinpoint-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "polly": service{ - - Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "polly-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ Hostname: "polly-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "portal.sso": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "portal.sso.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "portal.sso.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, }, - "us-gov-west-1": endpoint{}, + }, + }, + "qbusiness": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "qbusiness.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "qbusiness.us-gov-west-1.api.aws", + }, + }, + }, + "quicksight": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "api", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, }, }, "ram": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: 
"ram.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ Hostname: "ram.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "ram.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, }, - "us-gov-west-1": endpoint{ + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ Hostname: "ram.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + }, + }, + "rbin": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "rbin-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "rbin-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-gov-west-1.amazonaws.com", }, }, }, "rds": service{ - - Endpoints: endpoints{ - "rds.us-gov-east-1": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "rds.us-gov-east-1", + }: endpoint{ + Hostname: "rds.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-gov-west-1", + }: endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ Hostname: "rds.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", }, - "rds.us-gov-west-1": endpoint{ + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ Hostname: "rds.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, 
}, "redshift": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ Hostname: "redshift.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, }, - "us-gov-west-1": endpoint{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ Hostname: "redshift.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", @@ -10343,366 +41291,1128 @@ var awsusgovPartition = partition{ }, }, "rekognition": service{ - - Endpoints: endpoints{ - "rekognition-fips.us-gov-west-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "rekognition-fips.us-gov-west-1", + }: endpoint{ + Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.us-gov-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + }, + }, + "resiliencehub": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "resiliencehub-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "resiliencehub-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "resiliencehub-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "resiliencehub-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "resource-explorer-2": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "resource-explorer-2.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "resource-explorer-2.us-gov-west-1.api.aws", }, - "us-gov-west-1": endpoint{}, }, }, "resource-groups": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "resource-groups.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + 
Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "resource-groups.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "resource-groups.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "resource-groups.us-gov-east-1.amazonaws.com", }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ Hostname: "resource-groups.us-gov-west-1.amazonaws.com", + }, + }, + }, + "robomaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "rolesanywhere": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "rolesanywhere-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "rolesanywhere-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rolesanywhere-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rolesanywhere-fips.us-gov-west-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "route53": service{ PartitionEndpoint: "aws-us-gov-global", IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-us-gov-global": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-us-gov-global", + }: endpoint{ + Hostname: "route53.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "aws-us-gov-global", + Variant: fipsVariant, + }: endpoint{ Hostname: "route53.us-gov.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, }, - "fips-aws-us-gov-global": endpoint{ + endpointKey{ + Region: "fips-aws-us-gov-global", + }: endpoint{ Hostname: "route53.us-gov.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, }, }, "route53resolver": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "route53resolver.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "route53resolver.us-gov-east-1.amazonaws.com", + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "route53resolver.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "route53resolver.us-gov-west-1.amazonaws.com", - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - 
"us-gov-west-1": endpoint{}, + Deprecated: boxedTrue, + }, }, }, "runtime.lex": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "lex", + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "runtime-fips.lex.{region}.{dnsSuffix}", + CredentialScope: credentialScope{ + Service: "lex", + }, }, }, - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "runtime-fips.lex.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ Hostname: "runtime-fips.lex.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, }, }, "runtime.sagemaker": service{ - - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "runtime.sagemaker.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "runtime.sagemaker.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ Hostname: "runtime.sagemaker.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, }, }, "s3": service{ - Defaults: endpoint{ - SignatureVersions: []string{"s3", "s3v4"}, - - HasDualStack: boxedTrue, - DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SignatureVersions: []string{"s3", "s3v4"}, + }, + defaultKey{ + Variant: dualStackVariant, + }: endpoint{ + Hostname: "{service}.dualstack.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + defaultKey{ + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "{service}-fips.dualstack.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "s3-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "s3-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "s3.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: 
"s3.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, }, - Endpoints: endpoints{ - "accesspoint-us-gov-east-1": endpoint{ - Hostname: "s3-accesspoint.us-gov-east-1.amazonaws.com", + }, + "s3-control": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, SignatureVersions: []string{"s3v4"}, }, - "accesspoint-us-gov-west-1": endpoint{ - Hostname: "s3-accesspoint.us-gov-west-1.amazonaws.com", + defaultKey{ + Variant: dualStackVariant, + }: endpoint{ + Hostname: "{service}.dualstack.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com", + Protocols: []string{"https"}, SignatureVersions: []string{"s3v4"}, }, - "fips-accesspoint-us-gov-east-1": endpoint{ - Hostname: "s3-accesspoint-fips.us-gov-east-1.amazonaws.com", + defaultKey{ + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "{service}-fips.dualstack.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com", + Protocols: []string{"https"}, SignatureVersions: []string{"s3v4"}, }, - "fips-accesspoint-us-gov-west-1": endpoint{ - Hostname: "s3-accesspoint-fips.us-gov-west-1.amazonaws.com", + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "s3-control.us-gov-east-1.amazonaws.com", SignatureVersions: []string{"s3v4"}, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "s3-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-gov-west-1", + Region: "us-gov-east-1", }, }, - "us-gov-east-1": endpoint{ - Hostname: "s3.us-gov-east-1.amazonaws.com", - Protocols: []string{"http", "https"}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, }, - "us-gov-west-1": endpoint{ - Hostname: "s3.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, }, - }, - }, - "s3-control": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - SignatureVersions: []string{"s3v4"}, - - HasDualStack: boxedTrue, - DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", - }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ - Hostname: "s3-control.us-gov-east-1.amazonaws.com", + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.us-gov-east-1.amazonaws.com", SignatureVersions: []string{"s3v4"}, CredentialScope: credentialScope{ Region: "us-gov-east-1", }, }, - "us-gov-east-1-fips": endpoint{ + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ Hostname: "s3-control-fips.us-gov-east-1.amazonaws.com", SignatureVersions: []string{"s3v4"}, CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "us-gov-west-1": endpoint{ + endpointKey{ + Region: 
"us-gov-west-1", + }: endpoint{ Hostname: "s3-control.us-gov-west-1.amazonaws.com", SignatureVersions: []string{"s3v4"}, CredentialScope: credentialScope{ Region: "us-gov-west-1", }, }, - "us-gov-west-1-fips": endpoint{ + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ Hostname: "s3-control-fips.us-gov-west-1.amazonaws.com", SignatureVersions: []string{"s3v4"}, CredentialScope: credentialScope{ Region: "us-gov-west-1", }, }, - }, - }, - "secretsmanager": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-east-1-fips": endpoint{ - Hostname: "secretsmanager-fips.us-gov-east-1.amazonaws.com", + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, CredentialScope: credentialScope{ - Region: "us-gov-east-1", + Region: "us-gov-west-1", }, }, - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "secretsmanager-fips.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "s3-control-fips.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + }, + }, + "s3-outposts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, + }, + }, + "secretsmanager": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + + Deprecated: boxedTrue, }, }, }, "securityhub": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Endpoints: 
serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "securityhub-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "securityhub-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "securityhub-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "securityhub-fips.us-gov-west-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "serverlessrepo": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ Hostname: "serverlessrepo.us-gov-east-1.amazonaws.com", Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "serverlessrepo.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Protocols: []string{"https"}, }, - "us-gov-west-1": endpoint{ + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ Hostname: "serverlessrepo.us-gov-west-1.amazonaws.com", Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "serverlessrepo.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, }, }, "servicecatalog": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-east-1-fips": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ Hostname: "servicecatalog-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ Hostname: "servicecatalog-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, }, }, "servicecatalog-appregistry": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "servicecatalog-appregistry.us-gov-east-1.amazonaws.com", + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"servicecatalog-appregistry.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "servicediscovery": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "servicediscovery", + }: endpoint{ CredentialScope: credentialScope{ - Region: "us-gov-east-1", + Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ - Hostname: "servicecatalog-appregistry.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "servicediscovery", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, - "servicediscovery": service{ - - Endpoints: endpoints{ - "servicediscovery-fips": endpoint{ + endpointKey{ + Region: "servicediscovery-fips", + }: endpoint{ + Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "servicediscovery-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.us-gov-west-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-gov-west-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "servicequotas": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicequotas.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + }, }, - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "servicequotas.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "servicequotas.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: 
"us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicequotas.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicequotas.us-gov-west-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, - "sms": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "sms-fips.us-gov-east-1.amazonaws.com", + "simspaceweaver": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "simspaceweaver.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "simspaceweaver.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "simspaceweaver.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "simspaceweaver.us-gov-west-1.amazonaws.com", + }, + }, + }, + "sms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "sms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ Hostname: "sms-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "sms-voice": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "sms-voice-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-voice-fips.us-gov-west-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "snowball": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "snowball-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "snowball-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.us-gov-west-1.amazonaws.com", }, 
- "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "sns": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "sns.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "us-gov-west-1": endpoint{ - Hostname: "sns.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "sns.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sns.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sns.us-gov-west-1.amazonaws.com", + Protocols: []string{"https"}, }, }, }, "sqs": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "sqs.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ Hostname: "sqs.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, }, - "us-gov-west-1": endpoint{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ Hostname: "sqs.us-gov-west-1.amazonaws.com", SSLCommonName: "{region}.queue.{dnsSuffix}", Protocols: []string{"http", "https"}, @@ -10713,252 +42423,815 @@ var awsusgovPartition = partition{ }, }, "ssm": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "ssm.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "ssm.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm.us-gov-east-1.amazonaws.com", }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ Hostname: "ssm.us-gov-west-1.amazonaws.com", + }, + }, + }, + "sso": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "sso.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sso.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "sso.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, 
+ Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "sso.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sso.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "sso.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "states": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "states-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "states.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "states-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "states.us-gov-west-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "storagegateway": service{ - - Endpoints: endpoints{ - "fips": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "storagegateway-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "storagegateway-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ Hostname: "storagegateway-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "streams.dynamodb": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "dynamodb", + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "streams.dynamodb.{region}.{dnsSuffix}", + CredentialScope: credentialScope{ + Service: "dynamodb", + }, }, }, - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-east-1-fips": endpoint{ - Hostname: "dynamodb.us-gov-east-1.amazonaws.com", + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: 
endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "streams.dynamodb.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "streams.dynamodb.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ - Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "streams.dynamodb.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "streams.dynamodb.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, }, }, "sts": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-east-1-fips": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "sts.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sts.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ Hostname: "sts.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sts.us-gov-west-1.amazonaws.com", }, - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ Hostname: "sts.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, }, }, "support": service{ PartitionEndpoint: "aws-us-gov-global", - - Endpoints: endpoints{ - "aws-us-gov-global": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-us-gov-global", + }: endpoint{ + Hostname: "support.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "support.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ Hostname: "support.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, }, }, "swf": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "swf.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ Hostname: "swf.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, }, - "us-gov-west-1": endpoint{ + endpointKey{ + Region: "us-gov-east-1-fips", 
+ }: endpoint{ + Hostname: "swf.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "swf.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "swf.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ Hostname: "swf.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + }, + }, + "synthetics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "synthetics-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "synthetics-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "synthetics-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "synthetics-fips.us-gov-west-1.amazonaws.com", }, }, }, "tagging": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "textract": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "textract-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "textract-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "textract-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "textract-fips.us-gov-west-1.amazonaws.com", + }, }, }, "transcribe": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + }, }, - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "fips.transcribe.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: 
"fips.transcribe.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.us-gov-east-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.us-gov-west-1.amazonaws.com", + }, + }, + }, + "transcribestreaming": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, }, }, "transfer": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "transfer-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "transfer-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transfer-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transfer-fips.us-gov-west-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, "translate": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, }, - Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, - "us-gov-west-1-fips": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "translate-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ Hostname: "translate-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, }, }, "waf-regional": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "waf-regional-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "waf-regional-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, }, - "us-gov-east-1": endpoint{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ Hostname: "waf-regional.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, }, - "us-gov-west-1": endpoint{ + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ Hostname: "waf-regional.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "wafv2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "wafv2-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "wafv2-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "wafv2.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "wafv2.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "wellarchitected": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, }, }, "workspaces": service{ - - Endpoints: endpoints{ - "fips-us-gov-west-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "workspaces-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "workspaces-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "workspaces-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "workspaces-fips.us-gov-west-1.amazonaws.com", }, - "us-gov-west-1": endpoint{}, }, }, "xray": service{ - - Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ Hostname: "xray-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, - "fips-us-gov-west-1": endpoint{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ Hostname: "xray-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + 
Hostname: "xray-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "xray-fips.us-gov-west-1.amazonaws.com", }, - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, }, }, }, @@ -10979,210 +43252,570 @@ var awsisoPartition = partition{ return reg }(), }, - Defaults: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, }, Regions: regions{ "us-iso-east-1": region{ Description: "US ISO East", }, + "us-iso-west-1": region{ + Description: "US ISO WEST", + }, }, Services: services{ "api.ecr": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ Hostname: "api.ecr.us-iso-east-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-east-1", }, }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{ + Hostname: "api.ecr.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + }, }, }, "api.sagemaker": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, }, }, "apigateway": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "appconfig": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "appconfigdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "application-autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, }, - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "athena": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, }, }, "autoscaling": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ Protocols: []string{"http", "https"}, }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "cloudcontrolapi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "cloudformation": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "cloudtrail": service{ - - Endpoints: endpoints{ - 
"us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "codedeploy": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "comprehend": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, }, - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, }, }, "config": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "datapipeline": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "datasync": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "datasync-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-iso-west-1", + }: endpoint{ + Hostname: "datasync-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.us-iso-east-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.us-iso-west-1.c2s.ic.gov", + }, }, }, "directconnect": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "dlm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "dms": service{ - - Endpoints: endpoints{ - "dms-fips": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "dms", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dms", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dms-fips", + }: endpoint{ + Hostname: "dms.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.us-iso-east-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-east-1-fips", + }: endpoint{ Hostname: 
"dms.us-iso-east-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-east-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.us-iso-west-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-west-1-fips", + }: endpoint{ + Hostname: "dms.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, }, - "us-iso-east-1": endpoint{}, }, }, "ds": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "dynamodb": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ Protocols: []string{"http", "https"}, }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "ebs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "ec2": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "ecs": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "eks": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "elasticache": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "elasticfilesystem": service{ - - Endpoints: endpoints{ - "fips-us-iso-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ Hostname: "elasticfilesystem-fips.us-iso-east-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-east-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-iso-west-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-iso-east-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-iso-west-1.c2s.ic.gov", }, - "us-iso-east-1": endpoint{}, }, }, "elasticloadbalancing": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ Protocols: []string{"http", "https"}, }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "elasticmapreduce": service{ - - Endpoints: endpoints{ - 
"us-iso-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "elasticmapreduce.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-iso-west-1", + }: endpoint{ + Hostname: "elasticmapreduce.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-iso-east-1.c2s.ic.gov", Protocols: []string{"https"}, }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-iso-west-1.c2s.ic.gov", + }, }, }, "es": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "events": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "firehose": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "glacier": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ Protocols: []string{"http", "https"}, }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "glue": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, }, }, "health": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, }, }, "iam": service{ PartitionEndpoint: "aws-iso-global", IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-iso-global": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-iso-global", + }: endpoint{ Hostname: "iam.us-iso-east-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-east-1", @@ -11191,89 +43824,363 @@ var awsisoPartition = partition{ }, }, "kinesis": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "kms": service{ - - Endpoints: endpoints{ - "ProdFips": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ProdFips", + }: endpoint{ Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-east-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"kms-fips.us-iso-east-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-east-1-fips", + }: endpoint{ + Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-iso-west-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-west-1-fips", + }: endpoint{ + Hostname: "kms-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, }, - "us-iso-east-1": endpoint{}, }, }, "lambda": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "license-manager": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "logs": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "medialive": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, }, }, "mediapackage": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "metrics.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, }, }, "monitoring": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "outposts": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, }, }, "ram": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "ram-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-iso-west-1", + }: endpoint{ + Hostname: "ram-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram-fips.us-iso-east-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram-fips.us-iso-west-1.c2s.ic.gov", + }, + }, + }, + "rbin": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "rbin-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-iso-west-1", + }: endpoint{ + Hostname: 
"rbin-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-iso-east-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-iso-west-1.c2s.ic.gov", + }, }, }, "rds": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "rds-fips.us-iso-east-1", + }: endpoint{ + Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds-fips.us-iso-west-1", + }: endpoint{ + Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-iso-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-iso-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-east-1-fips", + }: endpoint{ + Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-west-1-fips", + }: endpoint{ + Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "redshift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "redshift-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-iso-west-1", + }: endpoint{ + Hostname: "redshift-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-fips.us-iso-east-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-fips.us-iso-west-1.c2s.ic.gov", + }, }, }, - 
"redshift": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + "resource-groups": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "route53": service{ PartitionEndpoint: "aws-iso-global", IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-iso-global": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-iso-global", + }: endpoint{ Hostname: "route53.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-east-1", @@ -11282,92 +44189,180 @@ var awsisoPartition = partition{ }, }, "route53resolver": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "runtime.sagemaker": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, }, }, "s3": service{ - Defaults: endpoint{ - SignatureVersions: []string{"s3v4"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SignatureVersions: []string{"s3v4"}, + }, }, - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "s3-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-iso-west-1", + }: endpoint{ + Hostname: "s3-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.us-iso-east-1.c2s.ic.gov", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-fips.dualstack.us-iso-east-1.c2s.ic.gov", Protocols: []string{"http", "https"}, SignatureVersions: []string{"s3v4"}, }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.us-iso-west-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-fips.dualstack.us-iso-west-1.c2s.ic.gov", + }, }, }, "secretsmanager": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "snowball": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, }, }, "sns": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ Protocols: []string{"http", "https"}, }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "sqs": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ Protocols: 
[]string{"http", "https"}, }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "ssm": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "states": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "streams.dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "dynamodb", + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "dynamodb", + }, }, }, - Endpoints: endpoints{ - "us-iso-east-1": endpoint{ - Protocols: []string{"http", "https"}, - }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "sts": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "support": service{ PartitionEndpoint: "aws-iso-global", - - Endpoints: endpoints{ - "aws-iso-global": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-iso-global", + }: endpoint{ Hostname: "support.us-iso-east-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-east-1", @@ -11376,37 +44371,74 @@ var awsisoPartition = partition{ }, }, "swf": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "synthetics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "tagging": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "transcribe": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, }, - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, }, }, "transcribestreaming": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, }, }, "translate": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, }, - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, }, }, "workspaces": service{ - - Endpoints: endpoints{ - "us-iso-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, }, @@ -11427,10 +44459,20 @@ var awsisobPartition = partition{ return reg }(), }, - Defaults: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: 
[]string{"v4"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, }, Regions: regions{ "us-isob-east-1": region{ @@ -11439,9 +44481,10 @@ var awsisobPartition = partition{ }, Services: services{ "api.ecr": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{ Hostname: "api.ecr.us-isob-east-1.sc2s.sgov.gov", CredentialScope: credentialScope{ Region: "us-isob-east-1", @@ -11449,142 +44492,309 @@ var awsisobPartition = partition{ }, }, }, + "api.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "appconfig": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "appconfigdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "application-autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "cloudcontrolapi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "cloudformation": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "cloudtrail": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "codedeploy": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "config": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "directconnect": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "dlm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "dms": service{ - - Endpoints: endpoints{ - "dms-fips": endpoint{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "dms", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dms", + Variant: 
fipsVariant, + }: endpoint{ Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", CredentialScope: credentialScope{ Region: "us-isob-east-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dms-fips", + }: endpoint{ + Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", + }, + endpointKey{ + Region: "us-isob-east-1-fips", + }: endpoint{ + Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, }, - "us-isob-east-1": endpoint{}, }, }, "ds": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + }, + "ebs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "ec2": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "ecs": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "eks": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "elasticache": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "elasticfilesystem": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-isob-east-1.sc2s.sgov.gov", + }, }, }, "elasticloadbalancing": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{ Protocols: []string{"https"}, }, }, }, "elasticmapreduce": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + Hostname: "elasticmapreduce.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: 
endpoint{ + Hostname: "elasticmapreduce.us-isob-east-1.sc2s.sgov.gov", + }, }, }, "es": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "events": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "glacier": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "health": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "iam": service{ PartitionEndpoint: "aws-iso-b-global", IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-iso-b-global": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-iso-b-global", + }: endpoint{ Hostname: "iam.us-isob-east-1.sc2s.sgov.gov", CredentialScope: credentialScope{ Region: "us-isob-east-1", @@ -11593,65 +44803,228 @@ var awsisobPartition = partition{ }, }, "kinesis": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "kms": service{ - - Endpoints: endpoints{ - "ProdFips": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ProdFips", + }: endpoint{ + Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", + }, + endpointKey{ + Region: "us-isob-east-1-fips", + }: endpoint{ Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", CredentialScope: credentialScope{ Region: "us-isob-east-1", }, + Deprecated: boxedTrue, }, - "us-isob-east-1": endpoint{}, }, }, "lambda": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "license-manager": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "logs": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "metering.marketplace": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "metrics.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "monitoring": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "outposts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "ram": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + Hostname: "ram-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: 
"us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram-fips.us-isob-east-1.sc2s.sgov.gov", + }, + }, + }, + "rbin": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + Hostname: "rbin-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-isob-east-1.sc2s.sgov.gov", + }, }, }, "rds": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "rds-fips.us-isob-east-1", + }: endpoint{ + Hostname: "rds-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-isob-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-isob-east-1.sc2s.sgov.gov", + }, + endpointKey{ + Region: "us-isob-east-1-fips", + }: endpoint{ + Hostname: "rds-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, }, }, "redshift": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + Hostname: "redshift-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-fips.us-isob-east-1.sc2s.sgov.gov", + }, + }, + }, + "resource-groups": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "route53": service{ PartitionEndpoint: "aws-iso-b-global", IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-iso-b-global": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-iso-b-global", + }: endpoint{ Hostname: "route53.sc2s.sgov.gov", CredentialScope: credentialScope{ Region: "us-isob-east-1", @@ -11659,72 +45032,135 @@ var awsisobPartition = partition{ }, }, }, + "route53resolver": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "runtime.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "s3": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: 
"fips-us-isob-east-1", + }: endpoint{ + Hostname: "s3-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.us-isob-east-1.sc2s.sgov.gov", + }, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-fips.dualstack.us-isob-east-1.sc2s.sgov.gov", + }, }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + }, + "secretsmanager": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "snowball": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "sns": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "sqs": service{ - Defaults: endpoint{ - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "ssm": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "states": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "streams.dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "dynamodb", + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, }, }, - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "sts": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "support": service{ PartitionEndpoint: "aws-iso-b-global", - - Endpoints: endpoints{ - "aws-iso-b-global": endpoint{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-iso-b-global", + }: endpoint{ Hostname: "support.us-isob-east-1.sc2s.sgov.gov", CredentialScope: credentialScope{ Region: "us-isob-east-1", @@ -11733,16 +45169,100 @@ var awsisobPartition = partition{ }, }, "swf": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "synthetics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, "tagging": service{ - - Endpoints: endpoints{ - "us-isob-east-1": endpoint{}, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "workspaces": 
service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, }, }, }, } + +// AwsIsoEPartition returns the Resolver for AWS ISOE (Europe). +func AwsIsoEPartition() Partition { + return awsisoePartition.Partition() +} + +var awsisoePartition = partition{ + ID: "aws-iso-e", + Name: "AWS ISOE (Europe)", + DNSSuffix: "cloud.adc-e.uk", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^eu\\-isoe\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + Regions: regions{}, + Services: services{}, +} + +// AwsIsoFPartition returns the Resolver for AWS ISOF. +func AwsIsoFPartition() Partition { + return awsisofPartition.Partition() +} + +var awsisofPartition = partition{ + ID: "aws-iso-f", + Name: "AWS ISOF", + DNSSuffix: "csp.hci.ic.gov", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-isof\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + Regions: regions{}, + Services: services{}, +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go index 84316b92..66dec6be 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go @@ -9,7 +9,7 @@ // AWS GovCloud (US) (aws-us-gov). // . // -// Enumerating Regions and Endpoint Metadata +// # Enumerating Regions and Endpoint Metadata // // Casting the Resolver returned by DefaultResolver to a EnumPartitions interface // will allow you to get access to the list of underlying Partitions with the @@ -17,22 +17,22 @@ // resolving to a single partition, or enumerate regions, services, and endpoints // in the partition. // -// resolver := endpoints.DefaultResolver() -// partitions := resolver.(endpoints.EnumPartitions).Partitions() +// resolver := endpoints.DefaultResolver() +// partitions := resolver.(endpoints.EnumPartitions).Partitions() // -// for _, p := range partitions { -// fmt.Println("Regions for", p.ID()) -// for id, _ := range p.Regions() { -// fmt.Println("*", id) -// } +// for _, p := range partitions { +// fmt.Println("Regions for", p.ID()) +// for id, _ := range p.Regions() { +// fmt.Println("*", id) +// } // -// fmt.Println("Services for", p.ID()) -// for id, _ := range p.Services() { -// fmt.Println("*", id) -// } -// } +// fmt.Println("Services for", p.ID()) +// for id, _ := range p.Services() { +// fmt.Println("*", id) +// } +// } // -// Using Custom Endpoints +// # Using Custom Endpoints // // The endpoints package also gives you the ability to use your own logic how // endpoints are resolved. 
This is a great way to define a custom endpoint
@@ -47,20 +47,19 @@
 // of Resolver.EndpointFor, converting it to a type that satisfies the
 // Resolver interface.
 //
+//	myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
+//		if service == endpoints.S3ServiceID {
+//			return endpoints.ResolvedEndpoint{
+//				URL:           "s3.custom.endpoint.com",
+//				SigningRegion: "custom-signing-region",
+//			}, nil
+//		}
 //
-//	myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
-//		if service == endpoints.S3ServiceID {
-//			return endpoints.ResolvedEndpoint{
-//				URL:           "s3.custom.endpoint.com",
-//				SigningRegion: "custom-signing-region",
-//			}, nil
-//		}
+//		return endpoints.DefaultResolver().EndpointFor(service, region, optFns...)
+//	}
 //
-//		return endpoints.DefaultResolver().EndpointFor(service, region, optFns...)
-//	}
-//
-//	sess := session.Must(session.NewSession(&aws.Config{
-//		Region:           aws.String("us-west-2"),
-//		EndpointResolver: endpoints.ResolverFunc(myCustomResolver),
-//	}))
+//	sess := session.Must(session.NewSession(&aws.Config{
+//		Region:           aws.String("us-west-2"),
+//		EndpointResolver: endpoints.ResolverFunc(myCustomResolver),
+//	}))
 package endpoints
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
index 8e8636f5..a686a48f 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
@@ -8,6 +8,41 @@ import (
 	"github.com/aws/aws-sdk-go/aws/awserr"
 )
 
+// A Logger is a minimalistic interface for the SDK to log messages to.
+type Logger interface {
+	Log(...interface{})
+}
+
+// DualStackEndpointState is a constant to describe the dual-stack endpoint resolution
+// behavior.
+type DualStackEndpointState uint
+
+const (
+	// DualStackEndpointStateUnset is the default value behavior for dual-stack endpoint
+	// resolution.
+	DualStackEndpointStateUnset DualStackEndpointState = iota
+
+	// DualStackEndpointStateEnabled enables dual-stack endpoint resolution for endpoints.
+	DualStackEndpointStateEnabled
+
+	// DualStackEndpointStateDisabled disables dual-stack endpoint resolution for endpoints.
+	DualStackEndpointStateDisabled
+)
+
+// FIPSEndpointState is a constant to describe the FIPS endpoint resolution behavior.
+type FIPSEndpointState uint
+
+const (
+	// FIPSEndpointStateUnset is the default value behavior for FIPS endpoint resolution.
+	FIPSEndpointStateUnset FIPSEndpointState = iota
+
+	// FIPSEndpointStateEnabled enables FIPS endpoint resolution for service endpoints.
+	FIPSEndpointStateEnabled
+
+	// FIPSEndpointStateDisabled disables FIPS endpoint resolution for endpoints.
+	FIPSEndpointStateDisabled
+)
+
 // Options provide the configuration needed to direct how the
 // endpoints will be resolved.
 type Options struct {
@@ -21,8 +56,19 @@ type Options struct {
 	// be returned. This endpoint may not be valid. If StrictMatching is
 	// enabled only services that are known to support dualstack will return
 	// dualstack endpoints.
+	//
+	// Deprecated: This option will continue to function for S3 and S3 Control for backwards compatibility.
+	// UseDualStackEndpoint should be used to enable usage of a service's dual-stack endpoint for all service clients
+	// moving forward. For S3 and S3 Control, when UseDualStackEndpoint is set to a non-zero value it takes higher
+	// precedence than this option.
 	UseDualStack bool
+
+	// Sets the resolver to resolve a dual-stack endpoint for the service.
+	UseDualStackEndpoint DualStackEndpointState
+
+	// UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint.
+	UseFIPSEndpoint FIPSEndpointState
+
 	// Enables strict matching of services and regions resolved endpoints.
 	// If the partition doesn't enumerate the exact service and region an
 	// error will be returned. This option will prevent returning endpoints
@@ -56,6 +102,30 @@ type Options struct {
 	// S3 Regional Endpoint flag helps with resolving the S3 endpoint
 	S3UsEast1RegionalEndpoint S3UsEast1RegionalEndpoint
+
+	// ResolvedRegion is the resolved region string. If provided (non-zero length) it takes priority
+	// over the region name passed to the ResolveEndpoint call.
+	ResolvedRegion string
+
+	// Logger is the logger that will be used to log messages.
+	Logger Logger
+
+	// Determines whether logging of deprecated endpoints usage is enabled.
+	LogDeprecated bool
+}
+
+func (o Options) getEndpointVariant(service string) (v endpointVariant) {
+	const s3 = "s3"
+	const s3Control = "s3-control"
+
+	if (o.UseDualStackEndpoint == DualStackEndpointStateEnabled) ||
+		((service == s3 || service == s3Control) && (o.UseDualStackEndpoint == DualStackEndpointStateUnset && o.UseDualStack)) {
+		v |= dualStackVariant
+	}
+	if o.UseFIPSEndpoint == FIPSEndpointStateEnabled {
+		v |= fipsVariant
+	}
+	return v
 }
 
 // EC2IMDSEndpointModeState is an enum configuration variable describing the client endpoint mode.
@@ -196,10 +266,25 @@ func DisableSSLOption(o *Options) {
 
 // UseDualStackOption sets the UseDualStack option. Can be used as a functional
 // option when resolving endpoints.
+//
+// Deprecated: UseDualStackEndpointOption should be used to enable usage of a service's dual-stack endpoint.
+// When DualStackEndpointState is set to a non-zero value it takes higher precedence than this option.
 func UseDualStackOption(o *Options) {
 	o.UseDualStack = true
 }
 
+// UseDualStackEndpointOption sets the UseDualStackEndpoint option to enabled. Can be used as a functional
+// option when resolving endpoints.
+func UseDualStackEndpointOption(o *Options) {
+	o.UseDualStackEndpoint = DualStackEndpointStateEnabled
+}
+
+// UseFIPSEndpointOption sets the UseFIPSEndpoint option to enabled. Can be used as a functional
+// option when resolving endpoints.
+func UseFIPSEndpointOption(o *Options) {
+	o.UseFIPSEndpoint = FIPSEndpointStateEnabled
+}
+
 // StrictMatchingOption sets the StrictMatching option. Can be used as a functional
 // option when resolving endpoints.
 func StrictMatchingOption(o *Options) {
@@ -268,10 +353,12 @@ type EnumPartitions interface {
 // as the second parameter.
 //
 // This example shows how to get the regions for DynamoDB in the AWS partition.
-//    rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID)
+//
+//	rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID)
 //
 // This is equivalent to using the partition directly.
-//    rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions()
+//
+//	rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions()
 func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) {
 	for _, p := range ps {
 		if p.ID() != partitionID {
@@ -338,8 +425,8 @@ func (p Partition) ID() string { return p.id }
 // of new regions and services expansions.
// // Errors that can be returned. -// * UnknownServiceError -// * UnknownEndpointError +// - UnknownServiceError +// - UnknownEndpointError func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { return p.p.EndpointFor(service, region, opts...) } @@ -407,7 +494,7 @@ func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (Resolve func (r Region) Services() map[string]Service { ss := map[string]Service{} for id, s := range r.p.Services { - if _, ok := s.Endpoints[r.id]; ok { + if _, ok := s.Endpoints[endpointKey{Region: r.id}]; ok { ss[id] = Service{ id: id, p: r.p, @@ -452,9 +539,12 @@ func (s Service) Regions() map[string]Region { } for id := range service.Endpoints { - if r, ok := s.p.Regions[id]; ok { - rs[id] = Region{ - id: id, + if id.Variant != 0 { + continue + } + if r, ok := s.p.Regions[id.Region]; ok { + rs[id.Region] = Region{ + id: id.Region, desc: r.Description, p: s.p, } @@ -472,8 +562,11 @@ func (s Service) Regions() map[string]Region { func (s Service) Endpoints() map[string]Endpoint { es := make(map[string]Endpoint, len(s.p.Services[s.id].Endpoints)) for id := range s.p.Services[s.id].Endpoints { - es[id] = Endpoint{ - id: id, + if id.Variant != 0 { + continue + } + es[id.Region] = Endpoint{ + id: id.Region, serviceID: s.id, p: s.p, } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go index c6c6a033..89f6627d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go @@ -1,6 +1,7 @@ package endpoints import ( + "encoding/json" "fmt" "regexp" "strconv" @@ -12,6 +13,34 @@ const ( ec2MetadataEndpointIPv4 = "http://169.254.169.254/latest" ) +const dnsSuffixTemplateKey = "{dnsSuffix}" + +// defaultKey is a compound map key of a variant and other values. +type defaultKey struct { + Variant endpointVariant + ServiceVariant serviceVariant +} + +// endpointKey is a compound map key of a region and associated variant value. +type endpointKey struct { + Region string + Variant endpointVariant +} + +// endpointVariant is a bit field to describe the endpoints attributes. +type endpointVariant uint64 + +// serviceVariant is a bit field to describe the service endpoint attributes. +type serviceVariant uint64 + +const ( + // fipsVariant indicates that the endpoint is FIPS capable. + fipsVariant endpointVariant = 1 << (64 - 1 - iota) + + // dualStackVariant indicates that the endpoint is DualStack capable. + dualStackVariant +) + var regionValidationRegex = regexp.MustCompile(`^[[:alnum:]]([[:alnum:]\-]*[[:alnum:]])?$`) type partitions []partition @@ -20,8 +49,12 @@ func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) var opt Options opt.Set(opts...) 
+ if len(opt.ResolvedRegion) > 0 { + region = opt.ResolvedRegion + } + for i := 0; i < len(ps); i++ { - if !ps[i].canResolveEndpoint(service, region, opt.StrictMatching) { + if !ps[i].canResolveEndpoint(service, region, opt) { continue } @@ -49,14 +82,76 @@ func (ps partitions) Partitions() []Partition { return parts } +type endpointWithVariants struct { + endpoint + Variants []endpointWithTags `json:"variants"` +} + +type endpointWithTags struct { + endpoint + Tags []string `json:"tags"` +} + +type endpointDefaults map[defaultKey]endpoint + +func (p *endpointDefaults) UnmarshalJSON(data []byte) error { + if *p == nil { + *p = make(endpointDefaults) + } + + var e endpointWithVariants + if err := json.Unmarshal(data, &e); err != nil { + return err + } + + (*p)[defaultKey{Variant: 0}] = e.endpoint + + e.Hostname = "" + e.DNSSuffix = "" + + for _, variant := range e.Variants { + endpointVariant, unknown := parseVariantTags(variant.Tags) + if unknown { + continue + } + + var ve endpoint + ve.mergeIn(e.endpoint) + ve.mergeIn(variant.endpoint) + + (*p)[defaultKey{Variant: endpointVariant}] = ve + } + + return nil +} + +func parseVariantTags(tags []string) (ev endpointVariant, unknown bool) { + if len(tags) == 0 { + unknown = true + return + } + + for _, tag := range tags { + switch { + case strings.EqualFold("fips", tag): + ev |= fipsVariant + case strings.EqualFold("dualstack", tag): + ev |= dualStackVariant + default: + unknown = true + } + } + return ev, unknown +} + type partition struct { - ID string `json:"partition"` - Name string `json:"partitionName"` - DNSSuffix string `json:"dnsSuffix"` - RegionRegex regionRegex `json:"regionRegex"` - Defaults endpoint `json:"defaults"` - Regions regions `json:"regions"` - Services services `json:"services"` + ID string `json:"partition"` + Name string `json:"partitionName"` + DNSSuffix string `json:"dnsSuffix"` + RegionRegex regionRegex `json:"regionRegex"` + Defaults endpointDefaults `json:"defaults"` + Regions regions `json:"regions"` + Services services `json:"services"` } func (p partition) Partition() Partition { @@ -67,15 +162,18 @@ func (p partition) Partition() Partition { } } -func (p partition) canResolveEndpoint(service, region string, strictMatch bool) bool { +func (p partition) canResolveEndpoint(service, region string, options Options) bool { s, hasService := p.Services[service] - _, hasEndpoint := s.Endpoints[region] + _, hasEndpoint := s.Endpoints[endpointKey{ + Region: region, + Variant: options.getEndpointVariant(service), + }] if hasEndpoint && hasService { return true } - if strictMatch { + if options.StrictMatching { return false } @@ -106,6 +204,10 @@ func (p partition) EndpointFor(service, region string, opts ...func(*Options)) ( var opt Options opt.Set(opts...) 
+ if len(opt.ResolvedRegion) > 0 { + region = opt.ResolvedRegion + } + s, hasService := p.Services[service] if service == Ec2metadataServiceID && !hasService { @@ -123,21 +225,44 @@ func (p partition) EndpointFor(service, region string, opts ...func(*Options)) ( region = s.PartitionEndpoint } - if (service == "sts" && opt.STSRegionalEndpoint != RegionalSTSEndpoint) || - (service == "s3" && opt.S3UsEast1RegionalEndpoint != RegionalS3UsEast1Endpoint) { - if _, ok := legacyGlobalRegions[service][region]; ok { - region = "aws-global" - } + if r, ok := isLegacyGlobalRegion(service, region, opt); ok { + region = r + } + + variant := opt.getEndpointVariant(service) + + endpoints := s.Endpoints + + serviceDefaults, hasServiceDefault := s.Defaults[defaultKey{Variant: variant}] + // If we searched for a variant which may have no explicit service defaults, + // then we need to inherit the standard service defaults except the hostname and dnsSuffix + if variant != 0 && !hasServiceDefault { + serviceDefaults = s.Defaults[defaultKey{}] + serviceDefaults.Hostname = "" + serviceDefaults.DNSSuffix = "" } - e, hasEndpoint := s.endpointForRegion(region) - if len(region) == 0 || (!hasEndpoint && opt.StrictMatching) { - return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints)) + partitionDefaults, hasPartitionDefault := p.Defaults[defaultKey{Variant: variant}] + + var dnsSuffix string + if len(serviceDefaults.DNSSuffix) > 0 { + dnsSuffix = serviceDefaults.DNSSuffix + } else if variant == 0 { + // For legacy reasons the partition dnsSuffix is not in the defaults, so if we looked for + // a non-variant endpoint then we need to set the dnsSuffix. + dnsSuffix = p.DNSSuffix + } + + noDefaults := !hasServiceDefault && !hasPartitionDefault + + e, hasEndpoint := s.endpointForRegion(region, endpoints, variant) + if len(region) == 0 || (!hasEndpoint && (opt.StrictMatching || noDefaults)) { + return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(endpoints, variant)) } - defs := []endpoint{p.Defaults, s.Defaults} + defs := []endpoint{partitionDefaults, serviceDefaults} - return e.resolve(service, p.ID, region, p.DNSSuffix, defs, opt) + return e.resolve(service, p.ID, region, dnsSuffixTemplateKey, dnsSuffix, defs, opt) } func getEC2MetadataEndpoint(partitionID, service string, mode EC2IMDSEndpointModeState) ResolvedEndpoint { @@ -165,6 +290,31 @@ func getEC2MetadataEndpoint(partitionID, service string, mode EC2IMDSEndpointMod } } +func isLegacyGlobalRegion(service string, region string, opt Options) (string, bool) { + if opt.getEndpointVariant(service) != 0 { + return "", false + } + + const ( + sts = "sts" + s3 = "s3" + awsGlobal = "aws-global" + ) + + switch { + case service == sts && opt.STSRegionalEndpoint == RegionalSTSEndpoint: + return region, false + case service == s3 && opt.S3UsEast1RegionalEndpoint == RegionalS3UsEast1Endpoint: + return region, false + default: + if _, ok := legacyGlobalRegions[service][region]; ok { + return awsGlobal, true + } + } + + return region, false +} + func serviceList(ss services) []string { list := make([]string, 0, len(ss)) for k := range ss { @@ -172,10 +322,13 @@ func serviceList(ss services) []string { } return list } -func endpointList(es endpoints) []string { +func endpointList(es serviceEndpoints, variant endpointVariant) []string { list := make([]string, 0, len(es)) for k := range es { - list = append(list, k) + if k.Variant != variant { + continue + } + list = append(list, k.Region) } return list } @@ -207,19 +360,19 @@ 
type region struct { type services map[string]service type service struct { - PartitionEndpoint string `json:"partitionEndpoint"` - IsRegionalized boxedBool `json:"isRegionalized,omitempty"` - Defaults endpoint `json:"defaults"` - Endpoints endpoints `json:"endpoints"` + PartitionEndpoint string `json:"partitionEndpoint"` + IsRegionalized boxedBool `json:"isRegionalized,omitempty"` + Defaults endpointDefaults `json:"defaults"` + Endpoints serviceEndpoints `json:"endpoints"` } -func (s *service) endpointForRegion(region string) (endpoint, bool) { - if e, ok := s.Endpoints[region]; ok { +func (s *service) endpointForRegion(region string, endpoints serviceEndpoints, variant endpointVariant) (endpoint, bool) { + if e, ok := endpoints[endpointKey{Region: region, Variant: variant}]; ok { return e, true } if s.IsRegionalized == boxedFalse { - return s.Endpoints[s.PartitionEndpoint], region == s.PartitionEndpoint + return endpoints[endpointKey{Region: s.PartitionEndpoint, Variant: variant}], region == s.PartitionEndpoint } // Unable to find any matching endpoint, return @@ -227,22 +380,73 @@ func (s *service) endpointForRegion(region string) (endpoint, bool) { return endpoint{}, false } -type endpoints map[string]endpoint +type serviceEndpoints map[endpointKey]endpoint + +func (s *serviceEndpoints) UnmarshalJSON(data []byte) error { + if *s == nil { + *s = make(serviceEndpoints) + } + + var regionToEndpoint map[string]endpointWithVariants + + if err := json.Unmarshal(data, ®ionToEndpoint); err != nil { + return err + } + + for region, e := range regionToEndpoint { + (*s)[endpointKey{Region: region}] = e.endpoint + + e.Hostname = "" + e.DNSSuffix = "" + + for _, variant := range e.Variants { + endpointVariant, unknown := parseVariantTags(variant.Tags) + if unknown { + continue + } + + var ve endpoint + ve.mergeIn(e.endpoint) + ve.mergeIn(variant.endpoint) + + (*s)[endpointKey{Region: region, Variant: endpointVariant}] = ve + } + } + + return nil +} type endpoint struct { Hostname string `json:"hostname"` Protocols []string `json:"protocols"` CredentialScope credentialScope `json:"credentialScope"` - // Custom fields not modeled - HasDualStack boxedBool `json:"-"` - DualStackHostname string `json:"-"` + DNSSuffix string `json:"dnsSuffix"` // Signature Version not used SignatureVersions []string `json:"signatureVersions"` // SSLCommonName not used. SSLCommonName string `json:"sslCommonName"` + + Deprecated boxedBool `json:"deprecated"` +} + +// isZero returns whether the endpoint structure is an empty (zero) value. 
+func (e endpoint) isZero() bool { + switch { + case len(e.Hostname) != 0: + return false + case len(e.Protocols) != 0: + return false + case e.CredentialScope != (credentialScope{}): + return false + case len(e.SignatureVersions) != 0: + return false + case len(e.SSLCommonName) != 0: + return false + } + return true } const ( @@ -271,7 +475,7 @@ func getByPriority(s []string, p []string, def string) string { return s[0] } -func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs []endpoint, opts Options) (ResolvedEndpoint, error) { +func (e endpoint) resolve(service, partitionID, region, dnsSuffixTemplateVariable, dnsSuffix string, defs []endpoint, opts Options) (ResolvedEndpoint, error) { var merged endpoint for _, def := range defs { merged.mergeIn(def) @@ -292,23 +496,26 @@ func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs [ } hostname := e.Hostname - // Offset the hostname for dualstack if enabled - if opts.UseDualStack && e.HasDualStack == boxedTrue { - hostname = e.DualStackHostname - region = signingRegion - } if !validateInputRegion(region) { return ResolvedEndpoint{}, fmt.Errorf("invalid region identifier format provided") } + if len(merged.DNSSuffix) > 0 { + dnsSuffix = merged.DNSSuffix + } + u := strings.Replace(hostname, "{service}", service, 1) u = strings.Replace(u, "{region}", region, 1) - u = strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1) + u = strings.Replace(u, dnsSuffixTemplateVariable, dnsSuffix, 1) scheme := getEndpointScheme(e.Protocols, opts.DisableSSL) u = fmt.Sprintf("%s://%s", scheme, u) + if e.Deprecated == boxedTrue && opts.LogDeprecated && opts.Logger != nil { + opts.Logger.Log(fmt.Sprintf("endpoint identifier %q, url %q marked as deprecated", region, u)) + } + return ResolvedEndpoint{ URL: u, PartitionID: partitionID, @@ -346,11 +553,11 @@ func (e *endpoint) mergeIn(other endpoint) { if len(other.SSLCommonName) > 0 { e.SSLCommonName = other.SSLCommonName } - if other.HasDualStack != boxedBoolUnset { - e.HasDualStack = other.HasDualStack + if len(other.DNSSuffix) > 0 { + e.DNSSuffix = other.DNSSuffix } - if len(other.DualStackHostname) > 0 { - e.DualStackHostname = other.DualStackHostname + if other.Deprecated != boxedBoolUnset { + e.Deprecated = other.Deprecated } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go index db6efd60..84922bca 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go @@ -155,18 +155,71 @@ func serviceSet(ps partitions) map[string]struct{} { return set } +func endpointVariantSetter(variant endpointVariant) (string, error) { + if variant == 0 { + return "0", nil + } + + if variant > (fipsVariant | dualStackVariant) { + return "", fmt.Errorf("unknown endpoint variant") + } + + var symbols []string + if variant&fipsVariant != 0 { + symbols = append(symbols, "fipsVariant") + } + if variant&dualStackVariant != 0 { + symbols = append(symbols, "dualStackVariant") + } + v := strings.Join(symbols, "|") + + return v, nil +} + +func endpointKeySetter(e endpointKey) (string, error) { + var sb strings.Builder + sb.WriteString("endpointKey{\n") + sb.WriteString(fmt.Sprintf("Region: %q,\n", e.Region)) + if e.Variant != 0 { + variantSetter, err := endpointVariantSetter(e.Variant) + if err != nil { + return "", err + } + sb.WriteString(fmt.Sprintf("Variant: %s,\n", variantSetter)) + } + sb.WriteString("}") + 
return sb.String(), nil +} + +func defaultKeySetter(e defaultKey) (string, error) { + var sb strings.Builder + sb.WriteString("defaultKey{\n") + if e.Variant != 0 { + variantSetter, err := endpointVariantSetter(e.Variant) + if err != nil { + return "", err + } + sb.WriteString(fmt.Sprintf("Variant: %s,\n", variantSetter)) + } + sb.WriteString("}") + return sb.String(), nil +} + var funcMap = template.FuncMap{ - "ToSymbol": toSymbol, - "QuoteString": quoteString, - "RegionConst": regionConstName, - "PartitionGetter": partitionGetter, - "PartitionVarName": partitionVarName, - "ListPartitionNames": listPartitionNames, - "BoxedBoolIfSet": boxedBoolIfSet, - "StringIfSet": stringIfSet, - "StringSliceIfSet": stringSliceIfSet, - "EndpointIsSet": endpointIsSet, - "ServicesSet": serviceSet, + "ToSymbol": toSymbol, + "QuoteString": quoteString, + "RegionConst": regionConstName, + "PartitionGetter": partitionGetter, + "PartitionVarName": partitionVarName, + "ListPartitionNames": listPartitionNames, + "BoxedBoolIfSet": boxedBoolIfSet, + "StringIfSet": stringIfSet, + "StringSliceIfSet": stringSliceIfSet, + "EndpointIsSet": endpointIsSet, + "ServicesSet": serviceSet, + "EndpointVariantSetter": endpointVariantSetter, + "EndpointKeySetter": endpointKeySetter, + "DefaultKeySetter": defaultKeySetter, } const v3Tmpl = ` @@ -272,9 +325,9 @@ partition{ {{ StringIfSet "Name: %q,\n" .Name -}} {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}} RegionRegex: {{ template "gocode RegionRegex" .RegionRegex }}, - {{ if EndpointIsSet .Defaults -}} - Defaults: {{ template "gocode Endpoint" .Defaults }}, - {{- end }} + {{ if (gt (len .Defaults) 0) -}} + Defaults: {{ template "gocode Defaults" .Defaults -}}, + {{ end -}} Regions: {{ template "gocode Regions" .Regions }}, Services: {{ template "gocode Services" .Services }}, } @@ -315,19 +368,27 @@ services{ service{ {{ StringIfSet "PartitionEndpoint: %q,\n" .PartitionEndpoint -}} {{ BoxedBoolIfSet "IsRegionalized: %s,\n" .IsRegionalized -}} - {{ if EndpointIsSet .Defaults -}} - Defaults: {{ template "gocode Endpoint" .Defaults -}}, - {{- end }} + {{ if (gt (len .Defaults) 0) -}} + Defaults: {{ template "gocode Defaults" .Defaults -}}, + {{ end -}} {{ if .Endpoints -}} Endpoints: {{ template "gocode Endpoints" .Endpoints }}, {{- end }} } {{- end }} +{{ define "gocode Defaults" -}} +endpointDefaults{ + {{ range $id, $endpoint := . -}} + {{ DefaultKeySetter $id }}: {{ template "gocode Endpoint" $endpoint }}, + {{ end }} +} +{{- end }} + {{ define "gocode Endpoints" -}} -endpoints{ +serviceEndpoints{ {{ range $id, $endpoint := . 
-}} - "{{ $id }}": {{ template "gocode Endpoint" $endpoint }}, + {{ EndpointKeySetter $id }}: {{ template "gocode Endpoint" $endpoint }}, {{ end }} } {{- end }} @@ -335,6 +396,7 @@ endpoints{ {{ define "gocode Endpoint" -}} endpoint{ {{ StringIfSet "Hostname: %q,\n" .Hostname -}} + {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}} {{ StringIfSet "SSLCommonName: %q,\n" .SSLCommonName -}} {{ StringSliceIfSet "Protocols: []string{%s},\n" .Protocols -}} {{ StringSliceIfSet "SignatureVersions: []string{%s},\n" .SignatureVersions -}} @@ -344,9 +406,7 @@ endpoint{ {{ StringIfSet "Service: %q,\n" .CredentialScope.Service -}} }, {{- end }} - {{ BoxedBoolIfSet "HasDualStack: %s,\n" .HasDualStack -}} - {{ StringIfSet "DualStackHostname: %q,\n" .DualStackHostname -}} - + {{ BoxedBoolIfSet "Deprecated: %s,\n" .Deprecated -}} } {{- end }} ` diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go index 6ed15b2e..49674cc7 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/logger.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/logger.go @@ -77,6 +77,9 @@ const ( // wire unmarshaled message content of requests and responses made while // using the SDK Will also enable LogDebug. LogDebugWithEventStreamBody + + // LogDebugWithDeprecated states the SDK should log details about deprecated functionality. + LogDebugWithDeprecated ) // A Logger is a minimalistic interface for the SDK to log messages to. Should diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go index e819ab6c..9556332b 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go @@ -330,6 +330,9 @@ func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) { // WithSetRequestHeaders updates the operation request's HTTP header to contain // the header key value pairs provided. If the header key already exists in the // request's HTTP header set, the existing value(s) will be replaced. +// +// Header keys added will be added as canonical format with title casing +// applied via http.Header.Set method. func WithSetRequestHeaders(h map[string]string) Option { return withRequestHeader(h).SetRequestHeaders } @@ -338,6 +341,6 @@ type withRequestHeader map[string]string func (h withRequestHeader) SetRequestHeaders(r *Request) { for k, v := range h { - r.HTTPRequest.Header[k] = []string{v} + r.HTTPRequest.Header.Set(k, v) } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go index fb0a68fc..636d9ec9 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -4,6 +4,7 @@ import ( "bytes" "fmt" "io" + "io/ioutil" "net/http" "net/url" "reflect" @@ -525,6 +526,14 @@ func (r *Request) GetBody() io.ReadSeeker { // Send will not close the request.Request's body. func (r *Request) Send() error { defer func() { + // Ensure a non-nil HTTPResponse parameter is set to ensure handlers + // checking for HTTPResponse values, don't fail. + if r.HTTPResponse == nil { + r.HTTPResponse = &http.Response{ + Header: http.Header{}, + Body: ioutil.NopCloser(&bytes.Buffer{}), + } + } // Regardless of success or failure of the request trigger the Complete // request handlers. 
r.Handlers.Complete.Run(r) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go index 752ae47f..3f0001f9 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -15,8 +15,8 @@ import ( // and determine if a request API error should be retried. // // client.DefaultRetryer is the SDK's default implementation of the Retryer. It -// uses the which uses the Request.IsErrorRetryable and Request.IsErrorThrottle -// methods to determine if the request is retried. +// uses the Request.IsErrorRetryable and Request.IsErrorThrottle methods to +// determine if the request is retried. type Retryer interface { // RetryRules return the retry delay that should be used by the SDK before // making another request attempt for the failed request. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go index 3efdac29..ea8e3537 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go @@ -14,8 +14,22 @@ import ( "github.com/aws/aws-sdk-go/aws/defaults" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/internal/shareddefaults" + "github.com/aws/aws-sdk-go/service/ssooidc" + "github.com/aws/aws-sdk-go/service/sts" ) +// CredentialsProviderOptions specifies additional options for configuring +// credentials providers. +type CredentialsProviderOptions struct { + // WebIdentityRoleProviderOptions configures a WebIdentityRoleProvider, + // such as setting its ExpiryWindow. + WebIdentityRoleProviderOptions func(*stscreds.WebIdentityRoleProvider) + + // ProcessProviderOptions configures a ProcessProvider, + // such as setting its Timeout. + ProcessProviderOptions func(*processcreds.ProcessProvider) +} + func resolveCredentials(cfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers, @@ -24,7 +38,7 @@ func resolveCredentials(cfg *aws.Config, switch { case len(sessOpts.Profile) != 0: - // User explicitly provided an Profile in the session's configuration + // User explicitly provided a Profile in the session's configuration // so load that profile from shared config first. 
// Github(aws/aws-sdk-go#2727) return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) @@ -40,6 +54,7 @@ func resolveCredentials(cfg *aws.Config, envCfg.WebIdentityTokenFilePath, envCfg.RoleARN, envCfg.RoleSessionName, + sessOpts.CredentialsProviderOptions, ) default: @@ -59,6 +74,7 @@ var WebIdentityEmptyTokenFilePathErr = awserr.New(stscreds.ErrCodeWebIdentity, " func assumeWebIdentity(cfg *aws.Config, handlers request.Handlers, filepath string, roleARN, sessionName string, + credOptions *CredentialsProviderOptions, ) (*credentials.Credentials, error) { if len(filepath) == 0 { @@ -69,17 +85,18 @@ func assumeWebIdentity(cfg *aws.Config, handlers request.Handlers, return nil, WebIdentityEmptyRoleARNErr } - creds := stscreds.NewWebIdentityCredentials( - &Session{ - Config: cfg, - Handlers: handlers.Copy(), - }, - roleARN, - sessionName, - filepath, - ) + svc := sts.New(&Session{ + Config: cfg, + Handlers: handlers.Copy(), + }) - return creds, nil + var optFns []func(*stscreds.WebIdentityRoleProvider) + if credOptions != nil && credOptions.WebIdentityRoleProviderOptions != nil { + optFns = append(optFns, credOptions.WebIdentityRoleProviderOptions) + } + + p := stscreds.NewWebIdentityRoleProviderWithOptions(svc, roleARN, sessionName, stscreds.FetchTokenPath(filepath), optFns...) + return credentials.NewCredentials(p), nil } func resolveCredsFromProfile(cfg *aws.Config, @@ -114,6 +131,7 @@ func resolveCredsFromProfile(cfg *aws.Config, sharedCfg.WebIdentityTokenFile, sharedCfg.RoleARN, sharedCfg.RoleSessionName, + sessOpts.CredentialsProviderOptions, ) case sharedCfg.hasSSOConfiguration(): @@ -121,7 +139,11 @@ func resolveCredsFromProfile(cfg *aws.Config, case len(sharedCfg.CredentialProcess) != 0: // Get credentials from CredentialProcess - creds = processcreds.NewCredentials(sharedCfg.CredentialProcess) + var optFns []func(*processcreds.ProcessProvider) + if sessOpts.CredentialsProviderOptions != nil && sessOpts.CredentialsProviderOptions.ProcessProviderOptions != nil { + optFns = append(optFns, sessOpts.CredentialsProviderOptions.ProcessProviderOptions) + } + creds = processcreds.NewCredentials(sharedCfg.CredentialProcess, optFns...) 
 	default:
 		// Fallback to default credentials provider, include mock errors for
@@ -160,8 +182,28 @@ func resolveSSOCredentials(cfg *aws.Config, sharedCfg sharedConfig, handlers req
 		return nil, err
 	}
 
+	var optFns []func(provider *ssocreds.Provider)
 	cfgCopy := cfg.Copy()
-	cfgCopy.Region = &sharedCfg.SSORegion
+
+	if sharedCfg.SSOSession != nil {
+		cfgCopy.Region = &sharedCfg.SSOSession.SSORegion
+		cachedPath, err := ssocreds.StandardCachedTokenFilepath(sharedCfg.SSOSession.Name)
+		if err != nil {
+			return nil, err
+		}
+		// create oidcClient with AnonymousCredentials to avoid recursively resolving credentials
+		mySession := Must(NewSession(&aws.Config{
+			Credentials: credentials.AnonymousCredentials,
+		}))
+		oidcClient := ssooidc.New(mySession, cfgCopy)
+		tokenProvider := ssocreds.NewSSOTokenProvider(oidcClient, cachedPath)
+		optFns = append(optFns, func(p *ssocreds.Provider) {
+			p.TokenProvider = tokenProvider
+			p.CachedTokenFilepath = cachedPath
+		})
+	} else {
+		cfgCopy.Region = &sharedCfg.SSORegion
+	}
 
 	return ssocreds.NewCredentials(
 		&Session{
@@ -171,6 +213,7 @@ func resolveSSOCredentials(cfg *aws.Config, sharedCfg sharedConfig, handlers req
 		sharedCfg.SSOAccountID,
 		sharedCfg.SSORoleName,
 		sharedCfg.SSOStartURL,
+		optFns...,
 	), nil
 }
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
index 43b56863..ff3cc012 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
@@ -285,5 +285,83 @@ The custom EC2 IMDS endpoint can also be specified via the Session options.
 	sess, err := session.NewSessionWithOptions(session.Options{
 	    EC2MetadataEndpoint: "http://[::1]",
 	})
+
+FIPS and DualStack Endpoints
+
+The SDK can be configured to resolve an endpoint with certain capabilities such as FIPS and DualStack.
+
+You can configure a FIPS endpoint using an environment variable, shared config ($HOME/.aws/config),
+or programmatically.
+
+To configure a FIPS endpoint, set the AWS_USE_FIPS_ENDPOINT environment variable to true or false to enable
+or disable FIPS endpoint resolution.
+
+	AWS_USE_FIPS_ENDPOINT=true
+
+To configure a FIPS endpoint using shared config, set use_fips_endpoint to true or false to enable
+or disable FIPS endpoint resolution.
+
+	[profile myprofile]
+	region=us-west-2
+	use_fips_endpoint=true
+
+To configure a FIPS endpoint programmatically
+
+	// Option 1: Configure it on a session for all clients
+	sess, err := session.NewSessionWithOptions(session.Options{
+	    UseFIPSEndpoint: endpoints.FIPSEndpointStateEnabled,
+	})
+	if err != nil {
+	    // handle error
+	}
+
+	client := s3.New(sess)
+
+	// Option 2: Configure it per client
+	sess, err := session.NewSession()
+	if err != nil {
+	    // handle error
+	}
+
+	client := s3.New(sess, &aws.Config{
+	    UseFIPSEndpoint: endpoints.FIPSEndpointStateEnabled,
+	})
+
+You can configure a DualStack endpoint using an environment variable, shared config ($HOME/.aws/config),
+or programmatically.
+
+To configure a DualStack endpoint, set the AWS_USE_DUALSTACK_ENDPOINT environment variable to true or false to
+enable or disable DualStack endpoint resolution.
+ + [profile myprofile] + region=us-west-2 + use_dualstack_endpoint=true + +To configure a DualStack endpoint programmatically + + // Option 1: Configure it on a session for all clients + sess, err := session.NewSessionWithOptions(session.Options{ + UseDualStackEndpoint: endpoints.DualStackEndpointStateEnabled, + }) + if err != nil { + // handle error + } + + client := s3.New(sess) + + // Option 2: Configure it per client + sess, err := session.NewSession() + if err != nil { + // handle error + } + + client := s3.New(sess, &aws.Config{ + UseDualStackEndpoint: endpoints.DualStackEndpointStateEnabled, + }) */ package session diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go index fffe2f35..93bb5de6 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go @@ -170,6 +170,24 @@ type envConfig struct { // // AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6 EC2IMDSEndpointMode endpoints.EC2IMDSEndpointModeState + + // Specifies that IMDS clients should not fallback to IMDSv1 if token + // requests fail. + // + // AWS_EC2_METADATA_V1_DISABLED=true + EC2IMDSv1Disabled *bool + + // Specifies that SDK clients must resolve a dual-stack endpoint for + // services. + // + // AWS_USE_DUALSTACK_ENDPOINT=true + UseDualStackEndpoint endpoints.DualStackEndpointState + + // Specifies that SDK clients must resolve a FIPS endpoint for + // services. + // + // AWS_USE_FIPS_ENDPOINT=true + UseFIPSEndpoint endpoints.FIPSEndpointState } var ( @@ -239,6 +257,9 @@ var ( ec2IMDSEndpointModeEnvKey = []string{ "AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE", } + ec2MetadataV1DisabledEnvKey = []string{ + "AWS_EC2_METADATA_V1_DISABLED", + } useCABundleKey = []string{ "AWS_CA_BUNDLE", } @@ -248,6 +269,12 @@ var ( useClientTLSKey = []string{ "AWS_SDK_GO_CLIENT_TLS_KEY", } + awsUseDualStackEndpoint = []string{ + "AWS_USE_DUALSTACK_ENDPOINT", + } + awsUseFIPSEndpoint = []string{ + "AWS_USE_FIPS_ENDPOINT", + } ) // loadEnvConfig retrieves the SDK's environment configuration. 
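For illustration only (this sketch is not part of the vendored patch): the FIPS and DualStack states wired up in the hunks above can also be exercised directly through the public endpoints API this update introduces. The service name, region, and resulting URL below are assumptions chosen as examples; only DefaultResolver, EndpointFor, and UseFIPSEndpointOption are taken from the patched package.

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws/endpoints"
	)

	func main() {
		// Resolve the S3 endpoint for us-west-2 with the FIPS variant enabled.
		// UseFIPSEndpointOption sets Options.UseFIPSEndpoint to
		// FIPSEndpointStateEnabled, the same state that
		// AWS_USE_FIPS_ENDPOINT=true selects through envConfig.
		resolved, err := endpoints.DefaultResolver().EndpointFor(
			"s3", "us-west-2",
			endpoints.UseFIPSEndpointOption,
		)
		if err != nil {
			fmt.Println("resolve error:", err)
			return
		}
		fmt.Println(resolved.URL) // expected form: https://s3-fips.us-west-2.amazonaws.com
	}

The DualStack analog is endpoints.UseDualStackEndpointOption; per getEndpointVariant above, supplying both options selects the fipsVariant|dualStackVariant endpoint key when the partition models it.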
@@ -375,6 +402,15 @@ func envConfigLoad(enableSharedConfig bool) (envConfig, error) { if err := setEC2IMDSEndpointMode(&cfg.EC2IMDSEndpointMode, ec2IMDSEndpointModeEnvKey); err != nil { return envConfig{}, err } + setBoolPtrFromEnvVal(&cfg.EC2IMDSv1Disabled, ec2MetadataV1DisabledEnvKey) + + if err := setUseDualStackEndpointFromEnvVal(&cfg.UseDualStackEndpoint, awsUseDualStackEndpoint); err != nil { + return cfg, err + } + + if err := setUseFIPSEndpointFromEnvVal(&cfg.UseFIPSEndpoint, awsUseFIPSEndpoint); err != nil { + return cfg, err + } return cfg, nil } @@ -388,6 +424,24 @@ func setFromEnvVal(dst *string, keys []string) { } } +func setBoolPtrFromEnvVal(dst **bool, keys []string) { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue + } + + switch { + case strings.EqualFold(value, "false"): + *dst = new(bool) + **dst = false + case strings.EqualFold(value, "true"): + *dst = new(bool) + **dst = true + } + } +} + func setEC2IMDSEndpointMode(mode *endpoints.EC2IMDSEndpointModeState, keys []string) error { for _, k := range keys { value := os.Getenv(k) @@ -401,3 +455,45 @@ func setEC2IMDSEndpointMode(mode *endpoints.EC2IMDSEndpointModeState, keys []str } return nil } + +func setUseDualStackEndpointFromEnvVal(dst *endpoints.DualStackEndpointState, keys []string) error { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue // skip if empty + } + + switch { + case strings.EqualFold(value, "true"): + *dst = endpoints.DualStackEndpointStateEnabled + case strings.EqualFold(value, "false"): + *dst = endpoints.DualStackEndpointStateDisabled + default: + return fmt.Errorf( + "invalid value for environment variable, %s=%s, need true, false", + k, value) + } + } + return nil +} + +func setUseFIPSEndpointFromEnvVal(dst *endpoints.FIPSEndpointState, keys []string) error { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue // skip if empty + } + + switch { + case strings.EqualFold(value, "true"): + *dst = endpoints.FIPSEndpointStateEnabled + case strings.EqualFold(value, "false"): + *dst = endpoints.FIPSEndpointStateDisabled + default: + return fmt.Errorf( + "invalid value for environment variable, %s=%s, need true, false", + k, value) + } + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go index 4b2e057e..3c88dee5 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -8,6 +8,7 @@ import ( "io/ioutil" "net/http" "os" + "strings" "time" "github.com/aws/aws-sdk-go/aws" @@ -36,7 +37,7 @@ const ( // ErrSharedConfigSourceCollision will be returned if a section contains both // source_profile and credential_source -var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only one credential type may be specified per profile: source profile, credential source, credential process, web identity token, or sso", nil) +var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only one credential type may be specified per profile: source profile, credential source, credential process, web identity token", nil) // ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment // variables are empty and Environment was set as the credential source @@ -173,7 +174,6 @@ const ( // Options provides the means to control how a Session is created and what // configuration values will be loaded. 
-// type Options struct { // Provides config values for the SDK to use when creating service clients // and making API requests to services. Any value set in with this field @@ -223,7 +223,7 @@ type Options struct { // from stdin for the MFA token code. // // This field is only used if the shared configuration is enabled, and - // the config enables assume role wit MFA via the mfa_serial field. + // the config enables assume role with MFA via the mfa_serial field. AssumeRoleTokenProvider func() (string, error) // When the SDK's shared config is configured to assume a role this option @@ -303,6 +303,11 @@ type Options struct { // // AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6 EC2IMDSEndpointMode endpoints.EC2IMDSEndpointModeState + + // Specifies options for creating credential providers. + // These are only used if the aws.Config does not already + // include credentials. + CredentialsProviderOptions *CredentialsProviderOptions } // NewSessionWithOptions returns a new Session created from SDK defaults, config files, @@ -316,24 +321,24 @@ type Options struct { // credentials file. Enabling the Shared Config will also allow the Session // to be built with retrieving credentials with AssumeRole set in the config. // -// // Equivalent to session.New -// sess := session.Must(session.NewSessionWithOptions(session.Options{})) +// // Equivalent to session.New +// sess := session.Must(session.NewSessionWithOptions(session.Options{})) // -// // Specify profile to load for the session's config -// sess := session.Must(session.NewSessionWithOptions(session.Options{ -// Profile: "profile_name", -// })) +// // Specify profile to load for the session's config +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// Profile: "profile_name", +// })) // -// // Specify profile for config and region for requests -// sess := session.Must(session.NewSessionWithOptions(session.Options{ -// Config: aws.Config{Region: aws.String("us-east-1")}, -// Profile: "profile_name", -// })) +// // Specify profile for config and region for requests +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// Config: aws.Config{Region: aws.String("us-east-1")}, +// Profile: "profile_name", +// })) // -// // Force enable Shared Config support -// sess := session.Must(session.NewSessionWithOptions(session.Options{ -// SharedConfigState: session.SharedConfigEnable, -// })) +// // Force enable Shared Config support +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// SharedConfigState: session.SharedConfigEnable, +// })) func NewSessionWithOptions(opts Options) (*Session, error) { var envCfg envConfig var err error @@ -369,7 +374,7 @@ func NewSessionWithOptions(opts Options) (*Session, error) { // This helper is intended to be used in variable initialization to load the // Session and configuration at startup. Such as: // -// var sess = session.Must(session.NewSession()) +// var sess = session.Must(session.NewSession()) func Must(sess *Session, err error) *Session { if err != nil { panic(err) @@ -774,14 +779,12 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, ec2IMDSEndpoint, endpointMode) } - // Configure credentials if not already set by the user when creating the - // Session. 
- if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { - creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts) - if err != nil { - return err - } - cfg.Credentials = creds + cfg.EC2MetadataEnableFallback = userCfg.EC2MetadataEnableFallback + if cfg.EC2MetadataEnableFallback == nil && envCfg.EC2IMDSv1Disabled != nil { + cfg.EC2MetadataEnableFallback = aws.Bool(!*envCfg.EC2IMDSv1Disabled) + } + if cfg.EC2MetadataEnableFallback == nil && sharedCfg.EC2IMDSv1Disabled != nil { + cfg.EC2MetadataEnableFallback = aws.Bool(!*sharedCfg.EC2IMDSv1Disabled) } cfg.S3UseARNRegion = userCfg.S3UseARNRegion @@ -792,6 +795,31 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, cfg.S3UseARNRegion = &sharedCfg.S3UseARNRegion } + for _, v := range []endpoints.DualStackEndpointState{userCfg.UseDualStackEndpoint, envCfg.UseDualStackEndpoint, sharedCfg.UseDualStackEndpoint} { + if v != endpoints.DualStackEndpointStateUnset { + cfg.UseDualStackEndpoint = v + break + } + } + + for _, v := range []endpoints.FIPSEndpointState{userCfg.UseFIPSEndpoint, envCfg.UseFIPSEndpoint, sharedCfg.UseFIPSEndpoint} { + if v != endpoints.FIPSEndpointStateUnset { + cfg.UseFIPSEndpoint = v + break + } + } + + // Configure credentials if not already set by the user when creating the Session. + // Credentials are resolved last such that all _resolved_ config values are propagated to credential providers. + // ticket: P83606045 + if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { + creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts) + if err != nil { + return err + } + cfg.Credentials = creds + } + return nil } @@ -825,8 +853,8 @@ func initHandlers(s *Session) { // and handlers. If any additional configs are provided they will be merged // on top of the Session's copied config. // -// // Create a copy of the current Session, configured for the us-west-2 region. -// sess.Copy(&aws.Config{Region: aws.String("us-west-2")}) +// // Create a copy of the current Session, configured for the us-west-2 region. +// sess.Copy(&aws.Config{Region: aws.String("us-west-2")}) func (s *Session) Copy(cfgs ...*aws.Config) *Session { newSession := &Session{ Config: s.Config.Copy(cfgs...), @@ -845,8 +873,10 @@ func (s *Session) Copy(cfgs ...*aws.Config) *Session { func (s *Session) ClientConfig(service string, cfgs ...*aws.Config) client.Config { s = s.Copy(cfgs...) 
+ resolvedRegion := normalizeRegion(s.Config) + region := aws.StringValue(s.Config.Region) - resolved, err := s.resolveEndpoint(service, region, s.Config) + resolved, err := s.resolveEndpoint(service, region, resolvedRegion, s.Config) if err != nil { s.Handlers.Validate.PushBack(func(r *request.Request) { if len(r.ClientInfo.Endpoint) != 0 { @@ -867,12 +897,13 @@ func (s *Session) ClientConfig(service string, cfgs ...*aws.Config) client.Confi SigningRegion: resolved.SigningRegion, SigningNameDerived: resolved.SigningNameDerived, SigningName: resolved.SigningName, + ResolvedRegion: resolvedRegion, } } const ec2MetadataServiceID = "ec2metadata" -func (s *Session) resolveEndpoint(service, region string, cfg *aws.Config) (endpoints.ResolvedEndpoint, error) { +func (s *Session) resolveEndpoint(service, region, resolvedRegion string, cfg *aws.Config) (endpoints.ResolvedEndpoint, error) { if ep := aws.StringValue(cfg.Endpoint); len(ep) != 0 { return endpoints.ResolvedEndpoint{ @@ -884,7 +915,12 @@ func (s *Session) resolveEndpoint(service, region string, cfg *aws.Config) (endp resolved, err := cfg.EndpointResolver.EndpointFor(service, region, func(opt *endpoints.Options) { opt.DisableSSL = aws.BoolValue(cfg.DisableSSL) + opt.UseDualStack = aws.BoolValue(cfg.UseDualStack) + opt.UseDualStackEndpoint = cfg.UseDualStackEndpoint + + opt.UseFIPSEndpoint = cfg.UseFIPSEndpoint + // Support for STSRegionalEndpoint where the STSRegionalEndpoint is // provided in envConfig or sharedConfig with envConfig getting // precedence. @@ -898,6 +934,11 @@ func (s *Session) resolveEndpoint(service, region string, cfg *aws.Config) (endp // Support the condition where the service is modeled but its // endpoint metadata is not available. opt.ResolveUnknownService = true + + opt.ResolvedRegion = resolvedRegion + + opt.Logger = cfg.Logger + opt.LogDeprecated = cfg.LogLevel.Matches(aws.LogDebugWithDeprecated) }, ) if err != nil { @@ -913,6 +954,8 @@ func (s *Session) resolveEndpoint(service, region string, cfg *aws.Config) (endp func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Config { s = s.Copy(cfgs...) + resolvedRegion := normalizeRegion(s.Config) + var resolved endpoints.ResolvedEndpoint if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 { resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL)) @@ -926,6 +969,7 @@ func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Conf SigningRegion: resolved.SigningRegion, SigningNameDerived: resolved.SigningNameDerived, SigningName: resolved.SigningName, + ResolvedRegion: resolvedRegion, } } @@ -939,3 +983,23 @@ func (s *Session) logDeprecatedNewSessionError(msg string, err error, cfgs []*aw r.Error = err }) } + +// normalizeRegion resolves / normalizes the configured region (converts pseudo fips regions), and modifies the provided +// config to have the equivalent options for resolution and returns the resolved region name. 
+func normalizeRegion(cfg *aws.Config) (resolved string) { + const fipsInfix = "-fips-" + const fipsPrefix = "-fips" + const fipsSuffix = "fips-" + + region := aws.StringValue(cfg.Region) + + if strings.Contains(region, fipsInfix) || + strings.Contains(region, fipsPrefix) || + strings.Contains(region, fipsSuffix) { + resolved = strings.Replace(strings.Replace(strings.Replace( + region, fipsInfix, "-", -1), fipsPrefix, "", -1), fipsSuffix, "", -1) + cfg.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled + } + + return resolved +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go index 6830ece7..f3ce8183 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -26,6 +26,13 @@ const ( roleSessionNameKey = `role_session_name` // optional roleDurationSecondsKey = "duration_seconds" // optional + // Prefix to be used for SSO sections. These are supposed to only exist in + // the shared config file, not the credentials file. + ssoSectionPrefix = `sso-session ` + + // AWS Single Sign-On (AWS SSO) group + ssoSessionNameKey = "sso_session" + // AWS Single Sign-On (AWS SSO) group ssoAccountIDKey = "sso_account_id" ssoRegionKey = "sso_region" @@ -72,6 +79,15 @@ const ( // EC2 IMDS Endpoint ec2MetadataServiceEndpointKey = "ec2_metadata_service_endpoint" + + // ECS IMDSv1 disable fallback + ec2MetadataV1DisabledKey = "ec2_metadata_v1_disabled" + + // Use DualStack Endpoint Resolution + useDualStackEndpoint = "use_dualstack_endpoint" + + // Use FIPS Endpoint Resolution + useFIPSEndpointKey = "use_fips_endpoint" ) // sharedConfig represents the configuration fields of the SDK config files. @@ -93,6 +109,10 @@ type sharedConfig struct { CredentialProcess string WebIdentityTokenFile string + // SSO session options + SSOSessionName string + SSOSession *ssoSession + SSOAccountID string SSORegion string SSORoleName string @@ -161,6 +181,24 @@ type sharedConfig struct { // // ec2_metadata_service_endpoint=http://fd00:ec2::254 EC2IMDSEndpoint string + + // Specifies that IMDS clients should not fallback to IMDSv1 if token + // requests fail. + // + // ec2_metadata_v1_disabled=true + EC2IMDSv1Disabled *bool + + // Specifies that SDK clients must resolve a dual-stack endpoint for + // services. + // + // use_dualstack_endpoint=true + UseDualStackEndpoint endpoints.DualStackEndpointState + + // Specifies that SDK clients must resolve a FIPS endpoint for + // services. + // + // use_fips_endpoint=true + UseFIPSEndpoint endpoints.FIPSEndpointState } type sharedConfigFile struct { @@ -168,6 +206,20 @@ type sharedConfigFile struct { IniData ini.Sections } +// SSOSession provides the shared configuration parameters of the sso-session +// section. +type ssoSession struct { + Name string + SSORegion string + SSOStartURL string +} + +func (s *ssoSession) setFromIniSection(section ini.Section) { + updateString(&s.Name, section, ssoSessionNameKey) + updateString(&s.SSORegion, section, ssoRegionKey) + updateString(&s.SSOStartURL, section, ssoStartURL) +} + // loadSharedConfig retrieves the configuration from the list of files using // the profile provided. The order the files are listed will determine // precedence. Values in subsequent files will overwrite values defined in @@ -248,13 +300,13 @@ func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile s // profile only have credential provider options. 
cfg.clearAssumeRoleOptions() } else { - // First time a profile has been seen, It must either be a assume role - // credentials, or SSO. Assert if the credential type requires a role ARN, - // the ARN is also set, or validate that the SSO configuration is complete. + // First time a profile has been seen. Assert if the credential type + // requires a role ARN, the ARN is also set if err := cfg.validateCredentialsConfig(profile); err != nil { return err } } + profiles[profile] = struct{}{} if err := cfg.validateCredentialType(); err != nil { @@ -290,6 +342,30 @@ func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile s cfg.SourceProfile = srcCfg } + // If the profile contains an SSO session parameter, the session MUST exist + // as a section in the config file. Load the SSO session using the name + // provided. If the session section is not found or incomplete an error + // will be returned. + if cfg.hasSSOTokenProviderConfiguration() { + skippedFiles = 0 + for _, f := range files { + section, ok := f.IniData.GetSection(fmt.Sprintf(ssoSectionPrefix + strings.TrimSpace(cfg.SSOSessionName))) + if ok { + var ssoSession ssoSession + ssoSession.setFromIniSection(section) + ssoSession.Name = cfg.SSOSessionName + cfg.SSOSession = &ssoSession + break + } + skippedFiles++ + } + if skippedFiles == len(files) { + // If all files were skipped because the sso session section is not found, return + // the sso section not found error. + return fmt.Errorf("failed to find SSO session section, %v", cfg.SSOSessionName) + } + } + return nil } @@ -322,8 +398,15 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e updateString(&cfg.Region, section, regionKey) updateString(&cfg.CustomCABundle, section, customCABundleKey) + // we're retaining a behavioral quirk with this field that existed before + // the removal of literal parsing for (aws-sdk-go-v2/#2276): + // - if the key is missing, the config field will not be set + // - if the key is set to a non-numeric, the config field will be set to 0 if section.Has(roleDurationSecondsKey) { - d := time.Duration(section.Int(roleDurationSecondsKey)) * time.Second + var d time.Duration + if v, ok := section.Int(roleDurationSecondsKey); ok { + d = time.Duration(v) * time.Second + } cfg.AssumeRoleDuration = &d } @@ -345,6 +428,10 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e cfg.S3UsEast1RegionalEndpoint = sre } + // AWS Single Sign-On (AWS SSO) + // SSO session options + updateString(&cfg.SSOSessionName, section, ssoSessionNameKey) + // AWS Single Sign-On (AWS SSO) updateString(&cfg.SSOAccountID, section, ssoAccountIDKey) updateString(&cfg.SSORegion, section, ssoRegionKey) @@ -356,6 +443,11 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e ec2MetadataServiceEndpointModeKey, file.Filename, err) } updateString(&cfg.EC2IMDSEndpoint, section, ec2MetadataServiceEndpointKey) + updateBoolPtr(&cfg.EC2IMDSv1Disabled, section, ec2MetadataV1DisabledKey) + + updateUseDualStackEndpoint(&cfg.UseDualStackEndpoint, section, useDualStackEndpoint) + + updateUseFIPSEndpoint(&cfg.UseFIPSEndpoint, section, useFIPSEndpointKey) } updateString(&cfg.CredentialProcess, section, credentialProcessKey) @@ -439,32 +531,20 @@ func (cfg *sharedConfig) validateCredentialType() error { } func (cfg *sharedConfig) validateSSOConfiguration() error { - if !cfg.hasSSOConfiguration() { + if cfg.hasSSOTokenProviderConfiguration() { + err := cfg.validateSSOTokenProviderConfiguration() + if 
err != nil { + return err + } return nil } - var missing []string - if len(cfg.SSOAccountID) == 0 { - missing = append(missing, ssoAccountIDKey) - } - - if len(cfg.SSORegion) == 0 { - missing = append(missing, ssoRegionKey) - } - - if len(cfg.SSORoleName) == 0 { - missing = append(missing, ssoRoleNameKey) - } - - if len(cfg.SSOStartURL) == 0 { - missing = append(missing, ssoStartURL) - } - - if len(missing) > 0 { - return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s", - cfg.Profile, strings.Join(missing, ", ")) + if cfg.hasLegacySSOConfiguration() { + err := cfg.validateLegacySSOConfiguration() + if err != nil { + return err + } } - return nil } @@ -503,15 +583,76 @@ func (cfg *sharedConfig) clearAssumeRoleOptions() { } func (cfg *sharedConfig) hasSSOConfiguration() bool { - switch { - case len(cfg.SSOAccountID) != 0: - case len(cfg.SSORegion) != 0: - case len(cfg.SSORoleName) != 0: - case len(cfg.SSOStartURL) != 0: - default: - return false + return cfg.hasSSOTokenProviderConfiguration() || cfg.hasLegacySSOConfiguration() +} + +func (c *sharedConfig) hasSSOTokenProviderConfiguration() bool { + return len(c.SSOSessionName) > 0 +} + +func (c *sharedConfig) hasLegacySSOConfiguration() bool { + return len(c.SSORegion) > 0 || len(c.SSOAccountID) > 0 || len(c.SSOStartURL) > 0 || len(c.SSORoleName) > 0 +} + +func (c *sharedConfig) validateSSOTokenProviderConfiguration() error { + var missing []string + + if len(c.SSOSessionName) == 0 { + missing = append(missing, ssoSessionNameKey) } - return true + + if c.SSOSession == nil { + missing = append(missing, ssoSectionPrefix) + } else { + if len(c.SSOSession.SSORegion) == 0 { + missing = append(missing, ssoRegionKey) + } + + if len(c.SSOSession.SSOStartURL) == 0 { + missing = append(missing, ssoStartURL) + } + } + + if len(missing) > 0 { + return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s", + c.Profile, strings.Join(missing, ", ")) + } + + if len(c.SSORegion) > 0 && c.SSORegion != c.SSOSession.SSORegion { + return fmt.Errorf("%s in profile %q must match %s in %s", ssoRegionKey, c.Profile, ssoRegionKey, ssoSectionPrefix) + } + + if len(c.SSOStartURL) > 0 && c.SSOStartURL != c.SSOSession.SSOStartURL { + return fmt.Errorf("%s in profile %q must match %s in %s", ssoStartURL, c.Profile, ssoStartURL, ssoSectionPrefix) + } + + return nil +} + +func (c *sharedConfig) validateLegacySSOConfiguration() error { + var missing []string + + if len(c.SSORegion) == 0 { + missing = append(missing, ssoRegionKey) + } + + if len(c.SSOStartURL) == 0 { + missing = append(missing, ssoStartURL) + } + + if len(c.SSOAccountID) == 0 { + missing = append(missing, ssoAccountIDKey) + } + + if len(c.SSORoleName) == 0 { + missing = append(missing, ssoRoleNameKey) + } + + if len(missing) > 0 { + return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s", + c.Profile, strings.Join(missing, ", ")) + } + return nil } func oneOrNone(bs ...bool) bool { @@ -544,7 +685,10 @@ func updateBool(dst *bool, section ini.Section, key string) { if !section.Has(key) { return } - *dst = section.Bool(key) + + // retains pre-(aws-sdk-go-v2#2276) behavior where non-bool value would resolve to false + v, _ := section.Bool(key) + *dst = v } // updateBoolPtr will only update the dst with the value in the section key, @@ -553,8 +697,11 @@ func updateBoolPtr(dst **bool, section ini.Section, key string) { if !section.Has(key) { return } + + // retains pre-(aws-sdk-go-v2#2276) 
behavior where non-bool value would resolve to false
+	v, _ := section.Bool(key)
 	*dst = new(bool)
-	**dst = section.Bool(key)
+	**dst = v
 }
 
 // SharedConfigLoadError is an error for the shared config file failing to load.
@@ -673,3 +820,37 @@ func (e CredentialRequiresARNError) OrigErr() error {
 func (e CredentialRequiresARNError) Error() string {
 	return awserr.SprintError(e.Code(), e.Message(), "", nil)
 }
+
+// updateUseDualStackEndpoint will only update the dst with the value in the section, if
+// a valid key and corresponding DualStackEndpointState is found.
+func updateUseDualStackEndpoint(dst *endpoints.DualStackEndpointState, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+
+	// retains pre-(aws-sdk-go-v2/#2276) behavior where non-bool value would resolve to false
+	if v, _ := section.Bool(key); v {
+		*dst = endpoints.DualStackEndpointStateEnabled
+	} else {
+		*dst = endpoints.DualStackEndpointStateDisabled
+	}
+
+	return
+}
+
+// updateUseFIPSEndpoint will only update the dst with the value in the section, if
+// a valid key and corresponding FIPSEndpointState is found.
+func updateUseFIPSEndpoint(dst *endpoints.FIPSEndpointState, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+
+	// retains pre-(aws-sdk-go-v2/#2276) behavior where non-bool value would resolve to false
+	if v, _ := section.Bool(key); v {
+		*dst = endpoints.FIPSEndpointStateEnabled
+	} else {
+		*dst = endpoints.FIPSEndpointStateDisabled
+	}
+
+	return
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
index d4653031..b542df93 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
@@ -3,21 +3,21 @@
 // Provides request signing for requests that need to be signed with
 // AWS V4 Signatures.
 //
-// Standalone Signer
+// # Standalone Signer
 //
 // Generally using the signer outside of the SDK should not require any additional
 // logic when using Go v1.5 or higher. The signer does this by taking advantage
 // of the URL.EscapedPath method. If your request URI requires additional escaping
-// you many need to use the URL.Opaque to define what the raw URI should be sent
+// you may need to use the URL.Opaque to define what the raw URI should be sent
 // to the service as.
 //
 // The signer will first check the URL.Opaque field, and use its value if set.
 // The signer does require the URL.Opaque field to be set in the form of:
 //
-//	"//<hostname>/<path>"
+//	"//<hostname>/<path>"
 //
-//	// e.g.
-//	"//example.com/some/path"
+//	// e.g.
+//	"//example.com/some/path"
 //
 // The leading "//" and hostname are required or the URL.Opaque escaping will
 // not work correctly.
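To make the standalone usage above concrete, here is a minimal sketch; the credential values, endpoint, and service/region names are placeholders, not taken from this patch:

	package main

	import (
		"net/http"
		"strings"
		"time"

		"github.com/aws/aws-sdk-go/aws/credentials"
		v4 "github.com/aws/aws-sdk-go/aws/signer/v4"
	)

	func main() {
		// Placeholder static credentials; any *credentials.Credentials works here.
		signer := v4.NewSigner(credentials.NewStaticCredentials("AKID", "SECRET", ""))

		body := strings.NewReader("{}")
		req, _ := http.NewRequest("POST", "https://service.us-east-1.amazonaws.com/", body)

		// Sign sets the Authorization and X-Amz-Date headers on req in place.
		if _, err := signer.Sign(req, body, "service", "us-east-1", time.Now()); err != nil {
			panic(err)
		}
	}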
@@ -125,6 +125,7 @@ var requiredSignedHeaders = rules{ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Expected-Bucket-Owner": struct{}{}, "X-Amz-Grant-Full-control": struct{}{}, "X-Amz-Grant-Read": struct{}{}, "X-Amz-Grant-Read-Acp": struct{}{}, @@ -135,6 +136,7 @@ var requiredSignedHeaders = rules{ "X-Amz-Request-Payer": struct{}{}, "X-Amz-Server-Side-Encryption": struct{}{}, "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, + "X-Amz-Server-Side-Encryption-Context": struct{}{}, "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, @@ -634,21 +636,25 @@ func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) { ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders) } - headerValues := make([]string, len(headers)) + headerItems := make([]string, len(headers)) for i, k := range headers { if k == "host" { if ctx.Request.Host != "" { - headerValues[i] = "host:" + ctx.Request.Host + headerItems[i] = "host:" + ctx.Request.Host } else { - headerValues[i] = "host:" + ctx.Request.URL.Host + headerItems[i] = "host:" + ctx.Request.URL.Host } } else { - headerValues[i] = k + ":" + - strings.Join(ctx.SignedHeaderVals[k], ",") + headerValues := make([]string, len(ctx.SignedHeaderVals[k])) + for i, v := range ctx.SignedHeaderVals[k] { + headerValues[i] = strings.TrimSpace(v) + } + headerItems[i] = k + ":" + + strings.Join(headerValues, ",") } } - stripExcessSpaces(headerValues) - ctx.canonicalHeaders = strings.Join(headerValues, "\n") + stripExcessSpaces(headerItems) + ctx.canonicalHeaders = strings.Join(headerItems, "\n") } func (ctx *signingCtx) buildCanonicalString() { @@ -691,7 +697,8 @@ func (ctx *signingCtx) buildBodyDigest() error { includeSHA256Header := ctx.unsignedPayload || ctx.ServiceName == "s3" || ctx.ServiceName == "s3-object-lambda" || - ctx.ServiceName == "glacier" + ctx.ServiceName == "glacier" || + ctx.ServiceName == "s3-outposts" s3Presign := ctx.isPresign && (ctx.ServiceName == "s3" || diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 186b64be..0dfd96d5 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.40.45" +const SDKVersion = "1.50.6" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go index 24df543d..b1b68608 100644 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go @@ -4,6 +4,7 @@ import ( "fmt" "strconv" "strings" + "unicode" ) var ( @@ -18,7 +19,7 @@ var literalValues = [][]rune{ func isBoolValue(b []rune) bool { for _, lv := range literalValues { - if isLitValue(lv, b) { + if isCaselessLitValue(lv, b) { return true } } @@ -39,6 +40,21 @@ func isLitValue(want, have []rune) bool { return true } +// isCaselessLitValue is a caseless value comparison, assumes want is already lower-cased for efficiency. 
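+//
+// For example (illustrative): isCaselessLitValue([]rune("true"), []rune("TRUE"))
+// returns true. Only the leading len(want) runes of have are compared, so
+// []rune("True=") matches as well; callers bound the match length separately.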
+func isCaselessLitValue(want, have []rune) bool {
+	if len(have) < len(want) {
+		return false
+	}
+
+	for i := 0; i < len(want); i++ {
+		if want[i] != unicode.ToLower(have[i]) {
+			return false
+		}
+	}
+
+	return true
+}
+
 // isNumberValue will return whether or not the leading characters in
 // a byte slice are a number. A number is delimited by whitespace or
 // the newline token.
@@ -138,11 +154,11 @@ func (v ValueType) String() string {
 // ValueType enums
 const (
 	NoneType = ValueType(iota)
-	DecimalType
-	IntegerType
+	DecimalType // deprecated
+	IntegerType // deprecated
 	StringType
 	QuotedStringType
-	BoolType
+	BoolType // deprecated
 )
 
 // Value is a union container
@@ -150,9 +166,9 @@ type Value struct {
 	Type ValueType
 	raw  []rune
 
-	integer int64
-	decimal float64
-	boolean bool
+	integer int64   // deprecated
+	decimal float64 // deprecated
+	boolean bool    // deprecated
 
 	str string
 }
@@ -177,7 +193,7 @@ func newValue(t ValueType, base int, raw []rune) (Value, error) {
 	case QuotedStringType:
 		v.str = string(raw[1 : len(raw)-1])
 	case BoolType:
-		v.boolean = runeCompare(v.raw, runesTrue)
+		v.boolean = isCaselessLitValue(runesTrue, v.raw)
 	}
 
 	// issue 2253
@@ -237,24 +253,6 @@ func newLitToken(b []rune) (Token, int, error) {
 		}
 
 		token = newToken(TokenLit, b[:n], QuotedStringType)
-	} else if isNumberValue(b) {
-		var base int
-		base, n, err = getNumericalValue(b)
-		if err != nil {
-			return token, 0, err
-		}
-
-		value := b[:n]
-		vType := IntegerType
-		if contains(value, '.') || hasExponent(value) {
-			vType = DecimalType
-		}
-		token = newToken(TokenLit, value, vType)
-		token.base = base
-	} else if isBoolValue(b) {
-		n, err = getBoolValue(b)
-
-		token = newToken(TokenLit, b[:n], BoolType)
 	} else {
 		n, err = getValue(b)
 		token = newToken(TokenLit, b[:n], StringType)
@@ -264,18 +262,33 @@ func newLitToken(b []rune) (Token, int, error) {
 }
 
 // IntValue returns an integer value
-func (v Value) IntValue() int64 {
-	return v.integer
+func (v Value) IntValue() (int64, bool) {
+	i, err := strconv.ParseInt(string(v.raw), 0, 64)
+	if err != nil {
+		return 0, false
+	}
+	return i, true
 }
 
 // FloatValue returns a float value
-func (v Value) FloatValue() float64 {
-	return v.decimal
+func (v Value) FloatValue() (float64, bool) {
+	f, err := strconv.ParseFloat(string(v.raw), 64)
+	if err != nil {
+		return 0, false
+	}
+	return f, true
 }
 
 // BoolValue returns a bool value
-func (v Value) BoolValue() bool {
-	return v.boolean
+func (v Value) BoolValue() (bool, bool) {
+	// we don't use ParseBool as it recognizes more than what we've
+	// historically supported
+	if isCaselessLitValue(runesTrue, v.raw) {
+		return true, true
+	} else if isCaselessLitValue(runesFalse, v.raw) {
+		return false, true
+	}
+	return false, false
 }
 
 func isTrimmable(r rune) bool {
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go
index 305999d2..b5480fde 100644
--- a/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go
@@ -57,7 +57,7 @@ func getBoolValue(b []rune) (int, error) {
 			continue
 		}
 
-		if isLitValue(lv, b) {
+		if isCaselessLitValue(lv, b) {
 			n = len(lv)
 		}
 	}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go
index 081cf433..1d08e138 100644
--- a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go
@@ -145,17 +145,17 @@ func (t Section) ValueType(k
string) (ValueType, bool) { } // Bool returns a bool value at k -func (t Section) Bool(k string) bool { +func (t Section) Bool(k string) (bool, bool) { return t.values[k].BoolValue() } // Int returns an integer value at k -func (t Section) Int(k string) int64 { +func (t Section) Int(k string) (int64, bool) { return t.values[k].IntValue() } // Float64 returns a float value at k -func (t Section) Float64(k string) float64 { +func (t Section) Float64(k string) (float64, bool) { return t.values[k].FloatValue() } diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go index ebcbc2b4..34fea49c 100644 --- a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go @@ -1,9 +1,8 @@ package shareddefaults import ( - "os" + "os/user" "path/filepath" - "runtime" ) // SharedCredentialsFilename returns the SDK's default file path @@ -31,10 +30,17 @@ func SharedConfigFilename() string { // UserHomeDir returns the home directory for the user the process is // running under. func UserHomeDir() string { - if runtime.GOOS == "windows" { // Windows - return os.Getenv("USERPROFILE") + var home string + + home = userHomeDir() + if len(home) > 0 { + return home + } + + currUser, _ := user.Current() + if currUser != nil { + home = currUser.HomeDir } - // *nix - return os.Getenv("HOME") + return home } diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home.go new file mode 100644 index 00000000..eb298ae0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home.go @@ -0,0 +1,18 @@ +//go:build !go1.12 +// +build !go1.12 + +package shareddefaults + +import ( + "os" + "runtime" +) + +func userHomeDir() string { + if runtime.GOOS == "windows" { // Windows + return os.Getenv("USERPROFILE") + } + + // *nix + return os.Getenv("HOME") +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home_go1.12.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home_go1.12.go new file mode 100644 index 00000000..51541b50 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home_go1.12.go @@ -0,0 +1,13 @@ +//go:build go1.12 +// +build go1.12 + +package shareddefaults + +import ( + "os" +) + +func userHomeDir() string { + home, _ := os.UserHomeDir() + return home +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go index 864fb670..12e814dd 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go @@ -4,7 +4,6 @@ package jsonutil import ( "bytes" "encoding/base64" - "encoding/json" "fmt" "math" "reflect" @@ -16,6 +15,12 @@ import ( "github.com/aws/aws-sdk-go/private/protocol" ) +const ( + floatNaN = "NaN" + floatInf = "Infinity" + floatNegInf = "-Infinity" +) + var timeType = reflect.ValueOf(time.Time{}).Type() var byteSliceType = reflect.ValueOf([]byte{}).Type() @@ -82,13 +87,17 @@ func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) field, _ := value.Type().FieldByName(payload) tag = field.Tag value = 
elemOf(value.FieldByName(payload)) - - if !value.IsValid() { + if !value.IsValid() && tag.Get("type") != "structure" { return nil } } buf.WriteByte('{') + defer buf.WriteString("}") + + if !value.IsValid() { + return nil + } t := value.Type() first := true @@ -144,8 +153,6 @@ func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) } - buf.WriteString("}") - return nil } @@ -209,10 +216,16 @@ func buildScalar(v reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) erro buf.Write(strconv.AppendInt(scratch[:0], value.Int(), 10)) case reflect.Float64: f := value.Float() - if math.IsInf(f, 0) || math.IsNaN(f) { - return &json.UnsupportedValueError{Value: v, Str: strconv.FormatFloat(f, 'f', -1, 64)} + switch { + case math.IsNaN(f): + writeString(floatNaN, buf) + case math.IsInf(f, 1): + writeString(floatInf, buf) + case math.IsInf(f, -1): + writeString(floatNegInf, buf) + default: + buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64)) } - buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64)) default: switch converted := value.Interface().(type) { case time.Time: diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go index 8b2c9bbe..f9334879 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go @@ -6,6 +6,7 @@ import ( "encoding/json" "fmt" "io" + "math" "math/big" "reflect" "strings" @@ -258,6 +259,18 @@ func (u unmarshaler) unmarshalScalar(value reflect.Value, data interface{}, tag return err } value.Set(reflect.ValueOf(v)) + case *float64: + // These are regular strings when parsed by encoding/json's unmarshaler. + switch { + case strings.EqualFold(d, floatNaN): + value.Set(reflect.ValueOf(aws.Float64(math.NaN()))) + case strings.EqualFold(d, floatInf): + value.Set(reflect.ValueOf(aws.Float64(math.Inf(1)))) + case strings.EqualFold(d, floatNegInf): + value.Set(reflect.ValueOf(aws.Float64(math.Inf(-1)))) + default: + return fmt.Errorf("unknown JSON number value: %s", d) + } default: return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go index a029217e..d9aa2711 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go @@ -49,9 +49,8 @@ func Build(req *request.Request) { buf = emptyJSON } - if req.ClientInfo.TargetPrefix != "" || string(buf) != "{}" { - req.SetBufferBody(buf) - } + // Always serialize the body, don't suppress it. + req.SetBufferBody(buf) if req.ClientInfo.TargetPrefix != "" { target := req.ClientInfo.TargetPrefix + "." 
+ req.Operation.Name
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go
index c0c52e2d..9c1ccde5 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go
@@ -13,17 +13,46 @@ import (
 	"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
 )
 
+const (
+	awsQueryError = "x-amzn-query-error"
+	// A valid header example - "x-amzn-query-error": "<error code>;<error type>"
+	awsQueryErrorPartsCount = 2
+)
+
 // UnmarshalTypedError provides unmarshaling of API response errors
 // for both typed and untyped errors.
 type UnmarshalTypedError struct {
-	exceptions map[string]func(protocol.ResponseMetadata) error
+	exceptions      map[string]func(protocol.ResponseMetadata) error
+	queryExceptions map[string]func(protocol.ResponseMetadata, string) error
 }
 
 // NewUnmarshalTypedError returns an UnmarshalTypedError initialized for the
 // set of exception names to the error unmarshalers
 func NewUnmarshalTypedError(exceptions map[string]func(protocol.ResponseMetadata) error) *UnmarshalTypedError {
 	return &UnmarshalTypedError{
-		exceptions: exceptions,
+		exceptions:      exceptions,
+		queryExceptions: map[string]func(protocol.ResponseMetadata, string) error{},
+	}
+}
+
+// NewUnmarshalTypedErrorWithOptions works similarly to NewUnmarshalTypedError, applying options to the UnmarshalTypedError
+// before returning it.
+func NewUnmarshalTypedErrorWithOptions(exceptions map[string]func(protocol.ResponseMetadata) error, optFns ...func(*UnmarshalTypedError)) *UnmarshalTypedError {
+	unmarshaledError := NewUnmarshalTypedError(exceptions)
+	for _, fn := range optFns {
+		fn(unmarshaledError)
+	}
+	return unmarshaledError
+}
+
+// WithQueryCompatibility is a helper function to construct a functional option for use with NewUnmarshalTypedErrorWithOptions.
+// The queryExceptions given act as an override for unmarshalling errors when query compatible error codes are found.
+// See also [awsQueryCompatible trait]
+//
+// [awsQueryCompatible trait]: https://smithy.io/2.0/aws/protocols/aws-query-protocol.html#aws-protocols-awsquerycompatible-trait
+func WithQueryCompatibility(queryExceptions map[string]func(protocol.ResponseMetadata, string) error) func(*UnmarshalTypedError) {
+	return func(typedError *UnmarshalTypedError) {
+		typedError.queryExceptions = queryExceptions
 	}
 }
 
@@ -50,18 +79,32 @@ func (u *UnmarshalTypedError) UnmarshalError(
 	code := codeParts[len(codeParts)-1]
 	msg := jsonErr.Message
 
+	queryCodeParts := queryCodeParts(resp, u)
+
 	if fn, ok := u.exceptions[code]; ok {
-		// If exception code is know, use associated constructor to get a value
+		// If query-compatible exceptions are found and query-error-header is found,
+		// then use associated constructor to get exception with query error code.
+		//
+		// If exception code is known, use associated constructor to get a value
 		// for the exception that the JSON body can be unmarshaled into.
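+		// (Illustrative) a query-compatible service can return a header such as
+		//   x-amzn-query-error: AWS.SimpleQueueService.NonExistentQueue;Sender
+		// in which case queryCodeParts[0] carries the legacy query error code.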
-		v := fn(respMeta)
+		var v error
+		queryErrFn, queryExceptionsFound := u.queryExceptions[code]
+		if len(queryCodeParts) == awsQueryErrorPartsCount && queryExceptionsFound {
+			v = queryErrFn(respMeta, queryCodeParts[0])
+		} else {
+			v = fn(respMeta)
+		}
 		err := jsonutil.UnmarshalJSONCaseInsensitive(v, body)
 		if err != nil {
 			return nil, err
 		}
-
 		return v, nil
 	}
 
+	if len(queryCodeParts) == awsQueryErrorPartsCount && len(u.queryExceptions) > 0 {
+		code = queryCodeParts[0]
+	}
+
 	// fallback to unmodeled generic exceptions
 	return awserr.NewRequestFailure(
 		awserr.New(code, msg, nil),
@@ -70,6 +113,16 @@ func (u *UnmarshalTypedError) UnmarshalError(
 	), nil
 }
 
+// A valid header example - "x-amzn-query-error": "<error code>;<error type>"
+func queryCodeParts(resp *http.Response, u *UnmarshalTypedError) []string {
+	queryCodeHeader := resp.Header.Get(awsQueryError)
+	var queryCodeParts []string
+	if queryCodeHeader != "" && len(u.queryExceptions) > 0 {
+		queryCodeParts = strings.Split(queryCodeHeader, ";")
+	}
+	return queryCodeParts
+}
+
 // UnmarshalErrorHandler is a named request handler for unmarshaling jsonrpc
 // protocol request errors
 var UnmarshalErrorHandler = request.NamedHandler{
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
index 75866d01..05833405 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
@@ -3,6 +3,7 @@ package queryutil
 import (
 	"encoding/base64"
 	"fmt"
+	"math"
 	"net/url"
 	"reflect"
 	"sort"
@@ -13,6 +14,12 @@ import (
 	"github.com/aws/aws-sdk-go/private/protocol"
 )
 
+const (
+	floatNaN    = "NaN"
+	floatInf    = "Infinity"
+	floatNegInf = "-Infinity"
+)
+
 // Parse parses an object i and fills a url.Values object. The isEC2 flag
 // indicates if this is the EC2 Query sub-protocol.
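//
// (Illustrative) with the NaN/Infinity handling below, a float64 member set
// to math.Inf(1) is serialized as the literal query value "Infinity" rather
// than failing to format.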
func Parse(body url.Values, i interface{}, isEC2 bool) error { @@ -228,9 +235,32 @@ func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, ta case int: v.Set(name, strconv.Itoa(value)) case float64: - v.Set(name, strconv.FormatFloat(value, 'f', -1, 64)) + var str string + switch { + case math.IsNaN(value): + str = floatNaN + case math.IsInf(value, 1): + str = floatInf + case math.IsInf(value, -1): + str = floatNegInf + default: + str = strconv.FormatFloat(value, 'f', -1, 64) + } + v.Set(name, str) case float32: - v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32)) + asFloat64 := float64(value) + var str string + switch { + case math.IsNaN(asFloat64): + str = floatNaN + case math.IsInf(asFloat64, 1): + str = floatInf + case math.IsInf(asFloat64, -1): + str = floatNegInf + default: + str = strconv.FormatFloat(asFloat64, 'f', -1, 32) + } + v.Set(name, str) case time.Time: const ISO8601UTC = "2006-01-02T15:04:05Z" format := tag.Get("timestampFormat") diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go index 831b0110..2c0cbba9 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go @@ -3,6 +3,7 @@ package query import ( "encoding/xml" "fmt" + "strings" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" @@ -62,7 +63,7 @@ func UnmarshalError(r *request.Request) { } r.Error = awserr.NewRequestFailure( - awserr.New(respErr.Code, respErr.Message, nil), + awserr.New(strings.TrimSpace(respErr.Code), strings.TrimSpace(respErr.Message), nil), r.HTTPResponse.StatusCode, reqID, ) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go index fb35fee5..ecc521f8 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go @@ -6,6 +6,7 @@ import ( "encoding/base64" "fmt" "io" + "math" "net/http" "net/url" "path" @@ -20,6 +21,12 @@ import ( "github.com/aws/aws-sdk-go/private/protocol" ) +const ( + floatNaN = "NaN" + floatInf = "Infinity" + floatNegInf = "-Infinity" +) + // Whether the byte value can be sent without escaping in AWS URLs var noEscape [256]bool @@ -272,7 +279,33 @@ func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) switch value := v.Interface().(type) { case string: + if tag.Get("suppressedJSONValue") == "true" && tag.Get("location") == "header" { + value = base64.StdEncoding.EncodeToString([]byte(value)) + } str = value + case []*string: + if tag.Get("location") != "header" || tag.Get("enum") == "" { + return "", fmt.Errorf("%T is only supported with location header and enum shapes", value) + } + if len(value) == 0 { + return "", errValueNotSet + } + + buff := &bytes.Buffer{} + for i, sv := range value { + if sv == nil || len(*sv) == 0 { + continue + } + if i != 0 { + buff.WriteRune(',') + } + item := *sv + if strings.Index(item, `,`) != -1 || strings.Index(item, `"`) != -1 { + item = strconv.Quote(item) + } + buff.WriteString(item) + } + str = string(buff.Bytes()) case []byte: str = base64.StdEncoding.EncodeToString(value) case bool: @@ -280,7 +313,16 @@ func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) case int64: str = strconv.FormatInt(value, 10) case float64: - str = 
strconv.FormatFloat(value, 'f', -1, 64) + switch { + case math.IsNaN(value): + str = floatNaN + case math.IsInf(value, 1): + str = floatInf + case math.IsInf(value, -1): + str = floatNegInf + default: + str = strconv.FormatFloat(value, 'f', -1, 64) + } case time.Time: format := tag.Get("timestampFormat") if len(format) == 0 { @@ -306,5 +348,6 @@ func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type()) return "", err } + return str, nil } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go index 4366de2e..b54c99ed 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go @@ -28,18 +28,27 @@ func PayloadMember(i interface{}) interface{} { return nil } -// PayloadType returns the type of a payload field member of i if there is one, or "". +const nopayloadPayloadType = "nopayload" + +// PayloadType returns the type of a payload field member of i if there is one, +// or "". func PayloadType(i interface{}) string { v := reflect.Indirect(reflect.ValueOf(i)) if !v.IsValid() { return "" } + if field, ok := v.Type().FieldByName("_"); ok { + if noPayload := field.Tag.Get(nopayloadPayloadType); noPayload != "" { + return nopayloadPayloadType + } + if payloadName := field.Tag.Get("payload"); payloadName != "" { if member, ok := v.Type().FieldByName(payloadName); ok { return member.Tag.Get("type") } } } + return "" } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go index 92f8b4d9..79fcf169 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "io/ioutil" + "math" "net/http" "reflect" "strconv" @@ -140,7 +141,7 @@ func unmarshalLocationElements(resp *http.Response, v reflect.Value, lowerCaseHe prefix := field.Tag.Get("locationName") err := unmarshalHeaderMap(m, resp.Header, prefix, lowerCaseHeaderMaps) if err != nil { - awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) + return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) } } } @@ -204,6 +205,13 @@ func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) erro switch v.Interface().(type) { case *string: + if tag.Get("suppressedJSONValue") == "true" && tag.Get("location") == "header" { + b, err := base64.StdEncoding.DecodeString(header) + if err != nil { + return fmt.Errorf("failed to decode JSONValue, %v", err) + } + header = string(b) + } v.Set(reflect.ValueOf(&header)) case []byte: b, err := base64.StdEncoding.DecodeString(header) @@ -224,9 +232,20 @@ func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) erro } v.Set(reflect.ValueOf(&i)) case *float64: - f, err := strconv.ParseFloat(header, 64) - if err != nil { - return err + var f float64 + switch { + case strings.EqualFold(header, floatNaN): + f = math.NaN() + case strings.EqualFold(header, floatInf): + f = math.Inf(1) + case strings.EqualFold(header, floatNegInf): + f = math.Inf(-1) + default: + var err error + f, err = strconv.ParseFloat(header, 64) + if err != nil { + return err + } } v.Set(reflect.ValueOf(&f)) case *time.Time: diff --git 
a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go index d756d8cc..5366a646 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go @@ -2,6 +2,7 @@ package restjson import ( "bytes" + "encoding/json" "io" "io/ioutil" "net/http" @@ -40,52 +41,30 @@ func (u *UnmarshalTypedError) UnmarshalError( resp *http.Response, respMeta protocol.ResponseMetadata, ) (error, error) { - - code := resp.Header.Get(errorTypeHeader) - msg := resp.Header.Get(errorMessageHeader) - - body := resp.Body - if len(code) == 0 { - // If unable to get code from HTTP headers have to parse JSON message - // to determine what kind of exception this will be. - var buf bytes.Buffer - var jsonErr jsonErrorResponse - teeReader := io.TeeReader(resp.Body, &buf) - err := jsonutil.UnmarshalJSONError(&jsonErr, teeReader) - if err != nil { - return nil, err - } - - body = ioutil.NopCloser(&buf) - code = jsonErr.Code - msg = jsonErr.Message + code, msg, err := unmarshalErrorInfo(resp) + if err != nil { + return nil, err } - // If code has colon separators remove them so can compare against modeled - // exception names. - code = strings.SplitN(code, ":", 2)[0] - - if fn, ok := u.exceptions[code]; ok { - // If exception code is know, use associated constructor to get a value - // for the exception that the JSON body can be unmarshaled into. - v := fn(respMeta) - if err := jsonutil.UnmarshalJSONCaseInsensitive(v, body); err != nil { - return nil, err - } + fn, ok := u.exceptions[code] + if !ok { + return awserr.NewRequestFailure( + awserr.New(code, msg, nil), + respMeta.StatusCode, + respMeta.RequestID, + ), nil + } - if err := rest.UnmarshalResponse(resp, v, true); err != nil { - return nil, err - } + v := fn(respMeta) + if err := jsonutil.UnmarshalJSONCaseInsensitive(v, resp.Body); err != nil { + return nil, err + } - return v, nil + if err := rest.UnmarshalResponse(resp, v, true); err != nil { + return nil, err } - // fallback to unmodeled generic exceptions - return awserr.NewRequestFailure( - awserr.New(code, msg, nil), - respMeta.StatusCode, - respMeta.RequestID, - ), nil + return v, nil } // UnmarshalErrorHandler is a named request handler for unmarshaling restjson @@ -99,36 +78,80 @@ var UnmarshalErrorHandler = request.NamedHandler{ func UnmarshalError(r *request.Request) { defer r.HTTPResponse.Body.Close() - var jsonErr jsonErrorResponse - err := jsonutil.UnmarshalJSONError(&jsonErr, r.HTTPResponse.Body) + code, msg, err := unmarshalErrorInfo(r.HTTPResponse) if err != nil { r.Error = awserr.NewRequestFailure( - awserr.New(request.ErrCodeSerialization, - "failed to unmarshal response error", err), + awserr.New(request.ErrCodeSerialization, "failed to unmarshal response error", err), r.HTTPResponse.StatusCode, r.RequestID, ) return } - code := r.HTTPResponse.Header.Get(errorTypeHeader) - if code == "" { - code = jsonErr.Code - } - msg := r.HTTPResponse.Header.Get(errorMessageHeader) - if msg == "" { - msg = jsonErr.Message - } - - code = strings.SplitN(code, ":", 2)[0] r.Error = awserr.NewRequestFailure( - awserr.New(code, jsonErr.Message, nil), + awserr.New(code, msg, nil), r.HTTPResponse.StatusCode, r.RequestID, ) } type jsonErrorResponse struct { + Type string `json:"__type"` Code string `json:"code"` Message string `json:"message"` } + +func (j *jsonErrorResponse) SanitizedCode() string { + code := 
j.Code + if len(j.Type) > 0 { + code = j.Type + } + return sanitizeCode(code) +} + +// Remove superfluous components from a restJson error code. +// - If a : character is present, then take only the contents before the +// first : character in the value. +// - If a # character is present, then take only the contents after the first +// # character in the value. +// +// All of the following error values resolve to FooError: +// - FooError +// - FooError:http://internal.amazon.com/coral/com.amazon.coral.validate/ +// - aws.protocoltests.restjson#FooError +// - aws.protocoltests.restjson#FooError:http://internal.amazon.com/coral/com.amazon.coral.validate/ +func sanitizeCode(code string) string { + noColon := strings.SplitN(code, ":", 2)[0] + hashSplit := strings.SplitN(noColon, "#", 2) + return hashSplit[len(hashSplit)-1] +} + +// attempt to garner error details from the response, preferring header values +// when present +func unmarshalErrorInfo(resp *http.Response) (code string, msg string, err error) { + code = sanitizeCode(resp.Header.Get(errorTypeHeader)) + msg = resp.Header.Get(errorMessageHeader) + if len(code) > 0 && len(msg) > 0 { + return + } + + // a modeled error will have to be re-deserialized later, so the body must + // be preserved + var buf bytes.Buffer + tee := io.TeeReader(resp.Body, &buf) + defer func() { resp.Body = ioutil.NopCloser(&buf) }() + + var jsonErr jsonErrorResponse + if decodeErr := json.NewDecoder(tee).Decode(&jsonErr); decodeErr != nil && decodeErr != io.EOF { + err = awserr.NewUnmarshalError(decodeErr, "failed to decode response body", buf.Bytes()) + return + } + + if len(code) == 0 { + code = jsonErr.SanitizedCode() + } + if len(msg) == 0 { + msg = jsonErr.Message + } + return +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go index 2fbb93ae..58c12bd8 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go @@ -5,6 +5,7 @@ import ( "encoding/base64" "encoding/xml" "fmt" + "math" "reflect" "sort" "strconv" @@ -14,6 +15,12 @@ import ( "github.com/aws/aws-sdk-go/private/protocol" ) +const ( + floatNaN = "NaN" + floatInf = "Infinity" + floatNegInf = "-Infinity" +) + // BuildXML will serialize params into an xml.Encoder. Error will be returned // if the serialization of any of the params or nested values fails. func BuildXML(params interface{}, e *xml.Encoder) error { @@ -275,6 +282,7 @@ func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect // Error will be returned if the value type is unsupported. func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { var str string + switch converted := value.Interface().(type) { case string: str = converted @@ -289,9 +297,29 @@ func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag refl case int: str = strconv.Itoa(converted) case float64: - str = strconv.FormatFloat(converted, 'f', -1, 64) + switch { + case math.IsNaN(converted): + str = floatNaN + case math.IsInf(converted, 1): + str = floatInf + case math.IsInf(converted, -1): + str = floatNegInf + default: + str = strconv.FormatFloat(converted, 'f', -1, 64) + } case float32: - str = strconv.FormatFloat(float64(converted), 'f', -1, 32) + // The SDK doesn't render float32 values in types, only float64. This case would never be hit currently. 
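+		// (Illustrative) if it were hit, float32(math.Inf(-1)) would render as
+		// the literal "-Infinity", mirroring the float64 branch above.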
+ asFloat64 := float64(converted) + switch { + case math.IsNaN(asFloat64): + str = floatNaN + case math.IsInf(asFloat64, 1): + str = floatInf + case math.IsInf(asFloat64, -1): + str = floatNegInf + default: + str = strconv.FormatFloat(asFloat64, 'f', -1, 32) + } case time.Time: format := tag.Get("timestampFormat") if len(format) == 0 { diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go index 107c053f..44a580a9 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -6,6 +6,7 @@ import ( "encoding/xml" "fmt" "io" + "math" "reflect" "strconv" "strings" @@ -276,9 +277,20 @@ func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { } r.Set(reflect.ValueOf(&v)) case *float64: - v, err := strconv.ParseFloat(node.Text, 64) - if err != nil { - return err + var v float64 + switch { + case strings.EqualFold(node.Text, floatNaN): + v = math.NaN() + case strings.EqualFold(node.Text, floatInf): + v = math.Inf(1) + case strings.EqualFold(node.Text, floatNegInf): + v = math.Inf(-1) + default: + var err error + v, err = strconv.ParseFloat(node.Text, 64) + if err != nil { + return err + } } r.Set(reflect.ValueOf(&v)) case *time.Time: diff --git a/vendor/github.com/aws/aws-sdk-go/service/athena/api.go b/vendor/github.com/aws/aws-sdk-go/service/athena/api.go index 56319f86..1cf9a2c9 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/athena/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/athena/api.go @@ -29,14 +29,13 @@ const opBatchGetNamedQuery = "BatchGetNamedQuery" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the BatchGetNamedQueryRequest method. +// req, resp := client.BatchGetNamedQueryRequest(params) // -// // Example sending a request using the BatchGetNamedQueryRequest method. -// req, resp := client.BatchGetNamedQueryRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/BatchGetNamedQuery func (c *Athena) BatchGetNamedQueryRequest(input *BatchGetNamedQueryInput) (req *request.Request, output *BatchGetNamedQueryOutput) { @@ -75,13 +74,14 @@ func (c *Athena) BatchGetNamedQueryRequest(input *BatchGetNamedQueryInput) (req // API operation BatchGetNamedQuery for usage and error information. // // Returned Error Types: -// * InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. // -// * InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/BatchGetNamedQuery func (c *Athena) BatchGetNamedQuery(input *BatchGetNamedQueryInput) (*BatchGetNamedQueryOutput, error) { @@ -105,3538 +105,7129 @@ func (c *Athena) BatchGetNamedQueryWithContext(ctx aws.Context, input *BatchGetN return out, req.Send() } -const opBatchGetQueryExecution = "BatchGetQueryExecution" +const opBatchGetPreparedStatement = "BatchGetPreparedStatement" -// BatchGetQueryExecutionRequest generates a "aws/request.Request" representing the -// client's request for the BatchGetQueryExecution operation. The "output" return +// BatchGetPreparedStatementRequest generates a "aws/request.Request" representing the +// client's request for the BatchGetPreparedStatement operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See BatchGetQueryExecution for more information on using the BatchGetQueryExecution +// See BatchGetPreparedStatement for more information on using the BatchGetPreparedStatement // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the BatchGetPreparedStatementRequest method. +// req, resp := client.BatchGetPreparedStatementRequest(params) // -// // Example sending a request using the BatchGetQueryExecutionRequest method. -// req, resp := client.BatchGetQueryExecutionRequest(params) +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/BatchGetQueryExecution -func (c *Athena) BatchGetQueryExecutionRequest(input *BatchGetQueryExecutionInput) (req *request.Request, output *BatchGetQueryExecutionOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/BatchGetPreparedStatement +func (c *Athena) BatchGetPreparedStatementRequest(input *BatchGetPreparedStatementInput) (req *request.Request, output *BatchGetPreparedStatementOutput) { op := &request.Operation{ - Name: opBatchGetQueryExecution, + Name: opBatchGetPreparedStatement, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &BatchGetQueryExecutionInput{} + input = &BatchGetPreparedStatementInput{} } - output = &BatchGetQueryExecutionOutput{} + output = &BatchGetPreparedStatementOutput{} req = c.newRequest(op, input, output) return } -// BatchGetQueryExecution API operation for Amazon Athena. +// BatchGetPreparedStatement API operation for Amazon Athena. // -// Returns the details of a single query execution or a list of up to 50 query -// executions, which you provide as an array of query execution ID strings. -// Requires you to have access to the workgroup in which the queries ran. To -// get a list of query execution IDs, use ListQueryExecutionsInput$WorkGroup. -// Query executions differ from named (saved) queries. Use BatchGetNamedQueryInput -// to get details about named queries. +// Returns the details of a single prepared statement or a list of up to 256 +// prepared statements for the array of prepared statement names that you provide. 
+// Requires you to have access to the workgroup to which the prepared statements +// belong. If a prepared statement cannot be retrieved for the name specified, +// the statement is listed in UnprocessedPreparedStatementNames. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Athena's -// API operation BatchGetQueryExecution for usage and error information. +// API operation BatchGetPreparedStatement for usage and error information. // // Returned Error Types: -// * InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. // -// * InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/BatchGetQueryExecution -func (c *Athena) BatchGetQueryExecution(input *BatchGetQueryExecutionInput) (*BatchGetQueryExecutionOutput, error) { - req, out := c.BatchGetQueryExecutionRequest(input) +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/BatchGetPreparedStatement +func (c *Athena) BatchGetPreparedStatement(input *BatchGetPreparedStatementInput) (*BatchGetPreparedStatementOutput, error) { + req, out := c.BatchGetPreparedStatementRequest(input) return out, req.Send() } -// BatchGetQueryExecutionWithContext is the same as BatchGetQueryExecution with the addition of +// BatchGetPreparedStatementWithContext is the same as BatchGetPreparedStatement with the addition of // the ability to pass a context and additional request options. // -// See BatchGetQueryExecution for details on how to use this API operation. +// See BatchGetPreparedStatement for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Athena) BatchGetQueryExecutionWithContext(ctx aws.Context, input *BatchGetQueryExecutionInput, opts ...request.Option) (*BatchGetQueryExecutionOutput, error) { - req, out := c.BatchGetQueryExecutionRequest(input) +func (c *Athena) BatchGetPreparedStatementWithContext(ctx aws.Context, input *BatchGetPreparedStatementInput, opts ...request.Option) (*BatchGetPreparedStatementOutput, error) { + req, out := c.BatchGetPreparedStatementRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opCreateDataCatalog = "CreateDataCatalog" +const opBatchGetQueryExecution = "BatchGetQueryExecution" -// CreateDataCatalogRequest generates a "aws/request.Request" representing the -// client's request for the CreateDataCatalog operation. The "output" return +// BatchGetQueryExecutionRequest generates a "aws/request.Request" representing the +// client's request for the BatchGetQueryExecution operation. 
The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See CreateDataCatalog for more information on using the CreateDataCatalog +// See BatchGetQueryExecution for more information on using the BatchGetQueryExecution // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the BatchGetQueryExecutionRequest method. +// req, resp := client.BatchGetQueryExecutionRequest(params) // -// // Example sending a request using the CreateDataCatalogRequest method. -// req, resp := client.CreateDataCatalogRequest(params) +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CreateDataCatalog -func (c *Athena) CreateDataCatalogRequest(input *CreateDataCatalogInput) (req *request.Request, output *CreateDataCatalogOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/BatchGetQueryExecution +func (c *Athena) BatchGetQueryExecutionRequest(input *BatchGetQueryExecutionInput) (req *request.Request, output *BatchGetQueryExecutionOutput) { op := &request.Operation{ - Name: opCreateDataCatalog, + Name: opBatchGetQueryExecution, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &CreateDataCatalogInput{} + input = &BatchGetQueryExecutionInput{} } - output = &CreateDataCatalogOutput{} + output = &BatchGetQueryExecutionOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// CreateDataCatalog API operation for Amazon Athena. +// BatchGetQueryExecution API operation for Amazon Athena. // -// Creates (registers) a data catalog with the specified name and properties. -// Catalogs created are visible to all users of the same Amazon Web Services -// account. +// Returns the details of a single query execution or a list of up to 50 query +// executions, which you provide as an array of query execution ID strings. +// Requires you to have access to the workgroup in which the queries ran. To +// get a list of query execution IDs, use ListQueryExecutionsInput$WorkGroup. +// Query executions differ from named (saved) queries. Use BatchGetNamedQueryInput +// to get details about named queries. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Athena's -// API operation CreateDataCatalog for usage and error information. +// API operation BatchGetQueryExecution for usage and error information. // // Returned Error Types: -// * InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. // -// * InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. 
+// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CreateDataCatalog -func (c *Athena) CreateDataCatalog(input *CreateDataCatalogInput) (*CreateDataCatalogOutput, error) { - req, out := c.CreateDataCatalogRequest(input) +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/BatchGetQueryExecution +func (c *Athena) BatchGetQueryExecution(input *BatchGetQueryExecutionInput) (*BatchGetQueryExecutionOutput, error) { + req, out := c.BatchGetQueryExecutionRequest(input) return out, req.Send() } -// CreateDataCatalogWithContext is the same as CreateDataCatalog with the addition of +// BatchGetQueryExecutionWithContext is the same as BatchGetQueryExecution with the addition of // the ability to pass a context and additional request options. // -// See CreateDataCatalog for details on how to use this API operation. +// See BatchGetQueryExecution for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Athena) CreateDataCatalogWithContext(ctx aws.Context, input *CreateDataCatalogInput, opts ...request.Option) (*CreateDataCatalogOutput, error) { - req, out := c.CreateDataCatalogRequest(input) +func (c *Athena) BatchGetQueryExecutionWithContext(ctx aws.Context, input *BatchGetQueryExecutionInput, opts ...request.Option) (*BatchGetQueryExecutionOutput, error) { + req, out := c.BatchGetQueryExecutionRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opCreateNamedQuery = "CreateNamedQuery" +const opCancelCapacityReservation = "CancelCapacityReservation" -// CreateNamedQueryRequest generates a "aws/request.Request" representing the -// client's request for the CreateNamedQuery operation. The "output" return +// CancelCapacityReservationRequest generates a "aws/request.Request" representing the +// client's request for the CancelCapacityReservation operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See CreateNamedQuery for more information on using the CreateNamedQuery +// See CancelCapacityReservation for more information on using the CancelCapacityReservation // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the CancelCapacityReservationRequest method. +// req, resp := client.CancelCapacityReservationRequest(params) // -// // Example sending a request using the CreateNamedQueryRequest method. 
-// req, resp := client.CreateNamedQueryRequest(params) +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CreateNamedQuery -func (c *Athena) CreateNamedQueryRequest(input *CreateNamedQueryInput) (req *request.Request, output *CreateNamedQueryOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CancelCapacityReservation +func (c *Athena) CancelCapacityReservationRequest(input *CancelCapacityReservationInput) (req *request.Request, output *CancelCapacityReservationOutput) { op := &request.Operation{ - Name: opCreateNamedQuery, + Name: opCancelCapacityReservation, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &CreateNamedQueryInput{} + input = &CancelCapacityReservationInput{} } - output = &CreateNamedQueryOutput{} + output = &CancelCapacityReservationOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// CreateNamedQuery API operation for Amazon Athena. +// CancelCapacityReservation API operation for Amazon Athena. // -// Creates a named query in the specified workgroup. Requires that you have -// access to the workgroup. -// -// For code samples using the Amazon Web Services SDK for Java, see Examples -// and Code Samples (http://docs.aws.amazon.com/athena/latest/ug/code-samples.html) -// in the Amazon Athena User Guide. +// Cancels the capacity reservation with the specified name. Cancelled reservations +// remain in your account and will be deleted 45 days after cancellation. During +// the 45 days, you cannot re-purpose or reuse a reservation that has been cancelled, +// but you can refer to its tags and view it for historical reference. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Athena's -// API operation CreateNamedQuery for usage and error information. +// API operation CancelCapacityReservation for usage and error information. // // Returned Error Types: -// * InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. // -// * InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CreateNamedQuery -func (c *Athena) CreateNamedQuery(input *CreateNamedQueryInput) (*CreateNamedQueryOutput, error) { - req, out := c.CreateNamedQueryRequest(input) +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. 
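Assuming the client and imports from the earlier sketch, cancelling a reservation is a single call; per the description above, a cancelled reservation stays visible in the account for 45 days. The name is a placeholder:

    // cancelReservation cancels the named capacity reservation; the entry
    // remains in the account for 45 days after cancellation for reference.
    func cancelReservation(client *athena.Athena, name string) error {
        _, err := client.CancelCapacityReservation(&athena.CancelCapacityReservationInput{
            Name: aws.String(name),
        })
        return err
    }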
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CancelCapacityReservation +func (c *Athena) CancelCapacityReservation(input *CancelCapacityReservationInput) (*CancelCapacityReservationOutput, error) { + req, out := c.CancelCapacityReservationRequest(input) return out, req.Send() } -// CreateNamedQueryWithContext is the same as CreateNamedQuery with the addition of +// CancelCapacityReservationWithContext is the same as CancelCapacityReservation with the addition of // the ability to pass a context and additional request options. // -// See CreateNamedQuery for details on how to use this API operation. +// See CancelCapacityReservation for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Athena) CreateNamedQueryWithContext(ctx aws.Context, input *CreateNamedQueryInput, opts ...request.Option) (*CreateNamedQueryOutput, error) { - req, out := c.CreateNamedQueryRequest(input) +func (c *Athena) CancelCapacityReservationWithContext(ctx aws.Context, input *CancelCapacityReservationInput, opts ...request.Option) (*CancelCapacityReservationOutput, error) { + req, out := c.CancelCapacityReservationRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opCreatePreparedStatement = "CreatePreparedStatement" +const opCreateCapacityReservation = "CreateCapacityReservation" -// CreatePreparedStatementRequest generates a "aws/request.Request" representing the -// client's request for the CreatePreparedStatement operation. The "output" return +// CreateCapacityReservationRequest generates a "aws/request.Request" representing the +// client's request for the CreateCapacityReservation operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See CreatePreparedStatement for more information on using the CreatePreparedStatement +// See CreateCapacityReservation for more information on using the CreateCapacityReservation // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the CreateCapacityReservationRequest method. +// req, resp := client.CreateCapacityReservationRequest(params) // -// // Example sending a request using the CreatePreparedStatementRequest method. 
-// req, resp := client.CreatePreparedStatementRequest(params) +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CreatePreparedStatement -func (c *Athena) CreatePreparedStatementRequest(input *CreatePreparedStatementInput) (req *request.Request, output *CreatePreparedStatementOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CreateCapacityReservation +func (c *Athena) CreateCapacityReservationRequest(input *CreateCapacityReservationInput) (req *request.Request, output *CreateCapacityReservationOutput) { op := &request.Operation{ - Name: opCreatePreparedStatement, + Name: opCreateCapacityReservation, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &CreatePreparedStatementInput{} + input = &CreateCapacityReservationInput{} } - output = &CreatePreparedStatementOutput{} + output = &CreateCapacityReservationOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// CreatePreparedStatement API operation for Amazon Athena. +// CreateCapacityReservation API operation for Amazon Athena. // -// Creates a prepared statement for use with SQL queries in Athena. +// Creates a capacity reservation with the specified name and number of requested +// data processing units. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Athena's -// API operation CreatePreparedStatement for usage and error information. +// API operation CreateCapacityReservation for usage and error information. // // Returned Error Types: -// * InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. // -// * InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CreatePreparedStatement -func (c *Athena) CreatePreparedStatement(input *CreatePreparedStatementInput) (*CreatePreparedStatementOutput, error) { - req, out := c.CreatePreparedStatementRequest(input) +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CreateCapacityReservation +func (c *Athena) CreateCapacityReservation(input *CreateCapacityReservationInput) (*CreateCapacityReservationOutput, error) { + req, out := c.CreateCapacityReservationRequest(input) return out, req.Send() } -// CreatePreparedStatementWithContext is the same as CreatePreparedStatement with the addition of +// CreateCapacityReservationWithContext is the same as CreateCapacityReservation with the addition of // the ability to pass a context and additional request options. // -// See CreatePreparedStatement for details on how to use this API operation. +// See CreateCapacityReservation for details on how to use this API operation. 
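A matching sketch for CreateCapacityReservation, again reusing the client and imports from the first example. The name is a placeholder and the DPU count is illustrative only; the allowed minimum and increments are governed by Athena service quotas, not by this patch:

    // createReservation provisions dedicated query capacity measured in
    // data processing units (DPUs).
    func createReservation(client *athena.Athena, name string, dpus int64) error {
        _, err := client.CreateCapacityReservation(&athena.CreateCapacityReservationInput{
            Name:       aws.String(name),
            TargetDpus: aws.Int64(dpus),
        })
        return err
    }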
// // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Athena) CreatePreparedStatementWithContext(ctx aws.Context, input *CreatePreparedStatementInput, opts ...request.Option) (*CreatePreparedStatementOutput, error) { - req, out := c.CreatePreparedStatementRequest(input) +func (c *Athena) CreateCapacityReservationWithContext(ctx aws.Context, input *CreateCapacityReservationInput, opts ...request.Option) (*CreateCapacityReservationOutput, error) { + req, out := c.CreateCapacityReservationRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opCreateWorkGroup = "CreateWorkGroup" +const opCreateDataCatalog = "CreateDataCatalog" -// CreateWorkGroupRequest generates a "aws/request.Request" representing the -// client's request for the CreateWorkGroup operation. The "output" return +// CreateDataCatalogRequest generates a "aws/request.Request" representing the +// client's request for the CreateDataCatalog operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See CreateWorkGroup for more information on using the CreateWorkGroup +// See CreateDataCatalog for more information on using the CreateDataCatalog // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the CreateDataCatalogRequest method. +// req, resp := client.CreateDataCatalogRequest(params) // -// // Example sending a request using the CreateWorkGroupRequest method. -// req, resp := client.CreateWorkGroupRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CreateWorkGroup -func (c *Athena) CreateWorkGroupRequest(input *CreateWorkGroupInput) (req *request.Request, output *CreateWorkGroupOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CreateDataCatalog +func (c *Athena) CreateDataCatalogRequest(input *CreateDataCatalogInput) (req *request.Request, output *CreateDataCatalogOutput) { op := &request.Operation{ - Name: opCreateWorkGroup, + Name: opCreateDataCatalog, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &CreateWorkGroupInput{} + input = &CreateDataCatalogInput{} } - output = &CreateWorkGroupOutput{} + output = &CreateDataCatalogOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// CreateWorkGroup API operation for Amazon Athena. +// CreateDataCatalog API operation for Amazon Athena. // -// Creates a workgroup with the specified name. +// Creates (registers) a data catalog with the specified name and properties. +// Catalogs created are visible to all users of the same Amazon Web Services +// account. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Athena's -// API operation CreateWorkGroup for usage and error information. +// API operation CreateDataCatalog for usage and error information. // // Returned Error Types: -// * InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. // -// * InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CreateWorkGroup -func (c *Athena) CreateWorkGroup(input *CreateWorkGroupInput) (*CreateWorkGroupOutput, error) { - req, out := c.CreateWorkGroupRequest(input) +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CreateDataCatalog +func (c *Athena) CreateDataCatalog(input *CreateDataCatalogInput) (*CreateDataCatalogOutput, error) { + req, out := c.CreateDataCatalogRequest(input) return out, req.Send() } -// CreateWorkGroupWithContext is the same as CreateWorkGroup with the addition of +// CreateDataCatalogWithContext is the same as CreateDataCatalog with the addition of // the ability to pass a context and additional request options. // -// See CreateWorkGroup for details on how to use this API operation. +// See CreateDataCatalog for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Athena) CreateWorkGroupWithContext(ctx aws.Context, input *CreateWorkGroupInput, opts ...request.Option) (*CreateWorkGroupOutput, error) { - req, out := c.CreateWorkGroupRequest(input) +func (c *Athena) CreateDataCatalogWithContext(ctx aws.Context, input *CreateDataCatalogInput, opts ...request.Option) (*CreateDataCatalogOutput, error) { + req, out := c.CreateDataCatalogRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteDataCatalog = "DeleteDataCatalog" +const opCreateNamedQuery = "CreateNamedQuery" -// DeleteDataCatalogRequest generates a "aws/request.Request" representing the -// client's request for the DeleteDataCatalog operation. The "output" return +// CreateNamedQueryRequest generates a "aws/request.Request" representing the +// client's request for the CreateNamedQuery operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteDataCatalog for more information on using the DeleteDataCatalog +// See CreateNamedQuery for more information on using the CreateNamedQuery // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. 
Such as custom headers, or retry logic. // +// // Example sending a request using the CreateNamedQueryRequest method. +// req, resp := client.CreateNamedQueryRequest(params) // -// // Example sending a request using the DeleteDataCatalogRequest method. -// req, resp := client.DeleteDataCatalogRequest(params) +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/DeleteDataCatalog -func (c *Athena) DeleteDataCatalogRequest(input *DeleteDataCatalogInput) (req *request.Request, output *DeleteDataCatalogOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CreateNamedQuery +func (c *Athena) CreateNamedQueryRequest(input *CreateNamedQueryInput) (req *request.Request, output *CreateNamedQueryOutput) { op := &request.Operation{ - Name: opDeleteDataCatalog, + Name: opCreateNamedQuery, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &DeleteDataCatalogInput{} + input = &CreateNamedQueryInput{} } - output = &DeleteDataCatalogOutput{} + output = &CreateNamedQueryOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// DeleteDataCatalog API operation for Amazon Athena. +// CreateNamedQuery API operation for Amazon Athena. // -// Deletes a data catalog. +// Creates a named query in the specified workgroup. Requires that you have +// access to the workgroup. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Athena's -// API operation DeleteDataCatalog for usage and error information. +// API operation CreateNamedQuery for usage and error information. // // Returned Error Types: -// * InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. // -// * InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/DeleteDataCatalog -func (c *Athena) DeleteDataCatalog(input *DeleteDataCatalogInput) (*DeleteDataCatalogOutput, error) { - req, out := c.DeleteDataCatalogRequest(input) +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CreateNamedQuery +func (c *Athena) CreateNamedQuery(input *CreateNamedQueryInput) (*CreateNamedQueryOutput, error) { + req, out := c.CreateNamedQueryRequest(input) return out, req.Send() } -// DeleteDataCatalogWithContext is the same as DeleteDataCatalog with the addition of +// CreateNamedQueryWithContext is the same as CreateNamedQuery with the addition of // the ability to pass a context and additional request options. // -// See DeleteDataCatalog for details on how to use this API operation. +// See CreateNamedQuery for details on how to use this API operation. 
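A sketch of CreateNamedQuery under the same assumptions (client and imports from the first example); the query name, database, workgroup, and query text below are placeholders:

    // saveNamedQuery stores a reusable query in the given workgroup and
    // returns its generated ID.
    func saveNamedQuery(client *athena.Athena) (string, error) {
        out, err := client.CreateNamedQuery(&athena.CreateNamedQueryInput{
            Name:        aws.String("daily-row-count"), // placeholder
            Database:    aws.String("example_db"),      // placeholder
            WorkGroup:   aws.String("primary"),
            QueryString: aws.String("SELECT count(*) FROM example_table"),
        })
        if err != nil {
            return "", err
        }
        return aws.StringValue(out.NamedQueryId), nil
    }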
// // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Athena) DeleteDataCatalogWithContext(ctx aws.Context, input *DeleteDataCatalogInput, opts ...request.Option) (*DeleteDataCatalogOutput, error) { - req, out := c.DeleteDataCatalogRequest(input) +func (c *Athena) CreateNamedQueryWithContext(ctx aws.Context, input *CreateNamedQueryInput, opts ...request.Option) (*CreateNamedQueryOutput, error) { + req, out := c.CreateNamedQueryRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteNamedQuery = "DeleteNamedQuery" +const opCreateNotebook = "CreateNotebook" -// DeleteNamedQueryRequest generates a "aws/request.Request" representing the -// client's request for the DeleteNamedQuery operation. The "output" return +// CreateNotebookRequest generates a "aws/request.Request" representing the +// client's request for the CreateNotebook operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteNamedQuery for more information on using the DeleteNamedQuery +// See CreateNotebook for more information on using the CreateNotebook // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the CreateNotebookRequest method. +// req, resp := client.CreateNotebookRequest(params) // -// // Example sending a request using the DeleteNamedQueryRequest method. -// req, resp := client.DeleteNamedQueryRequest(params) +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/DeleteNamedQuery -func (c *Athena) DeleteNamedQueryRequest(input *DeleteNamedQueryInput) (req *request.Request, output *DeleteNamedQueryOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CreateNotebook +func (c *Athena) CreateNotebookRequest(input *CreateNotebookInput) (req *request.Request, output *CreateNotebookOutput) { op := &request.Operation{ - Name: opDeleteNamedQuery, + Name: opCreateNotebook, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &DeleteNamedQueryInput{} + input = &CreateNotebookInput{} } - output = &DeleteNamedQueryOutput{} + output = &CreateNotebookOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// DeleteNamedQuery API operation for Amazon Athena. +// CreateNotebook API operation for Amazon Athena. // -// Deletes the named query if you have access to the workgroup in which the -// query was saved. -// -// For code samples using the Amazon Web Services SDK for Java, see Examples -// and Code Samples (http://docs.aws.amazon.com/athena/latest/ug/code-samples.html) -// in the Amazon Athena User Guide. +// Creates an empty ipynb file in the specified Apache Spark enabled workgroup. 
+// Throws an error if a file in the workgroup with the same name already exists. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Athena's -// API operation DeleteNamedQuery for usage and error information. +// API operation CreateNotebook for usage and error information. // // Returned Error Types: -// * InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. // -// * InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/DeleteNamedQuery -func (c *Athena) DeleteNamedQuery(input *DeleteNamedQueryInput) (*DeleteNamedQueryOutput, error) { - req, out := c.DeleteNamedQueryRequest(input) +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// - TooManyRequestsException +// Indicates that the request was throttled. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CreateNotebook +func (c *Athena) CreateNotebook(input *CreateNotebookInput) (*CreateNotebookOutput, error) { + req, out := c.CreateNotebookRequest(input) return out, req.Send() } -// DeleteNamedQueryWithContext is the same as DeleteNamedQuery with the addition of +// CreateNotebookWithContext is the same as CreateNotebook with the addition of // the ability to pass a context and additional request options. // -// See DeleteNamedQuery for details on how to use this API operation. +// See CreateNotebook for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Athena) DeleteNamedQueryWithContext(ctx aws.Context, input *DeleteNamedQueryInput, opts ...request.Option) (*DeleteNamedQueryOutput, error) { - req, out := c.DeleteNamedQueryRequest(input) +func (c *Athena) CreateNotebookWithContext(ctx aws.Context, input *CreateNotebookInput, opts ...request.Option) (*CreateNotebookOutput, error) { + req, out := c.CreateNotebookRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeletePreparedStatement = "DeletePreparedStatement" +const opCreatePreparedStatement = "CreatePreparedStatement" -// DeletePreparedStatementRequest generates a "aws/request.Request" representing the -// client's request for the DeletePreparedStatement operation. The "output" return +// CreatePreparedStatementRequest generates a "aws/request.Request" representing the +// client's request for the CreatePreparedStatement operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
// -// See DeletePreparedStatement for more information on using the DeletePreparedStatement +// See CreatePreparedStatement for more information on using the CreatePreparedStatement // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the CreatePreparedStatementRequest method. +// req, resp := client.CreatePreparedStatementRequest(params) // -// // Example sending a request using the DeletePreparedStatementRequest method. -// req, resp := client.DeletePreparedStatementRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/DeletePreparedStatement -func (c *Athena) DeletePreparedStatementRequest(input *DeletePreparedStatementInput) (req *request.Request, output *DeletePreparedStatementOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CreatePreparedStatement +func (c *Athena) CreatePreparedStatementRequest(input *CreatePreparedStatementInput) (req *request.Request, output *CreatePreparedStatementOutput) { op := &request.Operation{ - Name: opDeletePreparedStatement, + Name: opCreatePreparedStatement, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &DeletePreparedStatementInput{} + input = &CreatePreparedStatementInput{} } - output = &DeletePreparedStatementOutput{} + output = &CreatePreparedStatementOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// DeletePreparedStatement API operation for Amazon Athena. +// CreatePreparedStatement API operation for Amazon Athena. // -// Deletes the prepared statement with the specified name from the specified -// workgroup. +// Creates a prepared statement for use with SQL queries in Athena. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Athena's -// API operation DeletePreparedStatement for usage and error information. +// API operation CreatePreparedStatement for usage and error information. // // Returned Error Types: -// * InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. // -// * InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. // -// * ResourceNotFoundException -// A resource, such as a workgroup, was not found. +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. 
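For CreatePreparedStatement, the statement body uses positional ? parameters that are bound when the statement is executed. The statement name, workgroup, and SQL below are placeholders, with the client assumed from the first sketch:

    // createStatement registers a parameterized SQL statement in a workgroup.
    func createStatement(client *athena.Athena) error {
        _, err := client.CreatePreparedStatement(&athena.CreatePreparedStatementInput{
            StatementName:  aws.String("rows_by_day"), // placeholder
            WorkGroup:      aws.String("primary"),
            QueryStatement: aws.String("SELECT * FROM example_table WHERE day = ?"),
        })
        return err
    }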
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/DeletePreparedStatement -func (c *Athena) DeletePreparedStatement(input *DeletePreparedStatementInput) (*DeletePreparedStatementOutput, error) { - req, out := c.DeletePreparedStatementRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CreatePreparedStatement +func (c *Athena) CreatePreparedStatement(input *CreatePreparedStatementInput) (*CreatePreparedStatementOutput, error) { + req, out := c.CreatePreparedStatementRequest(input) return out, req.Send() } -// DeletePreparedStatementWithContext is the same as DeletePreparedStatement with the addition of +// CreatePreparedStatementWithContext is the same as CreatePreparedStatement with the addition of // the ability to pass a context and additional request options. // -// See DeletePreparedStatement for details on how to use this API operation. +// See CreatePreparedStatement for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Athena) DeletePreparedStatementWithContext(ctx aws.Context, input *DeletePreparedStatementInput, opts ...request.Option) (*DeletePreparedStatementOutput, error) { - req, out := c.DeletePreparedStatementRequest(input) +func (c *Athena) CreatePreparedStatementWithContext(ctx aws.Context, input *CreatePreparedStatementInput, opts ...request.Option) (*CreatePreparedStatementOutput, error) { + req, out := c.CreatePreparedStatementRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteWorkGroup = "DeleteWorkGroup" +const opCreatePresignedNotebookUrl = "CreatePresignedNotebookUrl" -// DeleteWorkGroupRequest generates a "aws/request.Request" representing the -// client's request for the DeleteWorkGroup operation. The "output" return +// CreatePresignedNotebookUrlRequest generates a "aws/request.Request" representing the +// client's request for the CreatePresignedNotebookUrl operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteWorkGroup for more information on using the DeleteWorkGroup +// See CreatePresignedNotebookUrl for more information on using the CreatePresignedNotebookUrl // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the CreatePresignedNotebookUrlRequest method. +// req, resp := client.CreatePresignedNotebookUrlRequest(params) // -// // Example sending a request using the DeleteWorkGroupRequest method. 
-// req, resp := client.DeleteWorkGroupRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/DeleteWorkGroup -func (c *Athena) DeleteWorkGroupRequest(input *DeleteWorkGroupInput) (req *request.Request, output *DeleteWorkGroupOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CreatePresignedNotebookUrl +func (c *Athena) CreatePresignedNotebookUrlRequest(input *CreatePresignedNotebookUrlInput) (req *request.Request, output *CreatePresignedNotebookUrlOutput) { op := &request.Operation{ - Name: opDeleteWorkGroup, + Name: opCreatePresignedNotebookUrl, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &DeleteWorkGroupInput{} + input = &CreatePresignedNotebookUrlInput{} } - output = &DeleteWorkGroupOutput{} + output = &CreatePresignedNotebookUrlOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// DeleteWorkGroup API operation for Amazon Athena. +// CreatePresignedNotebookUrl API operation for Amazon Athena. // -// Deletes the workgroup with the specified name. The primary workgroup cannot -// be deleted. +// Gets an authentication token and the URL at which the notebook can be accessed. +// During programmatic access, CreatePresignedNotebookUrl must be called every +// 10 minutes to refresh the authentication token. For information about granting +// programmatic access, see Grant programmatic access (https://docs.aws.amazon.com/athena/latest/ug/setting-up.html#setting-up-grant-programmatic-access). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Athena's -// API operation DeleteWorkGroup for usage and error information. +// API operation CreatePresignedNotebookUrl for usage and error information. // // Returned Error Types: -// * InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. // -// * InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/DeleteWorkGroup -func (c *Athena) DeleteWorkGroup(input *DeleteWorkGroupInput) (*DeleteWorkGroupOutput, error) { - req, out := c.DeleteWorkGroupRequest(input) +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// - ResourceNotFoundException +// A resource, such as a workgroup, was not found. 
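Because the description above notes that the presigned URL and token must be refreshed every 10 minutes during programmatic access, a caller would re-invoke the operation on a timer. A minimal sketch, with the session ID as a placeholder and the client assumed from the first example:

    // refreshNotebookURL fetches a fresh auth token and URL for an active
    // notebook session; callers should re-invoke it roughly every 10 minutes.
    func refreshNotebookURL(client *athena.Athena, sessionID string) (string, error) {
        out, err := client.CreatePresignedNotebookUrl(&athena.CreatePresignedNotebookUrlInput{
            SessionId: aws.String(sessionID),
        })
        if err != nil {
            return "", err
        }
        return aws.StringValue(out.NotebookUrl), nil
    }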
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CreatePresignedNotebookUrl +func (c *Athena) CreatePresignedNotebookUrl(input *CreatePresignedNotebookUrlInput) (*CreatePresignedNotebookUrlOutput, error) { + req, out := c.CreatePresignedNotebookUrlRequest(input) return out, req.Send() } -// DeleteWorkGroupWithContext is the same as DeleteWorkGroup with the addition of +// CreatePresignedNotebookUrlWithContext is the same as CreatePresignedNotebookUrl with the addition of // the ability to pass a context and additional request options. // -// See DeleteWorkGroup for details on how to use this API operation. +// See CreatePresignedNotebookUrl for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Athena) DeleteWorkGroupWithContext(ctx aws.Context, input *DeleteWorkGroupInput, opts ...request.Option) (*DeleteWorkGroupOutput, error) { - req, out := c.DeleteWorkGroupRequest(input) +func (c *Athena) CreatePresignedNotebookUrlWithContext(ctx aws.Context, input *CreatePresignedNotebookUrlInput, opts ...request.Option) (*CreatePresignedNotebookUrlOutput, error) { + req, out := c.CreatePresignedNotebookUrlRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetDataCatalog = "GetDataCatalog" +const opCreateWorkGroup = "CreateWorkGroup" -// GetDataCatalogRequest generates a "aws/request.Request" representing the -// client's request for the GetDataCatalog operation. The "output" return +// CreateWorkGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateWorkGroup operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetDataCatalog for more information on using the GetDataCatalog +// See CreateWorkGroup for more information on using the CreateWorkGroup // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the CreateWorkGroupRequest method. +// req, resp := client.CreateWorkGroupRequest(params) // -// // Example sending a request using the GetDataCatalogRequest method. 
-// req, resp := client.GetDataCatalogRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetDataCatalog -func (c *Athena) GetDataCatalogRequest(input *GetDataCatalogInput) (req *request.Request, output *GetDataCatalogOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CreateWorkGroup +func (c *Athena) CreateWorkGroupRequest(input *CreateWorkGroupInput) (req *request.Request, output *CreateWorkGroupOutput) { op := &request.Operation{ - Name: opGetDataCatalog, + Name: opCreateWorkGroup, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetDataCatalogInput{} + input = &CreateWorkGroupInput{} } - output = &GetDataCatalogOutput{} + output = &CreateWorkGroupOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// GetDataCatalog API operation for Amazon Athena. +// CreateWorkGroup API operation for Amazon Athena. // -// Returns the specified data catalog. +// Creates a workgroup with the specified name. A workgroup can be an Apache +// Spark enabled workgroup or an Athena SQL workgroup. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Athena's -// API operation GetDataCatalog for usage and error information. +// API operation CreateWorkGroup for usage and error information. // // Returned Error Types: -// * InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. // -// * InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetDataCatalog -func (c *Athena) GetDataCatalog(input *GetDataCatalogInput) (*GetDataCatalogOutput, error) { - req, out := c.GetDataCatalogRequest(input) +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CreateWorkGroup +func (c *Athena) CreateWorkGroup(input *CreateWorkGroupInput) (*CreateWorkGroupOutput, error) { + req, out := c.CreateWorkGroupRequest(input) return out, req.Send() } -// GetDataCatalogWithContext is the same as GetDataCatalog with the addition of +// CreateWorkGroupWithContext is the same as CreateWorkGroup with the addition of // the ability to pass a context and additional request options. // -// See GetDataCatalog for details on how to use this API operation. +// See CreateWorkGroup for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
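A sketch of CreateWorkGroup for an Athena SQL workgroup, with the result-output S3 location as a placeholder and the client assumed from the first example:

    // createSQLWorkGroup creates a workgroup whose query results land in
    // the given S3 location.
    func createSQLWorkGroup(client *athena.Athena, name, outputS3 string) error {
        _, err := client.CreateWorkGroup(&athena.CreateWorkGroupInput{
            Name: aws.String(name),
            Configuration: &athena.WorkGroupConfiguration{
                ResultConfiguration: &athena.ResultConfiguration{
                    OutputLocation: aws.String(outputS3), // e.g. "s3://example-bucket/athena-results/"
                },
            },
        })
        return err
    }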
-func (c *Athena) GetDataCatalogWithContext(ctx aws.Context, input *GetDataCatalogInput, opts ...request.Option) (*GetDataCatalogOutput, error) { - req, out := c.GetDataCatalogRequest(input) +func (c *Athena) CreateWorkGroupWithContext(ctx aws.Context, input *CreateWorkGroupInput, opts ...request.Option) (*CreateWorkGroupOutput, error) { + req, out := c.CreateWorkGroupRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetDatabase = "GetDatabase" +const opDeleteCapacityReservation = "DeleteCapacityReservation" -// GetDatabaseRequest generates a "aws/request.Request" representing the -// client's request for the GetDatabase operation. The "output" return +// DeleteCapacityReservationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteCapacityReservation operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetDatabase for more information on using the GetDatabase +// See DeleteCapacityReservation for more information on using the DeleteCapacityReservation // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the DeleteCapacityReservationRequest method. +// req, resp := client.DeleteCapacityReservationRequest(params) // -// // Example sending a request using the GetDatabaseRequest method. -// req, resp := client.GetDatabaseRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetDatabase -func (c *Athena) GetDatabaseRequest(input *GetDatabaseInput) (req *request.Request, output *GetDatabaseOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/DeleteCapacityReservation +func (c *Athena) DeleteCapacityReservationRequest(input *DeleteCapacityReservationInput) (req *request.Request, output *DeleteCapacityReservationOutput) { op := &request.Operation{ - Name: opGetDatabase, + Name: opDeleteCapacityReservation, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetDatabaseInput{} + input = &DeleteCapacityReservationInput{} } - output = &GetDatabaseOutput{} + output = &DeleteCapacityReservationOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// GetDatabase API operation for Amazon Athena. +// DeleteCapacityReservation API operation for Amazon Athena. // -// Returns a database object for the specified database and data catalog. +// Deletes a cancelled capacity reservation. A reservation must be cancelled +// before it can be deleted. A deleted reservation is immediately removed from +// your account and can no longer be referenced, including by its ARN. A deleted +// reservation cannot be called by GetCapacityReservation, and deleted reservations +// do not appear in the output of ListCapacityReservations. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Athena's -// API operation GetDatabase for usage and error information. +// API operation DeleteCapacityReservation for usage and error information. // // Returned Error Types: -// * InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. // -// * InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. // -// * MetadataException -// An exception that Athena received when it called a custom metastore. Occurs -// if the error is not caused by user input (InvalidRequestException) or from -// the Athena platform (InternalServerException). For example, if a user-created -// Lambda function is missing permissions, the Lambda 4XX exception is returned -// in a MetadataException. +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetDatabase -func (c *Athena) GetDatabase(input *GetDatabaseInput) (*GetDatabaseOutput, error) { - req, out := c.GetDatabaseRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/DeleteCapacityReservation +func (c *Athena) DeleteCapacityReservation(input *DeleteCapacityReservationInput) (*DeleteCapacityReservationOutput, error) { + req, out := c.DeleteCapacityReservationRequest(input) return out, req.Send() } -// GetDatabaseWithContext is the same as GetDatabase with the addition of +// DeleteCapacityReservationWithContext is the same as DeleteCapacityReservation with the addition of // the ability to pass a context and additional request options. // -// See GetDatabase for details on how to use this API operation. +// See DeleteCapacityReservation for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Athena) GetDatabaseWithContext(ctx aws.Context, input *GetDatabaseInput, opts ...request.Option) (*GetDatabaseOutput, error) { - req, out := c.GetDatabaseRequest(input) +func (c *Athena) DeleteCapacityReservationWithContext(ctx aws.Context, input *DeleteCapacityReservationInput, opts ...request.Option) (*DeleteCapacityReservationOutput, error) { + req, out := c.DeleteCapacityReservationRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetNamedQuery = "GetNamedQuery" +const opDeleteDataCatalog = "DeleteDataCatalog" -// GetNamedQueryRequest generates a "aws/request.Request" representing the -// client's request for the GetNamedQuery operation. The "output" return +// DeleteDataCatalogRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDataCatalog operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. 
// the "output" return value is not valid until after Send returns without error. // -// See GetNamedQuery for more information on using the GetNamedQuery +// See DeleteDataCatalog for more information on using the DeleteDataCatalog // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the DeleteDataCatalogRequest method. +// req, resp := client.DeleteDataCatalogRequest(params) // -// // Example sending a request using the GetNamedQueryRequest method. -// req, resp := client.GetNamedQueryRequest(params) +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetNamedQuery -func (c *Athena) GetNamedQueryRequest(input *GetNamedQueryInput) (req *request.Request, output *GetNamedQueryOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/DeleteDataCatalog +func (c *Athena) DeleteDataCatalogRequest(input *DeleteDataCatalogInput) (req *request.Request, output *DeleteDataCatalogOutput) { op := &request.Operation{ - Name: opGetNamedQuery, + Name: opDeleteDataCatalog, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetNamedQueryInput{} + input = &DeleteDataCatalogInput{} } - output = &GetNamedQueryOutput{} + output = &DeleteDataCatalogOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// GetNamedQuery API operation for Amazon Athena. +// DeleteDataCatalog API operation for Amazon Athena. // -// Returns information about a single query. Requires that you have access to -// the workgroup in which the query was saved. +// Deletes a data catalog. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Athena's -// API operation GetNamedQuery for usage and error information. +// API operation DeleteDataCatalog for usage and error information. // // Returned Error Types: -// * InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. // -// * InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetNamedQuery -func (c *Athena) GetNamedQuery(input *GetNamedQueryInput) (*GetNamedQueryOutput, error) { - req, out := c.GetNamedQueryRequest(input) +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/DeleteDataCatalog +func (c *Athena) DeleteDataCatalog(input *DeleteDataCatalogInput) (*DeleteDataCatalogOutput, error) { + req, out := c.DeleteDataCatalogRequest(input) return out, req.Send() } -// GetNamedQueryWithContext is the same as GetNamedQuery with the addition of +// DeleteDataCatalogWithContext is the same as DeleteDataCatalog with the addition of // the ability to pass a context and additional request options. // -// See GetNamedQuery for details on how to use this API operation. +// See DeleteDataCatalog for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Athena) GetNamedQueryWithContext(ctx aws.Context, input *GetNamedQueryInput, opts ...request.Option) (*GetNamedQueryOutput, error) { - req, out := c.GetNamedQueryRequest(input) +func (c *Athena) DeleteDataCatalogWithContext(ctx aws.Context, input *DeleteDataCatalogInput, opts ...request.Option) (*DeleteDataCatalogOutput, error) { + req, out := c.DeleteDataCatalogRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetPreparedStatement = "GetPreparedStatement" +const opDeleteNamedQuery = "DeleteNamedQuery" -// GetPreparedStatementRequest generates a "aws/request.Request" representing the -// client's request for the GetPreparedStatement operation. The "output" return +// DeleteNamedQueryRequest generates a "aws/request.Request" representing the +// client's request for the DeleteNamedQuery operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetPreparedStatement for more information on using the GetPreparedStatement +// See DeleteNamedQuery for more information on using the DeleteNamedQuery // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the DeleteNamedQueryRequest method. +// req, resp := client.DeleteNamedQueryRequest(params) // -// // Example sending a request using the GetPreparedStatementRequest method. 
-// req, resp := client.GetPreparedStatementRequest(params) +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetPreparedStatement -func (c *Athena) GetPreparedStatementRequest(input *GetPreparedStatementInput) (req *request.Request, output *GetPreparedStatementOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/DeleteNamedQuery +func (c *Athena) DeleteNamedQueryRequest(input *DeleteNamedQueryInput) (req *request.Request, output *DeleteNamedQueryOutput) { op := &request.Operation{ - Name: opGetPreparedStatement, + Name: opDeleteNamedQuery, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetPreparedStatementInput{} + input = &DeleteNamedQueryInput{} } - output = &GetPreparedStatementOutput{} + output = &DeleteNamedQueryOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// GetPreparedStatement API operation for Amazon Athena. +// DeleteNamedQuery API operation for Amazon Athena. // -// Retrieves the prepared statement with the specified name from the specified -// workgroup. +// Deletes the named query if you have access to the workgroup in which the +// query was saved. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Athena's -// API operation GetPreparedStatement for usage and error information. +// API operation DeleteNamedQuery for usage and error information. // // Returned Error Types: -// * InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. // -// * InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. // -// * ResourceNotFoundException -// A resource, such as a workgroup, was not found. +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetPreparedStatement -func (c *Athena) GetPreparedStatement(input *GetPreparedStatementInput) (*GetPreparedStatementOutput, error) { - req, out := c.GetPreparedStatementRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/DeleteNamedQuery +func (c *Athena) DeleteNamedQuery(input *DeleteNamedQueryInput) (*DeleteNamedQueryOutput, error) { + req, out := c.DeleteNamedQueryRequest(input) return out, req.Send() } -// GetPreparedStatementWithContext is the same as GetPreparedStatement with the addition of +// DeleteNamedQueryWithContext is the same as DeleteNamedQuery with the addition of // the ability to pass a context and additional request options. // -// See GetPreparedStatement for details on how to use this API operation. +// See DeleteNamedQuery for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. 
If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Athena) GetPreparedStatementWithContext(ctx aws.Context, input *GetPreparedStatementInput, opts ...request.Option) (*GetPreparedStatementOutput, error) { - req, out := c.GetPreparedStatementRequest(input) +func (c *Athena) DeleteNamedQueryWithContext(ctx aws.Context, input *DeleteNamedQueryInput, opts ...request.Option) (*DeleteNamedQueryOutput, error) { + req, out := c.DeleteNamedQueryRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetQueryExecution = "GetQueryExecution" +const opDeleteNotebook = "DeleteNotebook" -// GetQueryExecutionRequest generates a "aws/request.Request" representing the -// client's request for the GetQueryExecution operation. The "output" return +// DeleteNotebookRequest generates a "aws/request.Request" representing the +// client's request for the DeleteNotebook operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetQueryExecution for more information on using the GetQueryExecution +// See DeleteNotebook for more information on using the DeleteNotebook // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the DeleteNotebookRequest method. +// req, resp := client.DeleteNotebookRequest(params) // -// // Example sending a request using the GetQueryExecutionRequest method. -// req, resp := client.GetQueryExecutionRequest(params) +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetQueryExecution -func (c *Athena) GetQueryExecutionRequest(input *GetQueryExecutionInput) (req *request.Request, output *GetQueryExecutionOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/DeleteNotebook +func (c *Athena) DeleteNotebookRequest(input *DeleteNotebookInput) (req *request.Request, output *DeleteNotebookOutput) { op := &request.Operation{ - Name: opGetQueryExecution, + Name: opDeleteNotebook, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetQueryExecutionInput{} + input = &DeleteNotebookInput{} } - output = &GetQueryExecutionOutput{} + output = &DeleteNotebookOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// GetQueryExecution API operation for Amazon Athena. +// DeleteNotebook API operation for Amazon Athena. // -// Returns information about a single execution of a query if you have access -// to the workgroup in which the query ran. Each time a query executes, information -// about the query execution is saved with a unique ID. +// Deletes the specified notebook. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
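//
// An illustrative sketch, not part of the generated file: deleting a notebook
// and inspecting the returned error with a type assertion. It assumes a
// client constructed as shown; "notebook-id" is a placeholder.
//
//	client := athena.New(session.Must(session.NewSession()))
//	_, err := client.DeleteNotebook(&athena.DeleteNotebookInput{
//		NotebookId: aws.String("notebook-id"), // placeholder ID
//	})
//	if aerr, ok := err.(awserr.Error); ok {
//		switch aerr.Code() {
//		case athena.ErrCodeTooManyRequestsException:
//			// request was throttled; retry with backoff
//		default:
//			log.Println(aerr.Code(), aerr.Message())
//		}
//	}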
// // See the AWS API reference guide for Amazon Athena's -// API operation GetQueryExecution for usage and error information. +// API operation DeleteNotebook for usage and error information. // // Returned Error Types: -// * InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. // -// * InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetQueryExecution -func (c *Athena) GetQueryExecution(input *GetQueryExecutionInput) (*GetQueryExecutionOutput, error) { - req, out := c.GetQueryExecutionRequest(input) +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// - TooManyRequestsException +// Indicates that the request was throttled. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/DeleteNotebook +func (c *Athena) DeleteNotebook(input *DeleteNotebookInput) (*DeleteNotebookOutput, error) { + req, out := c.DeleteNotebookRequest(input) return out, req.Send() } -// GetQueryExecutionWithContext is the same as GetQueryExecution with the addition of +// DeleteNotebookWithContext is the same as DeleteNotebook with the addition of // the ability to pass a context and additional request options. // -// See GetQueryExecution for details on how to use this API operation. +// See DeleteNotebook for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Athena) GetQueryExecutionWithContext(ctx aws.Context, input *GetQueryExecutionInput, opts ...request.Option) (*GetQueryExecutionOutput, error) { - req, out := c.GetQueryExecutionRequest(input) +func (c *Athena) DeleteNotebookWithContext(ctx aws.Context, input *DeleteNotebookInput, opts ...request.Option) (*DeleteNotebookOutput, error) { + req, out := c.DeleteNotebookRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetQueryResults = "GetQueryResults" +const opDeletePreparedStatement = "DeletePreparedStatement" -// GetQueryResultsRequest generates a "aws/request.Request" representing the -// client's request for the GetQueryResults operation. The "output" return +// DeletePreparedStatementRequest generates a "aws/request.Request" representing the +// client's request for the DeletePreparedStatement operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetQueryResults for more information on using the GetQueryResults +// See DeletePreparedStatement for more information on using the DeletePreparedStatement // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. 
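//
// An illustrative sketch, not part of the generated file, of injecting a
// custom header through the Request form before sending; the statement and
// workgroup names are placeholders.
//
//	req, out := client.DeletePreparedStatementRequest(&athena.DeletePreparedStatementInput{
//		StatementName: aws.String("my_statement"), // placeholder
//		WorkGroup:     aws.String("primary"),
//	})
//	req.HTTPRequest.Header.Set("X-Debug-Trace", "1") // custom header injection
//	if err := req.Send(); err == nil {
//		fmt.Println(out)
//	}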
// +// // Example sending a request using the DeletePreparedStatementRequest method. +// req, resp := client.DeletePreparedStatementRequest(params) // -// // Example sending a request using the GetQueryResultsRequest method. -// req, resp := client.GetQueryResultsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetQueryResults -func (c *Athena) GetQueryResultsRequest(input *GetQueryResultsInput) (req *request.Request, output *GetQueryResultsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/DeletePreparedStatement +func (c *Athena) DeletePreparedStatementRequest(input *DeletePreparedStatementInput) (req *request.Request, output *DeletePreparedStatementOutput) { op := &request.Operation{ - Name: opGetQueryResults, + Name: opDeletePreparedStatement, HTTPMethod: "POST", HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, } if input == nil { - input = &GetQueryResultsInput{} + input = &DeletePreparedStatementInput{} } - output = &GetQueryResultsOutput{} + output = &DeletePreparedStatementOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// GetQueryResults API operation for Amazon Athena. -// -// Streams the results of a single query execution specified by QueryExecutionId -// from the Athena query results location in Amazon S3. For more information, -// see Query Results (https://docs.aws.amazon.com/athena/latest/ug/querying.html) -// in the Amazon Athena User Guide. This request does not execute the query -// but returns results. Use StartQueryExecution to run a query. -// -// To stream query results successfully, the IAM principal with permission to -// call GetQueryResults also must have permissions to the Amazon S3 GetObject -// action for the Athena query results location. +// DeletePreparedStatement API operation for Amazon Athena. // -// IAM principals with permission to the Amazon S3 GetObject action for the -// query results location are able to retrieve query results from Amazon S3 -// even if permission to the GetQueryResults action is denied. To restrict user -// or role access, ensure that Amazon S3 permissions to the Athena query location -// are denied. +// Deletes the prepared statement with the specified name from the specified +// workgroup. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Athena's -// API operation GetQueryResults for usage and error information. +// API operation DeletePreparedStatement for usage and error information. // // Returned Error Types: -// * InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. // -// * InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetQueryResults -func (c *Athena) GetQueryResults(input *GetQueryResultsInput) (*GetQueryResultsOutput, error) { - req, out := c.GetQueryResultsRequest(input) +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// - ResourceNotFoundException +// A resource, such as a workgroup, was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/DeletePreparedStatement +func (c *Athena) DeletePreparedStatement(input *DeletePreparedStatementInput) (*DeletePreparedStatementOutput, error) { + req, out := c.DeletePreparedStatementRequest(input) return out, req.Send() } -// GetQueryResultsWithContext is the same as GetQueryResults with the addition of +// DeletePreparedStatementWithContext is the same as DeletePreparedStatement with the addition of // the ability to pass a context and additional request options. // -// See GetQueryResults for details on how to use this API operation. +// See DeletePreparedStatement for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Athena) GetQueryResultsWithContext(ctx aws.Context, input *GetQueryResultsInput, opts ...request.Option) (*GetQueryResultsOutput, error) { - req, out := c.GetQueryResultsRequest(input) +func (c *Athena) DeletePreparedStatementWithContext(ctx aws.Context, input *DeletePreparedStatementInput, opts ...request.Option) (*DeletePreparedStatementOutput, error) { + req, out := c.DeletePreparedStatementRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// GetQueryResultsPages iterates over the pages of a GetQueryResults operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See GetQueryResults method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a GetQueryResults operation. -// pageNum := 0 -// err := client.GetQueryResultsPages(params, -// func(page *athena.GetQueryResultsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *Athena) GetQueryResultsPages(input *GetQueryResultsInput, fn func(*GetQueryResultsOutput, bool) bool) error { - return c.GetQueryResultsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// GetQueryResultsPagesWithContext same as GetQueryResultsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *Athena) GetQueryResultsPagesWithContext(ctx aws.Context, input *GetQueryResultsInput, fn func(*GetQueryResultsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *GetQueryResultsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.GetQueryResultsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*GetQueryResultsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opGetTableMetadata = "GetTableMetadata" +const opDeleteWorkGroup = "DeleteWorkGroup" -// GetTableMetadataRequest generates a "aws/request.Request" representing the -// client's request for the GetTableMetadata operation. The "output" return +// DeleteWorkGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteWorkGroup operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetTableMetadata for more information on using the GetTableMetadata +// See DeleteWorkGroup for more information on using the DeleteWorkGroup // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the DeleteWorkGroupRequest method. +// req, resp := client.DeleteWorkGroupRequest(params) // -// // Example sending a request using the GetTableMetadataRequest method. -// req, resp := client.GetTableMetadataRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetTableMetadata -func (c *Athena) GetTableMetadataRequest(input *GetTableMetadataInput) (req *request.Request, output *GetTableMetadataOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/DeleteWorkGroup +func (c *Athena) DeleteWorkGroupRequest(input *DeleteWorkGroupInput) (req *request.Request, output *DeleteWorkGroupOutput) { op := &request.Operation{ - Name: opGetTableMetadata, + Name: opDeleteWorkGroup, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetTableMetadataInput{} + input = &DeleteWorkGroupInput{} } - output = &GetTableMetadataOutput{} + output = &DeleteWorkGroupOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// GetTableMetadata API operation for Amazon Athena. +// DeleteWorkGroup API operation for Amazon Athena. // -// Returns table metadata for the specified catalog, database, and table. +// Deletes the workgroup with the specified name. The primary workgroup cannot +// be deleted. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Athena's -// API operation GetTableMetadata for usage and error information. 
+// API operation DeleteWorkGroup for usage and error information. // // Returned Error Types: -// * InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. // -// * InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. // -// * MetadataException -// An exception that Athena received when it called a custom metastore. Occurs -// if the error is not caused by user input (InvalidRequestException) or from -// the Athena platform (InternalServerException). For example, if a user-created -// Lambda function is missing permissions, the Lambda 4XX exception is returned -// in a MetadataException. +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetTableMetadata -func (c *Athena) GetTableMetadata(input *GetTableMetadataInput) (*GetTableMetadataOutput, error) { - req, out := c.GetTableMetadataRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/DeleteWorkGroup +func (c *Athena) DeleteWorkGroup(input *DeleteWorkGroupInput) (*DeleteWorkGroupOutput, error) { + req, out := c.DeleteWorkGroupRequest(input) return out, req.Send() } -// GetTableMetadataWithContext is the same as GetTableMetadata with the addition of +// DeleteWorkGroupWithContext is the same as DeleteWorkGroup with the addition of // the ability to pass a context and additional request options. // -// See GetTableMetadata for details on how to use this API operation. +// See DeleteWorkGroup for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Athena) GetTableMetadataWithContext(ctx aws.Context, input *GetTableMetadataInput, opts ...request.Option) (*GetTableMetadataOutput, error) { - req, out := c.GetTableMetadataRequest(input) +func (c *Athena) DeleteWorkGroupWithContext(ctx aws.Context, input *DeleteWorkGroupInput, opts ...request.Option) (*DeleteWorkGroupOutput, error) { + req, out := c.DeleteWorkGroupRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetWorkGroup = "GetWorkGroup" +const opExportNotebook = "ExportNotebook" -// GetWorkGroupRequest generates a "aws/request.Request" representing the -// client's request for the GetWorkGroup operation. The "output" return +// ExportNotebookRequest generates a "aws/request.Request" representing the +// client's request for the ExportNotebook operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetWorkGroup for more information on using the GetWorkGroup +// See ExportNotebook for more information on using the ExportNotebook // API call, and error handling. 
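//
// An illustrative sketch, not part of the generated file: exporting a notebook
// by ID with the shorthand call. The field name is assumed from this SDK
// version and the ID is a placeholder.
//
//	out, err := client.ExportNotebook(&athena.ExportNotebookInput{
//		NotebookId: aws.String("notebook-id"), // placeholder ID
//	})
//	if err == nil {
//		fmt.Println(out)
//	}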
// // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the ExportNotebookRequest method. +// req, resp := client.ExportNotebookRequest(params) // -// // Example sending a request using the GetWorkGroupRequest method. -// req, resp := client.GetWorkGroupRequest(params) +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetWorkGroup -func (c *Athena) GetWorkGroupRequest(input *GetWorkGroupInput) (req *request.Request, output *GetWorkGroupOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ExportNotebook +func (c *Athena) ExportNotebookRequest(input *ExportNotebookInput) (req *request.Request, output *ExportNotebookOutput) { op := &request.Operation{ - Name: opGetWorkGroup, + Name: opExportNotebook, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetWorkGroupInput{} + input = &ExportNotebookInput{} } - output = &GetWorkGroupOutput{} + output = &ExportNotebookOutput{} req = c.newRequest(op, input, output) return } -// GetWorkGroup API operation for Amazon Athena. +// ExportNotebook API operation for Amazon Athena. // -// Returns information about the workgroup with the specified name. +// Exports the specified notebook and its metadata. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Athena's -// API operation GetWorkGroup for usage and error information. +// API operation ExportNotebook for usage and error information. // // Returned Error Types: -// * InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. // -// * InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetWorkGroup -func (c *Athena) GetWorkGroup(input *GetWorkGroupInput) (*GetWorkGroupOutput, error) { - req, out := c.GetWorkGroupRequest(input) +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// - TooManyRequestsException +// Indicates that the request was throttled. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ExportNotebook +func (c *Athena) ExportNotebook(input *ExportNotebookInput) (*ExportNotebookOutput, error) { + req, out := c.ExportNotebookRequest(input) return out, req.Send() } -// GetWorkGroupWithContext is the same as GetWorkGroup with the addition of +// ExportNotebookWithContext is the same as ExportNotebook with the addition of // the ability to pass a context and additional request options. // -// See GetWorkGroup for details on how to use this API operation. +// See ExportNotebook for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. 
If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Athena) GetWorkGroupWithContext(ctx aws.Context, input *GetWorkGroupInput, opts ...request.Option) (*GetWorkGroupOutput, error) { - req, out := c.GetWorkGroupRequest(input) +func (c *Athena) ExportNotebookWithContext(ctx aws.Context, input *ExportNotebookInput, opts ...request.Option) (*ExportNotebookOutput, error) { + req, out := c.ExportNotebookRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListDataCatalogs = "ListDataCatalogs" +const opGetCalculationExecution = "GetCalculationExecution" -// ListDataCatalogsRequest generates a "aws/request.Request" representing the -// client's request for the ListDataCatalogs operation. The "output" return +// GetCalculationExecutionRequest generates a "aws/request.Request" representing the +// client's request for the GetCalculationExecution operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListDataCatalogs for more information on using the ListDataCatalogs +// See GetCalculationExecution for more information on using the GetCalculationExecution // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the GetCalculationExecutionRequest method. +// req, resp := client.GetCalculationExecutionRequest(params) // -// // Example sending a request using the ListDataCatalogsRequest method. -// req, resp := client.ListDataCatalogsRequest(params) +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListDataCatalogs -func (c *Athena) ListDataCatalogsRequest(input *ListDataCatalogsInput) (req *request.Request, output *ListDataCatalogsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetCalculationExecution +func (c *Athena) GetCalculationExecutionRequest(input *GetCalculationExecutionInput) (req *request.Request, output *GetCalculationExecutionOutput) { op := &request.Operation{ - Name: opListDataCatalogs, + Name: opGetCalculationExecution, HTTPMethod: "POST", HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, } if input == nil { - input = &ListDataCatalogsInput{} + input = &GetCalculationExecutionInput{} } - output = &ListDataCatalogsOutput{} + output = &GetCalculationExecutionOutput{} req = c.newRequest(op, input, output) return } -// ListDataCatalogs API operation for Amazon Athena. +// GetCalculationExecution API operation for Amazon Athena. // -// Lists the data catalogs in the current Amazon Web Services account. +// Describes a previously submitted calculation execution. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Athena's -// API operation ListDataCatalogs for usage and error information. +// API operation GetCalculationExecution for usage and error information. // // Returned Error Types: -// * InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. // -// * InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListDataCatalogs -func (c *Athena) ListDataCatalogs(input *ListDataCatalogsInput) (*ListDataCatalogsOutput, error) { - req, out := c.ListDataCatalogsRequest(input) +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// - ResourceNotFoundException +// A resource, such as a workgroup, was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetCalculationExecution +func (c *Athena) GetCalculationExecution(input *GetCalculationExecutionInput) (*GetCalculationExecutionOutput, error) { + req, out := c.GetCalculationExecutionRequest(input) return out, req.Send() } -// ListDataCatalogsWithContext is the same as ListDataCatalogs with the addition of +// GetCalculationExecutionWithContext is the same as GetCalculationExecution with the addition of // the ability to pass a context and additional request options. // -// See ListDataCatalogs for details on how to use this API operation. +// See GetCalculationExecution for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Athena) ListDataCatalogsWithContext(ctx aws.Context, input *ListDataCatalogsInput, opts ...request.Option) (*ListDataCatalogsOutput, error) { - req, out := c.ListDataCatalogsRequest(input) +func (c *Athena) GetCalculationExecutionWithContext(ctx aws.Context, input *GetCalculationExecutionInput, opts ...request.Option) (*GetCalculationExecutionOutput, error) { + req, out := c.GetCalculationExecutionRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListDataCatalogsPages iterates over the pages of a ListDataCatalogs operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. +const opGetCalculationExecutionCode = "GetCalculationExecutionCode" + +// GetCalculationExecutionCodeRequest generates a "aws/request.Request" representing the +// client's request for the GetCalculationExecutionCode operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// See ListDataCatalogs method for more information on how to use this operation. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
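//
// An illustrative sketch, not part of the generated file: a bounded variant
// of the GetCalculationExecution call above, using a context with a timeout;
// the execution ID is a placeholder.
//
//	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
//	defer cancel()
//	out, err := client.GetCalculationExecutionWithContext(ctx, &athena.GetCalculationExecutionInput{
//		CalculationExecutionId: aws.String("calculation-execution-id"), // placeholder
//	})
//	if err == nil {
//		fmt.Println(out)
//	}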
// -// Note: This operation can generate multiple requests to a service. +// See GetCalculationExecutionCode for more information on using the GetCalculationExecutionCode +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example iterating over at most 3 pages of a ListDataCatalogs operation. -// pageNum := 0 -// err := client.ListDataCatalogsPages(params, -// func(page *athena.ListDataCatalogsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) +// // Example sending a request using the GetCalculationExecutionCodeRequest method. +// req, resp := client.GetCalculationExecutionCodeRequest(params) // -func (c *Athena) ListDataCatalogsPages(input *ListDataCatalogsInput, fn func(*ListDataCatalogsOutput, bool) bool) error { - return c.ListDataCatalogsPagesWithContext(aws.BackgroundContext(), input, fn) +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetCalculationExecutionCode +func (c *Athena) GetCalculationExecutionCodeRequest(input *GetCalculationExecutionCodeInput) (req *request.Request, output *GetCalculationExecutionCodeOutput) { + op := &request.Operation{ + Name: opGetCalculationExecutionCode, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetCalculationExecutionCodeInput{} + } + + output = &GetCalculationExecutionCodeOutput{} + req = c.newRequest(op, input, output) + return } -// ListDataCatalogsPagesWithContext same as ListDataCatalogsPages except -// it takes a Context and allows setting request options on the pages. +// GetCalculationExecutionCode API operation for Amazon Athena. +// +// Retrieves the unencrypted code that was executed for the calculation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Athena's +// API operation GetCalculationExecutionCode for usage and error information. +// +// Returned Error Types: +// +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// - ResourceNotFoundException +// A resource, such as a workgroup, was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetCalculationExecutionCode +func (c *Athena) GetCalculationExecutionCode(input *GetCalculationExecutionCodeInput) (*GetCalculationExecutionCodeOutput, error) { + req, out := c.GetCalculationExecutionCodeRequest(input) + return out, req.Send() +} + +// GetCalculationExecutionCodeWithContext is the same as GetCalculationExecutionCode with the addition of +// the ability to pass a context and additional request options. +// +// See GetCalculationExecutionCode for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *Athena) ListDataCatalogsPagesWithContext(ctx aws.Context, input *ListDataCatalogsInput, fn func(*ListDataCatalogsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListDataCatalogsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListDataCatalogsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListDataCatalogsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() +func (c *Athena) GetCalculationExecutionCodeWithContext(ctx aws.Context, input *GetCalculationExecutionCodeInput, opts ...request.Option) (*GetCalculationExecutionCodeOutput, error) { + req, out := c.GetCalculationExecutionCodeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } -const opListDatabases = "ListDatabases" +const opGetCalculationExecutionStatus = "GetCalculationExecutionStatus" -// ListDatabasesRequest generates a "aws/request.Request" representing the -// client's request for the ListDatabases operation. The "output" return +// GetCalculationExecutionStatusRequest generates a "aws/request.Request" representing the +// client's request for the GetCalculationExecutionStatus operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListDatabases for more information on using the ListDatabases +// See GetCalculationExecutionStatus for more information on using the GetCalculationExecutionStatus // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the GetCalculationExecutionStatusRequest method. +// req, resp := client.GetCalculationExecutionStatusRequest(params) // -// // Example sending a request using the ListDatabasesRequest method. -// req, resp := client.ListDatabasesRequest(params) +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListDatabases -func (c *Athena) ListDatabasesRequest(input *ListDatabasesInput) (req *request.Request, output *ListDatabasesOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetCalculationExecutionStatus +func (c *Athena) GetCalculationExecutionStatusRequest(input *GetCalculationExecutionStatusInput) (req *request.Request, output *GetCalculationExecutionStatusOutput) { op := &request.Operation{ - Name: opListDatabases, + Name: opGetCalculationExecutionStatus, HTTPMethod: "POST", HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, } if input == nil { - input = &ListDatabasesInput{} + input = &GetCalculationExecutionStatusInput{} } - output = &ListDatabasesOutput{} + output = &GetCalculationExecutionStatusOutput{} req = c.newRequest(op, input, output) return } -// ListDatabases API operation for Amazon Athena. 
+// GetCalculationExecutionStatus API operation for Amazon Athena. // -// Lists the databases in the specified data catalog. +// Gets the status of a current calculation. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Athena's -// API operation ListDatabases for usage and error information. +// API operation GetCalculationExecutionStatus for usage and error information. // // Returned Error Types: -// * InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. // -// * InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. // -// * MetadataException -// An exception that Athena received when it called a custom metastore. Occurs -// if the error is not caused by user input (InvalidRequestException) or from -// the Athena platform (InternalServerException). For example, if a user-created -// Lambda function is missing permissions, the Lambda 4XX exception is returned -// in a MetadataException. +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListDatabases -func (c *Athena) ListDatabases(input *ListDatabasesInput) (*ListDatabasesOutput, error) { - req, out := c.ListDatabasesRequest(input) +// - ResourceNotFoundException +// A resource, such as a workgroup, was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetCalculationExecutionStatus +func (c *Athena) GetCalculationExecutionStatus(input *GetCalculationExecutionStatusInput) (*GetCalculationExecutionStatusOutput, error) { + req, out := c.GetCalculationExecutionStatusRequest(input) return out, req.Send() } -// ListDatabasesWithContext is the same as ListDatabases with the addition of +// GetCalculationExecutionStatusWithContext is the same as GetCalculationExecutionStatus with the addition of // the ability to pass a context and additional request options. // -// See ListDatabases for details on how to use this API operation. +// See GetCalculationExecutionStatus for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Athena) ListDatabasesWithContext(ctx aws.Context, input *ListDatabasesInput, opts ...request.Option) (*ListDatabasesOutput, error) { - req, out := c.ListDatabasesRequest(input) +func (c *Athena) GetCalculationExecutionStatusWithContext(ctx aws.Context, input *GetCalculationExecutionStatusInput, opts ...request.Option) (*GetCalculationExecutionStatusOutput, error) { + req, out := c.GetCalculationExecutionStatusRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListDatabasesPages iterates over the pages of a ListDatabases operation, -// calling the "fn" function with the response data for each page. 
To stop -// iterating, return false from the fn function. +const opGetCapacityAssignmentConfiguration = "GetCapacityAssignmentConfiguration" + +// GetCapacityAssignmentConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetCapacityAssignmentConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// See ListDatabases method for more information on how to use this operation. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListDatabases operation. -// pageNum := 0 -// err := client.ListDatabasesPages(params, -// func(page *athena.ListDatabasesOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *Athena) ListDatabasesPages(input *ListDatabasesInput, fn func(*ListDatabasesOutput, bool) bool) error { - return c.ListDatabasesPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListDatabasesPagesWithContext same as ListDatabasesPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) ListDatabasesPagesWithContext(ctx aws.Context, input *ListDatabasesInput, fn func(*ListDatabasesOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListDatabasesInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListDatabasesRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListDatabasesOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListEngineVersions = "ListEngineVersions" - -// ListEngineVersionsRequest generates a "aws/request.Request" representing the -// client's request for the ListEngineVersions operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListEngineVersions for more information on using the ListEngineVersions +// See GetCapacityAssignmentConfiguration for more information on using the GetCapacityAssignmentConfiguration // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the GetCapacityAssignmentConfigurationRequest method. +// req, resp := client.GetCapacityAssignmentConfigurationRequest(params) // -// // Example sending a request using the ListEngineVersionsRequest method. 
-// req, resp := client.ListEngineVersionsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListEngineVersions -func (c *Athena) ListEngineVersionsRequest(input *ListEngineVersionsInput) (req *request.Request, output *ListEngineVersionsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetCapacityAssignmentConfiguration +func (c *Athena) GetCapacityAssignmentConfigurationRequest(input *GetCapacityAssignmentConfigurationInput) (req *request.Request, output *GetCapacityAssignmentConfigurationOutput) { op := &request.Operation{ - Name: opListEngineVersions, + Name: opGetCapacityAssignmentConfiguration, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &ListEngineVersionsInput{} + input = &GetCapacityAssignmentConfigurationInput{} } - output = &ListEngineVersionsOutput{} + output = &GetCapacityAssignmentConfigurationOutput{} req = c.newRequest(op, input, output) return } -// ListEngineVersions API operation for Amazon Athena. +// GetCapacityAssignmentConfiguration API operation for Amazon Athena. // -// Returns a list of engine versions that are available to choose from, including -// the Auto option. +// Gets the capacity assignment configuration for a capacity reservation, if +// one exists. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Athena's -// API operation ListEngineVersions for usage and error information. +// API operation GetCapacityAssignmentConfiguration for usage and error information. // // Returned Error Types: -// * InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. // -// * InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListEngineVersions -func (c *Athena) ListEngineVersions(input *ListEngineVersionsInput) (*ListEngineVersionsOutput, error) { - req, out := c.ListEngineVersionsRequest(input) +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetCapacityAssignmentConfiguration +func (c *Athena) GetCapacityAssignmentConfiguration(input *GetCapacityAssignmentConfigurationInput) (*GetCapacityAssignmentConfigurationOutput, error) { + req, out := c.GetCapacityAssignmentConfigurationRequest(input) return out, req.Send() } -// ListEngineVersionsWithContext is the same as ListEngineVersions with the addition of +// GetCapacityAssignmentConfigurationWithContext is the same as GetCapacityAssignmentConfiguration with the addition of // the ability to pass a context and additional request options. // -// See ListEngineVersions for details on how to use this API operation. +// See GetCapacityAssignmentConfiguration for details on how to use this API operation. 
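//
// An illustrative sketch, not part of the generated file: fetching a capacity
// assignment configuration. The CapacityReservationName field is assumed from
// this SDK version; the reservation name is a placeholder.
//
//	out, err := client.GetCapacityAssignmentConfiguration(&athena.GetCapacityAssignmentConfigurationInput{
//		CapacityReservationName: aws.String("my-reservation"), // placeholder
//	})
//	if err == nil {
//		fmt.Println(out)
//	}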
// // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Athena) ListEngineVersionsWithContext(ctx aws.Context, input *ListEngineVersionsInput, opts ...request.Option) (*ListEngineVersionsOutput, error) { - req, out := c.ListEngineVersionsRequest(input) +func (c *Athena) GetCapacityAssignmentConfigurationWithContext(ctx aws.Context, input *GetCapacityAssignmentConfigurationInput, opts ...request.Option) (*GetCapacityAssignmentConfigurationOutput, error) { + req, out := c.GetCapacityAssignmentConfigurationRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListNamedQueries = "ListNamedQueries" +const opGetCapacityReservation = "GetCapacityReservation" -// ListNamedQueriesRequest generates a "aws/request.Request" representing the -// client's request for the ListNamedQueries operation. The "output" return +// GetCapacityReservationRequest generates a "aws/request.Request" representing the +// client's request for the GetCapacityReservation operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListNamedQueries for more information on using the ListNamedQueries +// See GetCapacityReservation for more information on using the GetCapacityReservation // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the GetCapacityReservationRequest method. +// req, resp := client.GetCapacityReservationRequest(params) // -// // Example sending a request using the ListNamedQueriesRequest method. -// req, resp := client.ListNamedQueriesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListNamedQueries -func (c *Athena) ListNamedQueriesRequest(input *ListNamedQueriesInput) (req *request.Request, output *ListNamedQueriesOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetCapacityReservation +func (c *Athena) GetCapacityReservationRequest(input *GetCapacityReservationInput) (req *request.Request, output *GetCapacityReservationOutput) { op := &request.Operation{ - Name: opListNamedQueries, + Name: opGetCapacityReservation, HTTPMethod: "POST", HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, } if input == nil { - input = &ListNamedQueriesInput{} + input = &GetCapacityReservationInput{} } - output = &ListNamedQueriesOutput{} + output = &GetCapacityReservationOutput{} req = c.newRequest(op, input, output) return } -// ListNamedQueries API operation for Amazon Athena. -// -// Provides a list of available query IDs only for queries saved in the specified -// workgroup. Requires that you have access to the specified workgroup. 
If a -// workgroup is not specified, lists the saved queries for the primary workgroup. +// GetCapacityReservation API operation for Amazon Athena. // -// For code samples using the Amazon Web Services SDK for Java, see Examples -// and Code Samples (http://docs.aws.amazon.com/athena/latest/ug/code-samples.html) -// in the Amazon Athena User Guide. +// Returns information about the capacity reservation with the specified name. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Athena's -// API operation ListNamedQueries for usage and error information. +// API operation GetCapacityReservation for usage and error information. // // Returned Error Types: -// * InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. // -// * InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListNamedQueries -func (c *Athena) ListNamedQueries(input *ListNamedQueriesInput) (*ListNamedQueriesOutput, error) { - req, out := c.ListNamedQueriesRequest(input) +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetCapacityReservation +func (c *Athena) GetCapacityReservation(input *GetCapacityReservationInput) (*GetCapacityReservationOutput, error) { + req, out := c.GetCapacityReservationRequest(input) return out, req.Send() } -// ListNamedQueriesWithContext is the same as ListNamedQueries with the addition of +// GetCapacityReservationWithContext is the same as GetCapacityReservation with the addition of // the ability to pass a context and additional request options. // -// See ListNamedQueries for details on how to use this API operation. +// See GetCapacityReservation for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Athena) ListNamedQueriesWithContext(ctx aws.Context, input *ListNamedQueriesInput, opts ...request.Option) (*ListNamedQueriesOutput, error) { - req, out := c.ListNamedQueriesRequest(input) +func (c *Athena) GetCapacityReservationWithContext(ctx aws.Context, input *GetCapacityReservationInput, opts ...request.Option) (*GetCapacityReservationOutput, error) { + req, out := c.GetCapacityReservationRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListNamedQueriesPages iterates over the pages of a ListNamedQueries operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListNamedQueries method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. 
-// -// // Example iterating over at most 3 pages of a ListNamedQueries operation. -// pageNum := 0 -// err := client.ListNamedQueriesPages(params, -// func(page *athena.ListNamedQueriesOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *Athena) ListNamedQueriesPages(input *ListNamedQueriesInput, fn func(*ListNamedQueriesOutput, bool) bool) error { - return c.ListNamedQueriesPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListNamedQueriesPagesWithContext same as ListNamedQueriesPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) ListNamedQueriesPagesWithContext(ctx aws.Context, input *ListNamedQueriesInput, fn func(*ListNamedQueriesOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListNamedQueriesInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListNamedQueriesRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListNamedQueriesOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListPreparedStatements = "ListPreparedStatements" +const opGetDataCatalog = "GetDataCatalog" -// ListPreparedStatementsRequest generates a "aws/request.Request" representing the -// client's request for the ListPreparedStatements operation. The "output" return +// GetDataCatalogRequest generates a "aws/request.Request" representing the +// client's request for the GetDataCatalog operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListPreparedStatements for more information on using the ListPreparedStatements +// See GetDataCatalog for more information on using the GetDataCatalog // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the GetDataCatalogRequest method. +// req, resp := client.GetDataCatalogRequest(params) // -// // Example sending a request using the ListPreparedStatementsRequest method. 
-// req, resp := client.ListPreparedStatementsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListPreparedStatements -func (c *Athena) ListPreparedStatementsRequest(input *ListPreparedStatementsInput) (req *request.Request, output *ListPreparedStatementsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetDataCatalog +func (c *Athena) GetDataCatalogRequest(input *GetDataCatalogInput) (req *request.Request, output *GetDataCatalogOutput) { op := &request.Operation{ - Name: opListPreparedStatements, + Name: opGetDataCatalog, HTTPMethod: "POST", HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, } if input == nil { - input = &ListPreparedStatementsInput{} + input = &GetDataCatalogInput{} } - output = &ListPreparedStatementsOutput{} + output = &GetDataCatalogOutput{} req = c.newRequest(op, input, output) return } -// ListPreparedStatements API operation for Amazon Athena. +// GetDataCatalog API operation for Amazon Athena. // -// Lists the prepared statements in the specfied workgroup. +// Returns the specified data catalog. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Athena's -// API operation ListPreparedStatements for usage and error information. +// API operation GetDataCatalog for usage and error information. // // Returned Error Types: -// * InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. // -// * InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListPreparedStatements -func (c *Athena) ListPreparedStatements(input *ListPreparedStatementsInput) (*ListPreparedStatementsOutput, error) { - req, out := c.ListPreparedStatementsRequest(input) +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetDataCatalog +func (c *Athena) GetDataCatalog(input *GetDataCatalogInput) (*GetDataCatalogOutput, error) { + req, out := c.GetDataCatalogRequest(input) return out, req.Send() } -// ListPreparedStatementsWithContext is the same as ListPreparedStatements with the addition of +// GetDataCatalogWithContext is the same as GetDataCatalog with the addition of // the ability to pass a context and additional request options. // -// See ListPreparedStatements for details on how to use this API operation. +// See GetDataCatalog for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Athena) ListPreparedStatementsWithContext(ctx aws.Context, input *ListPreparedStatementsInput, opts ...request.Option) (*ListPreparedStatementsOutput, error) { - req, out := c.ListPreparedStatementsRequest(input) +func (c *Athena) GetDataCatalogWithContext(ctx aws.Context, input *GetDataCatalogInput, opts ...request.Option) (*GetDataCatalogOutput, error) { + req, out := c.GetDataCatalogRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListPreparedStatementsPages iterates over the pages of a ListPreparedStatements operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListPreparedStatements method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListPreparedStatements operation. -// pageNum := 0 -// err := client.ListPreparedStatementsPages(params, -// func(page *athena.ListPreparedStatementsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *Athena) ListPreparedStatementsPages(input *ListPreparedStatementsInput, fn func(*ListPreparedStatementsOutput, bool) bool) error { - return c.ListPreparedStatementsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListPreparedStatementsPagesWithContext same as ListPreparedStatementsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) ListPreparedStatementsPagesWithContext(ctx aws.Context, input *ListPreparedStatementsInput, fn func(*ListPreparedStatementsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListPreparedStatementsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListPreparedStatementsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListPreparedStatementsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListQueryExecutions = "ListQueryExecutions" +const opGetDatabase = "GetDatabase" -// ListQueryExecutionsRequest generates a "aws/request.Request" representing the -// client's request for the ListQueryExecutions operation. The "output" return +// GetDatabaseRequest generates a "aws/request.Request" representing the +// client's request for the GetDatabase operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListQueryExecutions for more information on using the ListQueryExecutions +// See GetDatabase for more information on using the GetDatabase // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. 
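// A short sketch of the context-aware variant, assuming a hypothetical client
// "svc" plus the context, time, aws, and athena packages; a timed-out context
// cancels the in-flight request:
//
//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
//	defer cancel()
//	out, err := svc.GetDataCatalogWithContext(ctx, &athena.GetDataCatalogInput{
//	    Name: aws.String("AwsDataCatalog"), // commonly the default catalog name
//	})
//
// When err is nil, out.DataCatalog carries the catalog definition.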
// +// // Example sending a request using the GetDatabaseRequest method. +// req, resp := client.GetDatabaseRequest(params) // -// // Example sending a request using the ListQueryExecutionsRequest method. -// req, resp := client.ListQueryExecutionsRequest(params) +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListQueryExecutions -func (c *Athena) ListQueryExecutionsRequest(input *ListQueryExecutionsInput) (req *request.Request, output *ListQueryExecutionsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetDatabase +func (c *Athena) GetDatabaseRequest(input *GetDatabaseInput) (req *request.Request, output *GetDatabaseOutput) { op := &request.Operation{ - Name: opListQueryExecutions, + Name: opGetDatabase, HTTPMethod: "POST", HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, } if input == nil { - input = &ListQueryExecutionsInput{} + input = &GetDatabaseInput{} } - output = &ListQueryExecutionsOutput{} + output = &GetDatabaseOutput{} req = c.newRequest(op, input, output) return } -// ListQueryExecutions API operation for Amazon Athena. -// -// Provides a list of available query execution IDs for the queries in the specified -// workgroup. If a workgroup is not specified, returns a list of query execution -// IDs for the primary workgroup. Requires you to have access to the workgroup -// in which the queries ran. +// GetDatabase API operation for Amazon Athena. // -// For code samples using the Amazon Web Services SDK for Java, see Examples -// and Code Samples (http://docs.aws.amazon.com/athena/latest/ug/code-samples.html) -// in the Amazon Athena User Guide. +// Returns a database object for the specified database and data catalog. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Athena's -// API operation ListQueryExecutions for usage and error information. +// API operation GetDatabase for usage and error information. // // Returned Error Types: -// * InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. // -// * InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListQueryExecutions -func (c *Athena) ListQueryExecutions(input *ListQueryExecutionsInput) (*ListQueryExecutionsOutput, error) { - req, out := c.ListQueryExecutionsRequest(input) +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// - MetadataException +// An exception that Athena received when it called a custom metastore. Occurs +// if the error is not caused by user input (InvalidRequestException) or from +// the Athena platform (InternalServerException). 
For example, if a user-created +// Lambda function is missing permissions, the Lambda 4XX exception is returned +// in a MetadataException. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetDatabase +func (c *Athena) GetDatabase(input *GetDatabaseInput) (*GetDatabaseOutput, error) { + req, out := c.GetDatabaseRequest(input) return out, req.Send() } -// ListQueryExecutionsWithContext is the same as ListQueryExecutions with the addition of +// GetDatabaseWithContext is the same as GetDatabase with the addition of // the ability to pass a context and additional request options. // -// See ListQueryExecutions for details on how to use this API operation. +// See GetDatabase for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Athena) ListQueryExecutionsWithContext(ctx aws.Context, input *ListQueryExecutionsInput, opts ...request.Option) (*ListQueryExecutionsOutput, error) { - req, out := c.ListQueryExecutionsRequest(input) +func (c *Athena) GetDatabaseWithContext(ctx aws.Context, input *GetDatabaseInput, opts ...request.Option) (*GetDatabaseOutput, error) { + req, out := c.GetDatabaseRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListQueryExecutionsPages iterates over the pages of a ListQueryExecutions operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListQueryExecutions method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListQueryExecutions operation. -// pageNum := 0 -// err := client.ListQueryExecutionsPages(params, -// func(page *athena.ListQueryExecutionsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *Athena) ListQueryExecutionsPages(input *ListQueryExecutionsInput, fn func(*ListQueryExecutionsOutput, bool) bool) error { - return c.ListQueryExecutionsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListQueryExecutionsPagesWithContext same as ListQueryExecutionsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) ListQueryExecutionsPagesWithContext(ctx aws.Context, input *ListQueryExecutionsInput, fn func(*ListQueryExecutionsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListQueryExecutionsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListQueryExecutionsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListQueryExecutionsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListTableMetadata = "ListTableMetadata" +const opGetNamedQuery = "GetNamedQuery" -// ListTableMetadataRequest generates a "aws/request.Request" representing the -// client's request for the ListTableMetadata operation. The "output" return +// GetNamedQueryRequest generates a "aws/request.Request" representing the +// client's request for the GetNamedQuery operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListTableMetadata for more information on using the ListTableMetadata +// See GetNamedQuery for more information on using the GetNamedQuery // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the GetNamedQueryRequest method. +// req, resp := client.GetNamedQueryRequest(params) // -// // Example sending a request using the ListTableMetadataRequest method. -// req, resp := client.ListTableMetadataRequest(params) +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListTableMetadata -func (c *Athena) ListTableMetadataRequest(input *ListTableMetadataInput) (req *request.Request, output *ListTableMetadataOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetNamedQuery +func (c *Athena) GetNamedQueryRequest(input *GetNamedQueryInput) (req *request.Request, output *GetNamedQueryOutput) { op := &request.Operation{ - Name: opListTableMetadata, + Name: opGetNamedQuery, HTTPMethod: "POST", HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, } if input == nil { - input = &ListTableMetadataInput{} + input = &GetNamedQueryInput{} } - output = &ListTableMetadataOutput{} + output = &GetNamedQueryOutput{} req = c.newRequest(op, input, output) return } -// ListTableMetadata API operation for Amazon Athena. +// GetNamedQuery API operation for Amazon Athena. // -// Lists the metadata for the tables in the specified data catalog database. +// Returns information about a single query. Requires that you have access to +// the workgroup in which the query was saved. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Athena's -// API operation ListTableMetadata for usage and error information. +// API operation GetNamedQuery for usage and error information. // // Returned Error Types: -// * InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. // -// * InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. 
+// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. // -// * MetadataException -// An exception that Athena received when it called a custom metastore. Occurs -// if the error is not caused by user input (InvalidRequestException) or from -// the Athena platform (InternalServerException). For example, if a user-created -// Lambda function is missing permissions, the Lambda 4XX exception is returned -// in a MetadataException. +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListTableMetadata -func (c *Athena) ListTableMetadata(input *ListTableMetadataInput) (*ListTableMetadataOutput, error) { - req, out := c.ListTableMetadataRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetNamedQuery +func (c *Athena) GetNamedQuery(input *GetNamedQueryInput) (*GetNamedQueryOutput, error) { + req, out := c.GetNamedQueryRequest(input) return out, req.Send() } -// ListTableMetadataWithContext is the same as ListTableMetadata with the addition of +// GetNamedQueryWithContext is the same as GetNamedQuery with the addition of // the ability to pass a context and additional request options. // -// See ListTableMetadata for details on how to use this API operation. +// See GetNamedQuery for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Athena) ListTableMetadataWithContext(ctx aws.Context, input *ListTableMetadataInput, opts ...request.Option) (*ListTableMetadataOutput, error) { - req, out := c.ListTableMetadataRequest(input) +func (c *Athena) GetNamedQueryWithContext(ctx aws.Context, input *GetNamedQueryInput, opts ...request.Option) (*GetNamedQueryOutput, error) { + req, out := c.GetNamedQueryRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListTableMetadataPages iterates over the pages of a ListTableMetadata operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListTableMetadata method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListTableMetadata operation. -// pageNum := 0 -// err := client.ListTableMetadataPages(params, -// func(page *athena.ListTableMetadataOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *Athena) ListTableMetadataPages(input *ListTableMetadataInput, fn func(*ListTableMetadataOutput, bool) bool) error { - return c.ListTableMetadataPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListTableMetadataPagesWithContext same as ListTableMetadataPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) ListTableMetadataPagesWithContext(ctx aws.Context, input *ListTableMetadataInput, fn func(*ListTableMetadataOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListTableMetadataInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListTableMetadataRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListTableMetadataOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListTagsForResource = "ListTagsForResource" +const opGetNotebookMetadata = "GetNotebookMetadata" -// ListTagsForResourceRequest generates a "aws/request.Request" representing the -// client's request for the ListTagsForResource operation. The "output" return +// GetNotebookMetadataRequest generates a "aws/request.Request" representing the +// client's request for the GetNotebookMetadata operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListTagsForResource for more information on using the ListTagsForResource +// See GetNotebookMetadata for more information on using the GetNotebookMetadata // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the GetNotebookMetadataRequest method. +// req, resp := client.GetNotebookMetadataRequest(params) // -// // Example sending a request using the ListTagsForResourceRequest method. -// req, resp := client.ListTagsForResourceRequest(params) +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListTagsForResource -func (c *Athena) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetNotebookMetadata +func (c *Athena) GetNotebookMetadataRequest(input *GetNotebookMetadataInput) (req *request.Request, output *GetNotebookMetadataOutput) { op := &request.Operation{ - Name: opListTagsForResource, + Name: opGetNotebookMetadata, HTTPMethod: "POST", HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, } if input == nil { - input = &ListTagsForResourceInput{} + input = &GetNotebookMetadataInput{} } - output = &ListTagsForResourceOutput{} + output = &GetNotebookMetadataOutput{} req = c.newRequest(op, input, output) return } -// ListTagsForResource API operation for Amazon Athena. +// GetNotebookMetadata API operation for Amazon Athena. // -// Lists the tags associated with an Athena workgroup or data catalog resource. +// Retrieves notebook metadata for the specified notebook ID. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Athena's -// API operation ListTagsForResource for usage and error information. +// API operation GetNotebookMetadata for usage and error information. // // Returned Error Types: -// * InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. // -// * InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. // -// * ResourceNotFoundException -// A resource, such as a workgroup, was not found. +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListTagsForResource -func (c *Athena) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { - req, out := c.ListTagsForResourceRequest(input) +// - TooManyRequestsException +// Indicates that the request was throttled. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetNotebookMetadata +func (c *Athena) GetNotebookMetadata(input *GetNotebookMetadataInput) (*GetNotebookMetadataOutput, error) { + req, out := c.GetNotebookMetadataRequest(input) return out, req.Send() } -// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// GetNotebookMetadataWithContext is the same as GetNotebookMetadata with the addition of // the ability to pass a context and additional request options. // -// See ListTagsForResource for details on how to use this API operation. +// See GetNotebookMetadata for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Athena) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { - req, out := c.ListTagsForResourceRequest(input) +func (c *Athena) GetNotebookMetadataWithContext(ctx aws.Context, input *GetNotebookMetadataInput, opts ...request.Option) (*GetNotebookMetadataOutput, error) { + req, out := c.GetNotebookMetadataRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListTagsForResourcePages iterates over the pages of a ListTagsForResource operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListTagsForResource method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListTagsForResource operation. 
-// pageNum := 0 -// err := client.ListTagsForResourcePages(params, -// func(page *athena.ListTagsForResourceOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *Athena) ListTagsForResourcePages(input *ListTagsForResourceInput, fn func(*ListTagsForResourceOutput, bool) bool) error { - return c.ListTagsForResourcePagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListTagsForResourcePagesWithContext same as ListTagsForResourcePages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) ListTagsForResourcePagesWithContext(ctx aws.Context, input *ListTagsForResourceInput, fn func(*ListTagsForResourceOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListTagsForResourceInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListTagsForResourceRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListTagsForResourceOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListWorkGroups = "ListWorkGroups" +const opGetPreparedStatement = "GetPreparedStatement" -// ListWorkGroupsRequest generates a "aws/request.Request" representing the -// client's request for the ListWorkGroups operation. The "output" return +// GetPreparedStatementRequest generates a "aws/request.Request" representing the +// client's request for the GetPreparedStatement operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListWorkGroups for more information on using the ListWorkGroups +// See GetPreparedStatement for more information on using the GetPreparedStatement // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the GetPreparedStatementRequest method. +// req, resp := client.GetPreparedStatementRequest(params) // -// // Example sending a request using the ListWorkGroupsRequest method. 
-// req, resp := client.ListWorkGroupsRequest(params) +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListWorkGroups -func (c *Athena) ListWorkGroupsRequest(input *ListWorkGroupsInput) (req *request.Request, output *ListWorkGroupsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetPreparedStatement +func (c *Athena) GetPreparedStatementRequest(input *GetPreparedStatementInput) (req *request.Request, output *GetPreparedStatementOutput) { op := &request.Operation{ - Name: opListWorkGroups, + Name: opGetPreparedStatement, HTTPMethod: "POST", HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, } if input == nil { - input = &ListWorkGroupsInput{} + input = &GetPreparedStatementInput{} } - output = &ListWorkGroupsOutput{} + output = &GetPreparedStatementOutput{} req = c.newRequest(op, input, output) return } -// ListWorkGroups API operation for Amazon Athena. +// GetPreparedStatement API operation for Amazon Athena. // -// Lists available workgroups for the account. +// Retrieves the prepared statement with the specified name from the specified +// workgroup. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Athena's -// API operation ListWorkGroups for usage and error information. +// API operation GetPreparedStatement for usage and error information. // // Returned Error Types: -// * InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. // -// * InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListWorkGroups -func (c *Athena) ListWorkGroups(input *ListWorkGroupsInput) (*ListWorkGroupsOutput, error) { - req, out := c.ListWorkGroupsRequest(input) +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// - ResourceNotFoundException +// A resource, such as a workgroup, was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetPreparedStatement +func (c *Athena) GetPreparedStatement(input *GetPreparedStatementInput) (*GetPreparedStatementOutput, error) { + req, out := c.GetPreparedStatementRequest(input) return out, req.Send() } -// ListWorkGroupsWithContext is the same as ListWorkGroups with the addition of +// GetPreparedStatementWithContext is the same as GetPreparedStatement with the addition of // the ability to pass a context and additional request options. // -// See ListWorkGroups for details on how to use this API operation. +// See GetPreparedStatement for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. 
In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Athena) ListWorkGroupsWithContext(ctx aws.Context, input *ListWorkGroupsInput, opts ...request.Option) (*ListWorkGroupsOutput, error) { - req, out := c.ListWorkGroupsRequest(input) +func (c *Athena) GetPreparedStatementWithContext(ctx aws.Context, input *GetPreparedStatementInput, opts ...request.Option) (*GetPreparedStatementOutput, error) { + req, out := c.GetPreparedStatementRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListWorkGroupsPages iterates over the pages of a ListWorkGroups operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListWorkGroups method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListWorkGroups operation. -// pageNum := 0 -// err := client.ListWorkGroupsPages(params, -// func(page *athena.ListWorkGroupsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *Athena) ListWorkGroupsPages(input *ListWorkGroupsInput, fn func(*ListWorkGroupsOutput, bool) bool) error { - return c.ListWorkGroupsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListWorkGroupsPagesWithContext same as ListWorkGroupsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Athena) ListWorkGroupsPagesWithContext(ctx aws.Context, input *ListWorkGroupsInput, fn func(*ListWorkGroupsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListWorkGroupsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListWorkGroupsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListWorkGroupsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opStartQueryExecution = "StartQueryExecution" +const opGetQueryExecution = "GetQueryExecution" -// StartQueryExecutionRequest generates a "aws/request.Request" representing the -// client's request for the StartQueryExecution operation. The "output" return +// GetQueryExecutionRequest generates a "aws/request.Request" representing the +// client's request for the GetQueryExecution operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See StartQueryExecution for more information on using the StartQueryExecution +// See GetQueryExecution for more information on using the GetQueryExecution // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. 
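// As noted above, the *Request form lets you adjust the request before Send.
// A hedged sketch ("svc", "id", and the header name are hypothetical):
//
//	req, resp := svc.GetQueryExecutionRequest(&athena.GetQueryExecutionInput{
//	    QueryExecutionId: aws.String(id),
//	})
//	req.HTTPRequest.Header.Set("X-Custom-Audit", "example") // injected header
//	if err := req.Send(); err == nil {
//	    fmt.Println(resp.QueryExecution.Status) // resp is filled after Send
//	}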
// +// // Example sending a request using the GetQueryExecutionRequest method. +// req, resp := client.GetQueryExecutionRequest(params) // -// // Example sending a request using the StartQueryExecutionRequest method. -// req, resp := client.StartQueryExecutionRequest(params) +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/StartQueryExecution -func (c *Athena) StartQueryExecutionRequest(input *StartQueryExecutionInput) (req *request.Request, output *StartQueryExecutionOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetQueryExecution +func (c *Athena) GetQueryExecutionRequest(input *GetQueryExecutionInput) (req *request.Request, output *GetQueryExecutionOutput) { op := &request.Operation{ - Name: opStartQueryExecution, + Name: opGetQueryExecution, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &StartQueryExecutionInput{} + input = &GetQueryExecutionInput{} } - output = &StartQueryExecutionOutput{} + output = &GetQueryExecutionOutput{} req = c.newRequest(op, input, output) return } -// StartQueryExecution API operation for Amazon Athena. +// GetQueryExecution API operation for Amazon Athena. // -// Runs the SQL query statements contained in the Query. Requires you to have -// access to the workgroup in which the query ran. Running queries against an -// external catalog requires GetDataCatalog permission to the catalog. For code -// samples using the Amazon Web Services SDK for Java, see Examples and Code -// Samples (http://docs.aws.amazon.com/athena/latest/ug/code-samples.html) in -// the Amazon Athena User Guide. +// Returns information about a single execution of a query if you have access +// to the workgroup in which the query ran. Each time a query executes, information +// about the query execution is saved with a unique ID. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Athena's -// API operation StartQueryExecution for usage and error information. +// API operation GetQueryExecution for usage and error information. // // Returned Error Types: -// * InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. // -// * InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. // -// * TooManyRequestsException -// Indicates that the request was throttled. +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. 
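// Since state transitions surface only through GetQueryExecution, callers
// typically poll it after StartQueryExecution. A rough sketch inside a
// hypothetical wait function, not an SDK helper; "svc" and "id" are assumed,
// and the terminal states come from the package constants:
//
//	for {
//	    out, err := svc.GetQueryExecution(&athena.GetQueryExecutionInput{
//	        QueryExecutionId: aws.String(id),
//	    })
//	    if err != nil {
//	        return err
//	    }
//	    switch aws.StringValue(out.QueryExecution.Status.State) {
//	    case athena.QueryExecutionStateSucceeded,
//	        athena.QueryExecutionStateFailed,
//	        athena.QueryExecutionStateCancelled:
//	        return nil // terminal state reached
//	    }
//	    time.Sleep(2 * time.Second) // simple fixed backoff
//	}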
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/StartQueryExecution -func (c *Athena) StartQueryExecution(input *StartQueryExecutionInput) (*StartQueryExecutionOutput, error) { - req, out := c.StartQueryExecutionRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetQueryExecution +func (c *Athena) GetQueryExecution(input *GetQueryExecutionInput) (*GetQueryExecutionOutput, error) { + req, out := c.GetQueryExecutionRequest(input) return out, req.Send() } -// StartQueryExecutionWithContext is the same as StartQueryExecution with the addition of +// GetQueryExecutionWithContext is the same as GetQueryExecution with the addition of // the ability to pass a context and additional request options. // -// See StartQueryExecution for details on how to use this API operation. +// See GetQueryExecution for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Athena) StartQueryExecutionWithContext(ctx aws.Context, input *StartQueryExecutionInput, opts ...request.Option) (*StartQueryExecutionOutput, error) { - req, out := c.StartQueryExecutionRequest(input) +func (c *Athena) GetQueryExecutionWithContext(ctx aws.Context, input *GetQueryExecutionInput, opts ...request.Option) (*GetQueryExecutionOutput, error) { + req, out := c.GetQueryExecutionRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opStopQueryExecution = "StopQueryExecution" +const opGetQueryResults = "GetQueryResults" -// StopQueryExecutionRequest generates a "aws/request.Request" representing the -// client's request for the StopQueryExecution operation. The "output" return +// GetQueryResultsRequest generates a "aws/request.Request" representing the +// client's request for the GetQueryResults operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See StopQueryExecution for more information on using the StopQueryExecution +// See GetQueryResults for more information on using the GetQueryResults // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the GetQueryResultsRequest method. +// req, resp := client.GetQueryResultsRequest(params) // -// // Example sending a request using the StopQueryExecutionRequest method. 
-// req, resp := client.StopQueryExecutionRequest(params) +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/StopQueryExecution -func (c *Athena) StopQueryExecutionRequest(input *StopQueryExecutionInput) (req *request.Request, output *StopQueryExecutionOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetQueryResults +func (c *Athena) GetQueryResultsRequest(input *GetQueryResultsInput) (req *request.Request, output *GetQueryResultsOutput) { op := &request.Operation{ - Name: opStopQueryExecution, + Name: opGetQueryResults, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &StopQueryExecutionInput{} + input = &GetQueryResultsInput{} } - output = &StopQueryExecutionOutput{} + output = &GetQueryResultsOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// StopQueryExecution API operation for Amazon Athena. +// GetQueryResults API operation for Amazon Athena. // -// Stops a query execution. Requires you to have access to the workgroup in -// which the query ran. +// Streams the results of a single query execution specified by QueryExecutionId +// from the Athena query results location in Amazon S3. For more information, +// see Working with query results, recent queries, and output files (https://docs.aws.amazon.com/athena/latest/ug/querying.html) +// in the Amazon Athena User Guide. This request does not execute the query +// but returns results. Use StartQueryExecution to run a query. +// +// To stream query results successfully, the IAM principal with permission to +// call GetQueryResults also must have permissions to the Amazon S3 GetObject +// action for the Athena query results location. // -// For code samples using the Amazon Web Services SDK for Java, see Examples -// and Code Samples (http://docs.aws.amazon.com/athena/latest/ug/code-samples.html) -// in the Amazon Athena User Guide. +// IAM principals with permission to the Amazon S3 GetObject action for the +// query results location are able to retrieve query results from Amazon S3 +// even if permission to the GetQueryResults action is denied. To restrict user +// or role access, ensure that Amazon S3 permissions to the Athena query location +// are denied. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Athena's -// API operation StopQueryExecution for usage and error information. +// API operation GetQueryResults for usage and error information. // // Returned Error Types: -// * InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. // -// * InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/StopQueryExecution -func (c *Athena) StopQueryExecution(input *StopQueryExecutionInput) (*StopQueryExecutionOutput, error) { - req, out := c.StopQueryExecutionRequest(input) +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// - TooManyRequestsException +// Indicates that the request was throttled. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetQueryResults +func (c *Athena) GetQueryResults(input *GetQueryResultsInput) (*GetQueryResultsOutput, error) { + req, out := c.GetQueryResultsRequest(input) return out, req.Send() } -// StopQueryExecutionWithContext is the same as StopQueryExecution with the addition of +// GetQueryResultsWithContext is the same as GetQueryResults with the addition of // the ability to pass a context and additional request options. // -// See StopQueryExecution for details on how to use this API operation. +// See GetQueryResults for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Athena) StopQueryExecutionWithContext(ctx aws.Context, input *StopQueryExecutionInput, opts ...request.Option) (*StopQueryExecutionOutput, error) { - req, out := c.StopQueryExecutionRequest(input) +func (c *Athena) GetQueryResultsWithContext(ctx aws.Context, input *GetQueryResultsInput, opts ...request.Option) (*GetQueryResultsOutput, error) { + req, out := c.GetQueryResultsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opTagResource = "TagResource" +// GetQueryResultsPages iterates over the pages of a GetQueryResults operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetQueryResults method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetQueryResults operation. +// pageNum := 0 +// err := client.GetQueryResultsPages(params, +// func(page *athena.GetQueryResultsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *Athena) GetQueryResultsPages(input *GetQueryResultsInput, fn func(*GetQueryResultsOutput, bool) bool) error { + return c.GetQueryResultsPagesWithContext(aws.BackgroundContext(), input, fn) +} -// TagResourceRequest generates a "aws/request.Request" representing the -// client's request for the TagResource operation. The "output" return +// GetQueryResultsPagesWithContext same as GetQueryResultsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
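// For illustration, a hedged sketch of draining every page with the helper
// documented here; "svc" and "id" are hypothetical:
//
//	var rows []*athena.Row
//	err := svc.GetQueryResultsPages(
//	    &athena.GetQueryResultsInput{QueryExecutionId: aws.String(id)},
//	    func(page *athena.GetQueryResultsOutput, lastPage bool) bool {
//	        rows = append(rows, page.ResultSet.Rows...)
//	        return true // keep paging; iteration stops after lastPage
//	    })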
+func (c *Athena) GetQueryResultsPagesWithContext(ctx aws.Context, input *GetQueryResultsInput, fn func(*GetQueryResultsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetQueryResultsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetQueryResultsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetQueryResultsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opGetQueryRuntimeStatistics = "GetQueryRuntimeStatistics" + +// GetQueryRuntimeStatisticsRequest generates a "aws/request.Request" representing the +// client's request for the GetQueryRuntimeStatistics operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See TagResource for more information on using the TagResource +// See GetQueryRuntimeStatistics for more information on using the GetQueryRuntimeStatistics // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the GetQueryRuntimeStatisticsRequest method. +// req, resp := client.GetQueryRuntimeStatisticsRequest(params) // -// // Example sending a request using the TagResourceRequest method. -// req, resp := client.TagResourceRequest(params) +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/TagResource -func (c *Athena) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetQueryRuntimeStatistics +func (c *Athena) GetQueryRuntimeStatisticsRequest(input *GetQueryRuntimeStatisticsInput) (req *request.Request, output *GetQueryRuntimeStatisticsOutput) { op := &request.Operation{ - Name: opTagResource, + Name: opGetQueryRuntimeStatistics, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &TagResourceInput{} + input = &GetQueryRuntimeStatisticsInput{} } - output = &TagResourceOutput{} + output = &GetQueryRuntimeStatisticsOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// TagResource API operation for Amazon Athena. +// GetQueryRuntimeStatistics API operation for Amazon Athena. // -// Adds one or more tags to an Athena resource. A tag is a label that you assign -// to a resource. In Athena, a resource can be a workgroup or data catalog. -// Each tag consists of a key and an optional value, both of which you define. -// For example, you can use tags to categorize Athena workgroups or data catalogs -// by purpose, owner, or environment. Use a consistent set of tag keys to make -// it easier to search and filter workgroups or data catalogs in your account. -// For best practices, see Tagging Best Practices (https://aws.amazon.com/answers/account-management/aws-tagging-strategies/). 
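// The Pages helpers above wrap request.Pagination; the equivalent manual loop,
// sketched with hypothetical "svc", "id", and "process" names, carries
// NextToken forward itself:
//
//	in := &athena.GetQueryResultsInput{QueryExecutionId: aws.String(id)}
//	for {
//	    out, err := svc.GetQueryResults(in)
//	    if err != nil {
//	        return err
//	    }
//	    process(out.ResultSet) // hypothetical row handler
//	    if out.NextToken == nil {
//	        break // no further pages
//	    }
//	    in.NextToken = out.NextToken
//	}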
-// Tag keys can be from 1 to 128 UTF-8 Unicode characters, and tag values can -// be from 0 to 256 UTF-8 Unicode characters. Tags can use letters and numbers -// representable in UTF-8, and the following characters: + - = . _ : / @. Tag -// keys and values are case-sensitive. Tag keys must be unique per resource. -// If you specify more than one tag, separate them by commas. +// Returns query execution runtime statistics related to a single execution +// of a query if you have access to the workgroup in which the query ran. Query +// execution runtime statistics are returned only when QueryExecutionStatus$State +// is in a SUCCEEDED or FAILED state. Stage-level input and output row count +// and data size statistics are not shown when a query has row-level filters +// defined in Lake Formation. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Athena's -// API operation TagResource for usage and error information. +// API operation GetQueryRuntimeStatistics for usage and error information. // // Returned Error Types: -// * InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. // -// * InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. // -// * ResourceNotFoundException -// A resource, such as a workgroup, was not found. +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/TagResource -func (c *Athena) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { - req, out := c.TagResourceRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetQueryRuntimeStatistics +func (c *Athena) GetQueryRuntimeStatistics(input *GetQueryRuntimeStatisticsInput) (*GetQueryRuntimeStatisticsOutput, error) { + req, out := c.GetQueryRuntimeStatisticsRequest(input) return out, req.Send() } -// TagResourceWithContext is the same as TagResource with the addition of +// GetQueryRuntimeStatisticsWithContext is the same as GetQueryRuntimeStatistics with the addition of // the ability to pass a context and additional request options. // -// See TagResource for details on how to use this API operation. +// See GetQueryRuntimeStatistics for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Athena) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { - req, out := c.TagResourceRequest(input) +func (c *Athena) GetQueryRuntimeStatisticsWithContext(ctx aws.Context, input *GetQueryRuntimeStatisticsInput, opts ...request.Option) (*GetQueryRuntimeStatisticsOutput, error) { + req, out := c.GetQueryRuntimeStatisticsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } -const opUntagResource = "UntagResource" +const opGetSession = "GetSession" -// UntagResourceRequest generates a "aws/request.Request" representing the -// client's request for the UntagResource operation. The "output" return +// GetSessionRequest generates a "aws/request.Request" representing the +// client's request for the GetSession operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UntagResource for more information on using the UntagResource +// See GetSession for more information on using the GetSession // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the GetSessionRequest method. +// req, resp := client.GetSessionRequest(params) // -// // Example sending a request using the UntagResourceRequest method. -// req, resp := client.UntagResourceRequest(params) +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UntagResource -func (c *Athena) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetSession +func (c *Athena) GetSessionRequest(input *GetSessionInput) (req *request.Request, output *GetSessionOutput) { op := &request.Operation{ - Name: opUntagResource, + Name: opGetSession, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UntagResourceInput{} + input = &GetSessionInput{} } - output = &UntagResourceOutput{} + output = &GetSessionOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// UntagResource API operation for Amazon Athena. +// GetSession API operation for Amazon Athena. // -// Removes one or more tags from a data catalog or workgroup resource. +// Gets the full details of a previously created session, including the session +// status and configuration. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Athena's -// API operation UntagResource for usage and error information. +// API operation GetSession for usage and error information. // // Returned Error Types: -// * InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. // -// * InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. // -// * ResourceNotFoundException -// A resource, such as a workgroup, was not found. +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. 
For example, +// a required parameter may be missing or out of range. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UntagResource -func (c *Athena) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { - req, out := c.UntagResourceRequest(input) +// - ResourceNotFoundException +// A resource, such as a workgroup, was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetSession +func (c *Athena) GetSession(input *GetSessionInput) (*GetSessionOutput, error) { + req, out := c.GetSessionRequest(input) return out, req.Send() } -// UntagResourceWithContext is the same as UntagResource with the addition of +// GetSessionWithContext is the same as GetSession with the addition of // the ability to pass a context and additional request options. // -// See UntagResource for details on how to use this API operation. +// See GetSession for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Athena) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { - req, out := c.UntagResourceRequest(input) +func (c *Athena) GetSessionWithContext(ctx aws.Context, input *GetSessionInput, opts ...request.Option) (*GetSessionOutput, error) { + req, out := c.GetSessionRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateDataCatalog = "UpdateDataCatalog" +const opGetSessionStatus = "GetSessionStatus" -// UpdateDataCatalogRequest generates a "aws/request.Request" representing the -// client's request for the UpdateDataCatalog operation. The "output" return +// GetSessionStatusRequest generates a "aws/request.Request" representing the +// client's request for the GetSessionStatus operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateDataCatalog for more information on using the UpdateDataCatalog +// See GetSessionStatus for more information on using the GetSessionStatus // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the GetSessionStatusRequest method. +// req, resp := client.GetSessionStatusRequest(params) // -// // Example sending a request using the UpdateDataCatalogRequest method. 
-// req, resp := client.UpdateDataCatalogRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UpdateDataCatalog -func (c *Athena) UpdateDataCatalogRequest(input *UpdateDataCatalogInput) (req *request.Request, output *UpdateDataCatalogOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetSessionStatus +func (c *Athena) GetSessionStatusRequest(input *GetSessionStatusInput) (req *request.Request, output *GetSessionStatusOutput) { op := &request.Operation{ - Name: opUpdateDataCatalog, + Name: opGetSessionStatus, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UpdateDataCatalogInput{} + input = &GetSessionStatusInput{} } - output = &UpdateDataCatalogOutput{} + output = &GetSessionStatusOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// UpdateDataCatalog API operation for Amazon Athena. +// GetSessionStatus API operation for Amazon Athena. // -// Updates the data catalog that has the specified name. +// Gets the current status of a session. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Athena's -// API operation UpdateDataCatalog for usage and error information. +// API operation GetSessionStatus for usage and error information. // // Returned Error Types: -// * InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. // -// * InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UpdateDataCatalog -func (c *Athena) UpdateDataCatalog(input *UpdateDataCatalogInput) (*UpdateDataCatalogOutput, error) { - req, out := c.UpdateDataCatalogRequest(input) +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// - ResourceNotFoundException +// A resource, such as a workgroup, was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetSessionStatus +func (c *Athena) GetSessionStatus(input *GetSessionStatusInput) (*GetSessionStatusOutput, error) { + req, out := c.GetSessionStatusRequest(input) return out, req.Send() } -// UpdateDataCatalogWithContext is the same as UpdateDataCatalog with the addition of +// GetSessionStatusWithContext is the same as GetSessionStatus with the addition of // the ability to pass a context and additional request options. // -// See UpdateDataCatalog for details on how to use this API operation. +// See GetSessionStatus for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Athena) UpdateDataCatalogWithContext(ctx aws.Context, input *UpdateDataCatalogInput, opts ...request.Option) (*UpdateDataCatalogOutput, error) { - req, out := c.UpdateDataCatalogRequest(input) +func (c *Athena) GetSessionStatusWithContext(ctx aws.Context, input *GetSessionStatusInput, opts ...request.Option) (*GetSessionStatusOutput, error) { + req, out := c.GetSessionStatusRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdatePreparedStatement = "UpdatePreparedStatement" +const opGetTableMetadata = "GetTableMetadata" -// UpdatePreparedStatementRequest generates a "aws/request.Request" representing the -// client's request for the UpdatePreparedStatement operation. The "output" return +// GetTableMetadataRequest generates a "aws/request.Request" representing the +// client's request for the GetTableMetadata operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdatePreparedStatement for more information on using the UpdatePreparedStatement +// See GetTableMetadata for more information on using the GetTableMetadata // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the GetTableMetadataRequest method. +// req, resp := client.GetTableMetadataRequest(params) // -// // Example sending a request using the UpdatePreparedStatementRequest method. -// req, resp := client.UpdatePreparedStatementRequest(params) +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UpdatePreparedStatement -func (c *Athena) UpdatePreparedStatementRequest(input *UpdatePreparedStatementInput) (req *request.Request, output *UpdatePreparedStatementOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetTableMetadata +func (c *Athena) GetTableMetadataRequest(input *GetTableMetadataInput) (req *request.Request, output *GetTableMetadataOutput) { op := &request.Operation{ - Name: opUpdatePreparedStatement, + Name: opGetTableMetadata, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UpdatePreparedStatementInput{} + input = &GetTableMetadataInput{} } - output = &UpdatePreparedStatementOutput{} + output = &GetTableMetadataOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// UpdatePreparedStatement API operation for Amazon Athena. +// GetTableMetadata API operation for Amazon Athena. // -// Updates a prepared statement. +// Returns table metadata for the specified catalog, database, and table. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Athena's -// API operation UpdatePreparedStatement for usage and error information. 
+// API operation GetTableMetadata for usage and error information. // // Returned Error Types: -// * InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. // -// * InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. // -// * ResourceNotFoundException -// A resource, such as a workgroup, was not found. +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UpdatePreparedStatement -func (c *Athena) UpdatePreparedStatement(input *UpdatePreparedStatementInput) (*UpdatePreparedStatementOutput, error) { - req, out := c.UpdatePreparedStatementRequest(input) +// - MetadataException +// An exception that Athena received when it called a custom metastore. Occurs +// if the error is not caused by user input (InvalidRequestException) or from +// the Athena platform (InternalServerException). For example, if a user-created +// Lambda function is missing permissions, the Lambda 4XX exception is returned +// in a MetadataException. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetTableMetadata +func (c *Athena) GetTableMetadata(input *GetTableMetadataInput) (*GetTableMetadataOutput, error) { + req, out := c.GetTableMetadataRequest(input) return out, req.Send() } -// UpdatePreparedStatementWithContext is the same as UpdatePreparedStatement with the addition of +// GetTableMetadataWithContext is the same as GetTableMetadata with the addition of // the ability to pass a context and additional request options. // -// See UpdatePreparedStatement for details on how to use this API operation. +// See GetTableMetadata for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Athena) UpdatePreparedStatementWithContext(ctx aws.Context, input *UpdatePreparedStatementInput, opts ...request.Option) (*UpdatePreparedStatementOutput, error) { - req, out := c.UpdatePreparedStatementRequest(input) +func (c *Athena) GetTableMetadataWithContext(ctx aws.Context, input *GetTableMetadataInput, opts ...request.Option) (*GetTableMetadataOutput, error) { + req, out := c.GetTableMetadataRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateWorkGroup = "UpdateWorkGroup" +const opGetWorkGroup = "GetWorkGroup" -// UpdateWorkGroupRequest generates a "aws/request.Request" representing the -// client's request for the UpdateWorkGroup operation. The "output" return +// GetWorkGroupRequest generates a "aws/request.Request" representing the +// client's request for the GetWorkGroup operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
// -// See UpdateWorkGroup for more information on using the UpdateWorkGroup +// See GetWorkGroup for more information on using the GetWorkGroup // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the GetWorkGroupRequest method. +// req, resp := client.GetWorkGroupRequest(params) // -// // Example sending a request using the UpdateWorkGroupRequest method. -// req, resp := client.UpdateWorkGroupRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UpdateWorkGroup -func (c *Athena) UpdateWorkGroupRequest(input *UpdateWorkGroupInput) (req *request.Request, output *UpdateWorkGroupOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetWorkGroup +func (c *Athena) GetWorkGroupRequest(input *GetWorkGroupInput) (req *request.Request, output *GetWorkGroupOutput) { op := &request.Operation{ - Name: opUpdateWorkGroup, + Name: opGetWorkGroup, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UpdateWorkGroupInput{} + input = &GetWorkGroupInput{} } - output = &UpdateWorkGroupOutput{} + output = &GetWorkGroupOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// UpdateWorkGroup API operation for Amazon Athena. +// GetWorkGroup API operation for Amazon Athena. // -// Updates the workgroup with the specified name. The workgroup's name cannot -// be changed. +// Returns information about the workgroup with the specified name. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Athena's -// API operation UpdateWorkGroup for usage and error information. +// API operation GetWorkGroup for usage and error information. // // Returned Error Types: -// * InternalServerException -// Indicates a platform issue, which may be due to a transient condition or -// outage. // -// * InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UpdateWorkGroup -func (c *Athena) UpdateWorkGroup(input *UpdateWorkGroupInput) (*UpdateWorkGroupOutput, error) { - req, out := c.UpdateWorkGroupRequest(input) +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetWorkGroup +func (c *Athena) GetWorkGroup(input *GetWorkGroupInput) (*GetWorkGroupOutput, error) { + req, out := c.GetWorkGroupRequest(input) return out, req.Send() } -// UpdateWorkGroupWithContext is the same as UpdateWorkGroup with the addition of +// GetWorkGroupWithContext is the same as GetWorkGroup with the addition of // the ability to pass a context and additional request options. // -// See UpdateWorkGroup for details on how to use this API operation. +// See GetWorkGroup for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Athena) UpdateWorkGroupWithContext(ctx aws.Context, input *UpdateWorkGroupInput, opts ...request.Option) (*UpdateWorkGroupOutput, error) { - req, out := c.UpdateWorkGroupRequest(input) +func (c *Athena) GetWorkGroupWithContext(ctx aws.Context, input *GetWorkGroupInput, opts ...request.Option) (*GetWorkGroupOutput, error) { + req, out := c.GetWorkGroupRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -type BatchGetNamedQueryInput struct { - _ struct{} `type:"structure"` - - // An array of query IDs. - // - // NamedQueryIds is a required field - NamedQueryIds []*string `min:"1" type:"list" required:"true"` -} +const opImportNotebook = "ImportNotebook" -// String returns the string representation. +// ImportNotebookRequest generates a "aws/request.Request" representing the +// client's request for the ImportNotebook operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BatchGetNamedQueryInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BatchGetNamedQueryInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *BatchGetNamedQueryInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BatchGetNamedQueryInput"} - if s.NamedQueryIds == nil { - invalidParams.Add(request.NewErrParamRequired("NamedQueryIds")) - } - if s.NamedQueryIds != nil && len(s.NamedQueryIds) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NamedQueryIds", 1)) +// See ImportNotebook for more information on using the ImportNotebook +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ImportNotebookRequest method. 
+// req, resp := client.ImportNotebookRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ImportNotebook +func (c *Athena) ImportNotebookRequest(input *ImportNotebookInput) (req *request.Request, output *ImportNotebookOutput) { + op := &request.Operation{ + Name: opImportNotebook, + HTTPMethod: "POST", + HTTPPath: "/", } - if invalidParams.Len() > 0 { - return invalidParams + if input == nil { + input = &ImportNotebookInput{} } - return nil -} - -// SetNamedQueryIds sets the NamedQueryIds field's value. -func (s *BatchGetNamedQueryInput) SetNamedQueryIds(v []*string) *BatchGetNamedQueryInput { - s.NamedQueryIds = v - return s -} - -type BatchGetNamedQueryOutput struct { - _ struct{} `type:"structure"` - // Information about the named query IDs submitted. - NamedQueries []*NamedQuery `type:"list"` - - // Information about provided query IDs. - UnprocessedNamedQueryIds []*UnprocessedNamedQueryId `type:"list"` + output = &ImportNotebookOutput{} + req = c.newRequest(op, input, output) + return } -// String returns the string representation. +// ImportNotebook API operation for Amazon Athena. // -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BatchGetNamedQueryOutput) String() string { - return awsutil.Prettify(s) +// Imports a single ipynb file to a Spark enabled workgroup. To import the notebook, +// the request must specify a value for either Payload or NoteBookS3LocationUri. +// If neither is specified or both are specified, an InvalidRequestException +// occurs. The maximum file size that can be imported is 10 megabytes. If an +// ipynb file with the same name already exists in the workgroup, throws an +// error. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Athena's +// API operation ImportNotebook for usage and error information. +// +// Returned Error Types: +// +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// - TooManyRequestsException +// Indicates that the request was throttled. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ImportNotebook +func (c *Athena) ImportNotebook(input *ImportNotebookInput) (*ImportNotebookOutput, error) { + req, out := c.ImportNotebookRequest(input) + return out, req.Send() } -// GoString returns the string representation. +// ImportNotebookWithContext is the same as ImportNotebook with the addition of +// the ability to pass a context and additional request options. // -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s BatchGetNamedQueryOutput) GoString() string { - return s.String() +// See ImportNotebook for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Athena) ImportNotebookWithContext(ctx aws.Context, input *ImportNotebookInput, opts ...request.Option) (*ImportNotebookOutput, error) { + req, out := c.ImportNotebookRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } -// SetNamedQueries sets the NamedQueries field's value. -func (s *BatchGetNamedQueryOutput) SetNamedQueries(v []*NamedQuery) *BatchGetNamedQueryOutput { - s.NamedQueries = v - return s -} +const opListApplicationDPUSizes = "ListApplicationDPUSizes" -// SetUnprocessedNamedQueryIds sets the UnprocessedNamedQueryIds field's value. -func (s *BatchGetNamedQueryOutput) SetUnprocessedNamedQueryIds(v []*UnprocessedNamedQueryId) *BatchGetNamedQueryOutput { - s.UnprocessedNamedQueryIds = v - return s -} +// ListApplicationDPUSizesRequest generates a "aws/request.Request" representing the +// client's request for the ListApplicationDPUSizes operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListApplicationDPUSizes for more information on using the ListApplicationDPUSizes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListApplicationDPUSizesRequest method. +// req, resp := client.ListApplicationDPUSizesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListApplicationDPUSizes +func (c *Athena) ListApplicationDPUSizesRequest(input *ListApplicationDPUSizesInput) (req *request.Request, output *ListApplicationDPUSizesOutput) { + op := &request.Operation{ + Name: opListApplicationDPUSizes, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } -type BatchGetQueryExecutionInput struct { - _ struct{} `type:"structure"` + if input == nil { + input = &ListApplicationDPUSizesInput{} + } - // An array of query execution IDs. - // - // QueryExecutionIds is a required field - QueryExecutionIds []*string `min:"1" type:"list" required:"true"` + output = &ListApplicationDPUSizesOutput{} + req = c.newRequest(op, input, output) + return } -// String returns the string representation. +// ListApplicationDPUSizes API operation for Amazon Athena. // -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". +// Returns the supported DPU sizes for the supported application runtimes (for +// example, Athena notebook version 1). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Athena's +// API operation ListApplicationDPUSizes for usage and error information. +// +// Returned Error Types: +// +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// - TooManyRequestsException +// Indicates that the request was throttled. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListApplicationDPUSizes +func (c *Athena) ListApplicationDPUSizes(input *ListApplicationDPUSizesInput) (*ListApplicationDPUSizesOutput, error) { + req, out := c.ListApplicationDPUSizesRequest(input) + return out, req.Send() +} + +// ListApplicationDPUSizesWithContext is the same as ListApplicationDPUSizes with the addition of +// the ability to pass a context and additional request options. +// +// See ListApplicationDPUSizes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Athena) ListApplicationDPUSizesWithContext(ctx aws.Context, input *ListApplicationDPUSizesInput, opts ...request.Option) (*ListApplicationDPUSizesOutput, error) { + req, out := c.ListApplicationDPUSizesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListApplicationDPUSizesPages iterates over the pages of a ListApplicationDPUSizes operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListApplicationDPUSizes method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListApplicationDPUSizes operation. +// pageNum := 0 +// err := client.ListApplicationDPUSizesPages(params, +// func(page *athena.ListApplicationDPUSizesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *Athena) ListApplicationDPUSizesPages(input *ListApplicationDPUSizesInput, fn func(*ListApplicationDPUSizesOutput, bool) bool) error { + return c.ListApplicationDPUSizesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListApplicationDPUSizesPagesWithContext same as ListApplicationDPUSizesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Athena) ListApplicationDPUSizesPagesWithContext(ctx aws.Context, input *ListApplicationDPUSizesInput, fn func(*ListApplicationDPUSizesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListApplicationDPUSizesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListApplicationDPUSizesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListApplicationDPUSizesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListCalculationExecutions = "ListCalculationExecutions" + +// ListCalculationExecutionsRequest generates a "aws/request.Request" representing the +// client's request for the ListCalculationExecutions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListCalculationExecutions for more information on using the ListCalculationExecutions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListCalculationExecutionsRequest method. +// req, resp := client.ListCalculationExecutionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListCalculationExecutions +func (c *Athena) ListCalculationExecutionsRequest(input *ListCalculationExecutionsInput) (req *request.Request, output *ListCalculationExecutionsOutput) { + op := &request.Operation{ + Name: opListCalculationExecutions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListCalculationExecutionsInput{} + } + + output = &ListCalculationExecutionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListCalculationExecutions API operation for Amazon Athena. +// +// Lists the calculations that have been submitted to a session in descending +// order. Newer calculations are listed first; older calculations are listed +// later. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Athena's +// API operation ListCalculationExecutions for usage and error information. +// +// Returned Error Types: +// +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// - ResourceNotFoundException +// A resource, such as a workgroup, was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListCalculationExecutions +func (c *Athena) ListCalculationExecutions(input *ListCalculationExecutionsInput) (*ListCalculationExecutionsOutput, error) { + req, out := c.ListCalculationExecutionsRequest(input) + return out, req.Send() +} + +// ListCalculationExecutionsWithContext is the same as ListCalculationExecutions with the addition of +// the ability to pass a context and additional request options. +// +// See ListCalculationExecutions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Athena) ListCalculationExecutionsWithContext(ctx aws.Context, input *ListCalculationExecutionsInput, opts ...request.Option) (*ListCalculationExecutionsOutput, error) { + req, out := c.ListCalculationExecutionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListCalculationExecutionsPages iterates over the pages of a ListCalculationExecutions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListCalculationExecutions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListCalculationExecutions operation. +// pageNum := 0 +// err := client.ListCalculationExecutionsPages(params, +// func(page *athena.ListCalculationExecutionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *Athena) ListCalculationExecutionsPages(input *ListCalculationExecutionsInput, fn func(*ListCalculationExecutionsOutput, bool) bool) error { + return c.ListCalculationExecutionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListCalculationExecutionsPagesWithContext same as ListCalculationExecutionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Athena) ListCalculationExecutionsPagesWithContext(ctx aws.Context, input *ListCalculationExecutionsInput, fn func(*ListCalculationExecutionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListCalculationExecutionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListCalculationExecutionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListCalculationExecutionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListCapacityReservations = "ListCapacityReservations" + +// ListCapacityReservationsRequest generates a "aws/request.Request" representing the +// client's request for the ListCapacityReservations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListCapacityReservations for more information on using the ListCapacityReservations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListCapacityReservationsRequest method. 
+// req, resp := client.ListCapacityReservationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListCapacityReservations +func (c *Athena) ListCapacityReservationsRequest(input *ListCapacityReservationsInput) (req *request.Request, output *ListCapacityReservationsOutput) { + op := &request.Operation{ + Name: opListCapacityReservations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListCapacityReservationsInput{} + } + + output = &ListCapacityReservationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListCapacityReservations API operation for Amazon Athena. +// +// Lists the capacity reservations for the current account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Athena's +// API operation ListCapacityReservations for usage and error information. +// +// Returned Error Types: +// +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListCapacityReservations +func (c *Athena) ListCapacityReservations(input *ListCapacityReservationsInput) (*ListCapacityReservationsOutput, error) { + req, out := c.ListCapacityReservationsRequest(input) + return out, req.Send() +} + +// ListCapacityReservationsWithContext is the same as ListCapacityReservations with the addition of +// the ability to pass a context and additional request options. +// +// See ListCapacityReservations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Athena) ListCapacityReservationsWithContext(ctx aws.Context, input *ListCapacityReservationsInput, opts ...request.Option) (*ListCapacityReservationsOutput, error) { + req, out := c.ListCapacityReservationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListCapacityReservationsPages iterates over the pages of a ListCapacityReservations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListCapacityReservations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListCapacityReservations operation. 
+// pageNum := 0 +// err := client.ListCapacityReservationsPages(params, +// func(page *athena.ListCapacityReservationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *Athena) ListCapacityReservationsPages(input *ListCapacityReservationsInput, fn func(*ListCapacityReservationsOutput, bool) bool) error { + return c.ListCapacityReservationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListCapacityReservationsPagesWithContext same as ListCapacityReservationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Athena) ListCapacityReservationsPagesWithContext(ctx aws.Context, input *ListCapacityReservationsInput, fn func(*ListCapacityReservationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListCapacityReservationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListCapacityReservationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListCapacityReservationsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListDataCatalogs = "ListDataCatalogs" + +// ListDataCatalogsRequest generates a "aws/request.Request" representing the +// client's request for the ListDataCatalogs operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListDataCatalogs for more information on using the ListDataCatalogs +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListDataCatalogsRequest method. +// req, resp := client.ListDataCatalogsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListDataCatalogs +func (c *Athena) ListDataCatalogsRequest(input *ListDataCatalogsInput) (req *request.Request, output *ListDataCatalogsOutput) { + op := &request.Operation{ + Name: opListDataCatalogs, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDataCatalogsInput{} + } + + output = &ListDataCatalogsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListDataCatalogs API operation for Amazon Athena. +// +// Lists the data catalogs in the current Amazon Web Services account. +// +// In the Athena console, data catalogs are listed as "data sources" on the +// Data sources page under the Data source name column. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Athena's +// API operation ListDataCatalogs for usage and error information. +// +// Returned Error Types: +// +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListDataCatalogs +func (c *Athena) ListDataCatalogs(input *ListDataCatalogsInput) (*ListDataCatalogsOutput, error) { + req, out := c.ListDataCatalogsRequest(input) + return out, req.Send() +} + +// ListDataCatalogsWithContext is the same as ListDataCatalogs with the addition of +// the ability to pass a context and additional request options. +// +// See ListDataCatalogs for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Athena) ListDataCatalogsWithContext(ctx aws.Context, input *ListDataCatalogsInput, opts ...request.Option) (*ListDataCatalogsOutput, error) { + req, out := c.ListDataCatalogsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListDataCatalogsPages iterates over the pages of a ListDataCatalogs operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDataCatalogs method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDataCatalogs operation. +// pageNum := 0 +// err := client.ListDataCatalogsPages(params, +// func(page *athena.ListDataCatalogsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *Athena) ListDataCatalogsPages(input *ListDataCatalogsInput, fn func(*ListDataCatalogsOutput, bool) bool) error { + return c.ListDataCatalogsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListDataCatalogsPagesWithContext same as ListDataCatalogsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Athena) ListDataCatalogsPagesWithContext(ctx aws.Context, input *ListDataCatalogsInput, fn func(*ListDataCatalogsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListDataCatalogsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListDataCatalogsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListDataCatalogsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListDatabases = "ListDatabases" + +// ListDatabasesRequest generates a "aws/request.Request" representing the +// client's request for the ListDatabases operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListDatabases for more information on using the ListDatabases +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListDatabasesRequest method. +// req, resp := client.ListDatabasesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListDatabases +func (c *Athena) ListDatabasesRequest(input *ListDatabasesInput) (req *request.Request, output *ListDatabasesOutput) { + op := &request.Operation{ + Name: opListDatabases, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDatabasesInput{} + } + + output = &ListDatabasesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListDatabases API operation for Amazon Athena. +// +// Lists the databases in the specified data catalog. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Athena's +// API operation ListDatabases for usage and error information. +// +// Returned Error Types: +// +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// - MetadataException +// An exception that Athena received when it called a custom metastore. Occurs +// if the error is not caused by user input (InvalidRequestException) or from +// the Athena platform (InternalServerException). For example, if a user-created +// Lambda function is missing permissions, the Lambda 4XX exception is returned +// in a MetadataException. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListDatabases +func (c *Athena) ListDatabases(input *ListDatabasesInput) (*ListDatabasesOutput, error) { + req, out := c.ListDatabasesRequest(input) + return out, req.Send() +} + +// ListDatabasesWithContext is the same as ListDatabases with the addition of +// the ability to pass a context and additional request options. +// +// See ListDatabases for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Athena) ListDatabasesWithContext(ctx aws.Context, input *ListDatabasesInput, opts ...request.Option) (*ListDatabasesOutput, error) { + req, out := c.ListDatabasesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListDatabasesPages iterates over the pages of a ListDatabases operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDatabases method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDatabases operation. +// pageNum := 0 +// err := client.ListDatabasesPages(params, +// func(page *athena.ListDatabasesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *Athena) ListDatabasesPages(input *ListDatabasesInput, fn func(*ListDatabasesOutput, bool) bool) error { + return c.ListDatabasesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListDatabasesPagesWithContext same as ListDatabasesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Athena) ListDatabasesPagesWithContext(ctx aws.Context, input *ListDatabasesInput, fn func(*ListDatabasesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListDatabasesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListDatabasesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListDatabasesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListEngineVersions = "ListEngineVersions" + +// ListEngineVersionsRequest generates a "aws/request.Request" representing the +// client's request for the ListEngineVersions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListEngineVersions for more information on using the ListEngineVersions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListEngineVersionsRequest method. 
+// req, resp := client.ListEngineVersionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListEngineVersions +func (c *Athena) ListEngineVersionsRequest(input *ListEngineVersionsInput) (req *request.Request, output *ListEngineVersionsOutput) { + op := &request.Operation{ + Name: opListEngineVersions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListEngineVersionsInput{} + } + + output = &ListEngineVersionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListEngineVersions API operation for Amazon Athena. +// +// Returns a list of engine versions that are available to choose from, including +// the Auto option. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Athena's +// API operation ListEngineVersions for usage and error information. +// +// Returned Error Types: +// +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListEngineVersions +func (c *Athena) ListEngineVersions(input *ListEngineVersionsInput) (*ListEngineVersionsOutput, error) { + req, out := c.ListEngineVersionsRequest(input) + return out, req.Send() +} + +// ListEngineVersionsWithContext is the same as ListEngineVersions with the addition of +// the ability to pass a context and additional request options. +// +// See ListEngineVersions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Athena) ListEngineVersionsWithContext(ctx aws.Context, input *ListEngineVersionsInput, opts ...request.Option) (*ListEngineVersionsOutput, error) { + req, out := c.ListEngineVersionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListEngineVersionsPages iterates over the pages of a ListEngineVersions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListEngineVersions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListEngineVersions operation. 
+// pageNum := 0
+// err := client.ListEngineVersionsPages(params,
+// func(page *athena.ListEngineVersionsOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+func (c *Athena) ListEngineVersionsPages(input *ListEngineVersionsInput, fn func(*ListEngineVersionsOutput, bool) bool) error {
+ return c.ListEngineVersionsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListEngineVersionsPagesWithContext is the same as ListEngineVersionsPages except that
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Athena) ListEngineVersionsPagesWithContext(ctx aws.Context, input *ListEngineVersionsInput, fn func(*ListEngineVersionsOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *ListEngineVersionsInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.ListEngineVersionsRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ for p.Next() {
+ if !fn(p.Page().(*ListEngineVersionsOutput), !p.HasNextPage()) {
+ break
+ }
+ }
+
+ return p.Err()
+}
+
+const opListExecutors = "ListExecutors"
+
+// ListExecutorsRequest generates a "aws/request.Request" representing the
+// client's request for the ListExecutors operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See ListExecutors for more information on using the ListExecutors
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the ListExecutorsRequest method.
+// req, resp := client.ListExecutorsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListExecutors
+func (c *Athena) ListExecutorsRequest(input *ListExecutorsInput) (req *request.Request, output *ListExecutorsOutput) {
+ op := &request.Operation{
+ Name: opListExecutors,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"NextToken"},
+ OutputTokens: []string{"NextToken"},
+ LimitToken: "MaxResults",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &ListExecutorsInput{}
+ }
+
+ output = &ListExecutorsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListExecutors API operation for Amazon Athena.
+//
+// Lists, in descending order, the executors that joined a session. Newer executors
+// are listed first; older executors are listed later. The results can optionally
+// be filtered by state.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Athena's
+// API operation ListExecutors for usage and error information.
+//
+// Returned Error Types:
+//
+// - InternalServerException
+// Indicates a platform issue, which may be due to a transient condition or
+// outage.
+//
+// - InvalidRequestException
+// Indicates that something is wrong with the input to the request. For example,
+// a required parameter may be missing or out of range.
+//
+// - ResourceNotFoundException
+// A resource, such as a workgroup, was not found.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListExecutors
+func (c *Athena) ListExecutors(input *ListExecutorsInput) (*ListExecutorsOutput, error) {
+ req, out := c.ListExecutorsRequest(input)
+ return out, req.Send()
+}
+
+// ListExecutorsWithContext is the same as ListExecutors with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListExecutors for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Athena) ListExecutorsWithContext(ctx aws.Context, input *ListExecutorsInput, opts ...request.Option) (*ListExecutorsOutput, error) {
+ req, out := c.ListExecutorsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// ListExecutorsPages iterates over the pages of a ListExecutors operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListExecutors method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a ListExecutors operation.
+// pageNum := 0
+// err := client.ListExecutorsPages(params,
+// func(page *athena.ListExecutorsOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+func (c *Athena) ListExecutorsPages(input *ListExecutorsInput, fn func(*ListExecutorsOutput, bool) bool) error {
+ return c.ListExecutorsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListExecutorsPagesWithContext is the same as ListExecutorsPages except that
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Athena) ListExecutorsPagesWithContext(ctx aws.Context, input *ListExecutorsInput, fn func(*ListExecutorsOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *ListExecutorsInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.ListExecutorsRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ for p.Next() {
+ if !fn(p.Page().(*ListExecutorsOutput), !p.HasNextPage()) {
+ break
+ }
+ }
+
+ return p.Err()
+}
+
+const opListNamedQueries = "ListNamedQueries"
+
+// ListNamedQueriesRequest generates a "aws/request.Request" representing the
+// client's request for the ListNamedQueries operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See ListNamedQueries for more information on using the ListNamedQueries
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the ListNamedQueriesRequest method.
+// req, resp := client.ListNamedQueriesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListNamedQueries
+func (c *Athena) ListNamedQueriesRequest(input *ListNamedQueriesInput) (req *request.Request, output *ListNamedQueriesOutput) {
+ op := &request.Operation{
+ Name: opListNamedQueries,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"NextToken"},
+ OutputTokens: []string{"NextToken"},
+ LimitToken: "MaxResults",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &ListNamedQueriesInput{}
+ }
+
+ output = &ListNamedQueriesOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListNamedQueries API operation for Amazon Athena.
+//
+// Provides a list of available query IDs only for queries saved in the specified
+// workgroup. Requires that you have access to the specified workgroup. If a
+// workgroup is not specified, lists the saved queries for the primary workgroup.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Athena's
+// API operation ListNamedQueries for usage and error information.
+//
+// Returned Error Types:
+//
+// - InternalServerException
+// Indicates a platform issue, which may be due to a transient condition or
+// outage.
+//
+// - InvalidRequestException
+// Indicates that something is wrong with the input to the request. For example,
+// a required parameter may be missing or out of range.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListNamedQueries
+func (c *Athena) ListNamedQueries(input *ListNamedQueriesInput) (*ListNamedQueriesOutput, error) {
+ req, out := c.ListNamedQueriesRequest(input)
+ return out, req.Send()
+}
+
+// ListNamedQueriesWithContext is the same as ListNamedQueries with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListNamedQueries for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Athena) ListNamedQueriesWithContext(ctx aws.Context, input *ListNamedQueriesInput, opts ...request.Option) (*ListNamedQueriesOutput, error) {
+ req, out := c.ListNamedQueriesRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// ListNamedQueriesPages iterates over the pages of a ListNamedQueries operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListNamedQueries method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a ListNamedQueries operation.
+// pageNum := 0
+// err := client.ListNamedQueriesPages(params,
+// func(page *athena.ListNamedQueriesOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+func (c *Athena) ListNamedQueriesPages(input *ListNamedQueriesInput, fn func(*ListNamedQueriesOutput, bool) bool) error {
+ return c.ListNamedQueriesPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListNamedQueriesPagesWithContext is the same as ListNamedQueriesPages except that
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Athena) ListNamedQueriesPagesWithContext(ctx aws.Context, input *ListNamedQueriesInput, fn func(*ListNamedQueriesOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *ListNamedQueriesInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.ListNamedQueriesRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ for p.Next() {
+ if !fn(p.Page().(*ListNamedQueriesOutput), !p.HasNextPage()) {
+ break
+ }
+ }
+
+ return p.Err()
+}
+
+const opListNotebookMetadata = "ListNotebookMetadata"
+
+// ListNotebookMetadataRequest generates a "aws/request.Request" representing the
+// client's request for the ListNotebookMetadata operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See ListNotebookMetadata for more information on using the ListNotebookMetadata
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the ListNotebookMetadataRequest method.
+// req, resp := client.ListNotebookMetadataRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListNotebookMetadata
+func (c *Athena) ListNotebookMetadataRequest(input *ListNotebookMetadataInput) (req *request.Request, output *ListNotebookMetadataOutput) {
+ op := &request.Operation{
+ Name: opListNotebookMetadata,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ListNotebookMetadataInput{}
+ }
+
+ output = &ListNotebookMetadataOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListNotebookMetadata API operation for Amazon Athena.
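+//
+// Because no Pages helpers are generated for this operation, callers page
+// through results manually with NextToken. A hedged, illustrative sketch
+// (the "client" value and workgroup name are assumptions, not part of this
+// file):
+//
+// input := &athena.ListNotebookMetadataInput{WorkGroup: aws.String("my-workgroup")}
+// for {
+// out, err := client.ListNotebookMetadata(input)
+// if err != nil {
+// break // handle the error in real code
+// }
+// for _, nb := range out.NotebookMetadataList {
+// fmt.Println(aws.StringValue(nb.Name))
+// }
+// if out.NextToken == nil {
+// break
+// }
+// input.NextToken = out.NextToken
+// }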
+//
+// Displays the notebook files for the specified workgroup in paginated format.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Athena's
+// API operation ListNotebookMetadata for usage and error information.
+//
+// Returned Error Types:
+//
+// - InternalServerException
+// Indicates a platform issue, which may be due to a transient condition or
+// outage.
+//
+// - InvalidRequestException
+// Indicates that something is wrong with the input to the request. For example,
+// a required parameter may be missing or out of range.
+//
+// - TooManyRequestsException
+// Indicates that the request was throttled.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListNotebookMetadata
+func (c *Athena) ListNotebookMetadata(input *ListNotebookMetadataInput) (*ListNotebookMetadataOutput, error) {
+ req, out := c.ListNotebookMetadataRequest(input)
+ return out, req.Send()
+}
+
+// ListNotebookMetadataWithContext is the same as ListNotebookMetadata with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListNotebookMetadata for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Athena) ListNotebookMetadataWithContext(ctx aws.Context, input *ListNotebookMetadataInput, opts ...request.Option) (*ListNotebookMetadataOutput, error) {
+ req, out := c.ListNotebookMetadataRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opListNotebookSessions = "ListNotebookSessions"
+
+// ListNotebookSessionsRequest generates a "aws/request.Request" representing the
+// client's request for the ListNotebookSessions operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See ListNotebookSessions for more information on using the ListNotebookSessions
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the ListNotebookSessionsRequest method.
+// req, resp := client.ListNotebookSessionsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListNotebookSessions
+func (c *Athena) ListNotebookSessionsRequest(input *ListNotebookSessionsInput) (req *request.Request, output *ListNotebookSessionsOutput) {
+ op := &request.Operation{
+ Name: opListNotebookSessions,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ListNotebookSessionsInput{}
+ }
+
+ output = &ListNotebookSessionsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListNotebookSessions API operation for Amazon Athena.
+//
+// Lists, in descending order, the sessions that have been created in a notebook
+// that are in an active state like CREATING, CREATED, IDLE, or BUSY. Newer sessions
+// are listed first; older sessions are listed later.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Athena's
+// API operation ListNotebookSessions for usage and error information.
+//
+// Returned Error Types:
+//
+// - InternalServerException
+// Indicates a platform issue, which may be due to a transient condition or
+// outage.
+//
+// - InvalidRequestException
+// Indicates that something is wrong with the input to the request. For example,
+// a required parameter may be missing or out of range.
+//
+// - ResourceNotFoundException
+// A resource, such as a workgroup, was not found.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListNotebookSessions
+func (c *Athena) ListNotebookSessions(input *ListNotebookSessionsInput) (*ListNotebookSessionsOutput, error) {
+ req, out := c.ListNotebookSessionsRequest(input)
+ return out, req.Send()
+}
+
+// ListNotebookSessionsWithContext is the same as ListNotebookSessions with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListNotebookSessions for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Athena) ListNotebookSessionsWithContext(ctx aws.Context, input *ListNotebookSessionsInput, opts ...request.Option) (*ListNotebookSessionsOutput, error) {
+ req, out := c.ListNotebookSessionsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opListPreparedStatements = "ListPreparedStatements"
+
+// ListPreparedStatementsRequest generates a "aws/request.Request" representing the
+// client's request for the ListPreparedStatements operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See ListPreparedStatements for more information on using the ListPreparedStatements
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the ListPreparedStatementsRequest method.
+// req, resp := client.ListPreparedStatementsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListPreparedStatements +func (c *Athena) ListPreparedStatementsRequest(input *ListPreparedStatementsInput) (req *request.Request, output *ListPreparedStatementsOutput) { + op := &request.Operation{ + Name: opListPreparedStatements, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListPreparedStatementsInput{} + } + + output = &ListPreparedStatementsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListPreparedStatements API operation for Amazon Athena. +// +// Lists the prepared statements in the specified workgroup. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Athena's +// API operation ListPreparedStatements for usage and error information. +// +// Returned Error Types: +// +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListPreparedStatements +func (c *Athena) ListPreparedStatements(input *ListPreparedStatementsInput) (*ListPreparedStatementsOutput, error) { + req, out := c.ListPreparedStatementsRequest(input) + return out, req.Send() +} + +// ListPreparedStatementsWithContext is the same as ListPreparedStatements with the addition of +// the ability to pass a context and additional request options. +// +// See ListPreparedStatements for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Athena) ListPreparedStatementsWithContext(ctx aws.Context, input *ListPreparedStatementsInput, opts ...request.Option) (*ListPreparedStatementsOutput, error) { + req, out := c.ListPreparedStatementsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListPreparedStatementsPages iterates over the pages of a ListPreparedStatements operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListPreparedStatements method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListPreparedStatements operation. 
+// pageNum := 0
+// err := client.ListPreparedStatementsPages(params,
+// func(page *athena.ListPreparedStatementsOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+func (c *Athena) ListPreparedStatementsPages(input *ListPreparedStatementsInput, fn func(*ListPreparedStatementsOutput, bool) bool) error {
+ return c.ListPreparedStatementsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListPreparedStatementsPagesWithContext is the same as ListPreparedStatementsPages except that
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Athena) ListPreparedStatementsPagesWithContext(ctx aws.Context, input *ListPreparedStatementsInput, fn func(*ListPreparedStatementsOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *ListPreparedStatementsInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.ListPreparedStatementsRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ for p.Next() {
+ if !fn(p.Page().(*ListPreparedStatementsOutput), !p.HasNextPage()) {
+ break
+ }
+ }
+
+ return p.Err()
+}
+
+const opListQueryExecutions = "ListQueryExecutions"
+
+// ListQueryExecutionsRequest generates a "aws/request.Request" representing the
+// client's request for the ListQueryExecutions operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See ListQueryExecutions for more information on using the ListQueryExecutions
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the ListQueryExecutionsRequest method.
+// req, resp := client.ListQueryExecutionsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListQueryExecutions
+func (c *Athena) ListQueryExecutionsRequest(input *ListQueryExecutionsInput) (req *request.Request, output *ListQueryExecutionsOutput) {
+ op := &request.Operation{
+ Name: opListQueryExecutions,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"NextToken"},
+ OutputTokens: []string{"NextToken"},
+ LimitToken: "MaxResults",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &ListQueryExecutionsInput{}
+ }
+
+ output = &ListQueryExecutionsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListQueryExecutions API operation for Amazon Athena.
+//
+// Provides a list of available query execution IDs for the queries in the specified
+// workgroup. Athena keeps a query history for 45 days. If a workgroup is not
+// specified, returns a list of query execution IDs for the primary workgroup.
+// Requires you to have access to the workgroup in which the queries ran.
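+//
+// For example, a caller could collect every available query execution ID with
+// the paginated helper (an illustrative sketch; the "client" value, "ctx", and
+// workgroup name are assumptions, not part of this file):
+//
+// var ids []*string
+// err := client.ListQueryExecutionsPagesWithContext(ctx,
+// &athena.ListQueryExecutionsInput{WorkGroup: aws.String("primary")},
+// func(page *athena.ListQueryExecutionsOutput, lastPage bool) bool {
+// ids = append(ids, page.QueryExecutionIds...)
+// return true
+// })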
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Athena's
+// API operation ListQueryExecutions for usage and error information.
+//
+// Returned Error Types:
+//
+// - InternalServerException
+// Indicates a platform issue, which may be due to a transient condition or
+// outage.
+//
+// - InvalidRequestException
+// Indicates that something is wrong with the input to the request. For example,
+// a required parameter may be missing or out of range.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListQueryExecutions
+func (c *Athena) ListQueryExecutions(input *ListQueryExecutionsInput) (*ListQueryExecutionsOutput, error) {
+ req, out := c.ListQueryExecutionsRequest(input)
+ return out, req.Send()
+}
+
+// ListQueryExecutionsWithContext is the same as ListQueryExecutions with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListQueryExecutions for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Athena) ListQueryExecutionsWithContext(ctx aws.Context, input *ListQueryExecutionsInput, opts ...request.Option) (*ListQueryExecutionsOutput, error) {
+ req, out := c.ListQueryExecutionsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// ListQueryExecutionsPages iterates over the pages of a ListQueryExecutions operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListQueryExecutions method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a ListQueryExecutions operation.
+// pageNum := 0
+// err := client.ListQueryExecutionsPages(params,
+// func(page *athena.ListQueryExecutionsOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+func (c *Athena) ListQueryExecutionsPages(input *ListQueryExecutionsInput, fn func(*ListQueryExecutionsOutput, bool) bool) error {
+ return c.ListQueryExecutionsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListQueryExecutionsPagesWithContext is the same as ListQueryExecutionsPages except that
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Athena) ListQueryExecutionsPagesWithContext(ctx aws.Context, input *ListQueryExecutionsInput, fn func(*ListQueryExecutionsOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *ListQueryExecutionsInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.ListQueryExecutionsRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ for p.Next() {
+ if !fn(p.Page().(*ListQueryExecutionsOutput), !p.HasNextPage()) {
+ break
+ }
+ }
+
+ return p.Err()
+}
+
+const opListSessions = "ListSessions"
+
+// ListSessionsRequest generates a "aws/request.Request" representing the
+// client's request for the ListSessions operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See ListSessions for more information on using the ListSessions
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the ListSessionsRequest method.
+// req, resp := client.ListSessionsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListSessions
+func (c *Athena) ListSessionsRequest(input *ListSessionsInput) (req *request.Request, output *ListSessionsOutput) {
+ op := &request.Operation{
+ Name: opListSessions,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"NextToken"},
+ OutputTokens: []string{"NextToken"},
+ LimitToken: "MaxResults",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &ListSessionsInput{}
+ }
+
+ output = &ListSessionsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListSessions API operation for Amazon Athena.
+//
+// Lists the sessions in a workgroup that are in an active state like CREATING,
+// CREATED, IDLE, or BUSY. Newer sessions are listed first; older sessions are
+// listed later.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Athena's
+// API operation ListSessions for usage and error information.
+//
+// Returned Error Types:
+//
+// - InternalServerException
+// Indicates a platform issue, which may be due to a transient condition or
+// outage.
+//
+// - InvalidRequestException
+// Indicates that something is wrong with the input to the request. For example,
+// a required parameter may be missing or out of range.
+//
+// - ResourceNotFoundException
+// A resource, such as a workgroup, was not found.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListSessions
+func (c *Athena) ListSessions(input *ListSessionsInput) (*ListSessionsOutput, error) {
+ req, out := c.ListSessionsRequest(input)
+ return out, req.Send()
+}
+
+// ListSessionsWithContext is the same as ListSessions with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListSessions for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
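+//
+// A minimal sketch of supplying a deadline (the timeout, "client" value, and
+// workgroup name are illustrative assumptions, not part of this file):
+//
+// ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+// defer cancel()
+// out, err := client.ListSessionsWithContext(ctx, &athena.ListSessionsInput{
+// WorkGroup: aws.String("my-workgroup"),
+// })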
+func (c *Athena) ListSessionsWithContext(ctx aws.Context, input *ListSessionsInput, opts ...request.Option) (*ListSessionsOutput, error) {
+ req, out := c.ListSessionsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// ListSessionsPages iterates over the pages of a ListSessions operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListSessions method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a ListSessions operation.
+// pageNum := 0
+// err := client.ListSessionsPages(params,
+// func(page *athena.ListSessionsOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+func (c *Athena) ListSessionsPages(input *ListSessionsInput, fn func(*ListSessionsOutput, bool) bool) error {
+ return c.ListSessionsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListSessionsPagesWithContext is the same as ListSessionsPages except that
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Athena) ListSessionsPagesWithContext(ctx aws.Context, input *ListSessionsInput, fn func(*ListSessionsOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *ListSessionsInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.ListSessionsRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ for p.Next() {
+ if !fn(p.Page().(*ListSessionsOutput), !p.HasNextPage()) {
+ break
+ }
+ }
+
+ return p.Err()
+}
+
+const opListTableMetadata = "ListTableMetadata"
+
+// ListTableMetadataRequest generates a "aws/request.Request" representing the
+// client's request for the ListTableMetadata operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See ListTableMetadata for more information on using the ListTableMetadata
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the ListTableMetadataRequest method.
+// req, resp := client.ListTableMetadataRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListTableMetadata +func (c *Athena) ListTableMetadataRequest(input *ListTableMetadataInput) (req *request.Request, output *ListTableMetadataOutput) { + op := &request.Operation{ + Name: opListTableMetadata, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTableMetadataInput{} + } + + output = &ListTableMetadataOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTableMetadata API operation for Amazon Athena. +// +// Lists the metadata for the tables in the specified data catalog database. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Athena's +// API operation ListTableMetadata for usage and error information. +// +// Returned Error Types: +// +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// - MetadataException +// An exception that Athena received when it called a custom metastore. Occurs +// if the error is not caused by user input (InvalidRequestException) or from +// the Athena platform (InternalServerException). For example, if a user-created +// Lambda function is missing permissions, the Lambda 4XX exception is returned +// in a MetadataException. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListTableMetadata +func (c *Athena) ListTableMetadata(input *ListTableMetadataInput) (*ListTableMetadataOutput, error) { + req, out := c.ListTableMetadataRequest(input) + return out, req.Send() +} + +// ListTableMetadataWithContext is the same as ListTableMetadata with the addition of +// the ability to pass a context and additional request options. +// +// See ListTableMetadata for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Athena) ListTableMetadataWithContext(ctx aws.Context, input *ListTableMetadataInput, opts ...request.Option) (*ListTableMetadataOutput, error) { + req, out := c.ListTableMetadataRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListTableMetadataPages iterates over the pages of a ListTableMetadata operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListTableMetadata method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListTableMetadata operation. 
+// pageNum := 0
+// err := client.ListTableMetadataPages(params,
+// func(page *athena.ListTableMetadataOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+func (c *Athena) ListTableMetadataPages(input *ListTableMetadataInput, fn func(*ListTableMetadataOutput, bool) bool) error {
+ return c.ListTableMetadataPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListTableMetadataPagesWithContext is the same as ListTableMetadataPages except that
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Athena) ListTableMetadataPagesWithContext(ctx aws.Context, input *ListTableMetadataInput, fn func(*ListTableMetadataOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *ListTableMetadataInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.ListTableMetadataRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ for p.Next() {
+ if !fn(p.Page().(*ListTableMetadataOutput), !p.HasNextPage()) {
+ break
+ }
+ }
+
+ return p.Err()
+}
+
+const opListTagsForResource = "ListTagsForResource"
+
+// ListTagsForResourceRequest generates a "aws/request.Request" representing the
+// client's request for the ListTagsForResource operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See ListTagsForResource for more information on using the ListTagsForResource
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the ListTagsForResourceRequest method.
+// req, resp := client.ListTagsForResourceRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListTagsForResource
+func (c *Athena) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) {
+ op := &request.Operation{
+ Name: opListTagsForResource,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"NextToken"},
+ OutputTokens: []string{"NextToken"},
+ LimitToken: "MaxResults",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &ListTagsForResourceInput{}
+ }
+
+ output = &ListTagsForResourceOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListTagsForResource API operation for Amazon Athena.
+//
+// Lists the tags associated with an Athena resource.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Athena's
+// API operation ListTagsForResource for usage and error information.
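+//
+// As a hedged sketch of the type-assertion pattern described above (the
+// "client" value and resource ARN are placeholders, not part of this file):
+//
+// _, err := client.ListTagsForResource(&athena.ListTagsForResourceInput{
+// ResourceARN: aws.String("arn:aws:athena:us-east-1:111122223333:workgroup/primary"),
+// })
+// if awsErr, ok := err.(awserr.Error); ok {
+// switch awsErr.Code() {
+// case athena.ErrCodeResourceNotFoundException:
+// // the resource does not exist
+// default:
+// fmt.Println(awsErr.Code(), awsErr.Message())
+// }
+// }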
+//
+// Returned Error Types:
+//
+// - InternalServerException
+// Indicates a platform issue, which may be due to a transient condition or
+// outage.
+//
+// - InvalidRequestException
+// Indicates that something is wrong with the input to the request. For example,
+// a required parameter may be missing or out of range.
+//
+// - ResourceNotFoundException
+// A resource, such as a workgroup, was not found.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListTagsForResource
+func (c *Athena) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) {
+ req, out := c.ListTagsForResourceRequest(input)
+ return out, req.Send()
+}
+
+// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListTagsForResource for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Athena) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) {
+ req, out := c.ListTagsForResourceRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// ListTagsForResourcePages iterates over the pages of a ListTagsForResource operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListTagsForResource method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a ListTagsForResource operation.
+// pageNum := 0
+// err := client.ListTagsForResourcePages(params,
+// func(page *athena.ListTagsForResourceOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+func (c *Athena) ListTagsForResourcePages(input *ListTagsForResourceInput, fn func(*ListTagsForResourceOutput, bool) bool) error {
+ return c.ListTagsForResourcePagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListTagsForResourcePagesWithContext is the same as ListTagsForResourcePages except that
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Athena) ListTagsForResourcePagesWithContext(ctx aws.Context, input *ListTagsForResourceInput, fn func(*ListTagsForResourceOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *ListTagsForResourceInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.ListTagsForResourceRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ for p.Next() {
+ if !fn(p.Page().(*ListTagsForResourceOutput), !p.HasNextPage()) {
+ break
+ }
+ }
+
+ return p.Err()
+}
+
+const opListWorkGroups = "ListWorkGroups"
+
+// ListWorkGroupsRequest generates a "aws/request.Request" representing the
+// client's request for the ListWorkGroups operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See ListWorkGroups for more information on using the ListWorkGroups
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the ListWorkGroupsRequest method.
+// req, resp := client.ListWorkGroupsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListWorkGroups
+func (c *Athena) ListWorkGroupsRequest(input *ListWorkGroupsInput) (req *request.Request, output *ListWorkGroupsOutput) {
+ op := &request.Operation{
+ Name: opListWorkGroups,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"NextToken"},
+ OutputTokens: []string{"NextToken"},
+ LimitToken: "MaxResults",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &ListWorkGroupsInput{}
+ }
+
+ output = &ListWorkGroupsOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListWorkGroups API operation for Amazon Athena.
+//
+// Lists available workgroups for the account.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Athena's
+// API operation ListWorkGroups for usage and error information.
+//
+// Returned Error Types:
+//
+// - InternalServerException
+// Indicates a platform issue, which may be due to a transient condition or
+// outage.
+//
+// - InvalidRequestException
+// Indicates that something is wrong with the input to the request. For example,
+// a required parameter may be missing or out of range.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListWorkGroups
+func (c *Athena) ListWorkGroups(input *ListWorkGroupsInput) (*ListWorkGroupsOutput, error) {
+ req, out := c.ListWorkGroupsRequest(input)
+ return out, req.Send()
+}
+
+// ListWorkGroupsWithContext is the same as ListWorkGroups with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListWorkGroups for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Athena) ListWorkGroupsWithContext(ctx aws.Context, input *ListWorkGroupsInput, opts ...request.Option) (*ListWorkGroupsOutput, error) {
+ req, out := c.ListWorkGroupsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// ListWorkGroupsPages iterates over the pages of a ListWorkGroups operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListWorkGroups method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a ListWorkGroups operation.
+// pageNum := 0
+// err := client.ListWorkGroupsPages(params,
+// func(page *athena.ListWorkGroupsOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+func (c *Athena) ListWorkGroupsPages(input *ListWorkGroupsInput, fn func(*ListWorkGroupsOutput, bool) bool) error {
+ return c.ListWorkGroupsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListWorkGroupsPagesWithContext is the same as ListWorkGroupsPages except that
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Athena) ListWorkGroupsPagesWithContext(ctx aws.Context, input *ListWorkGroupsInput, fn func(*ListWorkGroupsOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *ListWorkGroupsInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.ListWorkGroupsRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ for p.Next() {
+ if !fn(p.Page().(*ListWorkGroupsOutput), !p.HasNextPage()) {
+ break
+ }
+ }
+
+ return p.Err()
+}
+
+const opPutCapacityAssignmentConfiguration = "PutCapacityAssignmentConfiguration"
+
+// PutCapacityAssignmentConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the PutCapacityAssignmentConfiguration operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See PutCapacityAssignmentConfiguration for more information on using the PutCapacityAssignmentConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the PutCapacityAssignmentConfigurationRequest method.
+// req, resp := client.PutCapacityAssignmentConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/PutCapacityAssignmentConfiguration
+func (c *Athena) PutCapacityAssignmentConfigurationRequest(input *PutCapacityAssignmentConfigurationInput) (req *request.Request, output *PutCapacityAssignmentConfigurationOutput) {
+ op := &request.Operation{
+ Name: opPutCapacityAssignmentConfiguration,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &PutCapacityAssignmentConfigurationInput{}
+ }
+
+ output = &PutCapacityAssignmentConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// PutCapacityAssignmentConfiguration API operation for Amazon Athena.
+//
+// Puts a new capacity assignment configuration for a specified capacity reservation.
+// If a capacity assignment configuration already exists for the capacity reservation,
+// replaces the existing capacity assignment configuration.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Athena's
+// API operation PutCapacityAssignmentConfiguration for usage and error information.
+//
+// Returned Error Types:
+//
+// - InvalidRequestException
+// Indicates that something is wrong with the input to the request. For example,
+// a required parameter may be missing or out of range.
+//
+// - InternalServerException
+// Indicates a platform issue, which may be due to a transient condition or
+// outage.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/PutCapacityAssignmentConfiguration
+func (c *Athena) PutCapacityAssignmentConfiguration(input *PutCapacityAssignmentConfigurationInput) (*PutCapacityAssignmentConfigurationOutput, error) {
+ req, out := c.PutCapacityAssignmentConfigurationRequest(input)
+ return out, req.Send()
+}
+
+// PutCapacityAssignmentConfigurationWithContext is the same as PutCapacityAssignmentConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutCapacityAssignmentConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Athena) PutCapacityAssignmentConfigurationWithContext(ctx aws.Context, input *PutCapacityAssignmentConfigurationInput, opts ...request.Option) (*PutCapacityAssignmentConfigurationOutput, error) {
+ req, out := c.PutCapacityAssignmentConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opStartCalculationExecution = "StartCalculationExecution"
+
+// StartCalculationExecutionRequest generates a "aws/request.Request" representing the
+// client's request for the StartCalculationExecution operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error. +// +// See StartCalculationExecution for more information on using the StartCalculationExecution +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the StartCalculationExecutionRequest method. +// req, resp := client.StartCalculationExecutionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/StartCalculationExecution +func (c *Athena) StartCalculationExecutionRequest(input *StartCalculationExecutionInput) (req *request.Request, output *StartCalculationExecutionOutput) { + op := &request.Operation{ + Name: opStartCalculationExecution, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartCalculationExecutionInput{} + } + + output = &StartCalculationExecutionOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartCalculationExecution API operation for Amazon Athena. +// +// Submits calculations for execution within a session. You can supply the code +// to run as an inline code block within the request. +// +// The request syntax requires the StartCalculationExecutionRequest$CodeBlock +// parameter or the CalculationConfiguration$CodeBlock parameter, but not both. +// Because CalculationConfiguration$CodeBlock is deprecated, use the StartCalculationExecutionRequest$CodeBlock +// parameter instead. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Athena's +// API operation StartCalculationExecution for usage and error information. +// +// Returned Error Types: +// +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// - ResourceNotFoundException +// A resource, such as a workgroup, was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/StartCalculationExecution +func (c *Athena) StartCalculationExecution(input *StartCalculationExecutionInput) (*StartCalculationExecutionOutput, error) { + req, out := c.StartCalculationExecutionRequest(input) + return out, req.Send() +} + +// StartCalculationExecutionWithContext is the same as StartCalculationExecution with the addition of +// the ability to pass a context and additional request options. +// +// See StartCalculationExecution for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Athena) StartCalculationExecutionWithContext(ctx aws.Context, input *StartCalculationExecutionInput, opts ...request.Option) (*StartCalculationExecutionOutput, error) { + req, out := c.StartCalculationExecutionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opStartQueryExecution = "StartQueryExecution" + +// StartQueryExecutionRequest generates a "aws/request.Request" representing the +// client's request for the StartQueryExecution operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartQueryExecution for more information on using the StartQueryExecution +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the StartQueryExecutionRequest method. +// req, resp := client.StartQueryExecutionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/StartQueryExecution +func (c *Athena) StartQueryExecutionRequest(input *StartQueryExecutionInput) (req *request.Request, output *StartQueryExecutionOutput) { + op := &request.Operation{ + Name: opStartQueryExecution, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartQueryExecutionInput{} + } + + output = &StartQueryExecutionOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartQueryExecution API operation for Amazon Athena. +// +// Runs the SQL query statements contained in the Query. Requires you to have +// access to the workgroup in which the query ran. Running queries against an +// external catalog requires GetDataCatalog permission to the catalog. For code +// samples using the Amazon Web Services SDK for Java, see Examples and Code +// Samples (http://docs.aws.amazon.com/athena/latest/ug/code-samples.html) in +// the Amazon Athena User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Athena's +// API operation StartQueryExecution for usage and error information. +// +// Returned Error Types: +// +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// - TooManyRequestsException +// Indicates that the request was throttled. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/StartQueryExecution +func (c *Athena) StartQueryExecution(input *StartQueryExecutionInput) (*StartQueryExecutionOutput, error) { + req, out := c.StartQueryExecutionRequest(input) + return out, req.Send() +} + +// StartQueryExecutionWithContext is the same as StartQueryExecution with the addition of +// the ability to pass a context and additional request options. +// +// See StartQueryExecution for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
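+//
+// Illustrative usage sketch; the timeout, query string, and workgroup name
+// below are placeholder values, and the workgroup is assumed to have a
+// query result output location configured:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//	defer cancel()
+//
+//	out, err := client.StartQueryExecutionWithContext(ctx, &athena.StartQueryExecutionInput{
+//		QueryString: aws.String("SELECT 1"),
+//		WorkGroup:   aws.String("primary"),
+//	})
+//	if err == nil {
+//		fmt.Println(aws.StringValue(out.QueryExecutionId))
+//	}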
+func (c *Athena) StartQueryExecutionWithContext(ctx aws.Context, input *StartQueryExecutionInput, opts ...request.Option) (*StartQueryExecutionOutput, error) { + req, out := c.StartQueryExecutionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStartSession = "StartSession" + +// StartSessionRequest generates a "aws/request.Request" representing the +// client's request for the StartSession operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartSession for more information on using the StartSession +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the StartSessionRequest method. +// req, resp := client.StartSessionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/StartSession +func (c *Athena) StartSessionRequest(input *StartSessionInput) (req *request.Request, output *StartSessionOutput) { + op := &request.Operation{ + Name: opStartSession, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartSessionInput{} + } + + output = &StartSessionOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartSession API operation for Amazon Athena. +// +// Creates a session for running calculations within a workgroup. The session +// is ready when it reaches an IDLE state. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Athena's +// API operation StartSession for usage and error information. +// +// Returned Error Types: +// +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// - ResourceNotFoundException +// A resource, such as a workgroup, was not found. +// +// - SessionAlreadyExistsException +// The specified session already exists. +// +// - TooManyRequestsException +// Indicates that the request was throttled. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/StartSession +func (c *Athena) StartSession(input *StartSessionInput) (*StartSessionOutput, error) { + req, out := c.StartSessionRequest(input) + return out, req.Send() +} + +// StartSessionWithContext is the same as StartSession with the addition of +// the ability to pass a context and additional request options. +// +// See StartSession for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
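+//
+// Illustrative usage sketch; the workgroup name below is a placeholder and
+// is assumed to refer to an Apache Spark enabled workgroup:
+//
+//	out, err := client.StartSessionWithContext(ctx, &athena.StartSessionInput{
+//		WorkGroup: aws.String("spark-workgroup"),
+//		EngineConfiguration: &athena.EngineConfiguration{
+//			MaxConcurrentDpus: aws.Int64(4),
+//		},
+//	})
+//	if err == nil {
+//		// Poll GetSession with out.SessionId until the session state is IDLE.
+//		fmt.Println(aws.StringValue(out.SessionId))
+//	}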
+func (c *Athena) StartSessionWithContext(ctx aws.Context, input *StartSessionInput, opts ...request.Option) (*StartSessionOutput, error) { + req, out := c.StartSessionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStopCalculationExecution = "StopCalculationExecution" + +// StopCalculationExecutionRequest generates a "aws/request.Request" representing the +// client's request for the StopCalculationExecution operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StopCalculationExecution for more information on using the StopCalculationExecution +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the StopCalculationExecutionRequest method. +// req, resp := client.StopCalculationExecutionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/StopCalculationExecution +func (c *Athena) StopCalculationExecutionRequest(input *StopCalculationExecutionInput) (req *request.Request, output *StopCalculationExecutionOutput) { + op := &request.Operation{ + Name: opStopCalculationExecution, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopCalculationExecutionInput{} + } + + output = &StopCalculationExecutionOutput{} + req = c.newRequest(op, input, output) + return +} + +// StopCalculationExecution API operation for Amazon Athena. +// +// Requests the cancellation of a calculation. A StopCalculationExecution call +// on a calculation that is already in a terminal state (for example, STOPPED, +// FAILED, or COMPLETED) succeeds but has no effect. +// +// Cancelling a calculation is done on a best effort basis. If a calculation +// cannot be cancelled, you can be charged for its completion. If you are concerned +// about being charged for a calculation that cannot be cancelled, consider +// terminating the session in which the calculation is running. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Athena's +// API operation StopCalculationExecution for usage and error information. +// +// Returned Error Types: +// +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// - ResourceNotFoundException +// A resource, such as a workgroup, was not found. 
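+//
+// Illustrative usage sketch; calcExecID stands for a *string calculation
+// execution ID returned by an earlier StartCalculationExecution call:
+//
+//	_, err := client.StopCalculationExecution(&athena.StopCalculationExecutionInput{
+//		CalculationExecutionId: calcExecID,
+//	})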
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/StopCalculationExecution +func (c *Athena) StopCalculationExecution(input *StopCalculationExecutionInput) (*StopCalculationExecutionOutput, error) { + req, out := c.StopCalculationExecutionRequest(input) + return out, req.Send() +} + +// StopCalculationExecutionWithContext is the same as StopCalculationExecution with the addition of +// the ability to pass a context and additional request options. +// +// See StopCalculationExecution for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Athena) StopCalculationExecutionWithContext(ctx aws.Context, input *StopCalculationExecutionInput, opts ...request.Option) (*StopCalculationExecutionOutput, error) { + req, out := c.StopCalculationExecutionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStopQueryExecution = "StopQueryExecution" + +// StopQueryExecutionRequest generates a "aws/request.Request" representing the +// client's request for the StopQueryExecution operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StopQueryExecution for more information on using the StopQueryExecution +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the StopQueryExecutionRequest method. +// req, resp := client.StopQueryExecutionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/StopQueryExecution +func (c *Athena) StopQueryExecutionRequest(input *StopQueryExecutionInput) (req *request.Request, output *StopQueryExecutionOutput) { + op := &request.Operation{ + Name: opStopQueryExecution, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopQueryExecutionInput{} + } + + output = &StopQueryExecutionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// StopQueryExecution API operation for Amazon Athena. +// +// Stops a query execution. Requires you to have access to the workgroup in +// which the query ran. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Athena's +// API operation StopQueryExecution for usage and error information. +// +// Returned Error Types: +// +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. 
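+//
+// Illustrative usage sketch; queryExecutionID stands for a *string query
+// execution ID returned by an earlier StartQueryExecution call:
+//
+//	_, err := client.StopQueryExecution(&athena.StopQueryExecutionInput{
+//		QueryExecutionId: queryExecutionID,
+//	})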
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/StopQueryExecution +func (c *Athena) StopQueryExecution(input *StopQueryExecutionInput) (*StopQueryExecutionOutput, error) { + req, out := c.StopQueryExecutionRequest(input) + return out, req.Send() +} + +// StopQueryExecutionWithContext is the same as StopQueryExecution with the addition of +// the ability to pass a context and additional request options. +// +// See StopQueryExecution for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Athena) StopQueryExecutionWithContext(ctx aws.Context, input *StopQueryExecutionInput, opts ...request.Option) (*StopQueryExecutionOutput, error) { + req, out := c.StopQueryExecutionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the TagResourceRequest method. +// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/TagResource +func (c *Athena) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for Amazon Athena. +// +// Adds one or more tags to an Athena resource. A tag is a label that you assign +// to a resource. Each tag consists of a key and an optional value, both of +// which you define. For example, you can use tags to categorize Athena workgroups, +// data catalogs, or capacity reservations by purpose, owner, or environment. +// Use a consistent set of tag keys to make it easier to search and filter the +// resources in your account. For best practices, see Tagging Best Practices +// (https://docs.aws.amazon.com/whitepapers/latest/tagging-best-practices/tagging-best-practices.html). +// Tag keys can be from 1 to 128 UTF-8 Unicode characters, and tag values can +// be from 0 to 256 UTF-8 Unicode characters. Tags can use letters and numbers +// representable in UTF-8, and the following characters: + - = . _ : / @. Tag +// keys and values are case-sensitive. Tag keys must be unique per resource. 
+// If you specify more than one tag, separate them by commas. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Athena's +// API operation TagResource for usage and error information. +// +// Returned Error Types: +// +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// - ResourceNotFoundException +// A resource, such as a workgroup, was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/TagResource +func (c *Athena) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Athena) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTerminateSession = "TerminateSession" + +// TerminateSessionRequest generates a "aws/request.Request" representing the +// client's request for the TerminateSession operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TerminateSession for more information on using the TerminateSession +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the TerminateSessionRequest method. +// req, resp := client.TerminateSessionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/TerminateSession +func (c *Athena) TerminateSessionRequest(input *TerminateSessionInput) (req *request.Request, output *TerminateSessionOutput) { + op := &request.Operation{ + Name: opTerminateSession, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TerminateSessionInput{} + } + + output = &TerminateSessionOutput{} + req = c.newRequest(op, input, output) + return +} + +// TerminateSession API operation for Amazon Athena. +// +// Terminates an active session. A TerminateSession call on a session that is +// already inactive (for example, in a FAILED, TERMINATED or TERMINATING state) +// succeeds but has no effect. 
Calculations running in the session when TerminateSession +// is called are forcefully stopped, but may display as FAILED instead of STOPPED. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Athena's +// API operation TerminateSession for usage and error information. +// +// Returned Error Types: +// +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// - ResourceNotFoundException +// A resource, such as a workgroup, was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/TerminateSession +func (c *Athena) TerminateSession(input *TerminateSessionInput) (*TerminateSessionOutput, error) { + req, out := c.TerminateSessionRequest(input) + return out, req.Send() +} + +// TerminateSessionWithContext is the same as TerminateSession with the addition of +// the ability to pass a context and additional request options. +// +// See TerminateSession for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Athena) TerminateSessionWithContext(ctx aws.Context, input *TerminateSessionInput, opts ...request.Option) (*TerminateSessionOutput, error) { + req, out := c.TerminateSessionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UntagResource +func (c *Athena) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for Amazon Athena. +// +// Removes one or more tags from an Athena resource. 
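+//
+// Illustrative usage sketch; the workgroup ARN and tag keys below are
+// placeholder values:
+//
+//	_, err := client.UntagResource(&athena.UntagResourceInput{
+//		ResourceARN: aws.String("arn:aws:athena:us-east-1:111122223333:workgroup/primary"),
+//		TagKeys:     aws.StringSlice([]string{"team", "environment"}),
+//	})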
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Athena's +// API operation UntagResource for usage and error information. +// +// Returned Error Types: +// +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// - ResourceNotFoundException +// A resource, such as a workgroup, was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UntagResource +func (c *Athena) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Athena) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateCapacityReservation = "UpdateCapacityReservation" + +// UpdateCapacityReservationRequest generates a "aws/request.Request" representing the +// client's request for the UpdateCapacityReservation operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateCapacityReservation for more information on using the UpdateCapacityReservation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the UpdateCapacityReservationRequest method. +// req, resp := client.UpdateCapacityReservationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UpdateCapacityReservation +func (c *Athena) UpdateCapacityReservationRequest(input *UpdateCapacityReservationInput) (req *request.Request, output *UpdateCapacityReservationOutput) { + op := &request.Operation{ + Name: opUpdateCapacityReservation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateCapacityReservationInput{} + } + + output = &UpdateCapacityReservationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateCapacityReservation API operation for Amazon Athena. 
+// +// Updates the number of requested data processing units for the capacity reservation +// with the specified name. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Athena's +// API operation UpdateCapacityReservation for usage and error information. +// +// Returned Error Types: +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UpdateCapacityReservation +func (c *Athena) UpdateCapacityReservation(input *UpdateCapacityReservationInput) (*UpdateCapacityReservationOutput, error) { + req, out := c.UpdateCapacityReservationRequest(input) + return out, req.Send() +} + +// UpdateCapacityReservationWithContext is the same as UpdateCapacityReservation with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateCapacityReservation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Athena) UpdateCapacityReservationWithContext(ctx aws.Context, input *UpdateCapacityReservationInput, opts ...request.Option) (*UpdateCapacityReservationOutput, error) { + req, out := c.UpdateCapacityReservationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateDataCatalog = "UpdateDataCatalog" + +// UpdateDataCatalogRequest generates a "aws/request.Request" representing the +// client's request for the UpdateDataCatalog operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateDataCatalog for more information on using the UpdateDataCatalog +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the UpdateDataCatalogRequest method. +// req, resp := client.UpdateDataCatalogRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UpdateDataCatalog +func (c *Athena) UpdateDataCatalogRequest(input *UpdateDataCatalogInput) (req *request.Request, output *UpdateDataCatalogOutput) { + op := &request.Operation{ + Name: opUpdateDataCatalog, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateDataCatalogInput{} + } + + output = &UpdateDataCatalogOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateDataCatalog API operation for Amazon Athena. 
+// +// Updates the data catalog that has the specified name. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Athena's +// API operation UpdateDataCatalog for usage and error information. +// +// Returned Error Types: +// +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UpdateDataCatalog +func (c *Athena) UpdateDataCatalog(input *UpdateDataCatalogInput) (*UpdateDataCatalogOutput, error) { + req, out := c.UpdateDataCatalogRequest(input) + return out, req.Send() +} + +// UpdateDataCatalogWithContext is the same as UpdateDataCatalog with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateDataCatalog for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Athena) UpdateDataCatalogWithContext(ctx aws.Context, input *UpdateDataCatalogInput, opts ...request.Option) (*UpdateDataCatalogOutput, error) { + req, out := c.UpdateDataCatalogRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateNamedQuery = "UpdateNamedQuery" + +// UpdateNamedQueryRequest generates a "aws/request.Request" representing the +// client's request for the UpdateNamedQuery operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateNamedQuery for more information on using the UpdateNamedQuery +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the UpdateNamedQueryRequest method. +// req, resp := client.UpdateNamedQueryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UpdateNamedQuery +func (c *Athena) UpdateNamedQueryRequest(input *UpdateNamedQueryInput) (req *request.Request, output *UpdateNamedQueryOutput) { + op := &request.Operation{ + Name: opUpdateNamedQuery, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateNamedQueryInput{} + } + + output = &UpdateNamedQueryOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateNamedQuery API operation for Amazon Athena. +// +// Updates a NamedQuery object. The database or workgroup cannot be updated. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Athena's +// API operation UpdateNamedQuery for usage and error information. +// +// Returned Error Types: +// +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UpdateNamedQuery +func (c *Athena) UpdateNamedQuery(input *UpdateNamedQueryInput) (*UpdateNamedQueryOutput, error) { + req, out := c.UpdateNamedQueryRequest(input) + return out, req.Send() +} + +// UpdateNamedQueryWithContext is the same as UpdateNamedQuery with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateNamedQuery for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Athena) UpdateNamedQueryWithContext(ctx aws.Context, input *UpdateNamedQueryInput, opts ...request.Option) (*UpdateNamedQueryOutput, error) { + req, out := c.UpdateNamedQueryRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateNotebook = "UpdateNotebook" + +// UpdateNotebookRequest generates a "aws/request.Request" representing the +// client's request for the UpdateNotebook operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateNotebook for more information on using the UpdateNotebook +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the UpdateNotebookRequest method. +// req, resp := client.UpdateNotebookRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UpdateNotebook +func (c *Athena) UpdateNotebookRequest(input *UpdateNotebookInput) (req *request.Request, output *UpdateNotebookOutput) { + op := &request.Operation{ + Name: opUpdateNotebook, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateNotebookInput{} + } + + output = &UpdateNotebookOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateNotebook API operation for Amazon Athena. +// +// Updates the contents of a Spark notebook. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
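+//
+// Illustrative usage sketch; notebookID and ipynbJSON stand for an existing
+// *string notebook ID and the full notebook content serialized as ipynb JSON:
+//
+//	_, err := client.UpdateNotebook(&athena.UpdateNotebookInput{
+//		NotebookId: notebookID,
+//		Payload:    aws.String(ipynbJSON),
+//		Type:       aws.String(athena.NotebookTypeIpynb),
+//	})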
+// +// See the AWS API reference guide for Amazon Athena's +// API operation UpdateNotebook for usage and error information. +// +// Returned Error Types: +// +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// - TooManyRequestsException +// Indicates that the request was throttled. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UpdateNotebook +func (c *Athena) UpdateNotebook(input *UpdateNotebookInput) (*UpdateNotebookOutput, error) { + req, out := c.UpdateNotebookRequest(input) + return out, req.Send() +} + +// UpdateNotebookWithContext is the same as UpdateNotebook with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateNotebook for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Athena) UpdateNotebookWithContext(ctx aws.Context, input *UpdateNotebookInput, opts ...request.Option) (*UpdateNotebookOutput, error) { + req, out := c.UpdateNotebookRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateNotebookMetadata = "UpdateNotebookMetadata" + +// UpdateNotebookMetadataRequest generates a "aws/request.Request" representing the +// client's request for the UpdateNotebookMetadata operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateNotebookMetadata for more information on using the UpdateNotebookMetadata +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the UpdateNotebookMetadataRequest method. +// req, resp := client.UpdateNotebookMetadataRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UpdateNotebookMetadata +func (c *Athena) UpdateNotebookMetadataRequest(input *UpdateNotebookMetadataInput) (req *request.Request, output *UpdateNotebookMetadataOutput) { + op := &request.Operation{ + Name: opUpdateNotebookMetadata, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateNotebookMetadataInput{} + } + + output = &UpdateNotebookMetadataOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateNotebookMetadata API operation for Amazon Athena. +// +// Updates the metadata for a notebook. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
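+//
+// Illustrative usage sketch; notebookID stands for an existing *string
+// notebook ID, and the new display name is a placeholder:
+//
+//	_, err := client.UpdateNotebookMetadata(&athena.UpdateNotebookMetadataInput{
+//		NotebookId: notebookID,
+//		Name:       aws.String("weekly-report-notebook"),
+//	})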
+// +// See the AWS API reference guide for Amazon Athena's +// API operation UpdateNotebookMetadata for usage and error information. +// +// Returned Error Types: +// +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// - TooManyRequestsException +// Indicates that the request was throttled. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UpdateNotebookMetadata +func (c *Athena) UpdateNotebookMetadata(input *UpdateNotebookMetadataInput) (*UpdateNotebookMetadataOutput, error) { + req, out := c.UpdateNotebookMetadataRequest(input) + return out, req.Send() +} + +// UpdateNotebookMetadataWithContext is the same as UpdateNotebookMetadata with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateNotebookMetadata for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Athena) UpdateNotebookMetadataWithContext(ctx aws.Context, input *UpdateNotebookMetadataInput, opts ...request.Option) (*UpdateNotebookMetadataOutput, error) { + req, out := c.UpdateNotebookMetadataRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdatePreparedStatement = "UpdatePreparedStatement" + +// UpdatePreparedStatementRequest generates a "aws/request.Request" representing the +// client's request for the UpdatePreparedStatement operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdatePreparedStatement for more information on using the UpdatePreparedStatement +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the UpdatePreparedStatementRequest method. +// req, resp := client.UpdatePreparedStatementRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UpdatePreparedStatement +func (c *Athena) UpdatePreparedStatementRequest(input *UpdatePreparedStatementInput) (req *request.Request, output *UpdatePreparedStatementOutput) { + op := &request.Operation{ + Name: opUpdatePreparedStatement, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdatePreparedStatementInput{} + } + + output = &UpdatePreparedStatementOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdatePreparedStatement API operation for Amazon Athena. +// +// Updates a prepared statement. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Athena's +// API operation UpdatePreparedStatement for usage and error information. +// +// Returned Error Types: +// +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// - ResourceNotFoundException +// A resource, such as a workgroup, was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UpdatePreparedStatement +func (c *Athena) UpdatePreparedStatement(input *UpdatePreparedStatementInput) (*UpdatePreparedStatementOutput, error) { + req, out := c.UpdatePreparedStatementRequest(input) + return out, req.Send() +} + +// UpdatePreparedStatementWithContext is the same as UpdatePreparedStatement with the addition of +// the ability to pass a context and additional request options. +// +// See UpdatePreparedStatement for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Athena) UpdatePreparedStatementWithContext(ctx aws.Context, input *UpdatePreparedStatementInput, opts ...request.Option) (*UpdatePreparedStatementOutput, error) { + req, out := c.UpdatePreparedStatementRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateWorkGroup = "UpdateWorkGroup" + +// UpdateWorkGroupRequest generates a "aws/request.Request" representing the +// client's request for the UpdateWorkGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateWorkGroup for more information on using the UpdateWorkGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the UpdateWorkGroupRequest method. +// req, resp := client.UpdateWorkGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UpdateWorkGroup +func (c *Athena) UpdateWorkGroupRequest(input *UpdateWorkGroupInput) (req *request.Request, output *UpdateWorkGroupOutput) { + op := &request.Operation{ + Name: opUpdateWorkGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateWorkGroupInput{} + } + + output = &UpdateWorkGroupOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateWorkGroup API operation for Amazon Athena. +// +// Updates the workgroup with the specified name. The workgroup's name cannot +// be changed. Only ConfigurationUpdates can be specified. 
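+//
+// Illustrative usage sketch; the workgroup name and S3 output location below
+// are placeholder values:
+//
+//	_, err := client.UpdateWorkGroup(&athena.UpdateWorkGroupInput{
+//		WorkGroup: aws.String("primary"),
+//		ConfigurationUpdates: &athena.WorkGroupConfigurationUpdates{
+//			ResultConfigurationUpdates: &athena.ResultConfigurationUpdates{
+//				OutputLocation: aws.String("s3://example-bucket/athena-results/"),
+//			},
+//		},
+//	})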
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Athena's +// API operation UpdateWorkGroup for usage and error information. +// +// Returned Error Types: +// +// - InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UpdateWorkGroup +func (c *Athena) UpdateWorkGroup(input *UpdateWorkGroupInput) (*UpdateWorkGroupOutput, error) { + req, out := c.UpdateWorkGroupRequest(input) + return out, req.Send() +} + +// UpdateWorkGroupWithContext is the same as UpdateWorkGroup with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateWorkGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Athena) UpdateWorkGroupWithContext(ctx aws.Context, input *UpdateWorkGroupInput, opts ...request.Option) (*UpdateWorkGroupOutput, error) { + req, out := c.UpdateWorkGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// Indicates that an Amazon S3 canned ACL should be set to control ownership +// of stored query results. When Athena stores query results in Amazon S3, the +// canned ACL is set with the x-amz-acl request header. For more information +// about S3 Object Ownership, see Object Ownership settings (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html#object-ownership-overview) +// in the Amazon S3 User Guide. +type AclConfiguration struct { + _ struct{} `type:"structure"` + + // The Amazon S3 canned ACL that Athena should specify when storing query results. + // Currently the only supported canned ACL is BUCKET_OWNER_FULL_CONTROL. If + // a query runs in a workgroup and the workgroup overrides client-side settings, + // then the Amazon S3 canned ACL specified in the workgroup's settings is used + // for all queries that run in the workgroup. For more information about Amazon + // S3 canned ACLs, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl) + // in the Amazon S3 User Guide. + // + // S3AclOption is a required field + S3AclOption *string `type:"string" required:"true" enum:"S3AclOption"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AclConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s AclConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AclConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AclConfiguration"} + if s.S3AclOption == nil { + invalidParams.Add(request.NewErrParamRequired("S3AclOption")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3AclOption sets the S3AclOption field's value. +func (s *AclConfiguration) SetS3AclOption(v string) *AclConfiguration { + s.S3AclOption = &v + return s +} + +// Contains the application runtime IDs and their supported DPU sizes. +type ApplicationDPUSizes struct { + _ struct{} `type:"structure"` + + // The name of the supported application runtime (for example, Athena notebook + // version 1). + ApplicationRuntimeId *string `min:"1" type:"string"` + + // A list of the supported DPU sizes that the application runtime supports. + SupportedDPUSizes []*int64 `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ApplicationDPUSizes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ApplicationDPUSizes) GoString() string { + return s.String() +} + +// SetApplicationRuntimeId sets the ApplicationRuntimeId field's value. +func (s *ApplicationDPUSizes) SetApplicationRuntimeId(v string) *ApplicationDPUSizes { + s.ApplicationRuntimeId = &v + return s +} + +// SetSupportedDPUSizes sets the SupportedDPUSizes field's value. +func (s *ApplicationDPUSizes) SetSupportedDPUSizes(v []*int64) *ApplicationDPUSizes { + s.SupportedDPUSizes = v + return s +} + +// Provides information about an Athena query error. The AthenaError feature +// provides standardized error information to help you understand failed queries +// and take steps after a query failure occurs. AthenaError includes an ErrorCategory +// field that specifies whether the cause of the failed query is due to system +// error, user error, or other error. +type AthenaError struct { + _ struct{} `type:"structure"` + + // An integer value that specifies the category of a query failure error. The + // following list shows the category for each integer value. + // + // 1 - System + // + // 2 - User + // + // 3 - Other + ErrorCategory *int64 `min:"1" type:"integer"` + + // Contains a short description of the error that occurred. + ErrorMessage *string `type:"string"` + + // An integer value that provides specific information about an Athena query + // error. For the meaning of specific values, see the Error Type Reference (https://docs.aws.amazon.com/athena/latest/ug/error-reference.html#error-reference-error-type-reference) + // in the Amazon Athena User Guide. + ErrorType *int64 `type:"integer"` + + // True if the query might succeed if resubmitted. + Retryable *bool `type:"boolean"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s AthenaError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AthenaError) GoString() string { + return s.String() +} + +// SetErrorCategory sets the ErrorCategory field's value. +func (s *AthenaError) SetErrorCategory(v int64) *AthenaError { + s.ErrorCategory = &v + return s +} + +// SetErrorMessage sets the ErrorMessage field's value. +func (s *AthenaError) SetErrorMessage(v string) *AthenaError { + s.ErrorMessage = &v + return s +} + +// SetErrorType sets the ErrorType field's value. +func (s *AthenaError) SetErrorType(v int64) *AthenaError { + s.ErrorType = &v + return s +} + +// SetRetryable sets the Retryable field's value. +func (s *AthenaError) SetRetryable(v bool) *AthenaError { + s.Retryable = &v + return s +} + +// Contains an array of named query IDs. +type BatchGetNamedQueryInput struct { + _ struct{} `type:"structure"` + + // An array of query IDs. + // + // NamedQueryIds is a required field + NamedQueryIds []*string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchGetNamedQueryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchGetNamedQueryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchGetNamedQueryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchGetNamedQueryInput"} + if s.NamedQueryIds == nil { + invalidParams.Add(request.NewErrParamRequired("NamedQueryIds")) + } + if s.NamedQueryIds != nil && len(s.NamedQueryIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NamedQueryIds", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNamedQueryIds sets the NamedQueryIds field's value. +func (s *BatchGetNamedQueryInput) SetNamedQueryIds(v []*string) *BatchGetNamedQueryInput { + s.NamedQueryIds = v + return s +} + +type BatchGetNamedQueryOutput struct { + _ struct{} `type:"structure"` + + // Information about the named query IDs submitted. + NamedQueries []*NamedQuery `type:"list"` + + // Information about provided query IDs. + UnprocessedNamedQueryIds []*UnprocessedNamedQueryId `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchGetNamedQueryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchGetNamedQueryOutput) GoString() string { + return s.String() +} + +// SetNamedQueries sets the NamedQueries field's value. +func (s *BatchGetNamedQueryOutput) SetNamedQueries(v []*NamedQuery) *BatchGetNamedQueryOutput { + s.NamedQueries = v + return s +} + +// SetUnprocessedNamedQueryIds sets the UnprocessedNamedQueryIds field's value. +func (s *BatchGetNamedQueryOutput) SetUnprocessedNamedQueryIds(v []*UnprocessedNamedQueryId) *BatchGetNamedQueryOutput { + s.UnprocessedNamedQueryIds = v + return s +} + +type BatchGetPreparedStatementInput struct { + _ struct{} `type:"structure"` + + // A list of prepared statement names to return. + // + // PreparedStatementNames is a required field + PreparedStatementNames []*string `type:"list" required:"true"` + + // The name of the workgroup to which the prepared statements belong. + // + // WorkGroup is a required field + WorkGroup *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchGetPreparedStatementInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchGetPreparedStatementInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchGetPreparedStatementInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchGetPreparedStatementInput"} + if s.PreparedStatementNames == nil { + invalidParams.Add(request.NewErrParamRequired("PreparedStatementNames")) + } + if s.WorkGroup == nil { + invalidParams.Add(request.NewErrParamRequired("WorkGroup")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPreparedStatementNames sets the PreparedStatementNames field's value. +func (s *BatchGetPreparedStatementInput) SetPreparedStatementNames(v []*string) *BatchGetPreparedStatementInput { + s.PreparedStatementNames = v + return s +} + +// SetWorkGroup sets the WorkGroup field's value. +func (s *BatchGetPreparedStatementInput) SetWorkGroup(v string) *BatchGetPreparedStatementInput { + s.WorkGroup = &v + return s +} + +type BatchGetPreparedStatementOutput struct { + _ struct{} `type:"structure"` + + // The list of prepared statements returned. + PreparedStatements []*PreparedStatement `type:"list"` + + // A list of one or more prepared statements that were requested but could not + // be returned. + UnprocessedPreparedStatementNames []*UnprocessedPreparedStatementName `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchGetPreparedStatementOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchGetPreparedStatementOutput) GoString() string { + return s.String() +} + +// SetPreparedStatements sets the PreparedStatements field's value. +func (s *BatchGetPreparedStatementOutput) SetPreparedStatements(v []*PreparedStatement) *BatchGetPreparedStatementOutput { + s.PreparedStatements = v + return s +} + +// SetUnprocessedPreparedStatementNames sets the UnprocessedPreparedStatementNames field's value. +func (s *BatchGetPreparedStatementOutput) SetUnprocessedPreparedStatementNames(v []*UnprocessedPreparedStatementName) *BatchGetPreparedStatementOutput { + s.UnprocessedPreparedStatementNames = v + return s +} + +// Contains an array of query execution IDs. +type BatchGetQueryExecutionInput struct { + _ struct{} `type:"structure"` + + // An array of query execution IDs. + // + // QueryExecutionIds is a required field + QueryExecutionIds []*string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". func (s BatchGetQueryExecutionInput) String() string { return awsutil.Prettify(s) } @@ -3646,18 +7237,5328 @@ func (s BatchGetQueryExecutionInput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s BatchGetQueryExecutionInput) GoString() string { +func (s BatchGetQueryExecutionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchGetQueryExecutionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchGetQueryExecutionInput"} + if s.QueryExecutionIds == nil { + invalidParams.Add(request.NewErrParamRequired("QueryExecutionIds")) + } + if s.QueryExecutionIds != nil && len(s.QueryExecutionIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("QueryExecutionIds", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetQueryExecutionIds sets the QueryExecutionIds field's value. +func (s *BatchGetQueryExecutionInput) SetQueryExecutionIds(v []*string) *BatchGetQueryExecutionInput { + s.QueryExecutionIds = v + return s +} + +type BatchGetQueryExecutionOutput struct { + _ struct{} `type:"structure"` + + // Information about a query execution. + QueryExecutions []*QueryExecution `type:"list"` + + // Information about the query executions that failed to run. + UnprocessedQueryExecutionIds []*UnprocessedQueryExecutionId `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BatchGetQueryExecutionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
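+
+// Illustrative sketch, not part of the generated SDK: resolving a batch of
+// query execution IDs and separating out the ones Athena could not process.
+// Assumes svc is the *athena.Athena client from the earlier sketch, fmt is
+// imported, and the IDs are hypothetical.
+//
+//	out, err := svc.BatchGetQueryExecution(&athena.BatchGetQueryExecutionInput{
+//		QueryExecutionIds: aws.StringSlice([]string{"id-1", "id-2"}),
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	for _, qe := range out.QueryExecutions {
+//		fmt.Println(aws.StringValue(qe.QueryExecutionId), aws.StringValue(qe.Status.State))
+//	}
+//	for _, un := range out.UnprocessedQueryExecutionIds {
+//		fmt.Println("unprocessed:", aws.StringValue(un.QueryExecutionId), aws.StringValue(un.ErrorCode))
+//	}
+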
+func (s BatchGetQueryExecutionOutput) GoString() string { + return s.String() +} + +// SetQueryExecutions sets the QueryExecutions field's value. +func (s *BatchGetQueryExecutionOutput) SetQueryExecutions(v []*QueryExecution) *BatchGetQueryExecutionOutput { + s.QueryExecutions = v + return s +} + +// SetUnprocessedQueryExecutionIds sets the UnprocessedQueryExecutionIds field's value. +func (s *BatchGetQueryExecutionOutput) SetUnprocessedQueryExecutionIds(v []*UnprocessedQueryExecutionId) *BatchGetQueryExecutionOutput { + s.UnprocessedQueryExecutionIds = v + return s +} + +// Contains configuration information for the calculation. +type CalculationConfiguration struct { + _ struct{} `type:"structure"` + + // A string that contains the code for the calculation. + CodeBlock *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CalculationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CalculationConfiguration) GoString() string { + return s.String() +} + +// SetCodeBlock sets the CodeBlock field's value. +func (s *CalculationConfiguration) SetCodeBlock(v string) *CalculationConfiguration { + s.CodeBlock = &v + return s +} + +// Contains information about an application-specific calculation result. +type CalculationResult struct { + _ struct{} `type:"structure"` + + // The Amazon S3 location of the folder for the calculation results. + ResultS3Uri *string `type:"string"` + + // The data format of the calculation result. + ResultType *string `min:"1" type:"string"` + + // The Amazon S3 location of the stderr error messages file for the calculation. + StdErrorS3Uri *string `type:"string"` + + // The Amazon S3 location of the stdout file for the calculation. + StdOutS3Uri *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CalculationResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CalculationResult) GoString() string { + return s.String() +} + +// SetResultS3Uri sets the ResultS3Uri field's value. +func (s *CalculationResult) SetResultS3Uri(v string) *CalculationResult { + s.ResultS3Uri = &v + return s +} + +// SetResultType sets the ResultType field's value. +func (s *CalculationResult) SetResultType(v string) *CalculationResult { + s.ResultType = &v + return s +} + +// SetStdErrorS3Uri sets the StdErrorS3Uri field's value. +func (s *CalculationResult) SetStdErrorS3Uri(v string) *CalculationResult { + s.StdErrorS3Uri = &v + return s +} + +// SetStdOutS3Uri sets the StdOutS3Uri field's value. 
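+
+// Illustrative sketch, not part of the generated SDK: GetCalculationExecution
+// returns a CalculationResult in its Result field once a calculation finishes,
+// which is where these S3 URIs can be read. The execution ID is hypothetical;
+// svc and fmt are assumed as above.
+//
+//	out, err := svc.GetCalculationExecution(&athena.GetCalculationExecutionInput{
+//		CalculationExecutionId: aws.String("calc-exec-id"),
+//	})
+//	if err == nil && out.Result != nil {
+//		fmt.Println("stdout:", aws.StringValue(out.Result.StdOutS3Uri))
+//		fmt.Println("stderr:", aws.StringValue(out.Result.StdErrorS3Uri))
+//	}
+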
+func (s *CalculationResult) SetStdOutS3Uri(v string) *CalculationResult { + s.StdOutS3Uri = &v + return s +} + +// Contains statistics for a notebook calculation. +type CalculationStatistics struct { + _ struct{} `type:"structure"` + + // The data processing unit execution time in milliseconds for the calculation. + DpuExecutionInMillis *int64 `type:"long"` + + // The progress of the calculation. + Progress *string `min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CalculationStatistics) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CalculationStatistics) GoString() string { + return s.String() +} + +// SetDpuExecutionInMillis sets the DpuExecutionInMillis field's value. +func (s *CalculationStatistics) SetDpuExecutionInMillis(v int64) *CalculationStatistics { + s.DpuExecutionInMillis = &v + return s +} + +// SetProgress sets the Progress field's value. +func (s *CalculationStatistics) SetProgress(v string) *CalculationStatistics { + s.Progress = &v + return s +} + +// Contains information about the status of a notebook calculation. +type CalculationStatus struct { + _ struct{} `type:"structure"` + + // The date and time the calculation completed processing. + CompletionDateTime *time.Time `type:"timestamp"` + + // The state of the calculation execution. A description of each state follows. + // + // CREATING - The calculation is in the process of being created. + // + // CREATED - The calculation has been created and is ready to run. + // + // QUEUED - The calculation has been queued for processing. + // + // RUNNING - The calculation is running. + // + // CANCELING - A request to cancel the calculation has been received and the + // system is working to stop it. + // + // CANCELED - The calculation is no longer running as the result of a cancel + // request. + // + // COMPLETED - The calculation has completed without error. + // + // FAILED - The calculation failed and is no longer running. + State *string `type:"string" enum:"CalculationExecutionState"` + + // The reason for the calculation state change (for example, the calculation + // was canceled because the session was terminated). + StateChangeReason *string `min:"1" type:"string"` + + // The date and time the calculation was submitted for processing. + SubmissionDateTime *time.Time `type:"timestamp"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CalculationStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CalculationStatus) GoString() string { + return s.String() +} + +// SetCompletionDateTime sets the CompletionDateTime field's value. 
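+
+// Illustrative sketch, not part of the generated SDK: the states documented
+// above split into terminal and non-terminal, which is the distinction a
+// polling loop needs. The constants follow from the CalculationExecutionState
+// enum on the State field.
+//
+//	func calculationFinished(state string) bool {
+//		switch state {
+//		case athena.CalculationExecutionStateCompleted,
+//			athena.CalculationExecutionStateFailed,
+//			athena.CalculationExecutionStateCanceled:
+//			return true
+//		}
+//		return false // CREATING, CREATED, QUEUED, RUNNING, CANCELING
+//	}
+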
+func (s *CalculationStatus) SetCompletionDateTime(v time.Time) *CalculationStatus { + s.CompletionDateTime = &v + return s +} + +// SetState sets the State field's value. +func (s *CalculationStatus) SetState(v string) *CalculationStatus { + s.State = &v + return s +} + +// SetStateChangeReason sets the StateChangeReason field's value. +func (s *CalculationStatus) SetStateChangeReason(v string) *CalculationStatus { + s.StateChangeReason = &v + return s +} + +// SetSubmissionDateTime sets the SubmissionDateTime field's value. +func (s *CalculationStatus) SetSubmissionDateTime(v time.Time) *CalculationStatus { + s.SubmissionDateTime = &v + return s +} + +// Summary information for a notebook calculation. +type CalculationSummary struct { + _ struct{} `type:"structure"` + + // The calculation execution UUID. + CalculationExecutionId *string `min:"1" type:"string"` + + // A description of the calculation. + Description *string `min:"1" type:"string"` + + // Contains information about the status of the calculation. + Status *CalculationStatus `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CalculationSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CalculationSummary) GoString() string { + return s.String() +} + +// SetCalculationExecutionId sets the CalculationExecutionId field's value. +func (s *CalculationSummary) SetCalculationExecutionId(v string) *CalculationSummary { + s.CalculationExecutionId = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *CalculationSummary) SetDescription(v string) *CalculationSummary { + s.Description = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *CalculationSummary) SetStatus(v *CalculationStatus) *CalculationSummary { + s.Status = v + return s +} + +type CancelCapacityReservationInput struct { + _ struct{} `type:"structure"` + + // The name of the capacity reservation to cancel. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CancelCapacityReservationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CancelCapacityReservationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
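+
+// Illustrative sketch, not part of the generated SDK: the SDK runs Validate
+// when it builds a request, but it can also be called directly to reject bad
+// input before any network traffic. The empty name here is a deliberately
+// invalid assumption.
+//
+//	in := &athena.CancelCapacityReservationInput{Name: aws.String("")}
+//	if err := in.Validate(); err != nil {
+//		fmt.Println(err) // reports the violated minimum length of 1 for Name
+//	}
+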
+func (s *CancelCapacityReservationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CancelCapacityReservationInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *CancelCapacityReservationInput) SetName(v string) *CancelCapacityReservationInput { + s.Name = &v + return s +} + +type CancelCapacityReservationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CancelCapacityReservationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CancelCapacityReservationOutput) GoString() string { + return s.String() +} + +// Contains the submission time of a single allocation request for a capacity +// reservation and the most recent status of the attempted allocation. +type CapacityAllocation struct { + _ struct{} `type:"structure"` + + // The time when the capacity allocation request was completed. + RequestCompletionTime *time.Time `type:"timestamp"` + + // The time when the capacity allocation was requested. + // + // RequestTime is a required field + RequestTime *time.Time `type:"timestamp" required:"true"` + + // The status of the capacity allocation. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"CapacityAllocationStatus"` + + // The status message of the capacity allocation. + StatusMessage *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CapacityAllocation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CapacityAllocation) GoString() string { + return s.String() +} + +// SetRequestCompletionTime sets the RequestCompletionTime field's value. +func (s *CapacityAllocation) SetRequestCompletionTime(v time.Time) *CapacityAllocation { + s.RequestCompletionTime = &v + return s +} + +// SetRequestTime sets the RequestTime field's value. +func (s *CapacityAllocation) SetRequestTime(v time.Time) *CapacityAllocation { + s.RequestTime = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *CapacityAllocation) SetStatus(v string) *CapacityAllocation { + s.Status = &v + return s +} + +// SetStatusMessage sets the StatusMessage field's value. 
+func (s *CapacityAllocation) SetStatusMessage(v string) *CapacityAllocation { + s.StatusMessage = &v + return s +} + +// A mapping between one or more workgroups and a capacity reservation. +type CapacityAssignment struct { + _ struct{} `type:"structure"` + + // The list of workgroup names for the capacity assignment. + WorkGroupNames []*string `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CapacityAssignment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CapacityAssignment) GoString() string { + return s.String() +} + +// SetWorkGroupNames sets the WorkGroupNames field's value. +func (s *CapacityAssignment) SetWorkGroupNames(v []*string) *CapacityAssignment { + s.WorkGroupNames = v + return s +} + +// Assigns Athena workgroups (and hence their queries) to capacity reservations. +// A capacity reservation can have only one capacity assignment configuration, +// but the capacity assignment configuration can be made up of multiple individual +// assignments. Each assignment specifies how Athena queries can consume capacity +// from the capacity reservation that their workgroup is mapped to. +type CapacityAssignmentConfiguration struct { + _ struct{} `type:"structure"` + + // The list of assignments that make up the capacity assignment configuration. + CapacityAssignments []*CapacityAssignment `type:"list"` + + // The name of the reservation that the capacity assignment configuration is + // for. + CapacityReservationName *string `min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CapacityAssignmentConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CapacityAssignmentConfiguration) GoString() string { + return s.String() +} + +// SetCapacityAssignments sets the CapacityAssignments field's value. +func (s *CapacityAssignmentConfiguration) SetCapacityAssignments(v []*CapacityAssignment) *CapacityAssignmentConfiguration { + s.CapacityAssignments = v + return s +} + +// SetCapacityReservationName sets the CapacityReservationName field's value. +func (s *CapacityAssignmentConfiguration) SetCapacityReservationName(v string) *CapacityAssignmentConfiguration { + s.CapacityReservationName = &v + return s +} + +// A reservation for a specified number of data processing units (DPUs). When +// a reservation is initially created, it has no DPUs. Athena allocates DPUs +// until the allocated amount equals the requested amount. +type CapacityReservation struct { + _ struct{} `type:"structure"` + + // The number of data processing units currently allocated. 
+ // + // AllocatedDpus is a required field + AllocatedDpus *int64 `type:"integer" required:"true"` + + // The time in UTC epoch millis when the capacity reservation was created. + // + // CreationTime is a required field + CreationTime *time.Time `type:"timestamp" required:"true"` + + // Contains the submission time of a single allocation request for a capacity + // reservation and the most recent status of the attempted allocation. + LastAllocation *CapacityAllocation `type:"structure"` + + // The time of the most recent capacity allocation that succeeded. + LastSuccessfulAllocationTime *time.Time `type:"timestamp"` + + // The name of the capacity reservation. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The status of the capacity reservation. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"CapacityReservationStatus"` + + // The number of data processing units requested. + // + // TargetDpus is a required field + TargetDpus *int64 `min:"24" type:"integer" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CapacityReservation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CapacityReservation) GoString() string { + return s.String() +} + +// SetAllocatedDpus sets the AllocatedDpus field's value. +func (s *CapacityReservation) SetAllocatedDpus(v int64) *CapacityReservation { + s.AllocatedDpus = &v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *CapacityReservation) SetCreationTime(v time.Time) *CapacityReservation { + s.CreationTime = &v + return s +} + +// SetLastAllocation sets the LastAllocation field's value. +func (s *CapacityReservation) SetLastAllocation(v *CapacityAllocation) *CapacityReservation { + s.LastAllocation = v + return s +} + +// SetLastSuccessfulAllocationTime sets the LastSuccessfulAllocationTime field's value. +func (s *CapacityReservation) SetLastSuccessfulAllocationTime(v time.Time) *CapacityReservation { + s.LastSuccessfulAllocationTime = &v + return s +} + +// SetName sets the Name field's value. +func (s *CapacityReservation) SetName(v string) *CapacityReservation { + s.Name = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *CapacityReservation) SetStatus(v string) *CapacityReservation { + s.Status = &v + return s +} + +// SetTargetDpus sets the TargetDpus field's value. +func (s *CapacityReservation) SetTargetDpus(v int64) *CapacityReservation { + s.TargetDpus = &v + return s +} + +// Contains metadata for a column in a table. +type Column struct { + _ struct{} `type:"structure"` + + // Optional information about the column. + Comment *string `type:"string"` + + // The name of the column. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The data type of the column. + Type *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s Column) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Column) GoString() string { + return s.String() +} + +// SetComment sets the Comment field's value. +func (s *Column) SetComment(v string) *Column { + s.Comment = &v + return s +} + +// SetName sets the Name field's value. +func (s *Column) SetName(v string) *Column { + s.Name = &v + return s +} + +// SetType sets the Type field's value. +func (s *Column) SetType(v string) *Column { + s.Type = &v + return s +} + +// Information about the columns in a query execution result. +type ColumnInfo struct { + _ struct{} `type:"structure"` + + // Indicates whether values in the column are case-sensitive. + CaseSensitive *bool `type:"boolean"` + + // The catalog to which the query results belong. + CatalogName *string `type:"string"` + + // A column label. + Label *string `type:"string"` + + // The name of the column. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // Unsupported constraint. This value always shows as UNKNOWN. + Nullable *string `type:"string" enum:"ColumnNullable"` + + // For DECIMAL data types, specifies the total number of digits, up to 38. For + // performance reasons, we recommend up to 18 digits. + Precision *int64 `type:"integer"` + + // For DECIMAL data types, specifies the total number of digits in the fractional + // part of the value. Defaults to 0. + Scale *int64 `type:"integer"` + + // The schema name (database name) to which the query results belong. + SchemaName *string `type:"string"` + + // The table name for the query results. + TableName *string `type:"string"` + + // The data type of the column. + // + // Type is a required field + Type *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ColumnInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ColumnInfo) GoString() string { + return s.String() +} + +// SetCaseSensitive sets the CaseSensitive field's value. +func (s *ColumnInfo) SetCaseSensitive(v bool) *ColumnInfo { + s.CaseSensitive = &v + return s +} + +// SetCatalogName sets the CatalogName field's value. +func (s *ColumnInfo) SetCatalogName(v string) *ColumnInfo { + s.CatalogName = &v + return s +} + +// SetLabel sets the Label field's value. +func (s *ColumnInfo) SetLabel(v string) *ColumnInfo { + s.Label = &v + return s +} + +// SetName sets the Name field's value. +func (s *ColumnInfo) SetName(v string) *ColumnInfo { + s.Name = &v + return s +} + +// SetNullable sets the Nullable field's value. +func (s *ColumnInfo) SetNullable(v string) *ColumnInfo { + s.Nullable = &v + return s +} + +// SetPrecision sets the Precision field's value. 
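+
+// Illustrative sketch, not part of the generated SDK: ColumnInfo is surfaced
+// through GetQueryResults metadata; for a DECIMAL(10,2) column, Precision is
+// 10 and Scale is 2. The query execution ID is hypothetical; svc and fmt are
+// assumed as above.
+//
+//	res, err := svc.GetQueryResults(&athena.GetQueryResultsInput{
+//		QueryExecutionId: aws.String("query-exec-id"),
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	for _, col := range res.ResultSet.ResultSetMetadata.ColumnInfo {
+//		fmt.Printf("%s %s precision=%d scale=%d\n",
+//			aws.StringValue(col.Name), aws.StringValue(col.Type),
+//			aws.Int64Value(col.Precision), aws.Int64Value(col.Scale))
+//	}
+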
+func (s *ColumnInfo) SetPrecision(v int64) *ColumnInfo { + s.Precision = &v + return s +} + +// SetScale sets the Scale field's value. +func (s *ColumnInfo) SetScale(v int64) *ColumnInfo { + s.Scale = &v + return s +} + +// SetSchemaName sets the SchemaName field's value. +func (s *ColumnInfo) SetSchemaName(v string) *ColumnInfo { + s.SchemaName = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *ColumnInfo) SetTableName(v string) *ColumnInfo { + s.TableName = &v + return s +} + +// SetType sets the Type field's value. +func (s *ColumnInfo) SetType(v string) *ColumnInfo { + s.Type = &v + return s +} + +type CreateCapacityReservationInput struct { + _ struct{} `type:"structure"` + + // The name of the capacity reservation to create. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The tags for the capacity reservation. + Tags []*Tag `type:"list"` + + // The number of requested data processing units. + // + // TargetDpus is a required field + TargetDpus *int64 `min:"24" type:"integer" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateCapacityReservationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateCapacityReservationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateCapacityReservationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateCapacityReservationInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.TargetDpus == nil { + invalidParams.Add(request.NewErrParamRequired("TargetDpus")) + } + if s.TargetDpus != nil && *s.TargetDpus < 24 { + invalidParams.Add(request.NewErrParamMinValue("TargetDpus", 24)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *CreateCapacityReservationInput) SetName(v string) *CreateCapacityReservationInput { + s.Name = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateCapacityReservationInput) SetTags(v []*Tag) *CreateCapacityReservationInput { + s.Tags = v + return s +} + +// SetTargetDpus sets the TargetDpus field's value. +func (s *CreateCapacityReservationInput) SetTargetDpus(v int64) *CreateCapacityReservationInput { + s.TargetDpus = &v + return s +} + +type CreateCapacityReservationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
+// The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateCapacityReservationOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateCapacityReservationOutput) GoString() string {
+	return s.String()
+}
+
+type CreateDataCatalogInput struct {
+	_ struct{} `type:"structure"`
+
+	// A description of the data catalog to be created.
+	Description *string `min:"1" type:"string"`
+
+	// The name of the data catalog to create. The catalog name must be unique for
+	// the Amazon Web Services account and can use a maximum of 127 alphanumeric,
+	// underscore, at sign, or hyphen characters. The remainder of the length constraint
+	// of 256 is reserved for use by Athena.
+	//
+	// Name is a required field
+	Name *string `min:"1" type:"string" required:"true"`
+
+	// Specifies the Lambda function or functions to use for creating the data catalog.
+	// This is a mapping whose values depend on the catalog type.
+	//
+	//    * For the HIVE data catalog type, use the following syntax. The metadata-function
+	//    parameter is required. The sdk-version parameter is optional and defaults
+	//    to the currently supported version. metadata-function=lambda_arn, sdk-version=version_number
+	//
+	//    * For the LAMBDA data catalog type, use one of the following sets of required
+	//    parameters, but not both. If you have one Lambda function that processes
+	//    metadata and another for reading the actual data, use the following syntax.
+	//    Both parameters are required. metadata-function=lambda_arn, record-function=lambda_arn
+	//    If you have a composite Lambda function that processes both metadata and
+	//    data, use the following syntax to specify your Lambda function. function=lambda_arn
+	//
+	//    * The GLUE type takes a catalog ID parameter, which is required. The catalog_id
+	//    is the account ID of the Amazon Web Services account to which the Glue
+	//    Data Catalog belongs. catalog-id=catalog_id The GLUE data catalog type
+	//    also applies to the default AwsDataCatalog that already exists in your
+	//    account, of which you can have only one and which you cannot modify.
+	Parameters map[string]*string `type:"map"`
+
+	// A list of comma-separated tags to add to the data catalog that is created.
+	Tags []*Tag `type:"list"`
+
+	// The type of data catalog to create: LAMBDA for a federated catalog, HIVE
+	// for an external Hive metastore, or GLUE for a Glue Data Catalog.
+	//
+	// Type is a required field
+	Type *string `type:"string" required:"true" enum:"DataCatalogType"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateDataCatalogInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CreateDataCatalogInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
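+
+// Illustrative sketch, not part of the generated SDK: creating a LAMBDA
+// catalog backed by a single composite function, following the Parameters
+// syntax documented above. The catalog name and Lambda ARN are assumptions;
+// svc is assumed as above.
+//
+//	_, err := svc.CreateDataCatalog(&athena.CreateDataCatalogInput{
+//		Name: aws.String("my_federated_catalog"),
+//		Type: aws.String(athena.DataCatalogTypeLambda),
+//		Parameters: map[string]*string{
+//			"function": aws.String("arn:aws:lambda:us-east-1:111122223333:function:my-connector"),
+//		},
+//	})
+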
+func (s *CreateDataCatalogInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDataCatalogInput"} + if s.Description != nil && len(*s.Description) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Description", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. +func (s *CreateDataCatalogInput) SetDescription(v string) *CreateDataCatalogInput { + s.Description = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateDataCatalogInput) SetName(v string) *CreateDataCatalogInput { + s.Name = &v + return s +} + +// SetParameters sets the Parameters field's value. +func (s *CreateDataCatalogInput) SetParameters(v map[string]*string) *CreateDataCatalogInput { + s.Parameters = v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateDataCatalogInput) SetTags(v []*Tag) *CreateDataCatalogInput { + s.Tags = v + return s +} + +// SetType sets the Type field's value. +func (s *CreateDataCatalogInput) SetType(v string) *CreateDataCatalogInput { + s.Type = &v + return s +} + +type CreateDataCatalogOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateDataCatalogOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateDataCatalogOutput) GoString() string { + return s.String() +} + +type CreateNamedQueryInput struct { + _ struct{} `type:"structure"` + + // A unique case-sensitive string used to ensure the request to create the query + // is idempotent (executes only once). If another CreateNamedQuery request is + // received, the same response is returned and another query is not created. + // If a parameter has changed, for example, the QueryString, an error is returned. + // + // This token is listed as not required because Amazon Web Services SDKs (for + // example the Amazon Web Services SDK for Java) auto-generate the token for + // users. If you are not using the Amazon Web Services SDK or the Amazon Web + // Services CLI, you must provide this token or the action will fail. + ClientRequestToken *string `min:"32" type:"string" idempotencyToken:"true"` + + // The database to which the query belongs. + // + // Database is a required field + Database *string `min:"1" type:"string" required:"true"` + + // The query description. + Description *string `min:"1" type:"string"` + + // The query name. 
+ // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The contents of the query with all query statements. + // + // QueryString is a required field + QueryString *string `min:"1" type:"string" required:"true"` + + // The name of the workgroup in which the named query is being created. + WorkGroup *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateNamedQueryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateNamedQueryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateNamedQueryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateNamedQueryInput"} + if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 32 { + invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 32)) + } + if s.Database == nil { + invalidParams.Add(request.NewErrParamRequired("Database")) + } + if s.Database != nil && len(*s.Database) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Database", 1)) + } + if s.Description != nil && len(*s.Description) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Description", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.QueryString == nil { + invalidParams.Add(request.NewErrParamRequired("QueryString")) + } + if s.QueryString != nil && len(*s.QueryString) < 1 { + invalidParams.Add(request.NewErrParamMinLen("QueryString", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientRequestToken sets the ClientRequestToken field's value. +func (s *CreateNamedQueryInput) SetClientRequestToken(v string) *CreateNamedQueryInput { + s.ClientRequestToken = &v + return s +} + +// SetDatabase sets the Database field's value. +func (s *CreateNamedQueryInput) SetDatabase(v string) *CreateNamedQueryInput { + s.Database = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *CreateNamedQueryInput) SetDescription(v string) *CreateNamedQueryInput { + s.Description = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateNamedQueryInput) SetName(v string) *CreateNamedQueryInput { + s.Name = &v + return s +} + +// SetQueryString sets the QueryString field's value. +func (s *CreateNamedQueryInput) SetQueryString(v string) *CreateNamedQueryInput { + s.QueryString = &v + return s +} + +// SetWorkGroup sets the WorkGroup field's value. +func (s *CreateNamedQueryInput) SetWorkGroup(v string) *CreateNamedQueryInput { + s.WorkGroup = &v + return s +} + +type CreateNamedQueryOutput struct { + _ struct{} `type:"structure"` + + // The unique ID of the query. + NamedQueryId *string `min:"1" type:"string"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateNamedQueryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateNamedQueryOutput) GoString() string { + return s.String() +} + +// SetNamedQueryId sets the NamedQueryId field's value. +func (s *CreateNamedQueryOutput) SetNamedQueryId(v string) *CreateNamedQueryOutput { + s.NamedQueryId = &v + return s +} + +type CreateNotebookInput struct { + _ struct{} `type:"structure"` + + // A unique case-sensitive string used to ensure the request to create the notebook + // is idempotent (executes only once). + // + // This token is listed as not required because Amazon Web Services SDKs (for + // example the Amazon Web Services SDK for Java) auto-generate the token for + // you. If you are not using the Amazon Web Services SDK or the Amazon Web Services + // CLI, you must provide this token or the action will fail. + ClientRequestToken *string `min:"1" type:"string"` + + // The name of the ipynb file to be created in the Spark workgroup, without + // the .ipynb extension. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The name of the Spark enabled workgroup in which the notebook will be created. + // + // WorkGroup is a required field + WorkGroup *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateNotebookInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateNotebookInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateNotebookInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateNotebookInput"} + if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.WorkGroup == nil { + invalidParams.Add(request.NewErrParamRequired("WorkGroup")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientRequestToken sets the ClientRequestToken field's value. +func (s *CreateNotebookInput) SetClientRequestToken(v string) *CreateNotebookInput { + s.ClientRequestToken = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateNotebookInput) SetName(v string) *CreateNotebookInput { + s.Name = &v + return s +} + +// SetWorkGroup sets the WorkGroup field's value. 
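+
+// Illustrative sketch, not part of the generated SDK: creating a notebook in
+// a Spark-enabled workgroup. Passing an explicit ClientRequestToken makes
+// manual retries idempotent; all names and the token are assumptions.
+//
+//	out, err := svc.CreateNotebook(&athena.CreateNotebookInput{
+//		ClientRequestToken: aws.String("unique-token-123"), // caller-chosen token
+//		Name:               aws.String("daily-report"),
+//		WorkGroup:          aws.String("spark-wg"),
+//	})
+//	if err == nil {
+//		fmt.Println("created notebook:", aws.StringValue(out.NotebookId))
+//	}
+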
+func (s *CreateNotebookInput) SetWorkGroup(v string) *CreateNotebookInput { + s.WorkGroup = &v + return s +} + +type CreateNotebookOutput struct { + _ struct{} `type:"structure"` + + // A unique identifier for the notebook. + NotebookId *string `min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateNotebookOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateNotebookOutput) GoString() string { + return s.String() +} + +// SetNotebookId sets the NotebookId field's value. +func (s *CreateNotebookOutput) SetNotebookId(v string) *CreateNotebookOutput { + s.NotebookId = &v + return s +} + +type CreatePreparedStatementInput struct { + _ struct{} `type:"structure"` + + // The description of the prepared statement. + Description *string `min:"1" type:"string"` + + // The query string for the prepared statement. + // + // QueryStatement is a required field + QueryStatement *string `min:"1" type:"string" required:"true"` + + // The name of the prepared statement. + // + // StatementName is a required field + StatementName *string `min:"1" type:"string" required:"true"` + + // The name of the workgroup to which the prepared statement belongs. + // + // WorkGroup is a required field + WorkGroup *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreatePreparedStatementInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreatePreparedStatementInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreatePreparedStatementInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreatePreparedStatementInput"} + if s.Description != nil && len(*s.Description) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Description", 1)) + } + if s.QueryStatement == nil { + invalidParams.Add(request.NewErrParamRequired("QueryStatement")) + } + if s.QueryStatement != nil && len(*s.QueryStatement) < 1 { + invalidParams.Add(request.NewErrParamMinLen("QueryStatement", 1)) + } + if s.StatementName == nil { + invalidParams.Add(request.NewErrParamRequired("StatementName")) + } + if s.StatementName != nil && len(*s.StatementName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StatementName", 1)) + } + if s.WorkGroup == nil { + invalidParams.Add(request.NewErrParamRequired("WorkGroup")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. 
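+
+// Illustrative sketch, not part of the generated SDK: registering a
+// parameterized statement that can later be run with EXECUTE ... USING.
+// The statement name, workgroup, and SQL text are assumptions.
+//
+//	_, err := svc.CreatePreparedStatement(&athena.CreatePreparedStatementInput{
+//		StatementName:  aws.String("orders_by_day"),
+//		WorkGroup:      aws.String("primary"),
+//		QueryStatement: aws.String("SELECT * FROM orders WHERE order_date = ?"),
+//	})
+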
+func (s *CreatePreparedStatementInput) SetDescription(v string) *CreatePreparedStatementInput { + s.Description = &v + return s +} + +// SetQueryStatement sets the QueryStatement field's value. +func (s *CreatePreparedStatementInput) SetQueryStatement(v string) *CreatePreparedStatementInput { + s.QueryStatement = &v + return s +} + +// SetStatementName sets the StatementName field's value. +func (s *CreatePreparedStatementInput) SetStatementName(v string) *CreatePreparedStatementInput { + s.StatementName = &v + return s +} + +// SetWorkGroup sets the WorkGroup field's value. +func (s *CreatePreparedStatementInput) SetWorkGroup(v string) *CreatePreparedStatementInput { + s.WorkGroup = &v + return s +} + +type CreatePreparedStatementOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreatePreparedStatementOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreatePreparedStatementOutput) GoString() string { + return s.String() +} + +type CreatePresignedNotebookUrlInput struct { + _ struct{} `type:"structure"` + + // The session ID. + // + // SessionId is a required field + SessionId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreatePresignedNotebookUrlInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreatePresignedNotebookUrlInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreatePresignedNotebookUrlInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreatePresignedNotebookUrlInput"} + if s.SessionId == nil { + invalidParams.Add(request.NewErrParamRequired("SessionId")) + } + if s.SessionId != nil && len(*s.SessionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SessionId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSessionId sets the SessionId field's value. +func (s *CreatePresignedNotebookUrlInput) SetSessionId(v string) *CreatePresignedNotebookUrlInput { + s.SessionId = &v + return s +} + +type CreatePresignedNotebookUrlOutput struct { + _ struct{} `type:"structure"` + + // The authentication token for the notebook. + // + // AuthToken is a required field + AuthToken *string `type:"string" required:"true"` + + // The UTC epoch time when the authentication token expires. + // + // AuthTokenExpirationTime is a required field + AuthTokenExpirationTime *int64 `type:"long" required:"true"` + + // The URL of the notebook. 
The URL includes the authentication token and notebook + // file name and points directly to the opened notebook. + // + // NotebookUrl is a required field + NotebookUrl *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreatePresignedNotebookUrlOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreatePresignedNotebookUrlOutput) GoString() string { + return s.String() +} + +// SetAuthToken sets the AuthToken field's value. +func (s *CreatePresignedNotebookUrlOutput) SetAuthToken(v string) *CreatePresignedNotebookUrlOutput { + s.AuthToken = &v + return s +} + +// SetAuthTokenExpirationTime sets the AuthTokenExpirationTime field's value. +func (s *CreatePresignedNotebookUrlOutput) SetAuthTokenExpirationTime(v int64) *CreatePresignedNotebookUrlOutput { + s.AuthTokenExpirationTime = &v + return s +} + +// SetNotebookUrl sets the NotebookUrl field's value. +func (s *CreatePresignedNotebookUrlOutput) SetNotebookUrl(v string) *CreatePresignedNotebookUrlOutput { + s.NotebookUrl = &v + return s +} + +type CreateWorkGroupInput struct { + _ struct{} `type:"structure"` + + // Contains configuration information for creating an Athena SQL workgroup or + // Spark enabled Athena workgroup. Athena SQL workgroup configuration includes + // the location in Amazon S3 where query and calculation results are stored, + // the encryption configuration, if any, used for encrypting query results, + // whether the Amazon CloudWatch Metrics are enabled for the workgroup, the + // limit for the amount of bytes scanned (cutoff) per query, if it is specified, + // and whether workgroup's settings (specified with EnforceWorkGroupConfiguration) + // in the WorkGroupConfiguration override client-side settings. See WorkGroupConfiguration$EnforceWorkGroupConfiguration. + Configuration *WorkGroupConfiguration `type:"structure"` + + // The workgroup description. + Description *string `type:"string"` + + // The workgroup name. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // A list of comma separated tags to add to the workgroup that is created. + Tags []*Tag `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateWorkGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateWorkGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
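+
+// Editor's note: an illustrative sketch (not generated code) of the workgroup
+// creation call described above, reusing the svc client from the earlier
+// sketch. The bucket, names, tag values, and limits are placeholders.
+//
+//	_, err := svc.CreateWorkGroup(&athena.CreateWorkGroupInput{
+//		Name:        aws.String("analytics"),
+//		Description: aws.String("SQL workgroup for the analytics team"),
+//		Configuration: &athena.WorkGroupConfiguration{
+//			EnforceWorkGroupConfiguration:   aws.Bool(true), // workgroup settings win over client-side settings
+//			PublishCloudWatchMetricsEnabled: aws.Bool(true),
+//			BytesScannedCutoffPerQuery:      aws.Int64(10 * 1024 * 1024 * 1024), // 10 GB per-query cutoff
+//			ResultConfiguration: &athena.ResultConfiguration{
+//				OutputLocation: aws.String("s3://example-athena-results/"),
+//			},
+//		},
+//		Tags: []*athena.Tag{{Key: aws.String("team"), Value: aws.String("analytics")}},
+//	})
+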
+func (s *CreateWorkGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateWorkGroupInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Configuration != nil { + if err := s.Configuration.Validate(); err != nil { + invalidParams.AddNested("Configuration", err.(request.ErrInvalidParams)) + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConfiguration sets the Configuration field's value. +func (s *CreateWorkGroupInput) SetConfiguration(v *WorkGroupConfiguration) *CreateWorkGroupInput { + s.Configuration = v + return s +} + +// SetDescription sets the Description field's value. +func (s *CreateWorkGroupInput) SetDescription(v string) *CreateWorkGroupInput { + s.Description = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateWorkGroupInput) SetName(v string) *CreateWorkGroupInput { + s.Name = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateWorkGroupInput) SetTags(v []*Tag) *CreateWorkGroupInput { + s.Tags = v + return s +} + +type CreateWorkGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateWorkGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateWorkGroupOutput) GoString() string { + return s.String() +} + +// Specifies the customer managed KMS key that is used to encrypt the user's +// data stores in Athena. When an Amazon Web Services managed key is used, this +// value is null. This setting does not apply to Athena SQL workgroups. +type CustomerContentEncryptionConfiguration struct { + _ struct{} `type:"structure"` + + // The customer managed KMS key that is used to encrypt the user's data stores + // in Athena. + // + // KmsKey is a required field + KmsKey *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CustomerContentEncryptionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CustomerContentEncryptionConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
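+
+// Editor's note: illustrative only. The Validate method below enforces the
+// required KmsKey field; the key ARN here is a placeholder.
+//
+//	ccec := &athena.CustomerContentEncryptionConfiguration{
+//		KmsKey: aws.String("arn:aws:kms:us-east-1:111122223333:key/example-key-id"),
+//	}
+//	if err := ccec.Validate(); err != nil {
+//		log.Fatal(err)
+//	}
+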
+func (s *CustomerContentEncryptionConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CustomerContentEncryptionConfiguration"} + if s.KmsKey == nil { + invalidParams.Add(request.NewErrParamRequired("KmsKey")) + } + if s.KmsKey != nil && len(*s.KmsKey) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KmsKey", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKmsKey sets the KmsKey field's value. +func (s *CustomerContentEncryptionConfiguration) SetKmsKey(v string) *CustomerContentEncryptionConfiguration { + s.KmsKey = &v + return s +} + +// Contains information about a data catalog in an Amazon Web Services account. +// +// In the Athena console, data catalogs are listed as "data sources" on the +// Data sources page under the Data source name column. +type DataCatalog struct { + _ struct{} `type:"structure"` + + // An optional description of the data catalog. + Description *string `min:"1" type:"string"` + + // The name of the data catalog. The catalog name must be unique for the Amazon + // Web Services account and can use a maximum of 127 alphanumeric, underscore, + // at sign, or hyphen characters. The remainder of the length constraint of + // 256 is reserved for use by Athena. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // Specifies the Lambda function or functions to use for the data catalog. This + // is a mapping whose values depend on the catalog type. + // + // * For the HIVE data catalog type, use the following syntax. The metadata-function + // parameter is required. The sdk-version parameter is optional and defaults + // to the currently supported version. metadata-function=lambda_arn, sdk-version=version_number + // + // * For the LAMBDA data catalog type, use one of the following sets of required + // parameters, but not both. If you have one Lambda function that processes + // metadata and another for reading the actual data, use the following syntax. + // Both parameters are required. metadata-function=lambda_arn, record-function=lambda_arn + // If you have a composite Lambda function that processes both metadata and + // data, use the following syntax to specify your Lambda function. function=lambda_arn + // + // * The GLUE type takes a catalog ID parameter and is required. The catalog_id + // is the account ID of the Amazon Web Services account to which the Glue + // catalog belongs. catalog-id=catalog_id The GLUE data catalog type also + // applies to the default AwsDataCatalog that already exists in your account, + // of which you can have only one and cannot modify. + Parameters map[string]*string `type:"map"` + + // The type of data catalog to create: LAMBDA for a federated catalog, HIVE + // for an external hive metastore, or GLUE for an Glue Data Catalog. + // + // Type is a required field + Type *string `type:"string" required:"true" enum:"DataCatalogType"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DataCatalog) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
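+
+// Editor's note: a sketch of the Parameters syntax documented above, written
+// as Go maps in the form a caller would pass when registering a catalog
+// (CreateDataCatalogInput uses the same Parameters semantics). The Lambda
+// ARNs and account ID are placeholders.
+//
+//	// HIVE: a single metadata function; sdk-version may be omitted.
+//	hiveParams := aws.StringMap(map[string]string{
+//		"metadata-function": "arn:aws:lambda:us-east-1:111122223333:function:hive-metastore",
+//	})
+//	// LAMBDA: split metadata/record functions, or one composite "function" key.
+//	lambdaParams := aws.StringMap(map[string]string{
+//		"metadata-function": "arn:aws:lambda:us-east-1:111122223333:function:meta",
+//		"record-function":   "arn:aws:lambda:us-east-1:111122223333:function:records",
+//	})
+//	// GLUE: the ID of the account that owns the Glue catalog is required.
+//	glueParams := aws.StringMap(map[string]string{"catalog-id": "111122223333"})
+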
+func (s DataCatalog) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *DataCatalog) SetDescription(v string) *DataCatalog { + s.Description = &v + return s +} + +// SetName sets the Name field's value. +func (s *DataCatalog) SetName(v string) *DataCatalog { + s.Name = &v + return s +} + +// SetParameters sets the Parameters field's value. +func (s *DataCatalog) SetParameters(v map[string]*string) *DataCatalog { + s.Parameters = v + return s +} + +// SetType sets the Type field's value. +func (s *DataCatalog) SetType(v string) *DataCatalog { + s.Type = &v + return s +} + +// The summary information for the data catalog, which includes its name and +// type. +type DataCatalogSummary struct { + _ struct{} `type:"structure"` + + // The name of the data catalog. The catalog name is unique for the Amazon Web + // Services account and can use a maximum of 127 alphanumeric, underscore, at + // sign, or hyphen characters. The remainder of the length constraint of 256 + // is reserved for use by Athena. + CatalogName *string `min:"1" type:"string"` + + // The data catalog type. + Type *string `type:"string" enum:"DataCatalogType"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DataCatalogSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DataCatalogSummary) GoString() string { + return s.String() +} + +// SetCatalogName sets the CatalogName field's value. +func (s *DataCatalogSummary) SetCatalogName(v string) *DataCatalogSummary { + s.CatalogName = &v + return s +} + +// SetType sets the Type field's value. +func (s *DataCatalogSummary) SetType(v string) *DataCatalogSummary { + s.Type = &v + return s +} + +// Contains metadata information for a database in a data catalog. +type Database struct { + _ struct{} `type:"structure"` + + // An optional description of the database. + Description *string `min:"1" type:"string"` + + // The name of the database. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // A set of custom key/value pairs. + Parameters map[string]*string `type:"map"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Database) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Database) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *Database) SetDescription(v string) *Database { + s.Description = &v + return s +} + +// SetName sets the Name field's value. 
+func (s *Database) SetName(v string) *Database { + s.Name = &v + return s +} + +// SetParameters sets the Parameters field's value. +func (s *Database) SetParameters(v map[string]*string) *Database { + s.Parameters = v + return s +} + +// A piece of data (a field in the table). +type Datum struct { + _ struct{} `type:"structure"` + + // The value of the datum. + VarCharValue *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Datum) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Datum) GoString() string { + return s.String() +} + +// SetVarCharValue sets the VarCharValue field's value. +func (s *Datum) SetVarCharValue(v string) *Datum { + s.VarCharValue = &v + return s +} + +type DeleteCapacityReservationInput struct { + _ struct{} `type:"structure"` + + // The name of the capacity reservation to delete. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteCapacityReservationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteCapacityReservationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteCapacityReservationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteCapacityReservationInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *DeleteCapacityReservationInput) SetName(v string) *DeleteCapacityReservationInput { + s.Name = &v + return s +} + +type DeleteCapacityReservationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteCapacityReservationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
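+
+// Editor's note: a sketch of where Datum values typically surface: each row
+// in a GetQueryResults response is a slice of *Datum whose VarCharValue holds
+// the cell text. The query execution ID is a placeholder.
+//
+//	out, err := svc.GetQueryResults(&athena.GetQueryResultsInput{
+//		QueryExecutionId: aws.String("11111111-2222-3333-4444-555555555555"),
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, row := range out.ResultSet.Rows {
+//		for _, d := range row.Data {
+//			fmt.Printf("%s\t", aws.StringValue(d.VarCharValue)) // nil-safe dereference
+//		}
+//		fmt.Println()
+//	}
+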
+func (s DeleteCapacityReservationOutput) GoString() string { + return s.String() +} + +type DeleteDataCatalogInput struct { + _ struct{} `type:"structure"` + + // The name of the data catalog to delete. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteDataCatalogInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteDataCatalogInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDataCatalogInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDataCatalogInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *DeleteDataCatalogInput) SetName(v string) *DeleteDataCatalogInput { + s.Name = &v + return s +} + +type DeleteDataCatalogOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteDataCatalogOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteDataCatalogOutput) GoString() string { + return s.String() +} + +type DeleteNamedQueryInput struct { + _ struct{} `type:"structure"` + + // The unique ID of the query to delete. + NamedQueryId *string `min:"1" type:"string" idempotencyToken:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteNamedQueryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteNamedQueryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
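+
+// Editor's note: illustrative sketch of the delete call. The UUID stands in
+// for the ID of an existing named query; note that NamedQueryId carries the
+// idempotencyToken tag in the struct above.
+//
+//	_, err := svc.DeleteNamedQuery(&athena.DeleteNamedQueryInput{
+//		NamedQueryId: aws.String("11111111-2222-3333-4444-555555555555"),
+//	})
+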
+func (s *DeleteNamedQueryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteNamedQueryInput"} + if s.NamedQueryId != nil && len(*s.NamedQueryId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NamedQueryId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNamedQueryId sets the NamedQueryId field's value. +func (s *DeleteNamedQueryInput) SetNamedQueryId(v string) *DeleteNamedQueryInput { + s.NamedQueryId = &v + return s +} + +type DeleteNamedQueryOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteNamedQueryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteNamedQueryOutput) GoString() string { + return s.String() +} + +type DeleteNotebookInput struct { + _ struct{} `type:"structure"` + + // The ID of the notebook to delete. + // + // NotebookId is a required field + NotebookId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteNotebookInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteNotebookInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteNotebookInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteNotebookInput"} + if s.NotebookId == nil { + invalidParams.Add(request.NewErrParamRequired("NotebookId")) + } + if s.NotebookId != nil && len(*s.NotebookId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NotebookId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNotebookId sets the NotebookId field's value. +func (s *DeleteNotebookInput) SetNotebookId(v string) *DeleteNotebookInput { + s.NotebookId = &v + return s +} + +type DeleteNotebookOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteNotebookOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s DeleteNotebookOutput) GoString() string { + return s.String() +} + +type DeletePreparedStatementInput struct { + _ struct{} `type:"structure"` + + // The name of the prepared statement to delete. + // + // StatementName is a required field + StatementName *string `min:"1" type:"string" required:"true"` + + // The workgroup to which the statement to be deleted belongs. + // + // WorkGroup is a required field + WorkGroup *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeletePreparedStatementInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeletePreparedStatementInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeletePreparedStatementInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeletePreparedStatementInput"} + if s.StatementName == nil { + invalidParams.Add(request.NewErrParamRequired("StatementName")) + } + if s.StatementName != nil && len(*s.StatementName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StatementName", 1)) + } + if s.WorkGroup == nil { + invalidParams.Add(request.NewErrParamRequired("WorkGroup")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStatementName sets the StatementName field's value. +func (s *DeletePreparedStatementInput) SetStatementName(v string) *DeletePreparedStatementInput { + s.StatementName = &v + return s +} + +// SetWorkGroup sets the WorkGroup field's value. +func (s *DeletePreparedStatementInput) SetWorkGroup(v string) *DeletePreparedStatementInput { + s.WorkGroup = &v + return s +} + +type DeletePreparedStatementOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeletePreparedStatementOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeletePreparedStatementOutput) GoString() string { + return s.String() +} + +type DeleteWorkGroupInput struct { + _ struct{} `type:"structure"` + + // The option to delete the workgroup and its contents even if the workgroup + // contains any named queries, query executions, or notebooks. + RecursiveDeleteOption *bool `type:"boolean"` + + // The unique name of the workgroup to delete. + // + // WorkGroup is a required field + WorkGroup *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteWorkGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteWorkGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteWorkGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteWorkGroupInput"} + if s.WorkGroup == nil { + invalidParams.Add(request.NewErrParamRequired("WorkGroup")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRecursiveDeleteOption sets the RecursiveDeleteOption field's value. +func (s *DeleteWorkGroupInput) SetRecursiveDeleteOption(v bool) *DeleteWorkGroupInput { + s.RecursiveDeleteOption = &v + return s +} + +// SetWorkGroup sets the WorkGroup field's value. +func (s *DeleteWorkGroupInput) SetWorkGroup(v string) *DeleteWorkGroupInput { + s.WorkGroup = &v + return s +} + +type DeleteWorkGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteWorkGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteWorkGroupOutput) GoString() string { + return s.String() +} + +// If query and calculation results are encrypted in Amazon S3, indicates the +// encryption option used (for example, SSE_KMS or CSE_KMS) and key information. +type EncryptionConfiguration struct { + _ struct{} `type:"structure"` + + // Indicates whether Amazon S3 server-side encryption with Amazon S3-managed + // keys (SSE_S3), server-side encryption with KMS-managed keys (SSE_KMS), or + // client-side encryption with KMS-managed keys (CSE_KMS) is used. + // + // If a query runs in a workgroup and the workgroup overrides client-side settings, + // then the workgroup's setting for encryption is used. It specifies whether + // query results must be encrypted, for all queries that run in this workgroup. + // + // EncryptionOption is a required field + EncryptionOption *string `type:"string" required:"true" enum:"EncryptionOption"` + + // For SSE_KMS and CSE_KMS, this is the KMS key ARN or ID. + KmsKey *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EncryptionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
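+
+// Editor's note: a sketch, assuming the EncryptionOption enum constants
+// generated elsewhere in this package (for example EncryptionOptionSseKms).
+// KmsKey is only meaningful for the KMS-backed options; the ARN is a
+// placeholder. The value is normally attached to a ResultConfiguration.
+//
+//	enc := &athena.EncryptionConfiguration{
+//		EncryptionOption: aws.String(athena.EncryptionOptionSseKms),
+//		KmsKey:           aws.String("arn:aws:kms:us-east-1:111122223333:key/example-key-id"),
+//	}
+//	rc := &athena.ResultConfiguration{
+//		OutputLocation:          aws.String("s3://example-athena-results/"),
+//		EncryptionConfiguration: enc,
+//	}
+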
+func (s EncryptionConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *EncryptionConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EncryptionConfiguration"} + if s.EncryptionOption == nil { + invalidParams.Add(request.NewErrParamRequired("EncryptionOption")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEncryptionOption sets the EncryptionOption field's value. +func (s *EncryptionConfiguration) SetEncryptionOption(v string) *EncryptionConfiguration { + s.EncryptionOption = &v + return s +} + +// SetKmsKey sets the KmsKey field's value. +func (s *EncryptionConfiguration) SetKmsKey(v string) *EncryptionConfiguration { + s.KmsKey = &v + return s +} + +// Contains data processing unit (DPU) configuration settings and parameter +// mappings for a notebook engine. +type EngineConfiguration struct { + _ struct{} `type:"structure"` + + // Contains additional notebook engine MAP parameter mappings + // in the form of key-value pairs. To specify an Athena notebook that the Jupyter + // server will download and serve, specify a value for the StartSessionRequest$NotebookVersion + // field, and then add a key named NotebookId to AdditionalConfigs that has + // the value of the Athena notebook ID. + AdditionalConfigs map[string]*string `type:"map"` + + // The number of DPUs to use for the coordinator. A coordinator is a special + // executor that orchestrates processing work and manages other executors in + // a notebook session. The default is 1. + CoordinatorDpuSize *int64 `min:"1" type:"integer"` + + // The default number of DPUs to use for executors. An executor is the smallest + // unit of compute that a notebook session can request from Athena. The default + // is 1. + DefaultExecutorDpuSize *int64 `min:"1" type:"integer"` + + // The maximum number of DPUs that can run concurrently. + // + // MaxConcurrentDpus is a required field + MaxConcurrentDpus *int64 `min:"2" type:"integer" required:"true"` + + // Specifies custom jar files and Spark properties for use cases like cluster + // encryption, table formats, and general Spark tuning. + SparkProperties map[string]*string `type:"map"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EngineConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EngineConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
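+
+// Editor's note: a sketch of starting a Spark session with the DPU settings
+// described above; the workgroup, notebook version string, and notebook ID
+// are placeholders. Per the AdditionalConfigs documentation, pairing a
+// NotebookId entry with StartSessionInput's NotebookVersion makes the Jupyter
+// server download and serve that notebook.
+//
+//	_, err := svc.StartSession(&athena.StartSessionInput{
+//		WorkGroup:       aws.String("spark-workgroup"),
+//		NotebookVersion: aws.String("Athena notebook version 1"),
+//		EngineConfiguration: &athena.EngineConfiguration{
+//			MaxConcurrentDpus:      aws.Int64(20), // required; minimum value is 2
+//			CoordinatorDpuSize:     aws.Int64(1),
+//			DefaultExecutorDpuSize: aws.Int64(1),
+//			AdditionalConfigs: aws.StringMap(map[string]string{
+//				"NotebookId": "notebook-id-placeholder",
+//			}),
+//		},
+//	})
+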
+func (s *EngineConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EngineConfiguration"} + if s.CoordinatorDpuSize != nil && *s.CoordinatorDpuSize < 1 { + invalidParams.Add(request.NewErrParamMinValue("CoordinatorDpuSize", 1)) + } + if s.DefaultExecutorDpuSize != nil && *s.DefaultExecutorDpuSize < 1 { + invalidParams.Add(request.NewErrParamMinValue("DefaultExecutorDpuSize", 1)) + } + if s.MaxConcurrentDpus == nil { + invalidParams.Add(request.NewErrParamRequired("MaxConcurrentDpus")) + } + if s.MaxConcurrentDpus != nil && *s.MaxConcurrentDpus < 2 { + invalidParams.Add(request.NewErrParamMinValue("MaxConcurrentDpus", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAdditionalConfigs sets the AdditionalConfigs field's value. +func (s *EngineConfiguration) SetAdditionalConfigs(v map[string]*string) *EngineConfiguration { + s.AdditionalConfigs = v + return s +} + +// SetCoordinatorDpuSize sets the CoordinatorDpuSize field's value. +func (s *EngineConfiguration) SetCoordinatorDpuSize(v int64) *EngineConfiguration { + s.CoordinatorDpuSize = &v + return s +} + +// SetDefaultExecutorDpuSize sets the DefaultExecutorDpuSize field's value. +func (s *EngineConfiguration) SetDefaultExecutorDpuSize(v int64) *EngineConfiguration { + s.DefaultExecutorDpuSize = &v + return s +} + +// SetMaxConcurrentDpus sets the MaxConcurrentDpus field's value. +func (s *EngineConfiguration) SetMaxConcurrentDpus(v int64) *EngineConfiguration { + s.MaxConcurrentDpus = &v + return s +} + +// SetSparkProperties sets the SparkProperties field's value. +func (s *EngineConfiguration) SetSparkProperties(v map[string]*string) *EngineConfiguration { + s.SparkProperties = v + return s +} + +// The Athena engine version for running queries, or the PySpark engine version +// for running sessions. +type EngineVersion struct { + _ struct{} `type:"structure"` + + // Read only. The engine version on which the query runs. If the user requests + // a valid engine version other than Auto, the effective engine version is the + // same as the engine version that the user requested. If the user requests + // Auto, the effective engine version is chosen by Athena. When a request to + // update the engine version is made by a CreateWorkGroup or UpdateWorkGroup + // operation, the EffectiveEngineVersion field is ignored. + EffectiveEngineVersion *string `min:"1" type:"string"` + + // The engine version requested by the user. Possible values are determined + // by the output of ListEngineVersions, including AUTO. The default is AUTO. + SelectedEngineVersion *string `min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EngineVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EngineVersion) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
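+
+// Editor's note: illustrative sketch. Requesting AUTO lets Athena pick the
+// engine; the service reports its choice back through the read-only
+// EffectiveEngineVersion field.
+//
+//	cfg := &athena.WorkGroupConfiguration{
+//		EngineVersion: &athena.EngineVersion{
+//			SelectedEngineVersion: aws.String("AUTO"),
+//		},
+//	}
+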
+func (s *EngineVersion) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EngineVersion"} + if s.EffectiveEngineVersion != nil && len(*s.EffectiveEngineVersion) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EffectiveEngineVersion", 1)) + } + if s.SelectedEngineVersion != nil && len(*s.SelectedEngineVersion) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SelectedEngineVersion", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEffectiveEngineVersion sets the EffectiveEngineVersion field's value. +func (s *EngineVersion) SetEffectiveEngineVersion(v string) *EngineVersion { + s.EffectiveEngineVersion = &v + return s +} + +// SetSelectedEngineVersion sets the SelectedEngineVersion field's value. +func (s *EngineVersion) SetSelectedEngineVersion(v string) *EngineVersion { + s.SelectedEngineVersion = &v + return s +} + +// Contains summary information about an executor. +type ExecutorsSummary struct { + _ struct{} `type:"structure"` + + // The UUID of the executor. + // + // ExecutorId is a required field + ExecutorId *string `type:"string" required:"true"` + + // The smallest unit of compute that a session can request from Athena. Size + // is measured in data processing unit (DPU) values, a relative measure of processing + // power. + ExecutorSize *int64 `type:"long"` + + // The processing state of the executor. A description of each state follows. + // + // CREATING - The executor is being started, including acquiring resources. + // + // CREATED - The executor has been started. + // + // REGISTERED - The executor has been registered. + // + // TERMINATING - The executor is in the process of shutting down. + // + // TERMINATED - The executor is no longer running. + // + // FAILED - Due to a failure, the executor is no longer running. + ExecutorState *string `type:"string" enum:"ExecutorState"` + + // The type of executor used for the application (COORDINATOR, GATEWAY, or WORKER). + ExecutorType *string `type:"string" enum:"ExecutorType"` + + // The date and time that the executor started. + StartDateTime *int64 `type:"long"` + + // The date and time that the executor was terminated. + TerminationDateTime *int64 `type:"long"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExecutorsSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExecutorsSummary) GoString() string { + return s.String() +} + +// SetExecutorId sets the ExecutorId field's value. +func (s *ExecutorsSummary) SetExecutorId(v string) *ExecutorsSummary { + s.ExecutorId = &v + return s +} + +// SetExecutorSize sets the ExecutorSize field's value. +func (s *ExecutorsSummary) SetExecutorSize(v int64) *ExecutorsSummary { + s.ExecutorSize = &v + return s +} + +// SetExecutorState sets the ExecutorState field's value. +func (s *ExecutorsSummary) SetExecutorState(v string) *ExecutorsSummary { + s.ExecutorState = &v + return s +} + +// SetExecutorType sets the ExecutorType field's value. 
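+
+// Editor's note: a sketch of reacting to the executor lifecycle states listed
+// above. It assumes a ListExecutors response value named page whose
+// ExecutorsSummary field holds the summaries, and the ExecutorState enum
+// constants generated elsewhere in this package.
+//
+//	for _, ex := range page.ExecutorsSummary {
+//		switch aws.StringValue(ex.ExecutorState) {
+//		case athena.ExecutorStateFailed, athena.ExecutorStateTerminated:
+//			fmt.Println("executor stopped:", aws.StringValue(ex.ExecutorId))
+//		}
+//	}
+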
+func (s *ExecutorsSummary) SetExecutorType(v string) *ExecutorsSummary { + s.ExecutorType = &v + return s +} + +// SetStartDateTime sets the StartDateTime field's value. +func (s *ExecutorsSummary) SetStartDateTime(v int64) *ExecutorsSummary { + s.StartDateTime = &v + return s +} + +// SetTerminationDateTime sets the TerminationDateTime field's value. +func (s *ExecutorsSummary) SetTerminationDateTime(v int64) *ExecutorsSummary { + s.TerminationDateTime = &v + return s +} + +type ExportNotebookInput struct { + _ struct{} `type:"structure"` + + // The ID of the notebook to export. + // + // NotebookId is a required field + NotebookId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExportNotebookInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExportNotebookInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ExportNotebookInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExportNotebookInput"} + if s.NotebookId == nil { + invalidParams.Add(request.NewErrParamRequired("NotebookId")) + } + if s.NotebookId != nil && len(*s.NotebookId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NotebookId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNotebookId sets the NotebookId field's value. +func (s *ExportNotebookInput) SetNotebookId(v string) *ExportNotebookInput { + s.NotebookId = &v + return s +} + +type ExportNotebookOutput struct { + _ struct{} `type:"structure"` + + // The notebook metadata, including notebook ID, notebook name, and workgroup + // name. + NotebookMetadata *NotebookMetadata `type:"structure"` + + // The content of the exported notebook. + Payload *string `min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExportNotebookOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExportNotebookOutput) GoString() string { + return s.String() +} + +// SetNotebookMetadata sets the NotebookMetadata field's value. +func (s *ExportNotebookOutput) SetNotebookMetadata(v *NotebookMetadata) *ExportNotebookOutput { + s.NotebookMetadata = v + return s +} + +// SetPayload sets the Payload field's value. +func (s *ExportNotebookOutput) SetPayload(v string) *ExportNotebookOutput { + s.Payload = &v + return s +} + +// A string for searching notebook names. +type FilterDefinition struct { + _ struct{} `type:"structure"` + + // The name of the notebook to search for. 
+ Name *string `min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s FilterDefinition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s FilterDefinition) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *FilterDefinition) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FilterDefinition"} + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *FilterDefinition) SetName(v string) *FilterDefinition { + s.Name = &v + return s +} + +type GetCalculationExecutionCodeInput struct { + _ struct{} `type:"structure"` + + // The calculation execution UUID. + // + // CalculationExecutionId is a required field + CalculationExecutionId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetCalculationExecutionCodeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetCalculationExecutionCodeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetCalculationExecutionCodeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetCalculationExecutionCodeInput"} + if s.CalculationExecutionId == nil { + invalidParams.Add(request.NewErrParamRequired("CalculationExecutionId")) + } + if s.CalculationExecutionId != nil && len(*s.CalculationExecutionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CalculationExecutionId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCalculationExecutionId sets the CalculationExecutionId field's value. +func (s *GetCalculationExecutionCodeInput) SetCalculationExecutionId(v string) *GetCalculationExecutionCodeInput { + s.CalculationExecutionId = &v + return s +} + +type GetCalculationExecutionCodeOutput struct { + _ struct{} `type:"structure"` + + // The unencrypted code that was executed for the calculation. + CodeBlock *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
The member name will be present, but the +// value will be replaced with "sensitive".
+func (s GetCalculationExecutionCodeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetCalculationExecutionCodeOutput) GoString() string { + return s.String() +} + +// SetCodeBlock sets the CodeBlock field's value. +func (s *GetCalculationExecutionCodeOutput) SetCodeBlock(v string) *GetCalculationExecutionCodeOutput { + s.CodeBlock = &v + return s +} + +type GetCalculationExecutionInput struct { + _ struct{} `type:"structure"` + + // The calculation execution UUID. + // + // CalculationExecutionId is a required field + CalculationExecutionId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetCalculationExecutionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetCalculationExecutionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetCalculationExecutionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetCalculationExecutionInput"} + if s.CalculationExecutionId == nil { + invalidParams.Add(request.NewErrParamRequired("CalculationExecutionId")) + } + if s.CalculationExecutionId != nil && len(*s.CalculationExecutionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CalculationExecutionId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCalculationExecutionId sets the CalculationExecutionId field's value. +func (s *GetCalculationExecutionInput) SetCalculationExecutionId(v string) *GetCalculationExecutionInput { + s.CalculationExecutionId = &v + return s +} + +type GetCalculationExecutionOutput struct { + _ struct{} `type:"structure"` + + // The calculation execution UUID. + CalculationExecutionId *string `min:"1" type:"string"` + + // The description of the calculation execution. + Description *string `min:"1" type:"string"` + + // Contains result information. This field is populated only if the calculation + // is completed. + Result *CalculationResult `type:"structure"` + + // The session ID that the calculation ran in. + SessionId *string `min:"1" type:"string"` + + // Contains information about the data processing unit (DPU) execution time + // and progress. This field is populated only when statistics are available. + Statistics *CalculationStatistics `type:"structure"` + + // Contains information about the status of the calculation. + Status *CalculationStatus `type:"structure"` + + // The Amazon S3 location in which calculation results are stored. + WorkingDirectory *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetCalculationExecutionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetCalculationExecutionOutput) GoString() string { + return s.String() +} + +// SetCalculationExecutionId sets the CalculationExecutionId field's value. +func (s *GetCalculationExecutionOutput) SetCalculationExecutionId(v string) *GetCalculationExecutionOutput { + s.CalculationExecutionId = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *GetCalculationExecutionOutput) SetDescription(v string) *GetCalculationExecutionOutput { + s.Description = &v + return s +} + +// SetResult sets the Result field's value. +func (s *GetCalculationExecutionOutput) SetResult(v *CalculationResult) *GetCalculationExecutionOutput { + s.Result = v + return s +} + +// SetSessionId sets the SessionId field's value. +func (s *GetCalculationExecutionOutput) SetSessionId(v string) *GetCalculationExecutionOutput { + s.SessionId = &v + return s +} + +// SetStatistics sets the Statistics field's value. +func (s *GetCalculationExecutionOutput) SetStatistics(v *CalculationStatistics) *GetCalculationExecutionOutput { + s.Statistics = v + return s +} + +// SetStatus sets the Status field's value. +func (s *GetCalculationExecutionOutput) SetStatus(v *CalculationStatus) *GetCalculationExecutionOutput { + s.Status = v + return s +} + +// SetWorkingDirectory sets the WorkingDirectory field's value. +func (s *GetCalculationExecutionOutput) SetWorkingDirectory(v string) *GetCalculationExecutionOutput { + s.WorkingDirectory = &v + return s +} + +type GetCalculationExecutionStatusInput struct { + _ struct{} `type:"structure"` + + // The calculation execution UUID. + // + // CalculationExecutionId is a required field + CalculationExecutionId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetCalculationExecutionStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetCalculationExecutionStatusInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetCalculationExecutionStatusInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetCalculationExecutionStatusInput"} + if s.CalculationExecutionId == nil { + invalidParams.Add(request.NewErrParamRequired("CalculationExecutionId")) + } + if s.CalculationExecutionId != nil && len(*s.CalculationExecutionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CalculationExecutionId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCalculationExecutionId sets the CalculationExecutionId field's value. 
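+
+// Editor's note: a polling sketch built on the status call above; the
+// execution ID and two-second interval are arbitrary, and the
+// CalculationExecutionState constants are generated elsewhere in this
+// package.
+//
+//	for {
+//		out, err := svc.GetCalculationExecutionStatus(&athena.GetCalculationExecutionStatusInput{
+//			CalculationExecutionId: aws.String("11111111-2222-3333-4444-555555555555"),
+//		})
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		state := aws.StringValue(out.Status.State)
+//		if state == athena.CalculationExecutionStateCompleted || state == athena.CalculationExecutionStateFailed {
+//			break
+//		}
+//		time.Sleep(2 * time.Second)
+//	}
+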
+func (s *GetCalculationExecutionStatusInput) SetCalculationExecutionId(v string) *GetCalculationExecutionStatusInput { + s.CalculationExecutionId = &v + return s +} + +type GetCalculationExecutionStatusOutput struct { + _ struct{} `type:"structure"` + + // Contains information about the DPU execution time and progress. + Statistics *CalculationStatistics `type:"structure"` + + // Contains information about the calculation execution status. + Status *CalculationStatus `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetCalculationExecutionStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetCalculationExecutionStatusOutput) GoString() string { + return s.String() +} + +// SetStatistics sets the Statistics field's value. +func (s *GetCalculationExecutionStatusOutput) SetStatistics(v *CalculationStatistics) *GetCalculationExecutionStatusOutput { + s.Statistics = v + return s +} + +// SetStatus sets the Status field's value. +func (s *GetCalculationExecutionStatusOutput) SetStatus(v *CalculationStatus) *GetCalculationExecutionStatusOutput { + s.Status = v + return s +} + +type GetCapacityAssignmentConfigurationInput struct { + _ struct{} `type:"structure"` + + // The name of the capacity reservation to retrieve the capacity assignment + // configuration for. + // + // CapacityReservationName is a required field + CapacityReservationName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetCapacityAssignmentConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetCapacityAssignmentConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetCapacityAssignmentConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetCapacityAssignmentConfigurationInput"} + if s.CapacityReservationName == nil { + invalidParams.Add(request.NewErrParamRequired("CapacityReservationName")) + } + if s.CapacityReservationName != nil && len(*s.CapacityReservationName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CapacityReservationName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCapacityReservationName sets the CapacityReservationName field's value. 
+func (s *GetCapacityAssignmentConfigurationInput) SetCapacityReservationName(v string) *GetCapacityAssignmentConfigurationInput { + s.CapacityReservationName = &v + return s +} + +type GetCapacityAssignmentConfigurationOutput struct { + _ struct{} `type:"structure"` + + // The requested capacity assignment configuration for the specified capacity + // reservation. + // + // CapacityAssignmentConfiguration is a required field + CapacityAssignmentConfiguration *CapacityAssignmentConfiguration `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetCapacityAssignmentConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetCapacityAssignmentConfigurationOutput) GoString() string { + return s.String() +} + +// SetCapacityAssignmentConfiguration sets the CapacityAssignmentConfiguration field's value. +func (s *GetCapacityAssignmentConfigurationOutput) SetCapacityAssignmentConfiguration(v *CapacityAssignmentConfiguration) *GetCapacityAssignmentConfigurationOutput { + s.CapacityAssignmentConfiguration = v + return s +} + +type GetCapacityReservationInput struct { + _ struct{} `type:"structure"` + + // The name of the capacity reservation. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetCapacityReservationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetCapacityReservationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetCapacityReservationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetCapacityReservationInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *GetCapacityReservationInput) SetName(v string) *GetCapacityReservationInput { + s.Name = &v + return s +} + +type GetCapacityReservationOutput struct { + _ struct{} `type:"structure"` + + // The requested capacity reservation structure. + // + // CapacityReservation is a required field + CapacityReservation *CapacityReservation `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetCapacityReservationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetCapacityReservationOutput) GoString() string { + return s.String() +} + +// SetCapacityReservation sets the CapacityReservation field's value. +func (s *GetCapacityReservationOutput) SetCapacityReservation(v *CapacityReservation) *GetCapacityReservationOutput { + s.CapacityReservation = v + return s +} + +type GetDataCatalogInput struct { + _ struct{} `type:"structure"` + + // The name of the data catalog to return. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The name of the workgroup. Required if making an IAM Identity Center request. + WorkGroup *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetDataCatalogInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetDataCatalogInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetDataCatalogInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDataCatalogInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *GetDataCatalogInput) SetName(v string) *GetDataCatalogInput { + s.Name = &v + return s +} + +// SetWorkGroup sets the WorkGroup field's value. +func (s *GetDataCatalogInput) SetWorkGroup(v string) *GetDataCatalogInput { + s.WorkGroup = &v + return s +} + +type GetDataCatalogOutput struct { + _ struct{} `type:"structure"` + + // The data catalog returned. + DataCatalog *DataCatalog `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetDataCatalogOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetDataCatalogOutput) GoString() string { + return s.String() +} + +// SetDataCatalog sets the DataCatalog field's value. 
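+//
+// A quick sketch (editorial addition): looking up a registered catalog by
+// name. Per the field docs above, WorkGroup is only required for IAM Identity
+// Center requests, so it is omitted here. Assumes the svc client from the
+// first sketch; "AwsDataCatalog" is the account's default Glue catalog.
+//
+//	cat, err := svc.GetDataCatalog(&athena.GetDataCatalogInput{
+//		Name: aws.String("AwsDataCatalog"),
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(cat.DataCatalog) // String() masks any members marked sensitive
+//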
+func (s *GetDataCatalogOutput) SetDataCatalog(v *DataCatalog) *GetDataCatalogOutput { + s.DataCatalog = v + return s +} + +type GetDatabaseInput struct { + _ struct{} `type:"structure"` + + // The name of the data catalog that contains the database to return. + // + // CatalogName is a required field + CatalogName *string `min:"1" type:"string" required:"true"` + + // The name of the database to return. + // + // DatabaseName is a required field + DatabaseName *string `min:"1" type:"string" required:"true"` + + // The name of the workgroup for which the metadata is being fetched. Required + // if requesting an IAM Identity Center enabled Glue Data Catalog. + WorkGroup *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetDatabaseInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetDatabaseInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetDatabaseInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDatabaseInput"} + if s.CatalogName == nil { + invalidParams.Add(request.NewErrParamRequired("CatalogName")) + } + if s.CatalogName != nil && len(*s.CatalogName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogName", 1)) + } + if s.DatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseName")) + } + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCatalogName sets the CatalogName field's value. +func (s *GetDatabaseInput) SetCatalogName(v string) *GetDatabaseInput { + s.CatalogName = &v + return s +} + +// SetDatabaseName sets the DatabaseName field's value. +func (s *GetDatabaseInput) SetDatabaseName(v string) *GetDatabaseInput { + s.DatabaseName = &v + return s +} + +// SetWorkGroup sets the WorkGroup field's value. +func (s *GetDatabaseInput) SetWorkGroup(v string) *GetDatabaseInput { + s.WorkGroup = &v + return s +} + +type GetDatabaseOutput struct { + _ struct{} `type:"structure"` + + // The database returned. + Database *Database `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetDatabaseOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetDatabaseOutput) GoString() string { + return s.String() +} + +// SetDatabase sets the Database field's value. 
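+//
+// A companion sketch (editorial addition): fetching one database from a
+// catalog. CatalogName and DatabaseName are both required and must be at least
+// one character, which is exactly what Validate above enforces client-side
+// before any request is sent. Placeholder names; svc as in the first sketch.
+//
+//	db, err := svc.GetDatabase(&athena.GetDatabaseInput{
+//		CatalogName:  aws.String("AwsDataCatalog"),
+//		DatabaseName: aws.String("sampledb"),
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(db.Database)
+//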
+func (s *GetDatabaseOutput) SetDatabase(v *Database) *GetDatabaseOutput { + s.Database = v + return s +} + +type GetNamedQueryInput struct { + _ struct{} `type:"structure"` + + // The unique ID of the query. Use ListNamedQueries to get query IDs. + // + // NamedQueryId is a required field + NamedQueryId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetNamedQueryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetNamedQueryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetNamedQueryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetNamedQueryInput"} + if s.NamedQueryId == nil { + invalidParams.Add(request.NewErrParamRequired("NamedQueryId")) + } + if s.NamedQueryId != nil && len(*s.NamedQueryId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NamedQueryId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNamedQueryId sets the NamedQueryId field's value. +func (s *GetNamedQueryInput) SetNamedQueryId(v string) *GetNamedQueryInput { + s.NamedQueryId = &v + return s +} + +type GetNamedQueryOutput struct { + _ struct{} `type:"structure"` + + // Information about the query. + NamedQuery *NamedQuery `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetNamedQueryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetNamedQueryOutput) GoString() string { + return s.String() +} + +// SetNamedQuery sets the NamedQuery field's value. +func (s *GetNamedQueryOutput) SetNamedQuery(v *NamedQuery) *GetNamedQueryOutput { + s.NamedQuery = v + return s +} + +type GetNotebookMetadataInput struct { + _ struct{} `type:"structure"` + + // The ID of the notebook whose metadata is to be retrieved. + // + // NotebookId is a required field + NotebookId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetNotebookMetadataInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
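+//
+// A short sketch (editorial addition): resolving a saved query ID, normally
+// obtained from ListNamedQueries, back to its SQL text. The ID shown is a
+// placeholder; svc as in the first sketch.
+//
+//	nq, err := svc.GetNamedQuery(&athena.GetNamedQueryInput{
+//		NamedQueryId: aws.String("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"),
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(aws.StringValue(nq.NamedQuery.QueryString))
+//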
+func (s GetNotebookMetadataInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetNotebookMetadataInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetNotebookMetadataInput"} + if s.NotebookId == nil { + invalidParams.Add(request.NewErrParamRequired("NotebookId")) + } + if s.NotebookId != nil && len(*s.NotebookId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NotebookId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNotebookId sets the NotebookId field's value. +func (s *GetNotebookMetadataInput) SetNotebookId(v string) *GetNotebookMetadataInput { + s.NotebookId = &v + return s +} + +type GetNotebookMetadataOutput struct { + _ struct{} `type:"structure"` + + // The metadata that is returned for the specified notebook ID. + NotebookMetadata *NotebookMetadata `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetNotebookMetadataOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetNotebookMetadataOutput) GoString() string { + return s.String() +} + +// SetNotebookMetadata sets the NotebookMetadata field's value. +func (s *GetNotebookMetadataOutput) SetNotebookMetadata(v *NotebookMetadata) *GetNotebookMetadataOutput { + s.NotebookMetadata = v + return s +} + +type GetPreparedStatementInput struct { + _ struct{} `type:"structure"` + + // The name of the prepared statement to retrieve. + // + // StatementName is a required field + StatementName *string `min:"1" type:"string" required:"true"` + + // The workgroup to which the statement to be retrieved belongs. + // + // WorkGroup is a required field + WorkGroup *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetPreparedStatementInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetPreparedStatementInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
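+//
+// A short sketch (editorial addition): reading a notebook's metadata by ID.
+// Printing the struct goes through the String method above, so any sensitive
+// members would be masked. Placeholder ID; svc as in the first sketch.
+//
+//	nb, err := svc.GetNotebookMetadata(&athena.GetNotebookMetadataInput{
+//		NotebookId: aws.String("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"),
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(nb.NotebookMetadata)
+//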
+func (s *GetPreparedStatementInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetPreparedStatementInput"} + if s.StatementName == nil { + invalidParams.Add(request.NewErrParamRequired("StatementName")) + } + if s.StatementName != nil && len(*s.StatementName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StatementName", 1)) + } + if s.WorkGroup == nil { + invalidParams.Add(request.NewErrParamRequired("WorkGroup")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStatementName sets the StatementName field's value. +func (s *GetPreparedStatementInput) SetStatementName(v string) *GetPreparedStatementInput { + s.StatementName = &v + return s +} + +// SetWorkGroup sets the WorkGroup field's value. +func (s *GetPreparedStatementInput) SetWorkGroup(v string) *GetPreparedStatementInput { + s.WorkGroup = &v + return s +} + +type GetPreparedStatementOutput struct { + _ struct{} `type:"structure"` + + // The name of the prepared statement that was retrieved. + PreparedStatement *PreparedStatement `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetPreparedStatementOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetPreparedStatementOutput) GoString() string { + return s.String() +} + +// SetPreparedStatement sets the PreparedStatement field's value. +func (s *GetPreparedStatementOutput) SetPreparedStatement(v *PreparedStatement) *GetPreparedStatementOutput { + s.PreparedStatement = v + return s +} + +type GetQueryExecutionInput struct { + _ struct{} `type:"structure"` + + // The unique ID of the query execution. + // + // QueryExecutionId is a required field + QueryExecutionId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetQueryExecutionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetQueryExecutionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetQueryExecutionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetQueryExecutionInput"} + if s.QueryExecutionId == nil { + invalidParams.Add(request.NewErrParamRequired("QueryExecutionId")) + } + if s.QueryExecutionId != nil && len(*s.QueryExecutionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("QueryExecutionId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetQueryExecutionId sets the QueryExecutionId field's value. 
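+//
+// A short sketch (editorial addition): unlike most Get* inputs here,
+// GetPreparedStatementInput requires the owning WorkGroup as well as the
+// StatementName, as its Validate above shows. Placeholder names; svc as in
+// the first sketch.
+//
+//	ps, err := svc.GetPreparedStatement(&athena.GetPreparedStatementInput{
+//		StatementName: aws.String("my_select"),
+//		WorkGroup:     aws.String("primary"),
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(ps.PreparedStatement)
+//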
+func (s *GetQueryExecutionInput) SetQueryExecutionId(v string) *GetQueryExecutionInput { + s.QueryExecutionId = &v + return s +} + +type GetQueryExecutionOutput struct { + _ struct{} `type:"structure"` + + // Information about the query execution. + QueryExecution *QueryExecution `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetQueryExecutionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetQueryExecutionOutput) GoString() string { + return s.String() +} + +// SetQueryExecution sets the QueryExecution field's value. +func (s *GetQueryExecutionOutput) SetQueryExecution(v *QueryExecution) *GetQueryExecutionOutput { + s.QueryExecution = v + return s +} + +type GetQueryResultsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results (rows) to return in this request. + MaxResults *int64 `min:"1" type:"integer"` + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of + // pages, pass in the NextToken from the response object of the previous page + // call. + NextToken *string `min:"1" type:"string"` + + // The unique ID of the query execution. + // + // QueryExecutionId is a required field + QueryExecutionId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetQueryResultsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetQueryResultsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetQueryResultsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetQueryResultsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.QueryExecutionId == nil { + invalidParams.Add(request.NewErrParamRequired("QueryExecutionId")) + } + if s.QueryExecutionId != nil && len(*s.QueryExecutionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("QueryExecutionId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *GetQueryResultsInput) SetMaxResults(v int64) *GetQueryResultsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. 
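+//
+// A gating sketch (editorial addition): checking a query's state before
+// requesting its results. Placeholder execution ID; svc as in the first
+// sketch.
+//
+//	qe, err := svc.GetQueryExecution(&athena.GetQueryExecutionInput{
+//		QueryExecutionId: aws.String("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"),
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if aws.StringValue(qe.QueryExecution.Status.State) == athena.QueryExecutionStateSucceeded {
+//		// Safe to call GetQueryResults; see the paging sketch below.
+//	}
+//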
+func (s *GetQueryResultsInput) SetNextToken(v string) *GetQueryResultsInput { + s.NextToken = &v + return s +} + +// SetQueryExecutionId sets the QueryExecutionId field's value. +func (s *GetQueryResultsInput) SetQueryExecutionId(v string) *GetQueryResultsInput { + s.QueryExecutionId = &v + return s +} + +type GetQueryResultsOutput struct { + _ struct{} `type:"structure"` + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of + // pages, pass in the NextToken from the response object of the previous page + // call. + NextToken *string `min:"1" type:"string"` + + // The results of the query execution. + ResultSet *ResultSet `type:"structure"` + + // The number of rows inserted with a CREATE TABLE AS SELECT statement. + UpdateCount *int64 `type:"long"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetQueryResultsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetQueryResultsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *GetQueryResultsOutput) SetNextToken(v string) *GetQueryResultsOutput { + s.NextToken = &v + return s +} + +// SetResultSet sets the ResultSet field's value. +func (s *GetQueryResultsOutput) SetResultSet(v *ResultSet) *GetQueryResultsOutput { + s.ResultSet = v + return s +} + +// SetUpdateCount sets the UpdateCount field's value. +func (s *GetQueryResultsOutput) SetUpdateCount(v int64) *GetQueryResultsOutput { + s.UpdateCount = &v + return s +} + +type GetQueryRuntimeStatisticsInput struct { + _ struct{} `type:"structure"` + + // The unique ID of the query execution. + // + // QueryExecutionId is a required field + QueryExecutionId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetQueryRuntimeStatisticsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetQueryRuntimeStatisticsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
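+//
+// A paging sketch (editorial addition): the generated GetQueryResultsPages
+// paginator drives the NextToken handshake described above, passing each
+// GetQueryResultsOutput page to a callback; returning true continues to the
+// next page. Placeholder ID; svc as in the first sketch. For SELECT queries,
+// the first row of the first page is the header row.
+//
+//	err := svc.GetQueryResultsPages(&athena.GetQueryResultsInput{
+//		QueryExecutionId: aws.String("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"),
+//		MaxResults:       aws.Int64(50),
+//	}, func(page *athena.GetQueryResultsOutput, lastPage bool) bool {
+//		for _, row := range page.ResultSet.Rows {
+//			fmt.Println(row)
+//		}
+//		return true
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//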
+func (s *GetQueryRuntimeStatisticsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetQueryRuntimeStatisticsInput"} + if s.QueryExecutionId == nil { + invalidParams.Add(request.NewErrParamRequired("QueryExecutionId")) + } + if s.QueryExecutionId != nil && len(*s.QueryExecutionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("QueryExecutionId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetQueryExecutionId sets the QueryExecutionId field's value. +func (s *GetQueryRuntimeStatisticsInput) SetQueryExecutionId(v string) *GetQueryRuntimeStatisticsInput { + s.QueryExecutionId = &v + return s +} + +type GetQueryRuntimeStatisticsOutput struct { + _ struct{} `type:"structure"` + + // Runtime statistics about the query execution. + QueryRuntimeStatistics *QueryRuntimeStatistics `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetQueryRuntimeStatisticsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetQueryRuntimeStatisticsOutput) GoString() string { + return s.String() +} + +// SetQueryRuntimeStatistics sets the QueryRuntimeStatistics field's value. +func (s *GetQueryRuntimeStatisticsOutput) SetQueryRuntimeStatistics(v *QueryRuntimeStatistics) *GetQueryRuntimeStatisticsOutput { + s.QueryRuntimeStatistics = v + return s +} + +type GetSessionInput struct { + _ struct{} `type:"structure"` + + // The session ID. + // + // SessionId is a required field + SessionId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetSessionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetSessionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetSessionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSessionInput"} + if s.SessionId == nil { + invalidParams.Add(request.NewErrParamRequired("SessionId")) + } + if s.SessionId != nil && len(*s.SessionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SessionId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSessionId sets the SessionId field's value. +func (s *GetSessionInput) SetSessionId(v string) *GetSessionInput { + s.SessionId = &v + return s +} + +type GetSessionOutput struct { + _ struct{} `type:"structure"` + + // The session description. + Description *string `min:"1" type:"string"` + + // Contains engine configuration information like DPU usage. 
+ EngineConfiguration *EngineConfiguration `type:"structure"` + + // The engine version used by the session (for example, PySpark engine version + // 3). You can get a list of engine versions by calling ListEngineVersions. + EngineVersion *string `min:"1" type:"string"` + + // The notebook version. + NotebookVersion *string `min:"1" type:"string"` + + // Contains the workgroup configuration information used by the session. + SessionConfiguration *SessionConfiguration `type:"structure"` + + // The session ID. + SessionId *string `min:"1" type:"string"` + + // Contains the DPU execution time. + Statistics *SessionStatistics `type:"structure"` + + // Contains information about the status of the session. + Status *SessionStatus `type:"structure"` + + // The workgroup to which the session belongs. + WorkGroup *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetSessionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetSessionOutput) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *GetSessionOutput) SetDescription(v string) *GetSessionOutput { + s.Description = &v + return s +} + +// SetEngineConfiguration sets the EngineConfiguration field's value. +func (s *GetSessionOutput) SetEngineConfiguration(v *EngineConfiguration) *GetSessionOutput { + s.EngineConfiguration = v + return s +} + +// SetEngineVersion sets the EngineVersion field's value. +func (s *GetSessionOutput) SetEngineVersion(v string) *GetSessionOutput { + s.EngineVersion = &v + return s +} + +// SetNotebookVersion sets the NotebookVersion field's value. +func (s *GetSessionOutput) SetNotebookVersion(v string) *GetSessionOutput { + s.NotebookVersion = &v + return s +} + +// SetSessionConfiguration sets the SessionConfiguration field's value. +func (s *GetSessionOutput) SetSessionConfiguration(v *SessionConfiguration) *GetSessionOutput { + s.SessionConfiguration = v + return s +} + +// SetSessionId sets the SessionId field's value. +func (s *GetSessionOutput) SetSessionId(v string) *GetSessionOutput { + s.SessionId = &v + return s +} + +// SetStatistics sets the Statistics field's value. +func (s *GetSessionOutput) SetStatistics(v *SessionStatistics) *GetSessionOutput { + s.Statistics = v + return s +} + +// SetStatus sets the Status field's value. +func (s *GetSessionOutput) SetStatus(v *SessionStatus) *GetSessionOutput { + s.Status = v + return s +} + +// SetWorkGroup sets the WorkGroup field's value. +func (s *GetSessionOutput) SetWorkGroup(v string) *GetSessionOutput { + s.WorkGroup = &v + return s +} + +type GetSessionStatusInput struct { + _ struct{} `type:"structure"` + + // The session ID. + // + // SessionId is a required field + SessionId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
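+//
+// A short sketch (editorial addition): GetSession returns the full engine and
+// workgroup configuration shown above, whereas GetSessionStatus below returns
+// only the status block. Placeholder session ID; svc as in the first sketch.
+//
+//	sess, err := svc.GetSession(&athena.GetSessionInput{
+//		SessionId: aws.String("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"),
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(aws.StringValue(sess.EngineVersion), sess.Status)
+//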
+func (s GetSessionStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetSessionStatusInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetSessionStatusInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSessionStatusInput"} + if s.SessionId == nil { + invalidParams.Add(request.NewErrParamRequired("SessionId")) + } + if s.SessionId != nil && len(*s.SessionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SessionId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSessionId sets the SessionId field's value. +func (s *GetSessionStatusInput) SetSessionId(v string) *GetSessionStatusInput { + s.SessionId = &v + return s +} + +type GetSessionStatusOutput struct { + _ struct{} `type:"structure"` + + // The session ID. + SessionId *string `min:"1" type:"string"` + + // Contains information about the status of the session. + Status *SessionStatus `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetSessionStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetSessionStatusOutput) GoString() string { + return s.String() +} + +// SetSessionId sets the SessionId field's value. +func (s *GetSessionStatusOutput) SetSessionId(v string) *GetSessionStatusOutput { + s.SessionId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *GetSessionStatusOutput) SetStatus(v *SessionStatus) *GetSessionStatusOutput { + s.Status = v + return s +} + +type GetTableMetadataInput struct { + _ struct{} `type:"structure"` + + // The name of the data catalog that contains the database and table metadata + // to return. + // + // CatalogName is a required field + CatalogName *string `min:"1" type:"string" required:"true"` + + // The name of the database that contains the table metadata to return. + // + // DatabaseName is a required field + DatabaseName *string `min:"1" type:"string" required:"true"` + + // The name of the table for which metadata is returned. + // + // TableName is a required field + TableName *string `min:"1" type:"string" required:"true"` + + // The name of the workgroup for which the metadata is being fetched. Required + // if requesting an IAM Identity Center enabled Glue Data Catalog. + WorkGroup *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetTableMetadataInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetTableMetadataInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetTableMetadataInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetTableMetadataInput"} + if s.CatalogName == nil { + invalidParams.Add(request.NewErrParamRequired("CatalogName")) + } + if s.CatalogName != nil && len(*s.CatalogName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogName", 1)) + } + if s.DatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseName")) + } + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCatalogName sets the CatalogName field's value. +func (s *GetTableMetadataInput) SetCatalogName(v string) *GetTableMetadataInput { + s.CatalogName = &v + return s +} + +// SetDatabaseName sets the DatabaseName field's value. +func (s *GetTableMetadataInput) SetDatabaseName(v string) *GetTableMetadataInput { + s.DatabaseName = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *GetTableMetadataInput) SetTableName(v string) *GetTableMetadataInput { + s.TableName = &v + return s +} + +// SetWorkGroup sets the WorkGroup field's value. +func (s *GetTableMetadataInput) SetWorkGroup(v string) *GetTableMetadataInput { + s.WorkGroup = &v + return s +} + +type GetTableMetadataOutput struct { + _ struct{} `type:"structure"` + + // An object that contains table metadata. + TableMetadata *TableMetadata `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetTableMetadataOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetTableMetadataOutput) GoString() string { + return s.String() +} + +// SetTableMetadata sets the TableMetadata field's value. +func (s *GetTableMetadataOutput) SetTableMetadata(v *TableMetadata) *GetTableMetadataOutput { + s.TableMetadata = v + return s +} + +type GetWorkGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the workgroup. + // + // WorkGroup is a required field + WorkGroup *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetWorkGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetWorkGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetWorkGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetWorkGroupInput"} + if s.WorkGroup == nil { + invalidParams.Add(request.NewErrParamRequired("WorkGroup")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetWorkGroup sets the WorkGroup field's value. +func (s *GetWorkGroupInput) SetWorkGroup(v string) *GetWorkGroupInput { + s.WorkGroup = &v + return s +} + +type GetWorkGroupOutput struct { + _ struct{} `type:"structure"` + + // Information about the workgroup. + WorkGroup *WorkGroup `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetWorkGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetWorkGroupOutput) GoString() string { + return s.String() +} + +// SetWorkGroup sets the WorkGroup field's value. +func (s *GetWorkGroupOutput) SetWorkGroup(v *WorkGroup) *GetWorkGroupOutput { + s.WorkGroup = v + return s +} + +// Specifies whether the workgroup is IAM Identity Center supported. +type IdentityCenterConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies whether the workgroup is IAM Identity Center supported. + EnableIdentityCenter *bool `type:"boolean"` + + // The IAM Identity Center instance ARN that the workgroup associates to. + IdentityCenterInstanceArn *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s IdentityCenterConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s IdentityCenterConfiguration) GoString() string { + return s.String() +} + +// SetEnableIdentityCenter sets the EnableIdentityCenter field's value. +func (s *IdentityCenterConfiguration) SetEnableIdentityCenter(v bool) *IdentityCenterConfiguration { + s.EnableIdentityCenter = &v + return s +} + +// SetIdentityCenterInstanceArn sets the IdentityCenterInstanceArn field's value. +func (s *IdentityCenterConfiguration) SetIdentityCenterInstanceArn(v string) *IdentityCenterConfiguration { + s.IdentityCenterInstanceArn = &v + return s +} + +type ImportNotebookInput struct { + _ struct{} `type:"structure"` + + // A unique case-sensitive string used to ensure the request to import the notebook + // is idempotent (executes only once). 
+ // + // This token is listed as not required because Amazon Web Services SDKs (for + // example the Amazon Web Services SDK for Java) auto-generate the token for + // you. If you are not using the Amazon Web Services SDK or the Amazon Web Services + // CLI, you must provide this token or the action will fail. + ClientRequestToken *string `min:"1" type:"string"` + + // The name of the notebook to import. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // A URI that specifies the Amazon S3 location of a notebook file in ipynb format. + NotebookS3LocationUri *string `type:"string"` + + // The notebook content to be imported. The payload must be in ipynb format. + Payload *string `min:"1" type:"string"` + + // The notebook content type. Currently, the only valid type is IPYNB. + // + // Type is a required field + Type *string `type:"string" required:"true" enum:"NotebookType"` + + // The name of the Spark enabled workgroup to import the notebook to. + // + // WorkGroup is a required field + WorkGroup *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ImportNotebookInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ImportNotebookInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ImportNotebookInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ImportNotebookInput"} + if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Payload != nil && len(*s.Payload) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Payload", 1)) + } + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + if s.WorkGroup == nil { + invalidParams.Add(request.NewErrParamRequired("WorkGroup")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientRequestToken sets the ClientRequestToken field's value. +func (s *ImportNotebookInput) SetClientRequestToken(v string) *ImportNotebookInput { + s.ClientRequestToken = &v + return s +} + +// SetName sets the Name field's value. +func (s *ImportNotebookInput) SetName(v string) *ImportNotebookInput { + s.Name = &v + return s +} + +// SetNotebookS3LocationUri sets the NotebookS3LocationUri field's value. +func (s *ImportNotebookInput) SetNotebookS3LocationUri(v string) *ImportNotebookInput { + s.NotebookS3LocationUri = &v + return s +} + +// SetPayload sets the Payload field's value. +func (s *ImportNotebookInput) SetPayload(v string) *ImportNotebookInput { + s.Payload = &v + return s +} + +// SetType sets the Type field's value. 
+func (s *ImportNotebookInput) SetType(v string) *ImportNotebookInput { + s.Type = &v + return s +} + +// SetWorkGroup sets the WorkGroup field's value. +func (s *ImportNotebookInput) SetWorkGroup(v string) *ImportNotebookInput { + s.WorkGroup = &v + return s +} + +type ImportNotebookOutput struct { + _ struct{} `type:"structure"` + + // The ID assigned to the imported notebook. + NotebookId *string `min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ImportNotebookOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ImportNotebookOutput) GoString() string { + return s.String() +} + +// SetNotebookId sets the NotebookId field's value. +func (s *ImportNotebookOutput) SetNotebookId(v string) *ImportNotebookOutput { + s.NotebookId = &v + return s +} + +// Indicates a platform issue, which may be due to a transient condition or +// outage. +type InternalServerException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InternalServerException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InternalServerException) GoString() string { + return s.String() +} + +func newErrorInternalServerException(v protocol.ResponseMetadata) error { + return &InternalServerException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InternalServerException) Code() string { + return "InternalServerException" +} + +// Message returns the exception's message. +func (s *InternalServerException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InternalServerException) OrigErr() error { + return nil +} + +func (s *InternalServerException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InternalServerException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InternalServerException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. 
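+//
+// A sketch of ImportNotebook above (editorial addition): ClientRequestToken is
+// left unset because, as its field doc notes, the Go SDK auto-generates the
+// idempotency token. The workgroup name and the minimal (possibly incomplete)
+// ipynb payload are placeholder assumptions; svc as in the first sketch.
+//
+//	out, err := svc.ImportNotebook(&athena.ImportNotebookInput{
+//		Name:      aws.String("imported-notebook"),
+//		Type:      aws.String("IPYNB"), // currently the only valid NotebookType
+//		WorkGroup: aws.String("spark-workgroup"),
+//		Payload:   aws.String(`{"cells":[],"metadata":{},"nbformat":4,"nbformat_minor":5}`),
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(aws.StringValue(out.NotebookId))
+//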
+type InvalidRequestException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // The error code returned when the query execution failed to process, or when + // the processing request for the named query failed. + AthenaErrorCode *string `min:"1" type:"string"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRequestException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRequestException) GoString() string { + return s.String() +} + +func newErrorInvalidRequestException(v protocol.ResponseMetadata) error { + return &InvalidRequestException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidRequestException) Code() string { + return "InvalidRequestException" +} + +// Message returns the exception's message. +func (s *InvalidRequestException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidRequestException) OrigErr() error { + return nil +} + +func (s *InvalidRequestException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidRequestException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidRequestException) RequestID() string { + return s.RespMetadata.RequestID +} + +type ListApplicationDPUSizesInput struct { + _ struct{} `type:"structure"` + + // Specifies the maximum number of results to return. + MaxResults *int64 `min:"1" type:"integer"` + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListApplicationDPUSizesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListApplicationDPUSizesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
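+//
+// An error-handling sketch (editorial addition): the modeled exceptions above
+// implement the error interface, so the standard library's errors.As can
+// recover the concrete type and its extra fields such as AthenaErrorCode.
+// Assumes svc from the first sketch and a deliberately bad workgroup name.
+//
+//	_, err := svc.GetWorkGroup(&athena.GetWorkGroupInput{WorkGroup: aws.String("no-such-workgroup")})
+//	var ire *athena.InvalidRequestException
+//	if errors.As(err, &ire) {
+//		fmt.Println(aws.StringValue(ire.AthenaErrorCode), ire.Message())
+//	}
+//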
+func (s *ListApplicationDPUSizesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListApplicationDPUSizesInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListApplicationDPUSizesInput) SetMaxResults(v int64) *ListApplicationDPUSizesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListApplicationDPUSizesInput) SetNextToken(v string) *ListApplicationDPUSizesInput { + s.NextToken = &v + return s +} + +type ListApplicationDPUSizesOutput struct { + _ struct{} `type:"structure"` + + // A list of the supported DPU sizes that the application runtime supports. + ApplicationDPUSizes []*ApplicationDPUSizes `type:"list"` + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of + // pages, pass in the NextToken from the response object of the previous page + // call. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListApplicationDPUSizesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListApplicationDPUSizesOutput) GoString() string { + return s.String() +} + +// SetApplicationDPUSizes sets the ApplicationDPUSizes field's value. +func (s *ListApplicationDPUSizesOutput) SetApplicationDPUSizes(v []*ApplicationDPUSizes) *ListApplicationDPUSizesOutput { + s.ApplicationDPUSizes = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListApplicationDPUSizesOutput) SetNextToken(v string) *ListApplicationDPUSizesOutput { + s.NextToken = &v + return s +} + +type ListCalculationExecutionsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of calculation executions to return. + MaxResults *int64 `min:"1" type:"integer"` + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of + // pages, pass in the NextToken from the response object of the previous page + // call. + NextToken *string `type:"string"` + + // The session ID. + // + // SessionId is a required field + SessionId *string `min:"1" type:"string" required:"true"` + + // A filter for a specific calculation execution state. A description of each + // state follows. + // + // CREATING - The calculation is in the process of being created. + // + // CREATED - The calculation has been created and is ready to run. + // + // QUEUED - The calculation has been queued for processing. + // + // RUNNING - The calculation is running. + // + // CANCELING - A request to cancel the calculation has been received and the + // system is working to stop it. 
+ // + // CANCELED - The calculation is no longer running as the result of a cancel + // request. + // + // COMPLETED - The calculation has completed without error. + // + // FAILED - The calculation failed and is no longer running. + StateFilter *string `type:"string" enum:"CalculationExecutionState"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListCalculationExecutionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListCalculationExecutionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListCalculationExecutionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListCalculationExecutionsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.SessionId == nil { + invalidParams.Add(request.NewErrParamRequired("SessionId")) + } + if s.SessionId != nil && len(*s.SessionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SessionId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListCalculationExecutionsInput) SetMaxResults(v int64) *ListCalculationExecutionsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListCalculationExecutionsInput) SetNextToken(v string) *ListCalculationExecutionsInput { + s.NextToken = &v + return s +} + +// SetSessionId sets the SessionId field's value. +func (s *ListCalculationExecutionsInput) SetSessionId(v string) *ListCalculationExecutionsInput { + s.SessionId = &v + return s +} + +// SetStateFilter sets the StateFilter field's value. +func (s *ListCalculationExecutionsInput) SetStateFilter(v string) *ListCalculationExecutionsInput { + s.StateFilter = &v + return s +} + +type ListCalculationExecutionsOutput struct { + _ struct{} `type:"structure"` + + // A list of CalculationSummary objects. + Calculations []*CalculationSummary `type:"list"` + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of + // pages, pass in the NextToken from the response object of the previous page + // call. + NextToken *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListCalculationExecutionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
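Every List* pair in this regenerated surface shares the same pagination contract: send MaxResults, then feed each response's NextToken back into the next request until it comes back nil. A sketch of draining ListCalculationExecutions by hand (client construction is assumed, the session ID is a placeholder, and CalculationExecutionId is taken from the CalculationSummary shape elsewhere in this file):

package example

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/athena"
)

// listAllCalculations walks every page for one session by feeding each
// response's NextToken back into the next request.
func listAllCalculations(svc *athena.Athena, sessionID string) error {
	input := &athena.ListCalculationExecutionsInput{
		SessionId:  aws.String(sessionID), // required field
		MaxResults: aws.Int64(50),
	}
	for {
		out, err := svc.ListCalculationExecutions(input)
		if err != nil {
			return err
		}
		for _, c := range out.Calculations {
			log.Println(aws.StringValue(c.CalculationExecutionId))
		}
		if out.NextToken == nil {
			break // no more pages
		}
		input.NextToken = out.NextToken
	}
	return nil
}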
+func (s ListCalculationExecutionsOutput) GoString() string { + return s.String() +} + +// SetCalculations sets the Calculations field's value. +func (s *ListCalculationExecutionsOutput) SetCalculations(v []*CalculationSummary) *ListCalculationExecutionsOutput { + s.Calculations = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListCalculationExecutionsOutput) SetNextToken(v string) *ListCalculationExecutionsOutput { + s.NextToken = &v + return s +} + +type ListCapacityReservationsInput struct { + _ struct{} `type:"structure"` + + // Specifies the maximum number of results to return. + MaxResults *int64 `min:"1" type:"integer"` + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListCapacityReservationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListCapacityReservationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListCapacityReservationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListCapacityReservationsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListCapacityReservationsInput) SetMaxResults(v int64) *ListCapacityReservationsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListCapacityReservationsInput) SetNextToken(v string) *ListCapacityReservationsInput { + s.NextToken = &v + return s +} + +type ListCapacityReservationsOutput struct { + _ struct{} `type:"structure"` + + // The capacity reservations for the current account. + // + // CapacityReservations is a required field + CapacityReservations []*CapacityReservation `type:"list" required:"true"` + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of + // pages, pass in the NextToken from the response object of the previous page + // call. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListCapacityReservationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListCapacityReservationsOutput) GoString() string { + return s.String() +} + +// SetCapacityReservations sets the CapacityReservations field's value. +func (s *ListCapacityReservationsOutput) SetCapacityReservations(v []*CapacityReservation) *ListCapacityReservationsOutput { + s.CapacityReservations = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListCapacityReservationsOutput) SetNextToken(v string) *ListCapacityReservationsOutput { + s.NextToken = &v + return s +} + +type ListDataCatalogsInput struct { + _ struct{} `type:"structure"` + + // Specifies the maximum number of data catalogs to return. + MaxResults *int64 `min:"2" type:"integer"` + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of + // pages, pass in the NextToken from the response object of the previous page + // call. + NextToken *string `min:"1" type:"string"` + + // The name of the workgroup. Required if making an IAM Identity Center request. + WorkGroup *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListDataCatalogsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListDataCatalogsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListDataCatalogsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListDataCatalogsInput"} + if s.MaxResults != nil && *s.MaxResults < 2 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 2)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListDataCatalogsInput) SetMaxResults(v int64) *ListDataCatalogsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListDataCatalogsInput) SetNextToken(v string) *ListDataCatalogsInput { + s.NextToken = &v + return s +} + +// SetWorkGroup sets the WorkGroup field's value. +func (s *ListDataCatalogsInput) SetWorkGroup(v string) *ListDataCatalogsInput { + s.WorkGroup = &v + return s +} + +type ListDataCatalogsOutput struct { + _ struct{} `type:"structure"` + + // A summary list of data catalogs. + DataCatalogsSummary []*DataCatalogSummary `type:"list"` + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of + // pages, pass in the NextToken from the response object of the previous page + // call. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListDataCatalogsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListDataCatalogsOutput) GoString() string { + return s.String() +} + +// SetDataCatalogsSummary sets the DataCatalogsSummary field's value. +func (s *ListDataCatalogsOutput) SetDataCatalogsSummary(v []*DataCatalogSummary) *ListDataCatalogsOutput { + s.DataCatalogsSummary = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListDataCatalogsOutput) SetNextToken(v string) *ListDataCatalogsOutput { + s.NextToken = &v + return s +} + +type ListDatabasesInput struct { + _ struct{} `type:"structure"` + + // The name of the data catalog that contains the databases to return. + // + // CatalogName is a required field + CatalogName *string `min:"1" type:"string" required:"true"` + + // Specifies the maximum number of results to return. + MaxResults *int64 `min:"1" type:"integer"` + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of + // pages, pass in the NextToken from the response object of the previous page + // call. + NextToken *string `min:"1" type:"string"` + + // The name of the workgroup for which the metadata is being fetched. Required + // if requesting an IAM Identity Center enabled Glue Data Catalog. + WorkGroup *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListDatabasesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListDatabasesInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *BatchGetQueryExecutionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BatchGetQueryExecutionInput"} - if s.QueryExecutionIds == nil { - invalidParams.Add(request.NewErrParamRequired("QueryExecutionIds")) +func (s *ListDatabasesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListDatabasesInput"} + if s.CatalogName == nil { + invalidParams.Add(request.NewErrParamRequired("CatalogName")) } - if s.QueryExecutionIds != nil && len(s.QueryExecutionIds) < 1 { - invalidParams.Add(request.NewErrParamMinLen("QueryExecutionIds", 1)) + if s.CatalogName != nil && len(*s.CatalogName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogName", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) } if invalidParams.Len() > 0 { @@ -3666,20 +12567,143 @@ func (s *BatchGetQueryExecutionInput) Validate() error { return nil } -// SetQueryExecutionIds sets the QueryExecutionIds field's value. -func (s *BatchGetQueryExecutionInput) SetQueryExecutionIds(v []*string) *BatchGetQueryExecutionInput { - s.QueryExecutionIds = v +// SetCatalogName sets the CatalogName field's value. +func (s *ListDatabasesInput) SetCatalogName(v string) *ListDatabasesInput { + s.CatalogName = &v return s } -type BatchGetQueryExecutionOutput struct { +// SetMaxResults sets the MaxResults field's value. +func (s *ListDatabasesInput) SetMaxResults(v int64) *ListDatabasesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListDatabasesInput) SetNextToken(v string) *ListDatabasesInput { + s.NextToken = &v + return s +} + +// SetWorkGroup sets the WorkGroup field's value. +func (s *ListDatabasesInput) SetWorkGroup(v string) *ListDatabasesInput { + s.WorkGroup = &v + return s +} + +type ListDatabasesOutput struct { _ struct{} `type:"structure"` - // Information about a query execution. - QueryExecutions []*QueryExecution `type:"list"` + // A list of databases from a data catalog. + DatabaseList []*Database `type:"list"` + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of + // pages, pass in the NextToken from the response object of the previous page + // call. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListDatabasesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListDatabasesOutput) GoString() string { + return s.String() +} + +// SetDatabaseList sets the DatabaseList field's value. +func (s *ListDatabasesOutput) SetDatabaseList(v []*Database) *ListDatabasesOutput { + s.DatabaseList = v + return s +} + +// SetNextToken sets the NextToken field's value. 
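The generated Set* methods above return the receiver, so inputs can be built fluently, and Validate enforces the min/required tags client-side before a request is ever signed. A sketch against ListDatabases (client construction is assumed; "AwsDataCatalog" is the account's default catalog):

package example

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/athena"
)

func printDatabases(svc *athena.Athena) error {
	// Fluent construction via the generated setters; calling Validate here
	// mirrors the checks the SDK runs before sending (CatalogName required,
	// CatalogName/NextToken length >= 1, MaxResults >= 1).
	input := new(athena.ListDatabasesInput).
		SetCatalogName("AwsDataCatalog").
		SetMaxResults(25)
	if err := input.Validate(); err != nil {
		return err // a request.ErrInvalidParams listing each violation
	}

	out, err := svc.ListDatabases(input)
	if err != nil {
		return err
	}
	for _, db := range out.DatabaseList {
		log.Println(aws.StringValue(db.Name))
	}
	return nil
}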
+func (s *ListDatabasesOutput) SetNextToken(v string) *ListDatabasesOutput { + s.NextToken = &v + return s +} + +type ListEngineVersionsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of engine versions to return in this request. + MaxResults *int64 `min:"1" type:"integer"` + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of + // pages, pass in the NextToken from the response object of the previous page + // call. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListEngineVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListEngineVersionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListEngineVersionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListEngineVersionsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListEngineVersionsInput) SetMaxResults(v int64) *ListEngineVersionsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListEngineVersionsInput) SetNextToken(v string) *ListEngineVersionsInput { + s.NextToken = &v + return s +} + +type ListEngineVersionsOutput struct { + _ struct{} `type:"structure"` - // Information about the query executions that failed to run. - UnprocessedQueryExecutionIds []*UnprocessedQueryExecutionId `type:"list"` + // A list of engine versions that are available to choose from. + EngineVersions []*EngineVersion `type:"list"` + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of + // pages, pass in the NextToken from the response object of the previous page + // call. + NextToken *string `min:"1" type:"string"` } // String returns the string representation. @@ -3687,7 +12711,7 @@ type BatchGetQueryExecutionOutput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s BatchGetQueryExecutionOutput) String() string { +func (s ListEngineVersionsOutput) String() string { return awsutil.Prettify(s) } @@ -3696,36 +12720,53 @@ func (s BatchGetQueryExecutionOutput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". 
-func (s BatchGetQueryExecutionOutput) GoString() string { +func (s ListEngineVersionsOutput) GoString() string { return s.String() } -// SetQueryExecutions sets the QueryExecutions field's value. -func (s *BatchGetQueryExecutionOutput) SetQueryExecutions(v []*QueryExecution) *BatchGetQueryExecutionOutput { - s.QueryExecutions = v +// SetEngineVersions sets the EngineVersions field's value. +func (s *ListEngineVersionsOutput) SetEngineVersions(v []*EngineVersion) *ListEngineVersionsOutput { + s.EngineVersions = v return s } -// SetUnprocessedQueryExecutionIds sets the UnprocessedQueryExecutionIds field's value. -func (s *BatchGetQueryExecutionOutput) SetUnprocessedQueryExecutionIds(v []*UnprocessedQueryExecutionId) *BatchGetQueryExecutionOutput { - s.UnprocessedQueryExecutionIds = v +// SetNextToken sets the NextToken field's value. +func (s *ListEngineVersionsOutput) SetNextToken(v string) *ListEngineVersionsOutput { + s.NextToken = &v return s } -// Contains metadata for a column in a table. -type Column struct { +type ListExecutorsInput struct { _ struct{} `type:"structure"` - // Optional information about the column. - Comment *string `type:"string"` - - // The name of the column. + // A filter for a specific executor state. A description of each state follows. // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + // CREATING - The executor is being started, including acquiring resources. + // + // CREATED - The executor has been started. + // + // REGISTERED - The executor has been registered. + // + // TERMINATING - The executor is in the process of shutting down. + // + // TERMINATED - The executor is no longer running. + // + // FAILED - Due to a failure, the executor is no longer running. + ExecutorStateFilter *string `type:"string" enum:"ExecutorState"` - // The data type of the column. - Type *string `type:"string"` + // The maximum number of executors to return. + MaxResults *int64 `min:"1" type:"integer"` + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of + // pages, pass in the NextToken from the response object of the previous page + // call. + NextToken *string `type:"string"` + + // The session ID. + // + // SessionId is a required field + SessionId *string `min:"1" type:"string" required:"true"` } // String returns the string representation. @@ -3733,7 +12774,7 @@ type Column struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s Column) String() string { +func (s ListExecutorsInput) String() string { return awsutil.Prettify(s) } @@ -3742,67 +12783,69 @@ func (s Column) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s Column) GoString() string { +func (s ListExecutorsInput) GoString() string { return s.String() } -// SetComment sets the Comment field's value. -func (s *Column) SetComment(v string) *Column { - s.Comment = &v - return s +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListExecutorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListExecutorsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.SessionId == nil { + invalidParams.Add(request.NewErrParamRequired("SessionId")) + } + if s.SessionId != nil && len(*s.SessionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SessionId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetName sets the Name field's value. -func (s *Column) SetName(v string) *Column { - s.Name = &v +// SetExecutorStateFilter sets the ExecutorStateFilter field's value. +func (s *ListExecutorsInput) SetExecutorStateFilter(v string) *ListExecutorsInput { + s.ExecutorStateFilter = &v return s } -// SetType sets the Type field's value. -func (s *Column) SetType(v string) *Column { - s.Type = &v +// SetMaxResults sets the MaxResults field's value. +func (s *ListExecutorsInput) SetMaxResults(v int64) *ListExecutorsInput { + s.MaxResults = &v return s } -// Information about the columns in a query execution result. -type ColumnInfo struct { - _ struct{} `type:"structure"` - - // Indicates whether values in the column are case-sensitive. - CaseSensitive *bool `type:"boolean"` - - // The catalog to which the query results belong. - CatalogName *string `type:"string"` - - // A column label. - Label *string `type:"string"` - - // The name of the column. - // - // Name is a required field - Name *string `type:"string" required:"true"` - - // Indicates the column's nullable status. - Nullable *string `type:"string" enum:"ColumnNullable"` +// SetNextToken sets the NextToken field's value. +func (s *ListExecutorsInput) SetNextToken(v string) *ListExecutorsInput { + s.NextToken = &v + return s +} - // For DECIMAL data types, specifies the total number of digits, up to 38. For - // performance reasons, we recommend up to 18 digits. - Precision *int64 `type:"integer"` +// SetSessionId sets the SessionId field's value. +func (s *ListExecutorsInput) SetSessionId(v string) *ListExecutorsInput { + s.SessionId = &v + return s +} - // For DECIMAL data types, specifies the total number of digits in the fractional - // part of the value. Defaults to 0. - Scale *int64 `type:"integer"` +type ListExecutorsOutput struct { + _ struct{} `type:"structure"` - // The schema name (database name) to which the query results belong. - SchemaName *string `type:"string"` + // Contains summary information about the executor. + ExecutorsSummary []*ExecutorsSummary `type:"list"` - // The table name for the query results. - TableName *string `type:"string"` + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of + // pages, pass in the NextToken from the response object of the previous page + // call. + NextToken *string `type:"string"` - // The data type of the column. + // The session ID. // - // Type is a required field - Type *string `type:"string" required:"true"` + // SessionId is a required field + SessionId *string `min:"1" type:"string" required:"true"` } // String returns the string representation. @@ -3810,7 +12853,7 @@ type ColumnInfo struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". 
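Enum-tagged string fields such as ExecutorStateFilter pair with generated string constants in the same package. A sketch filtering executors to one state (the ExecutorStateRegistered constant name follows the SDK's usual enum generation for the states listed above and is an assumption here; the session ID is a placeholder):

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/athena"
)

// registeredExecutors returns only executors in the REGISTERED state.
func registeredExecutors(svc *athena.Athena, sessionID string) ([]*athena.ExecutorsSummary, error) {
	out, err := svc.ListExecutors(&athena.ListExecutorsInput{
		SessionId:           aws.String(sessionID),                      // required field
		ExecutorStateFilter: aws.String(athena.ExecutorStateRegistered), // assumed enum constant for "REGISTERED"
	})
	if err != nil {
		return nil, err
	}
	return out.ExecutorsSummary, nil
}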
-func (s ColumnInfo) String() string { +func (s ListExecutorsOutput) String() string { return awsutil.Prettify(s) } @@ -3819,116 +12862,44 @@ func (s ColumnInfo) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ColumnInfo) GoString() string { +func (s ListExecutorsOutput) GoString() string { return s.String() } -// SetCaseSensitive sets the CaseSensitive field's value. -func (s *ColumnInfo) SetCaseSensitive(v bool) *ColumnInfo { - s.CaseSensitive = &v - return s -} - -// SetCatalogName sets the CatalogName field's value. -func (s *ColumnInfo) SetCatalogName(v string) *ColumnInfo { - s.CatalogName = &v - return s -} - -// SetLabel sets the Label field's value. -func (s *ColumnInfo) SetLabel(v string) *ColumnInfo { - s.Label = &v - return s -} - -// SetName sets the Name field's value. -func (s *ColumnInfo) SetName(v string) *ColumnInfo { - s.Name = &v - return s -} - -// SetNullable sets the Nullable field's value. -func (s *ColumnInfo) SetNullable(v string) *ColumnInfo { - s.Nullable = &v - return s -} - -// SetPrecision sets the Precision field's value. -func (s *ColumnInfo) SetPrecision(v int64) *ColumnInfo { - s.Precision = &v - return s -} - -// SetScale sets the Scale field's value. -func (s *ColumnInfo) SetScale(v int64) *ColumnInfo { - s.Scale = &v - return s -} - -// SetSchemaName sets the SchemaName field's value. -func (s *ColumnInfo) SetSchemaName(v string) *ColumnInfo { - s.SchemaName = &v +// SetExecutorsSummary sets the ExecutorsSummary field's value. +func (s *ListExecutorsOutput) SetExecutorsSummary(v []*ExecutorsSummary) *ListExecutorsOutput { + s.ExecutorsSummary = v return s } -// SetTableName sets the TableName field's value. -func (s *ColumnInfo) SetTableName(v string) *ColumnInfo { - s.TableName = &v +// SetNextToken sets the NextToken field's value. +func (s *ListExecutorsOutput) SetNextToken(v string) *ListExecutorsOutput { + s.NextToken = &v return s } -// SetType sets the Type field's value. -func (s *ColumnInfo) SetType(v string) *ColumnInfo { - s.Type = &v +// SetSessionId sets the SessionId field's value. +func (s *ListExecutorsOutput) SetSessionId(v string) *ListExecutorsOutput { + s.SessionId = &v return s } -type CreateDataCatalogInput struct { +type ListNamedQueriesInput struct { _ struct{} `type:"structure"` - // A description of the data catalog to be created. - Description *string `min:"1" type:"string"` - - // The name of the data catalog to create. The catalog name must be unique for - // the Amazon Web Services account and can use a maximum of 128 alphanumeric, - // underscore, at sign, or hyphen characters. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // Specifies the Lambda function or functions to use for creating the data catalog. - // This is a mapping whose values depend on the catalog type. - // - // * For the HIVE data catalog type, use the following syntax. The metadata-function - // parameter is required. The sdk-version parameter is optional and defaults - // to the currently supported version. metadata-function=lambda_arn, sdk-version=version_number - // - // * For the LAMBDA data catalog type, use one of the following sets of required - // parameters, but not both. If you have one Lambda function that processes - // metadata and another for reading the actual data, use the following syntax. 
- // Both parameters are required. metadata-function=lambda_arn, record-function=lambda_arn - // If you have a composite Lambda function that processes both metadata and - // data, use the following syntax to specify your Lambda function. function=lambda_arn - // - // * The GLUE type takes a catalog ID parameter and is required. The catalog_id - // is the account ID of the Amazon Web Services account to which the Glue - // Data Catalog belongs. catalog-id=catalog_id The GLUE data catalog type - // also applies to the default AwsDataCatalog that already exists in your - // account, of which you can have only one and cannot modify. Queries that - // specify a Glue Data Catalog other than the default AwsDataCatalog must - // be run on Athena engine version 2. In Regions where Athena engine version - // 2 is not available, creating new Glue data catalogs results in an INVALID_INPUT - // error. - Parameters map[string]*string `type:"map"` + // The maximum number of queries to return in this request. + MaxResults *int64 `type:"integer"` - // A list of comma separated tags to add to the data catalog that is created. - Tags []*Tag `type:"list"` + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of + // pages, pass in the NextToken from the response object of the previous page + // call. + NextToken *string `min:"1" type:"string"` - // The type of data catalog to create: LAMBDA for a federated catalog, HIVE - // for an external hive metastore, or GLUE for an Glue Data Catalog. - // - // Type is a required field - Type *string `type:"string" required:"true" enum:"DataCatalogType"` + // The name of the workgroup from which the named queries are being returned. + // If a workgroup is not specified, the saved queries for the primary workgroup + // are returned. + WorkGroup *string `type:"string"` } // String returns the string representation. @@ -3936,43 +12907,24 @@ type CreateDataCatalogInput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s CreateDataCatalogInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s CreateDataCatalogInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateDataCatalogInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateDataCatalogInput"} - if s.Description != nil && len(*s.Description) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Description", 1)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.Type == nil { - invalidParams.Add(request.NewErrParamRequired("Type")) - } - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } +func (s ListNamedQueriesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListNamedQueriesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListNamedQueriesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListNamedQueriesInput"} + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) } if invalidParams.Len() > 0 { @@ -3981,38 +12933,35 @@ func (s *CreateDataCatalogInput) Validate() error { return nil } -// SetDescription sets the Description field's value. -func (s *CreateDataCatalogInput) SetDescription(v string) *CreateDataCatalogInput { - s.Description = &v +// SetMaxResults sets the MaxResults field's value. +func (s *ListNamedQueriesInput) SetMaxResults(v int64) *ListNamedQueriesInput { + s.MaxResults = &v return s } -// SetName sets the Name field's value. -func (s *CreateDataCatalogInput) SetName(v string) *CreateDataCatalogInput { - s.Name = &v +// SetNextToken sets the NextToken field's value. +func (s *ListNamedQueriesInput) SetNextToken(v string) *ListNamedQueriesInput { + s.NextToken = &v return s } -// SetParameters sets the Parameters field's value. -func (s *CreateDataCatalogInput) SetParameters(v map[string]*string) *CreateDataCatalogInput { - s.Parameters = v +// SetWorkGroup sets the WorkGroup field's value. +func (s *ListNamedQueriesInput) SetWorkGroup(v string) *ListNamedQueriesInput { + s.WorkGroup = &v return s } -// SetTags sets the Tags field's value. -func (s *CreateDataCatalogInput) SetTags(v []*Tag) *CreateDataCatalogInput { - s.Tags = v - return s -} +type ListNamedQueriesOutput struct { + _ struct{} `type:"structure"` -// SetType sets the Type field's value. -func (s *CreateDataCatalogInput) SetType(v string) *CreateDataCatalogInput { - s.Type = &v - return s -} + // The list of unique query IDs. + NamedQueryIds []*string `min:"1" type:"list"` -type CreateDataCatalogOutput struct { - _ struct{} `type:"structure"` + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of + // pages, pass in the NextToken from the response object of the previous page + // call. + NextToken *string `min:"1" type:"string"` } // String returns the string representation. 
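For paginated operations such as ListNamedQueries, the SDK also generates *Pages helpers that hide the NextToken plumbing behind a callback; the helper name below follows that convention. A sketch collecting every saved query ID in a workgroup (the workgroup name is a placeholder):

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/athena"
)

// allNamedQueryIDs gathers the IDs from every page; returning true from the
// callback asks the helper to keep fetching pages.
func allNamedQueryIDs(svc *athena.Athena, workGroup string) ([]string, error) {
	var ids []string
	err := svc.ListNamedQueriesPages(
		&athena.ListNamedQueriesInput{WorkGroup: aws.String(workGroup)},
		func(page *athena.ListNamedQueriesOutput, lastPage bool) bool {
			for _, id := range page.NamedQueryIds {
				ids = append(ids, aws.StringValue(id))
			}
			return true
		})
	return ids, err
}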
@@ -4020,7 +12969,7 @@ type CreateDataCatalogOutput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s CreateDataCatalogOutput) String() string { +func (s ListNamedQueriesOutput) String() string { return awsutil.Prettify(s) } @@ -4029,44 +12978,39 @@ func (s CreateDataCatalogOutput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s CreateDataCatalogOutput) GoString() string { +func (s ListNamedQueriesOutput) GoString() string { return s.String() } -type CreateNamedQueryInput struct { - _ struct{} `type:"structure"` +// SetNamedQueryIds sets the NamedQueryIds field's value. +func (s *ListNamedQueriesOutput) SetNamedQueryIds(v []*string) *ListNamedQueriesOutput { + s.NamedQueryIds = v + return s +} - // A unique case-sensitive string used to ensure the request to create the query - // is idempotent (executes only once). If another CreateNamedQuery request is - // received, the same response is returned and another query is not created. - // If a parameter has changed, for example, the QueryString, an error is returned. - // - // This token is listed as not required because Amazon Web Services SDKs (for - // example the Amazon Web Services SDK for Java) auto-generate the token for - // users. If you are not using the Amazon Web Services SDK or the Amazon Web - // Services CLI, you must provide this token or the action will fail. - ClientRequestToken *string `min:"32" type:"string" idempotencyToken:"true"` +// SetNextToken sets the NextToken field's value. +func (s *ListNamedQueriesOutput) SetNextToken(v string) *ListNamedQueriesOutput { + s.NextToken = &v + return s +} - // The database to which the query belongs. - // - // Database is a required field - Database *string `min:"1" type:"string" required:"true"` +type ListNotebookMetadataInput struct { + _ struct{} `type:"structure"` - // The query description. - Description *string `min:"1" type:"string"` + // Search filter string. + Filters *FilterDefinition `type:"structure"` - // The query name. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + // Specifies the maximum number of results to return. + MaxResults *int64 `min:"1" type:"integer"` - // The contents of the query with all query statements. - // - // QueryString is a required field - QueryString *string `min:"1" type:"string" required:"true"` + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. + NextToken *string `min:"1" type:"string"` - // The name of the workgroup in which the named query is being created. - WorkGroup *string `type:"string"` + // The name of the Spark enabled workgroup to retrieve notebook metadata for. + // + // WorkGroup is a required field + WorkGroup *string `type:"string" required:"true"` } // String returns the string representation. @@ -4074,7 +13018,7 @@ type CreateNamedQueryInput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". 
-func (s CreateNamedQueryInput) String() string { +func (s ListNotebookMetadataInput) String() string { return awsutil.Prettify(s) } @@ -4083,36 +13027,26 @@ func (s CreateNamedQueryInput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s CreateNamedQueryInput) GoString() string { +func (s ListNotebookMetadataInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateNamedQueryInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateNamedQueryInput"} - if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 32 { - invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 32)) - } - if s.Database == nil { - invalidParams.Add(request.NewErrParamRequired("Database")) - } - if s.Database != nil && len(*s.Database) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Database", 1)) - } - if s.Description != nil && len(*s.Description) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Description", 1)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) +func (s *ListNotebookMetadataInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListNotebookMetadataInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) } - if s.QueryString == nil { - invalidParams.Add(request.NewErrParamRequired("QueryString")) + if s.WorkGroup == nil { + invalidParams.Add(request.NewErrParamRequired("WorkGroup")) } - if s.QueryString != nil && len(*s.QueryString) < 1 { - invalidParams.Add(request.NewErrParamMinLen("QueryString", 1)) + if s.Filters != nil { + if err := s.Filters.Validate(); err != nil { + invalidParams.AddNested("Filters", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -4121,47 +13055,41 @@ func (s *CreateNamedQueryInput) Validate() error { return nil } -// SetClientRequestToken sets the ClientRequestToken field's value. -func (s *CreateNamedQueryInput) SetClientRequestToken(v string) *CreateNamedQueryInput { - s.ClientRequestToken = &v - return s -} - -// SetDatabase sets the Database field's value. -func (s *CreateNamedQueryInput) SetDatabase(v string) *CreateNamedQueryInput { - s.Database = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *CreateNamedQueryInput) SetDescription(v string) *CreateNamedQueryInput { - s.Description = &v +// SetFilters sets the Filters field's value. +func (s *ListNotebookMetadataInput) SetFilters(v *FilterDefinition) *ListNotebookMetadataInput { + s.Filters = v return s } -// SetName sets the Name field's value. -func (s *CreateNamedQueryInput) SetName(v string) *CreateNamedQueryInput { - s.Name = &v +// SetMaxResults sets the MaxResults field's value. +func (s *ListNotebookMetadataInput) SetMaxResults(v int64) *ListNotebookMetadataInput { + s.MaxResults = &v return s } -// SetQueryString sets the QueryString field's value. -func (s *CreateNamedQueryInput) SetQueryString(v string) *CreateNamedQueryInput { - s.QueryString = &v +// SetNextToken sets the NextToken field's value. 
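ListNotebookMetadataInput is the one input in this stretch with a nested structure: its Validate above recurses into Filters via AddNested. A sketch, assuming FilterDefinition's Name member matches notebook names as in the Athena API (client construction is assumed):

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/athena"
)

// findNotebooks lists notebook metadata in a Spark-enabled workgroup,
// filtered by name; Filters is validated recursively by Validate.
func findNotebooks(svc *athena.Athena, workGroup, name string) (*athena.ListNotebookMetadataOutput, error) {
	return svc.ListNotebookMetadata(&athena.ListNotebookMetadataInput{
		WorkGroup: aws.String(workGroup), // required field
		Filters:   &athena.FilterDefinition{Name: aws.String(name)}, // assumed Name member
	})
}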
+func (s *ListNotebookMetadataInput) SetNextToken(v string) *ListNotebookMetadataInput { + s.NextToken = &v return s } // SetWorkGroup sets the WorkGroup field's value. -func (s *CreateNamedQueryInput) SetWorkGroup(v string) *CreateNamedQueryInput { +func (s *ListNotebookMetadataInput) SetWorkGroup(v string) *ListNotebookMetadataInput { s.WorkGroup = &v return s } -type CreateNamedQueryOutput struct { +type ListNotebookMetadataOutput struct { _ struct{} `type:"structure"` - // The unique ID of the query. - NamedQueryId *string `type:"string"` + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of + // pages, pass in the NextToken from the response object of the previous page + // call. + NextToken *string `min:"1" type:"string"` + + // The list of notebook metadata for the specified workgroup. + NotebookMetadataList []*NotebookMetadata `type:"list"` } // String returns the string representation. @@ -4169,7 +13097,7 @@ type CreateNamedQueryOutput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s CreateNamedQueryOutput) String() string { +func (s ListNotebookMetadataOutput) String() string { return awsutil.Prettify(s) } @@ -4178,36 +13106,38 @@ func (s CreateNamedQueryOutput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s CreateNamedQueryOutput) GoString() string { +func (s ListNotebookMetadataOutput) GoString() string { return s.String() } -// SetNamedQueryId sets the NamedQueryId field's value. -func (s *CreateNamedQueryOutput) SetNamedQueryId(v string) *CreateNamedQueryOutput { - s.NamedQueryId = &v +// SetNextToken sets the NextToken field's value. +func (s *ListNotebookMetadataOutput) SetNextToken(v string) *ListNotebookMetadataOutput { + s.NextToken = &v return s } -type CreatePreparedStatementInput struct { - _ struct{} `type:"structure"` +// SetNotebookMetadataList sets the NotebookMetadataList field's value. +func (s *ListNotebookMetadataOutput) SetNotebookMetadataList(v []*NotebookMetadata) *ListNotebookMetadataOutput { + s.NotebookMetadataList = v + return s +} - // The description of the prepared statement. - Description *string `min:"1" type:"string"` +type ListNotebookSessionsInput struct { + _ struct{} `type:"structure"` - // The query string for the prepared statement. - // - // QueryStatement is a required field - QueryStatement *string `min:"1" type:"string" required:"true"` + // The maximum number of notebook sessions to return. + MaxResults *int64 `min:"1" type:"integer"` - // The name of the prepared statement. - // - // StatementName is a required field - StatementName *string `min:"1" type:"string" required:"true"` + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of + // pages, pass in the NextToken from the response object of the previous page + // call. + NextToken *string `min:"1" type:"string"` - // The name of the workgroup to which the prepared statement belongs. + // The ID of the notebook to list sessions for. 
// - // WorkGroup is a required field - WorkGroup *string `type:"string" required:"true"` + // NotebookId is a required field + NotebookId *string `min:"1" type:"string" required:"true"` } // String returns the string representation. @@ -4215,7 +13145,7 @@ type CreatePreparedStatementInput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s CreatePreparedStatementInput) String() string { +func (s ListNotebookSessionsInput) String() string { return awsutil.Prettify(s) } @@ -4224,30 +13154,24 @@ func (s CreatePreparedStatementInput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s CreatePreparedStatementInput) GoString() string { +func (s ListNotebookSessionsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreatePreparedStatementInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreatePreparedStatementInput"} - if s.Description != nil && len(*s.Description) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Description", 1)) - } - if s.QueryStatement == nil { - invalidParams.Add(request.NewErrParamRequired("QueryStatement")) - } - if s.QueryStatement != nil && len(*s.QueryStatement) < 1 { - invalidParams.Add(request.NewErrParamMinLen("QueryStatement", 1)) +func (s *ListNotebookSessionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListNotebookSessionsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } - if s.StatementName == nil { - invalidParams.Add(request.NewErrParamRequired("StatementName")) + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) } - if s.StatementName != nil && len(*s.StatementName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("StatementName", 1)) + if s.NotebookId == nil { + invalidParams.Add(request.NewErrParamRequired("NotebookId")) } - if s.WorkGroup == nil { - invalidParams.Add(request.NewErrParamRequired("WorkGroup")) + if s.NotebookId != nil && len(*s.NotebookId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NotebookId", 1)) } if invalidParams.Len() > 0 { @@ -4256,32 +13180,37 @@ func (s *CreatePreparedStatementInput) Validate() error { return nil } -// SetDescription sets the Description field's value. -func (s *CreatePreparedStatementInput) SetDescription(v string) *CreatePreparedStatementInput { - s.Description = &v - return s -} - -// SetQueryStatement sets the QueryStatement field's value. -func (s *CreatePreparedStatementInput) SetQueryStatement(v string) *CreatePreparedStatementInput { - s.QueryStatement = &v +// SetMaxResults sets the MaxResults field's value. +func (s *ListNotebookSessionsInput) SetMaxResults(v int64) *ListNotebookSessionsInput { + s.MaxResults = &v return s } -// SetStatementName sets the StatementName field's value. -func (s *CreatePreparedStatementInput) SetStatementName(v string) *CreatePreparedStatementInput { - s.StatementName = &v +// SetNextToken sets the NextToken field's value. 
+func (s *ListNotebookSessionsInput) SetNextToken(v string) *ListNotebookSessionsInput { + s.NextToken = &v return s } -// SetWorkGroup sets the WorkGroup field's value. -func (s *CreatePreparedStatementInput) SetWorkGroup(v string) *CreatePreparedStatementInput { - s.WorkGroup = &v +// SetNotebookId sets the NotebookId field's value. +func (s *ListNotebookSessionsInput) SetNotebookId(v string) *ListNotebookSessionsInput { + s.NotebookId = &v return s } -type CreatePreparedStatementOutput struct { +type ListNotebookSessionsOutput struct { _ struct{} `type:"structure"` + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of + // pages, pass in the NextToken from the response object of the previous page + // call. + NextToken *string `min:"1" type:"string"` + + // A list of the sessions belonging to the notebook. + // + // NotebookSessionsList is a required field + NotebookSessionsList []*NotebookSessionSummary `type:"list" required:"true"` } // String returns the string representation. @@ -4289,7 +13218,7 @@ type CreatePreparedStatementOutput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s CreatePreparedStatementOutput) String() string { +func (s ListNotebookSessionsOutput) String() string { return awsutil.Prettify(s) } @@ -4298,32 +13227,38 @@ func (s CreatePreparedStatementOutput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s CreatePreparedStatementOutput) GoString() string { +func (s ListNotebookSessionsOutput) GoString() string { return s.String() } -type CreateWorkGroupInput struct { +// SetNextToken sets the NextToken field's value. +func (s *ListNotebookSessionsOutput) SetNextToken(v string) *ListNotebookSessionsOutput { + s.NextToken = &v + return s +} + +// SetNotebookSessionsList sets the NotebookSessionsList field's value. +func (s *ListNotebookSessionsOutput) SetNotebookSessionsList(v []*NotebookSessionSummary) *ListNotebookSessionsOutput { + s.NotebookSessionsList = v + return s +} + +type ListPreparedStatementsInput struct { _ struct{} `type:"structure"` - // The configuration for the workgroup, which includes the location in Amazon - // S3 where query results are stored, the encryption configuration, if any, - // used for encrypting query results, whether the Amazon CloudWatch Metrics - // are enabled for the workgroup, the limit for the amount of bytes scanned - // (cutoff) per query, if it is specified, and whether workgroup's settings - // (specified with EnforceWorkGroupConfiguration) in the WorkGroupConfiguration - // override client-side settings. See WorkGroupConfiguration$EnforceWorkGroupConfiguration. - Configuration *WorkGroupConfiguration `type:"structure"` + // The maximum number of results to return in this request. + MaxResults *int64 `min:"1" type:"integer"` - // The workgroup description. - Description *string `type:"string"` + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of + // pages, pass in the NextToken from the response object of the previous page + // call. 
+ NextToken *string `min:"1" type:"string"` - // The workgroup name. + // The workgroup to list the prepared statements for. // - // Name is a required field - Name *string `type:"string" required:"true"` - - // A list of comma separated tags to add to the workgroup that is created. - Tags []*Tag `type:"list"` + // WorkGroup is a required field + WorkGroup *string `type:"string" required:"true"` } // String returns the string representation. @@ -4331,7 +13266,7 @@ type CreateWorkGroupInput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s CreateWorkGroupInput) String() string { +func (s ListPreparedStatementsInput) String() string { return awsutil.Prettify(s) } @@ -4340,30 +13275,21 @@ func (s CreateWorkGroupInput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s CreateWorkGroupInput) GoString() string { +func (s ListPreparedStatementsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateWorkGroupInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateWorkGroupInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) +func (s *ListPreparedStatementsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListPreparedStatementsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } - if s.Configuration != nil { - if err := s.Configuration.Validate(); err != nil { - invalidParams.AddNested("Configuration", err.(request.ErrInvalidParams)) - } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) } - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } + if s.WorkGroup == nil { + invalidParams.Add(request.NewErrParamRequired("WorkGroup")) } if invalidParams.Len() > 0 { @@ -4372,32 +13298,35 @@ func (s *CreateWorkGroupInput) Validate() error { return nil } -// SetConfiguration sets the Configuration field's value. -func (s *CreateWorkGroupInput) SetConfiguration(v *WorkGroupConfiguration) *CreateWorkGroupInput { - s.Configuration = v - return s -} - -// SetDescription sets the Description field's value. -func (s *CreateWorkGroupInput) SetDescription(v string) *CreateWorkGroupInput { - s.Description = &v +// SetMaxResults sets the MaxResults field's value. +func (s *ListPreparedStatementsInput) SetMaxResults(v int64) *ListPreparedStatementsInput { + s.MaxResults = &v return s } -// SetName sets the Name field's value. -func (s *CreateWorkGroupInput) SetName(v string) *CreateWorkGroupInput { - s.Name = &v +// SetNextToken sets the NextToken field's value. +func (s *ListPreparedStatementsInput) SetNextToken(v string) *ListPreparedStatementsInput { + s.NextToken = &v return s } -// SetTags sets the Tags field's value. -func (s *CreateWorkGroupInput) SetTags(v []*Tag) *CreateWorkGroupInput { - s.Tags = v +// SetWorkGroup sets the WorkGroup field's value. 
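Like the rest of the regenerated client, ListPreparedStatements should also carry a WithContext variant per the SDK's usual generation, so the call can be bounded by a deadline. A sketch (the workgroup name is a placeholder):

package example

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/athena"
)

// preparedStatements lists the workgroup's prepared statements, giving the
// round trip at most ten seconds before the context cancels it.
func preparedStatements(ctx context.Context, svc *athena.Athena, workGroup string) ([]*athena.PreparedStatementSummary, error) {
	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()
	out, err := svc.ListPreparedStatementsWithContext(ctx, &athena.ListPreparedStatementsInput{
		WorkGroup: aws.String(workGroup), // required field
	})
	if err != nil {
		return nil, err
	}
	return out.PreparedStatements, nil
}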
+func (s *ListPreparedStatementsInput) SetWorkGroup(v string) *ListPreparedStatementsInput { + s.WorkGroup = &v return s } -type CreateWorkGroupOutput struct { +type ListPreparedStatementsOutput struct { _ struct{} `type:"structure"` + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of + // pages, pass in the NextToken from the response object of the previous page + // call. + NextToken *string `min:"1" type:"string"` + + // The list of prepared statements for the workgroup. + PreparedStatements []*PreparedStatementSummary `type:"list"` } // String returns the string representation. @@ -4405,7 +13334,7 @@ type CreateWorkGroupOutput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s CreateWorkGroupOutput) String() string { +func (s ListPreparedStatementsOutput) String() string { return awsutil.Prettify(s) } @@ -4414,52 +13343,38 @@ func (s CreateWorkGroupOutput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s CreateWorkGroupOutput) GoString() string { +func (s ListPreparedStatementsOutput) GoString() string { return s.String() } -// Contains information about a data catalog in an Amazon Web Services account. -type DataCatalog struct { - _ struct{} `type:"structure"` +// SetNextToken sets the NextToken field's value. +func (s *ListPreparedStatementsOutput) SetNextToken(v string) *ListPreparedStatementsOutput { + s.NextToken = &v + return s +} - // An optional description of the data catalog. - Description *string `min:"1" type:"string"` +// SetPreparedStatements sets the PreparedStatements field's value. +func (s *ListPreparedStatementsOutput) SetPreparedStatements(v []*PreparedStatementSummary) *ListPreparedStatementsOutput { + s.PreparedStatements = v + return s +} - // The name of the data catalog. The catalog name must be unique for the Amazon - // Web Services account and can use a maximum of 128 alphanumeric, underscore, - // at sign, or hyphen characters. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` +type ListQueryExecutionsInput struct { + _ struct{} `type:"structure"` - // Specifies the Lambda function or functions to use for the data catalog. This - // is a mapping whose values depend on the catalog type. - // - // * For the HIVE data catalog type, use the following syntax. The metadata-function - // parameter is required. The sdk-version parameter is optional and defaults - // to the currently supported version. metadata-function=lambda_arn, sdk-version=version_number - // - // * For the LAMBDA data catalog type, use one of the following sets of required - // parameters, but not both. If you have one Lambda function that processes - // metadata and another for reading the actual data, use the following syntax. - // Both parameters are required. metadata-function=lambda_arn, record-function=lambda_arn - // If you have a composite Lambda function that processes both metadata and - // data, use the following syntax to specify your Lambda function. function=lambda_arn - // - // * The GLUE type takes a catalog ID parameter and is required. 
The catalog_id - // is the account ID of the Amazon Web Services account to which the Glue - // catalog belongs. catalog-id=catalog_id The GLUE data catalog type also - // applies to the default AwsDataCatalog that already exists in your account, - // of which you can have only one and cannot modify. Queries that specify - // a Glue Data Catalog other than the default AwsDataCatalog must be run - // on Athena engine version 2. - Parameters map[string]*string `type:"map"` + // The maximum number of query executions to return in this request. + MaxResults *int64 `type:"integer"` - // The type of data catalog to create: LAMBDA for a federated catalog, HIVE - // for an external hive metastore, or GLUE for an Glue Data Catalog. - // - // Type is a required field - Type *string `type:"string" required:"true" enum:"DataCatalogType"` + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of + // pages, pass in the NextToken from the response object of the previous page + // call. + NextToken *string `min:"1" type:"string"` + + // The name of the workgroup from which queries are being returned. If a workgroup + // is not specified, a list of available query execution IDs for the queries + // in the primary workgroup is returned. + WorkGroup *string `type:"string"` } // String returns the string representation. @@ -4467,7 +13382,7 @@ type DataCatalog struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s DataCatalog) String() string { +func (s ListQueryExecutionsInput) String() string { return awsutil.Prettify(s) } @@ -4476,44 +13391,49 @@ func (s DataCatalog) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s DataCatalog) GoString() string { +func (s ListQueryExecutionsInput) GoString() string { return s.String() } -// SetDescription sets the Description field's value. -func (s *DataCatalog) SetDescription(v string) *DataCatalog { - s.Description = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListQueryExecutionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListQueryExecutionsInput"} + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetName sets the Name field's value. -func (s *DataCatalog) SetName(v string) *DataCatalog { - s.Name = &v +// SetMaxResults sets the MaxResults field's value. +func (s *ListQueryExecutionsInput) SetMaxResults(v int64) *ListQueryExecutionsInput { + s.MaxResults = &v return s } -// SetParameters sets the Parameters field's value. -func (s *DataCatalog) SetParameters(v map[string]*string) *DataCatalog { - s.Parameters = v +// SetNextToken sets the NextToken field's value. +func (s *ListQueryExecutionsInput) SetNextToken(v string) *ListQueryExecutionsInput { + s.NextToken = &v return s } -// SetType sets the Type field's value. -func (s *DataCatalog) SetType(v string) *DataCatalog { - s.Type = &v +// SetWorkGroup sets the WorkGroup field's value. 
+func (s *ListQueryExecutionsInput) SetWorkGroup(v string) *ListQueryExecutionsInput { + s.WorkGroup = &v return s } -// The summary information for the data catalog, which includes its name and -// type. -type DataCatalogSummary struct { +type ListQueryExecutionsOutput struct { _ struct{} `type:"structure"` - // The name of the data catalog. - CatalogName *string `min:"1" type:"string"` + // A token to be used by the next request if this request is truncated. + NextToken *string `min:"1" type:"string"` - // The data catalog type. - Type *string `type:"string" enum:"DataCatalogType"` + // The unique IDs of each query execution as an array of strings. + QueryExecutionIds []*string `min:"1" type:"list"` } // String returns the string representation. @@ -4521,7 +13441,7 @@ type DataCatalogSummary struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s DataCatalogSummary) String() string { +func (s ListQueryExecutionsOutput) String() string { return awsutil.Prettify(s) } @@ -4530,36 +13450,57 @@ func (s DataCatalogSummary) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s DataCatalogSummary) GoString() string { +func (s ListQueryExecutionsOutput) GoString() string { return s.String() } -// SetCatalogName sets the CatalogName field's value. -func (s *DataCatalogSummary) SetCatalogName(v string) *DataCatalogSummary { - s.CatalogName = &v +// SetNextToken sets the NextToken field's value. +func (s *ListQueryExecutionsOutput) SetNextToken(v string) *ListQueryExecutionsOutput { + s.NextToken = &v return s } -// SetType sets the Type field's value. -func (s *DataCatalogSummary) SetType(v string) *DataCatalogSummary { - s.Type = &v +// SetQueryExecutionIds sets the QueryExecutionIds field's value. +func (s *ListQueryExecutionsOutput) SetQueryExecutionIds(v []*string) *ListQueryExecutionsOutput { + s.QueryExecutionIds = v return s } -// Contains metadata information for a database in a data catalog. -type Database struct { +type ListSessionsInput struct { _ struct{} `type:"structure"` - // An optional description of the database. - Description *string `min:"1" type:"string"` + // The maximum number of sessions to return. + MaxResults *int64 `min:"1" type:"integer"` - // The name of the database. + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of + // pages, pass in the NextToken from the response object of the previous page + // call. + NextToken *string `type:"string"` + + // A filter for a specific session state. A description of each state follows. // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + // CREATING - The session is being started, including acquiring resources. + // + // CREATED - The session has been started. + // + // IDLE - The session is able to accept a calculation. + // + // BUSY - The session is processing another task and is unable to accept a calculation. + // + // TERMINATING - The session is in the process of shutting down. + // + // TERMINATED - The session and its resources are no longer running. + // + // DEGRADED - The session has no healthy coordinators. 
+ // + // FAILED - Due to a failure, the session and its resources are no longer running. + StateFilter *string `type:"string" enum:"SessionState"` - // A set of custom key/value pairs. - Parameters map[string]*string `type:"map"` + // The workgroup to which the session belongs. + // + // WorkGroup is a required field + WorkGroup *string `type:"string" required:"true"` } // String returns the string representation. @@ -4567,7 +13508,7 @@ type Database struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s Database) String() string { +func (s ListSessionsInput) String() string { return awsutil.Prettify(s) } @@ -4576,34 +13517,61 @@ func (s Database) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s Database) GoString() string { +func (s ListSessionsInput) GoString() string { return s.String() } -// SetDescription sets the Description field's value. -func (s *Database) SetDescription(v string) *Database { - s.Description = &v +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListSessionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListSessionsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.WorkGroup == nil { + invalidParams.Add(request.NewErrParamRequired("WorkGroup")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListSessionsInput) SetMaxResults(v int64) *ListSessionsInput { + s.MaxResults = &v return s } -// SetName sets the Name field's value. -func (s *Database) SetName(v string) *Database { - s.Name = &v +// SetNextToken sets the NextToken field's value. +func (s *ListSessionsInput) SetNextToken(v string) *ListSessionsInput { + s.NextToken = &v return s } -// SetParameters sets the Parameters field's value. -func (s *Database) SetParameters(v map[string]*string) *Database { - s.Parameters = v +// SetStateFilter sets the StateFilter field's value. +func (s *ListSessionsInput) SetStateFilter(v string) *ListSessionsInput { + s.StateFilter = &v return s } -// A piece of data (a field in the table). -type Datum struct { +// SetWorkGroup sets the WorkGroup field's value. +func (s *ListSessionsInput) SetWorkGroup(v string) *ListSessionsInput { + s.WorkGroup = &v + return s +} + +type ListSessionsOutput struct { _ struct{} `type:"structure"` - // The value of the datum. - VarCharValue *string `type:"string"` + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of + // pages, pass in the NextToken from the response object of the previous page + // call. + NextToken *string `type:"string"` + + // A list of sessions. + Sessions []*SessionSummary `type:"list"` } // String returns the string representation. @@ -4611,7 +13579,7 @@ type Datum struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". 
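StateFilter accepts one of the SessionState values enumerated above. A short sketch that lists only sessions able to accept a calculation, reusing the svc client and imports from the previous sketch; the workgroup name is an assumption, and SessionStateIdle follows the SDK's generated naming for the enum values documented above:

```go
// listIdleSessions prints the IDs of sessions in the IDLE state for an
// assumed Spark-enabled workgroup.
func listIdleSessions(svc *athena.Athena) error {
	out, err := svc.ListSessions((&athena.ListSessionsInput{}).
		SetWorkGroup("spark-workgroup").
		SetStateFilter(athena.SessionStateIdle))
	if err != nil {
		return err
	}
	for _, s := range out.Sessions {
		fmt.Println(*s.SessionId)
	}
	return nil
}
```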
-func (s Datum) String() string {
+func (s ListSessionsOutput) String() string {
 	return awsutil.Prettify(s)
 }
 
@@ -4620,23 +13588,51 @@ func (s Datum) String() string {
 // API parameter values that are decorated as "sensitive" in the API will not
 // be included in the string output. The member name will be present, but the
 // value will be replaced with "sensitive".
-func (s Datum) GoString() string {
+func (s ListSessionsOutput) GoString() string {
 	return s.String()
 }
 
-// SetVarCharValue sets the VarCharValue field's value.
-func (s *Datum) SetVarCharValue(v string) *Datum {
-	s.VarCharValue = &v
+// SetNextToken sets the NextToken field's value.
+func (s *ListSessionsOutput) SetNextToken(v string) *ListSessionsOutput {
+	s.NextToken = &v
 	return s
 }
 
-type DeleteDataCatalogInput struct {
+// SetSessions sets the Sessions field's value.
+func (s *ListSessionsOutput) SetSessions(v []*SessionSummary) *ListSessionsOutput {
+	s.Sessions = v
+	return s
+}
+
+type ListTableMetadataInput struct {
 	_ struct{} `type:"structure"`
 
-	// The name of the data catalog to delete.
+	// The name of the data catalog for which table metadata should be returned.
 	//
-	// Name is a required field
-	Name *string `min:"1" type:"string" required:"true"`
+	// CatalogName is a required field
+	CatalogName *string `min:"1" type:"string" required:"true"`
+
+	// The name of the database for which table metadata should be returned.
+	//
+	// DatabaseName is a required field
+	DatabaseName *string `min:"1" type:"string" required:"true"`
+
+	// A regex filter that pattern-matches table names. If no expression is supplied,
+	// metadata for all tables is listed.
+	Expression *string `type:"string"`
+
+	// Specifies the maximum number of results to return.
+	MaxResults *int64 `min:"1" type:"integer"`
+
+	// A token generated by the Athena service that specifies where to continue
+	// pagination if a previous request was truncated. To obtain the next set of
+	// pages, pass in the NextToken from the response object of the previous page
+	// call.
+	NextToken *string `min:"1" type:"string"`
+
+	// The name of the workgroup for which the metadata is being fetched. Required
+	// if requesting an IAM Identity Center enabled Glue Data Catalog.
+	WorkGroup *string `type:"string"`
 }
 
 // String returns the string representation.
@@ -4644,7 +13640,7 @@ type DeleteDataCatalogInput struct {
 // API parameter values that are decorated as "sensitive" in the API will not
 // be included in the string output. The member name will be present, but the
 // value will be replaced with "sensitive".
-func (s DeleteDataCatalogInput) String() string {
+func (s ListTableMetadataInput) String() string {
 	return awsutil.Prettify(s)
 }
 
@@ -4653,18 +13649,30 @@ func (s DeleteDataCatalogInput) String() string {
 // API parameter values that are decorated as "sensitive" in the API will not
 // be included in the string output. The member name will be present, but the
 // value will be replaced with "sensitive".
-func (s DeleteDataCatalogInput) GoString() string {
+func (s ListTableMetadataInput) GoString() string {
 	return s.String()
 }
 
 // Validate inspects the fields of the type to determine if they are valid.
-func (s *DeleteDataCatalogInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteDataCatalogInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) +func (s *ListTableMetadataInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTableMetadataInput"} + if s.CatalogName == nil { + invalidParams.Add(request.NewErrParamRequired("CatalogName")) } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + if s.CatalogName != nil && len(*s.CatalogName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogName", 1)) + } + if s.DatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseName")) + } + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) } if invalidParams.Len() > 0 { @@ -4673,39 +13681,53 @@ func (s *DeleteDataCatalogInput) Validate() error { return nil } -// SetName sets the Name field's value. -func (s *DeleteDataCatalogInput) SetName(v string) *DeleteDataCatalogInput { - s.Name = &v +// SetCatalogName sets the CatalogName field's value. +func (s *ListTableMetadataInput) SetCatalogName(v string) *ListTableMetadataInput { + s.CatalogName = &v return s } -type DeleteDataCatalogOutput struct { - _ struct{} `type:"structure"` +// SetDatabaseName sets the DatabaseName field's value. +func (s *ListTableMetadataInput) SetDatabaseName(v string) *ListTableMetadataInput { + s.DatabaseName = &v + return s } -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteDataCatalogOutput) String() string { - return awsutil.Prettify(s) +// SetExpression sets the Expression field's value. +func (s *ListTableMetadataInput) SetExpression(v string) *ListTableMetadataInput { + s.Expression = &v + return s } -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteDataCatalogOutput) GoString() string { - return s.String() +// SetMaxResults sets the MaxResults field's value. +func (s *ListTableMetadataInput) SetMaxResults(v int64) *ListTableMetadataInput { + s.MaxResults = &v + return s } -type DeleteNamedQueryInput struct { +// SetNextToken sets the NextToken field's value. +func (s *ListTableMetadataInput) SetNextToken(v string) *ListTableMetadataInput { + s.NextToken = &v + return s +} + +// SetWorkGroup sets the WorkGroup field's value. +func (s *ListTableMetadataInput) SetWorkGroup(v string) *ListTableMetadataInput { + s.WorkGroup = &v + return s +} + +type ListTableMetadataOutput struct { _ struct{} `type:"structure"` - // The unique ID of the query to delete. - NamedQueryId *string `type:"string" idempotencyToken:"true"` + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. 
To obtain the next set of + // pages, pass in the NextToken from the response object of the previous page + // call. + NextToken *string `min:"1" type:"string"` + + // A list of table metadata. + TableMetadataList []*TableMetadata `type:"list"` } // String returns the string representation. @@ -4713,7 +13735,7 @@ type DeleteNamedQueryInput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s DeleteNamedQueryInput) String() string { +func (s ListTableMetadataOutput) String() string { return awsutil.Prettify(s) } @@ -4722,50 +13744,38 @@ func (s DeleteNamedQueryInput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s DeleteNamedQueryInput) GoString() string { +func (s ListTableMetadataOutput) GoString() string { return s.String() } -// SetNamedQueryId sets the NamedQueryId field's value. -func (s *DeleteNamedQueryInput) SetNamedQueryId(v string) *DeleteNamedQueryInput { - s.NamedQueryId = &v +// SetNextToken sets the NextToken field's value. +func (s *ListTableMetadataOutput) SetNextToken(v string) *ListTableMetadataOutput { + s.NextToken = &v return s } -type DeleteNamedQueryOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteNamedQueryOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s DeleteNamedQueryOutput) GoString() string { - return s.String() +// SetTableMetadataList sets the TableMetadataList field's value. +func (s *ListTableMetadataOutput) SetTableMetadataList(v []*TableMetadata) *ListTableMetadataOutput { + s.TableMetadataList = v + return s } -type DeletePreparedStatementInput struct { +type ListTagsForResourceInput struct { _ struct{} `type:"structure"` - // The name of the prepared statement to delete. - // - // StatementName is a required field - StatementName *string `min:"1" type:"string" required:"true"` + // The maximum number of results to be returned per request that lists the tags + // for the resource. + MaxResults *int64 `min:"75" type:"integer"` - // The workgroup to which the statement to be deleted belongs. + // The token for the next set of results, or null if there are no additional + // results for this request, where the request lists the tags for the resource + // with the specified ARN. + NextToken *string `min:"1" type:"string"` + + // Lists the tags for the resource with the specified ARN. // - // WorkGroup is a required field - WorkGroup *string `type:"string" required:"true"` + // ResourceARN is a required field + ResourceARN *string `min:"1" type:"string" required:"true"` } // String returns the string representation. 
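Every paginated List operation in this file shares the NextToken contract described in the field comments: a truncated response carries a token, and passing it back resumes where the previous page ended. A minimal manual loop over ListTableMetadata, assuming the svc client from the earlier sketch; the catalog and database names are illustrative, and the SDK's generated ListTableMetadataPages helper wraps the same pattern:

```go
// listAllTables drains every page of table metadata by threading NextToken
// through successive calls until the service stops returning one.
func listAllTables(svc *athena.Athena) error {
	input := (&athena.ListTableMetadataInput{}).
		SetCatalogName("AwsDataCatalog"). // the default Glue-backed catalog
		SetDatabaseName("default")        // assumed database name
	for {
		out, err := svc.ListTableMetadata(input)
		if err != nil {
			return err
		}
		for _, tm := range out.TableMetadataList {
			fmt.Println(*tm.Name)
		}
		if out.NextToken == nil {
			break // final page reached
		}
		input.SetNextToken(*out.NextToken)
	}
	return nil
}
```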
@@ -4773,7 +13783,7 @@ type DeletePreparedStatementInput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s DeletePreparedStatementInput) String() string { +func (s ListTagsForResourceInput) String() string { return awsutil.Prettify(s) } @@ -4782,21 +13792,24 @@ func (s DeletePreparedStatementInput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s DeletePreparedStatementInput) GoString() string { +func (s ListTagsForResourceInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeletePreparedStatementInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeletePreparedStatementInput"} - if s.StatementName == nil { - invalidParams.Add(request.NewErrParamRequired("StatementName")) +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.MaxResults != nil && *s.MaxResults < 75 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 75)) } - if s.StatementName != nil && len(*s.StatementName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("StatementName", 1)) + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) } - if s.WorkGroup == nil { - invalidParams.Add(request.NewErrParamRequired("WorkGroup")) + if s.ResourceARN == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceARN")) + } + if s.ResourceARN != nil && len(*s.ResourceARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceARN", 1)) } if invalidParams.Len() > 0 { @@ -4805,20 +13818,32 @@ func (s *DeletePreparedStatementInput) Validate() error { return nil } -// SetStatementName sets the StatementName field's value. -func (s *DeletePreparedStatementInput) SetStatementName(v string) *DeletePreparedStatementInput { - s.StatementName = &v +// SetMaxResults sets the MaxResults field's value. +func (s *ListTagsForResourceInput) SetMaxResults(v int64) *ListTagsForResourceInput { + s.MaxResults = &v return s } -// SetWorkGroup sets the WorkGroup field's value. -func (s *DeletePreparedStatementInput) SetWorkGroup(v string) *DeletePreparedStatementInput { - s.WorkGroup = &v +// SetNextToken sets the NextToken field's value. +func (s *ListTagsForResourceInput) SetNextToken(v string) *ListTagsForResourceInput { + s.NextToken = &v return s } -type DeletePreparedStatementOutput struct { +// SetResourceARN sets the ResourceARN field's value. +func (s *ListTagsForResourceInput) SetResourceARN(v string) *ListTagsForResourceInput { + s.ResourceARN = &v + return s +} + +type ListTagsForResourceOutput struct { _ struct{} `type:"structure"` + + // A token to be used by the next request if this request is truncated. + NextToken *string `min:"1" type:"string"` + + // The list of tags associated with the specified resource. + Tags []*Tag `type:"list"` } // String returns the string representation. @@ -4826,7 +13851,7 @@ type DeletePreparedStatementOutput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. 
The member name will be present, but the // value will be replaced with "sensitive". -func (s DeletePreparedStatementOutput) String() string { +func (s ListTagsForResourceOutput) String() string { return awsutil.Prettify(s) } @@ -4835,21 +13860,33 @@ func (s DeletePreparedStatementOutput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s DeletePreparedStatementOutput) GoString() string { +func (s ListTagsForResourceOutput) GoString() string { return s.String() } -type DeleteWorkGroupInput struct { +// SetNextToken sets the NextToken field's value. +func (s *ListTagsForResourceOutput) SetNextToken(v string) *ListTagsForResourceOutput { + s.NextToken = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput { + s.Tags = v + return s +} + +type ListWorkGroupsInput struct { _ struct{} `type:"structure"` - // The option to delete the workgroup and its contents even if the workgroup - // contains any named queries or query executions. - RecursiveDeleteOption *bool `type:"boolean"` + // The maximum number of workgroups to return in this request. + MaxResults *int64 `min:"1" type:"integer"` - // The unique name of the workgroup to delete. - // - // WorkGroup is a required field - WorkGroup *string `type:"string" required:"true"` + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of + // pages, pass in the NextToken from the response object of the previous page + // call. + NextToken *string `min:"1" type:"string"` } // String returns the string representation. @@ -4857,7 +13894,7 @@ type DeleteWorkGroupInput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s DeleteWorkGroupInput) String() string { +func (s ListWorkGroupsInput) String() string { return awsutil.Prettify(s) } @@ -4866,15 +13903,18 @@ func (s DeleteWorkGroupInput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s DeleteWorkGroupInput) GoString() string { +func (s ListWorkGroupsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteWorkGroupInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteWorkGroupInput"} - if s.WorkGroup == nil { - invalidParams.Add(request.NewErrParamRequired("WorkGroup")) +func (s *ListWorkGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListWorkGroupsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) } if invalidParams.Len() > 0 { @@ -4883,20 +13923,30 @@ func (s *DeleteWorkGroupInput) Validate() error { return nil } -// SetRecursiveDeleteOption sets the RecursiveDeleteOption field's value. 
-func (s *DeleteWorkGroupInput) SetRecursiveDeleteOption(v bool) *DeleteWorkGroupInput { - s.RecursiveDeleteOption = &v +// SetMaxResults sets the MaxResults field's value. +func (s *ListWorkGroupsInput) SetMaxResults(v int64) *ListWorkGroupsInput { + s.MaxResults = &v return s } -// SetWorkGroup sets the WorkGroup field's value. -func (s *DeleteWorkGroupInput) SetWorkGroup(v string) *DeleteWorkGroupInput { - s.WorkGroup = &v +// SetNextToken sets the NextToken field's value. +func (s *ListWorkGroupsInput) SetNextToken(v string) *ListWorkGroupsInput { + s.NextToken = &v return s } -type DeleteWorkGroupOutput struct { +type ListWorkGroupsOutput struct { _ struct{} `type:"structure"` + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of + // pages, pass in the NextToken from the response object of the previous page + // call. + NextToken *string `min:"1" type:"string"` + + // A list of WorkGroupSummary objects that include the names, descriptions, + // creation times, and states for each workgroup. + WorkGroups []*WorkGroupSummary `type:"list"` } // String returns the string representation. @@ -4904,7 +13954,7 @@ type DeleteWorkGroupOutput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s DeleteWorkGroupOutput) String() string { +func (s ListWorkGroupsOutput) String() string { return awsutil.Prettify(s) } @@ -4913,28 +13963,32 @@ func (s DeleteWorkGroupOutput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s DeleteWorkGroupOutput) GoString() string { +func (s ListWorkGroupsOutput) GoString() string { return s.String() } -// If query results are encrypted in Amazon S3, indicates the encryption option -// used (for example, SSE-KMS or CSE-KMS) and key information. -type EncryptionConfiguration struct { - _ struct{} `type:"structure"` +// SetNextToken sets the NextToken field's value. +func (s *ListWorkGroupsOutput) SetNextToken(v string) *ListWorkGroupsOutput { + s.NextToken = &v + return s +} - // Indicates whether Amazon S3 server-side encryption with Amazon S3-managed - // keys (SSE-S3), server-side encryption with KMS-managed keys (SSE-KMS), or - // client-side encryption with KMS-managed keys (CSE-KMS) is used. - // - // If a query runs in a workgroup and the workgroup overrides client-side settings, - // then the workgroup's setting for encryption is used. It specifies whether - // query results must be encrypted, for all queries that run in this workgroup. - // - // EncryptionOption is a required field - EncryptionOption *string `type:"string" required:"true" enum:"EncryptionOption"` +// SetWorkGroups sets the WorkGroups field's value. +func (s *ListWorkGroupsOutput) SetWorkGroups(v []*WorkGroupSummary) *ListWorkGroupsOutput { + s.WorkGroups = v + return s +} - // For SSE-KMS and CSE-KMS, this is the KMS key ARN or ID. - KmsKey *string `type:"string"` +// An exception that Athena received when it called a custom metastore. Occurs +// if the error is not caused by user input (InvalidRequestException) or from +// the Athena platform (InternalServerException). 
For example, if a user-created +// Lambda function is missing permissions, the Lambda 4XX exception is returned +// in a MetadataException. +type MetadataException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` } // String returns the string representation. @@ -4942,7 +13996,7 @@ type EncryptionConfiguration struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s EncryptionConfiguration) String() string { +func (s MetadataException) String() string { return awsutil.Prettify(s) } @@ -4951,50 +14005,75 @@ func (s EncryptionConfiguration) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s EncryptionConfiguration) GoString() string { +func (s MetadataException) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *EncryptionConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "EncryptionConfiguration"} - if s.EncryptionOption == nil { - invalidParams.Add(request.NewErrParamRequired("EncryptionOption")) +func newErrorMetadataException(v protocol.ResponseMetadata) error { + return &MetadataException{ + RespMetadata: v, } +} - if invalidParams.Len() > 0 { - return invalidParams +// Code returns the exception type name. +func (s *MetadataException) Code() string { + return "MetadataException" +} + +// Message returns the exception's message. +func (s *MetadataException) Message() string { + if s.Message_ != nil { + return *s.Message_ } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *MetadataException) OrigErr() error { return nil } -// SetEncryptionOption sets the EncryptionOption field's value. -func (s *EncryptionConfiguration) SetEncryptionOption(v string) *EncryptionConfiguration { - s.EncryptionOption = &v - return s +func (s *MetadataException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } -// SetKmsKey sets the KmsKey field's value. -func (s *EncryptionConfiguration) SetKmsKey(v string) *EncryptionConfiguration { - s.KmsKey = &v - return s +// Status code returns the HTTP status code for the request's response error. +func (s *MetadataException) StatusCode() int { + return s.RespMetadata.StatusCode } -// The Athena engine version for running queries. -type EngineVersion struct { +// RequestID returns the service's response RequestID for request. +func (s *MetadataException) RequestID() string { + return s.RespMetadata.RequestID +} + +// A query, where QueryString contains the SQL statements that make up the query. +type NamedQuery struct { _ struct{} `type:"structure"` - // Read only. The engine version on which the query runs. If the user requests - // a valid engine version other than Auto, the effective engine version is the - // same as the engine version that the user requested. If the user requests - // Auto, the effective engine version is chosen by Athena. When a request to - // update the engine version is made by a CreateWorkGroup or UpdateWorkGroup - // operation, the EffectiveEngineVersion field is ignored. 
- EffectiveEngineVersion *string `min:"1" type:"string"` + // The database to which the query belongs. + // + // Database is a required field + Database *string `min:"1" type:"string" required:"true"` - // The engine version requested by the user. Possible values are determined - // by the output of ListEngineVersions, including Auto. The default is Auto. - SelectedEngineVersion *string `min:"1" type:"string"` + // The query description. + Description *string `min:"1" type:"string"` + + // The query name. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The unique identifier of the query. + NamedQueryId *string `min:"1" type:"string"` + + // The SQL statements that make up the query. + // + // QueryString is a required field + QueryString *string `min:"1" type:"string" required:"true"` + + // The name of the workgroup that contains the named query. + WorkGroup *string `type:"string"` } // String returns the string representation. @@ -5002,7 +14081,7 @@ type EngineVersion struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s EngineVersion) String() string { +func (s NamedQuery) String() string { return awsutil.Prettify(s) } @@ -5011,45 +14090,68 @@ func (s EngineVersion) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s EngineVersion) GoString() string { +func (s NamedQuery) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *EngineVersion) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "EngineVersion"} - if s.EffectiveEngineVersion != nil && len(*s.EffectiveEngineVersion) < 1 { - invalidParams.Add(request.NewErrParamMinLen("EffectiveEngineVersion", 1)) - } - if s.SelectedEngineVersion != nil && len(*s.SelectedEngineVersion) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SelectedEngineVersion", 1)) - } +// SetDatabase sets the Database field's value. +func (s *NamedQuery) SetDatabase(v string) *NamedQuery { + s.Database = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetDescription sets the Description field's value. +func (s *NamedQuery) SetDescription(v string) *NamedQuery { + s.Description = &v + return s } -// SetEffectiveEngineVersion sets the EffectiveEngineVersion field's value. -func (s *EngineVersion) SetEffectiveEngineVersion(v string) *EngineVersion { - s.EffectiveEngineVersion = &v +// SetName sets the Name field's value. +func (s *NamedQuery) SetName(v string) *NamedQuery { + s.Name = &v return s } -// SetSelectedEngineVersion sets the SelectedEngineVersion field's value. -func (s *EngineVersion) SetSelectedEngineVersion(v string) *EngineVersion { - s.SelectedEngineVersion = &v +// SetNamedQueryId sets the NamedQueryId field's value. +func (s *NamedQuery) SetNamedQueryId(v string) *NamedQuery { + s.NamedQueryId = &v return s } -type GetDataCatalogInput struct { +// SetQueryString sets the QueryString field's value. +func (s *NamedQuery) SetQueryString(v string) *NamedQuery { + s.QueryString = &v + return s +} + +// SetWorkGroup sets the WorkGroup field's value. 
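Because MetadataException implements Code, Message, OrigErr, Error, StatusCode, and RequestID as defined above, it satisfies the SDK's awserr.Error and awserr.RequestFailure interfaces and can be matched with errors.As. A sketch of handling a failing custom-metastore call, assuming the standard errors package alongside the imports from the first sketch; ListDatabases and its CatalogName parameter are defined elsewhere in this file:

```go
// printDatabases distinguishes a metastore-side failure (for example, a
// user-created Lambda returning a 4XX) from other errors.
func printDatabases(svc *athena.Athena, catalog string) error {
	out, err := svc.ListDatabases(
		(&athena.ListDatabasesInput{}).SetCatalogName(catalog))
	var me *athena.MetadataException
	if errors.As(err, &me) {
		return fmt.Errorf("metastore failed with HTTP %d (request %s): %s",
			me.StatusCode(), me.RequestID(), me.Message())
	}
	if err != nil {
		return err
	}
	for _, db := range out.DatabaseList {
		fmt.Println(*db.Name)
	}
	return nil
}
```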
+func (s *NamedQuery) SetWorkGroup(v string) *NamedQuery { + s.WorkGroup = &v + return s +} + +// Contains metadata for notebook, including the notebook name, ID, workgroup, +// and time created. +type NotebookMetadata struct { _ struct{} `type:"structure"` - // The name of the data catalog to return. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + // The time when the notebook was created. + CreationTime *time.Time `type:"timestamp"` + + // The time when the notebook was last modified. + LastModifiedTime *time.Time `type:"timestamp"` + + // The name of the notebook. + Name *string `min:"1" type:"string"` + + // The notebook ID. + NotebookId *string `min:"1" type:"string"` + + // The type of notebook. Currently, the only valid type is IPYNB. + Type *string `type:"string" enum:"NotebookType"` + + // The name of the Spark enabled workgroup to which the notebook belongs. + WorkGroup *string `type:"string"` } // String returns the string representation. @@ -5057,7 +14159,7 @@ type GetDataCatalogInput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s GetDataCatalogInput) String() string { +func (s NotebookMetadata) String() string { return awsutil.Prettify(s) } @@ -5066,37 +14168,55 @@ func (s GetDataCatalogInput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s GetDataCatalogInput) GoString() string { +func (s NotebookMetadata) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetDataCatalogInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetDataCatalogInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } +// SetCreationTime sets the CreationTime field's value. +func (s *NotebookMetadata) SetCreationTime(v time.Time) *NotebookMetadata { + s.CreationTime = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetLastModifiedTime sets the LastModifiedTime field's value. +func (s *NotebookMetadata) SetLastModifiedTime(v time.Time) *NotebookMetadata { + s.LastModifiedTime = &v + return s } // SetName sets the Name field's value. -func (s *GetDataCatalogInput) SetName(v string) *GetDataCatalogInput { +func (s *NotebookMetadata) SetName(v string) *NotebookMetadata { s.Name = &v return s } -type GetDataCatalogOutput struct { +// SetNotebookId sets the NotebookId field's value. +func (s *NotebookMetadata) SetNotebookId(v string) *NotebookMetadata { + s.NotebookId = &v + return s +} + +// SetType sets the Type field's value. +func (s *NotebookMetadata) SetType(v string) *NotebookMetadata { + s.Type = &v + return s +} + +// SetWorkGroup sets the WorkGroup field's value. +func (s *NotebookMetadata) SetWorkGroup(v string) *NotebookMetadata { + s.WorkGroup = &v + return s +} + +// Contains the notebook session ID and notebook session creation time. +type NotebookSessionSummary struct { _ struct{} `type:"structure"` - // The data catalog returned. - DataCatalog *DataCatalog `type:"structure"` + // The time when the notebook session was created. 
+ CreationTime *time.Time `type:"timestamp"` + + // The notebook session ID. + SessionId *string `min:"1" type:"string"` } // String returns the string representation. @@ -5104,7 +14224,7 @@ type GetDataCatalogOutput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s GetDataCatalogOutput) String() string { +func (s NotebookSessionSummary) String() string { return awsutil.Prettify(s) } @@ -5113,28 +14233,40 @@ func (s GetDataCatalogOutput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s GetDataCatalogOutput) GoString() string { +func (s NotebookSessionSummary) GoString() string { return s.String() } -// SetDataCatalog sets the DataCatalog field's value. -func (s *GetDataCatalogOutput) SetDataCatalog(v *DataCatalog) *GetDataCatalogOutput { - s.DataCatalog = v +// SetCreationTime sets the CreationTime field's value. +func (s *NotebookSessionSummary) SetCreationTime(v time.Time) *NotebookSessionSummary { + s.CreationTime = &v return s } -type GetDatabaseInput struct { +// SetSessionId sets the SessionId field's value. +func (s *NotebookSessionSummary) SetSessionId(v string) *NotebookSessionSummary { + s.SessionId = &v + return s +} + +// A prepared SQL statement for use with Athena. +type PreparedStatement struct { _ struct{} `type:"structure"` - // The name of the data catalog that contains the database to return. - // - // CatalogName is a required field - CatalogName *string `min:"1" type:"string" required:"true"` + // The description of the prepared statement. + Description *string `min:"1" type:"string"` - // The name of the database to return. - // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` + // The last modified time of the prepared statement. + LastModifiedTime *time.Time `type:"timestamp"` + + // The query string for the prepared statement. + QueryStatement *string `min:"1" type:"string"` + + // The name of the prepared statement. + StatementName *string `min:"1" type:"string"` + + // The name of the workgroup to which the prepared statement belongs. + WorkGroupName *string `type:"string"` } // String returns the string representation. @@ -5142,7 +14274,7 @@ type GetDatabaseInput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s GetDatabaseInput) String() string { +func (s PreparedStatement) String() string { return awsutil.Prettify(s) } @@ -5151,49 +14283,49 @@ func (s GetDatabaseInput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s GetDatabaseInput) GoString() string { +func (s PreparedStatement) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetDatabaseInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetDatabaseInput"} - if s.CatalogName == nil { - invalidParams.Add(request.NewErrParamRequired("CatalogName")) - } - if s.CatalogName != nil && len(*s.CatalogName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogName", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } +// SetDescription sets the Description field's value. +func (s *PreparedStatement) SetDescription(v string) *PreparedStatement { + s.Description = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetLastModifiedTime sets the LastModifiedTime field's value. +func (s *PreparedStatement) SetLastModifiedTime(v time.Time) *PreparedStatement { + s.LastModifiedTime = &v + return s } -// SetCatalogName sets the CatalogName field's value. -func (s *GetDatabaseInput) SetCatalogName(v string) *GetDatabaseInput { - s.CatalogName = &v +// SetQueryStatement sets the QueryStatement field's value. +func (s *PreparedStatement) SetQueryStatement(v string) *PreparedStatement { + s.QueryStatement = &v return s } -// SetDatabaseName sets the DatabaseName field's value. -func (s *GetDatabaseInput) SetDatabaseName(v string) *GetDatabaseInput { - s.DatabaseName = &v +// SetStatementName sets the StatementName field's value. +func (s *PreparedStatement) SetStatementName(v string) *PreparedStatement { + s.StatementName = &v return s } -type GetDatabaseOutput struct { +// SetWorkGroupName sets the WorkGroupName field's value. +func (s *PreparedStatement) SetWorkGroupName(v string) *PreparedStatement { + s.WorkGroupName = &v + return s +} + +// The name and last modified time of the prepared statement. +type PreparedStatementSummary struct { _ struct{} `type:"structure"` - // The database returned. - Database *Database `type:"structure"` + // The last modified time of the prepared statement. + LastModifiedTime *time.Time `type:"timestamp"` + + // The name of the prepared statement. + StatementName *string `min:"1" type:"string"` } // String returns the string representation. @@ -5201,7 +14333,7 @@ type GetDatabaseOutput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s GetDatabaseOutput) String() string { +func (s PreparedStatementSummary) String() string { return awsutil.Prettify(s) } @@ -5210,23 +14342,35 @@ func (s GetDatabaseOutput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s GetDatabaseOutput) GoString() string { +func (s PreparedStatementSummary) GoString() string { return s.String() } -// SetDatabase sets the Database field's value. -func (s *GetDatabaseOutput) SetDatabase(v *Database) *GetDatabaseOutput { - s.Database = v +// SetLastModifiedTime sets the LastModifiedTime field's value. +func (s *PreparedStatementSummary) SetLastModifiedTime(v time.Time) *PreparedStatementSummary { + s.LastModifiedTime = &v return s } -type GetNamedQueryInput struct { +// SetStatementName sets the StatementName field's value. 
+func (s *PreparedStatementSummary) SetStatementName(v string) *PreparedStatementSummary { + s.StatementName = &v + return s +} + +type PutCapacityAssignmentConfigurationInput struct { _ struct{} `type:"structure"` - // The unique ID of the query. Use ListNamedQueries to get query IDs. + // The list of assignments for the capacity assignment configuration. // - // NamedQueryId is a required field - NamedQueryId *string `type:"string" required:"true"` + // CapacityAssignments is a required field + CapacityAssignments []*CapacityAssignment `type:"list" required:"true"` + + // The name of the capacity reservation to put a capacity assignment configuration + // for. + // + // CapacityReservationName is a required field + CapacityReservationName *string `min:"1" type:"string" required:"true"` } // String returns the string representation. @@ -5234,7 +14378,7 @@ type GetNamedQueryInput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s GetNamedQueryInput) String() string { +func (s PutCapacityAssignmentConfigurationInput) String() string { return awsutil.Prettify(s) } @@ -5243,15 +14387,21 @@ func (s GetNamedQueryInput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s GetNamedQueryInput) GoString() string { +func (s PutCapacityAssignmentConfigurationInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetNamedQueryInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetNamedQueryInput"} - if s.NamedQueryId == nil { - invalidParams.Add(request.NewErrParamRequired("NamedQueryId")) +func (s *PutCapacityAssignmentConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutCapacityAssignmentConfigurationInput"} + if s.CapacityAssignments == nil { + invalidParams.Add(request.NewErrParamRequired("CapacityAssignments")) + } + if s.CapacityReservationName == nil { + invalidParams.Add(request.NewErrParamRequired("CapacityReservationName")) + } + if s.CapacityReservationName != nil && len(*s.CapacityReservationName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CapacityReservationName", 1)) } if invalidParams.Len() > 0 { @@ -5260,17 +14410,20 @@ func (s *GetNamedQueryInput) Validate() error { return nil } -// SetNamedQueryId sets the NamedQueryId field's value. -func (s *GetNamedQueryInput) SetNamedQueryId(v string) *GetNamedQueryInput { - s.NamedQueryId = &v +// SetCapacityAssignments sets the CapacityAssignments field's value. +func (s *PutCapacityAssignmentConfigurationInput) SetCapacityAssignments(v []*CapacityAssignment) *PutCapacityAssignmentConfigurationInput { + s.CapacityAssignments = v return s } -type GetNamedQueryOutput struct { - _ struct{} `type:"structure"` +// SetCapacityReservationName sets the CapacityReservationName field's value. +func (s *PutCapacityAssignmentConfigurationInput) SetCapacityReservationName(v string) *PutCapacityAssignmentConfigurationInput { + s.CapacityReservationName = &v + return s +} - // Information about the query. 
- NamedQuery *NamedQuery `type:"structure"` +type PutCapacityAssignmentConfigurationOutput struct { + _ struct{} `type:"structure"` } // String returns the string representation. @@ -5278,7 +14431,7 @@ type GetNamedQueryOutput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s GetNamedQueryOutput) String() string { +func (s PutCapacityAssignmentConfigurationOutput) String() string { return awsutil.Prettify(s) } @@ -5287,28 +14440,64 @@ func (s GetNamedQueryOutput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s GetNamedQueryOutput) GoString() string { +func (s PutCapacityAssignmentConfigurationOutput) GoString() string { return s.String() } -// SetNamedQuery sets the NamedQuery field's value. -func (s *GetNamedQueryOutput) SetNamedQuery(v *NamedQuery) *GetNamedQueryOutput { - s.NamedQuery = v - return s -} - -type GetPreparedStatementInput struct { +// Information about a single instance of a query execution. +type QueryExecution struct { _ struct{} `type:"structure"` - // The name of the prepared statement to retrieve. - // - // StatementName is a required field - StatementName *string `min:"1" type:"string" required:"true"` + // The engine version that executed the query. + EngineVersion *EngineVersion `type:"structure"` - // The workgroup to which the statement to be retrieved belongs. - // - // WorkGroup is a required field - WorkGroup *string `type:"string" required:"true"` + // A list of values for the parameters in a query. The values are applied sequentially + // to the parameters in the query in the order in which the parameters occur. + // The list of parameters is not returned in the response. + ExecutionParameters []*string `min:"1" type:"list"` + + // The SQL query statements which the query execution ran. + Query *string `min:"1" type:"string"` + + // The database in which the query execution occurred. + QueryExecutionContext *QueryExecutionContext `type:"structure"` + + // The unique identifier for each query execution. + QueryExecutionId *string `min:"1" type:"string"` + + // Specifies whether Amazon S3 access grants are enabled for query results. + QueryResultsS3AccessGrantsConfiguration *QueryResultsS3AccessGrantsConfiguration `type:"structure"` + + // The location in Amazon S3 where query and calculation results are stored + // and the encryption option, if any, used for query results. These are known + // as "client-side settings". If workgroup settings override client-side settings, + // then the query uses the location for the query results and the encryption + // configuration that are specified for the workgroup. + ResultConfiguration *ResultConfiguration `type:"structure"` + + // Specifies the query result reuse behavior that was used for the query. + ResultReuseConfiguration *ResultReuseConfiguration `type:"structure"` + + // The type of query statement that was run. DDL indicates DDL query statements. + // DML indicates DML (Data Manipulation Language) query statements, such as + // CREATE TABLE AS SELECT. UTILITY indicates query statements other than DDL + // and DML, such as SHOW CREATE TABLE, or DESCRIBE TABLE. 
+ StatementType *string `type:"string" enum:"StatementType"` + + // Query execution statistics, such as the amount of data scanned, the amount + // of time that the query took to process, and the type of statement that was + // run. + Statistics *QueryExecutionStatistics `type:"structure"` + + // The completion date, current state, submission time, and state change reason + // (if applicable) for the query execution. + Status *QueryExecutionStatus `type:"structure"` + + // The kind of query statement that was run. + SubstatementType *string `type:"string"` + + // The name of the workgroup in which the query ran. + WorkGroup *string `type:"string"` } // String returns the string representation. @@ -5316,7 +14505,7 @@ type GetPreparedStatementInput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s GetPreparedStatementInput) String() string { +func (s QueryExecution) String() string { return awsutil.Prettify(s) } @@ -5325,79 +14514,98 @@ func (s GetPreparedStatementInput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s GetPreparedStatementInput) GoString() string { +func (s QueryExecution) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetPreparedStatementInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetPreparedStatementInput"} - if s.StatementName == nil { - invalidParams.Add(request.NewErrParamRequired("StatementName")) - } - if s.StatementName != nil && len(*s.StatementName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("StatementName", 1)) - } - if s.WorkGroup == nil { - invalidParams.Add(request.NewErrParamRequired("WorkGroup")) - } +// SetEngineVersion sets the EngineVersion field's value. +func (s *QueryExecution) SetEngineVersion(v *EngineVersion) *QueryExecution { + s.EngineVersion = v + return s +} + +// SetExecutionParameters sets the ExecutionParameters field's value. +func (s *QueryExecution) SetExecutionParameters(v []*string) *QueryExecution { + s.ExecutionParameters = v + return s +} + +// SetQuery sets the Query field's value. +func (s *QueryExecution) SetQuery(v string) *QueryExecution { + s.Query = &v + return s +} + +// SetQueryExecutionContext sets the QueryExecutionContext field's value. +func (s *QueryExecution) SetQueryExecutionContext(v *QueryExecutionContext) *QueryExecution { + s.QueryExecutionContext = v + return s +} + +// SetQueryExecutionId sets the QueryExecutionId field's value. +func (s *QueryExecution) SetQueryExecutionId(v string) *QueryExecution { + s.QueryExecutionId = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetQueryResultsS3AccessGrantsConfiguration sets the QueryResultsS3AccessGrantsConfiguration field's value. +func (s *QueryExecution) SetQueryResultsS3AccessGrantsConfiguration(v *QueryResultsS3AccessGrantsConfiguration) *QueryExecution { + s.QueryResultsS3AccessGrantsConfiguration = v + return s } -// SetStatementName sets the StatementName field's value. -func (s *GetPreparedStatementInput) SetStatementName(v string) *GetPreparedStatementInput { - s.StatementName = &v +// SetResultConfiguration sets the ResultConfiguration field's value. 
+func (s *QueryExecution) SetResultConfiguration(v *ResultConfiguration) *QueryExecution { + s.ResultConfiguration = v return s } -// SetWorkGroup sets the WorkGroup field's value. -func (s *GetPreparedStatementInput) SetWorkGroup(v string) *GetPreparedStatementInput { - s.WorkGroup = &v +// SetResultReuseConfiguration sets the ResultReuseConfiguration field's value. +func (s *QueryExecution) SetResultReuseConfiguration(v *ResultReuseConfiguration) *QueryExecution { + s.ResultReuseConfiguration = v return s } -type GetPreparedStatementOutput struct { - _ struct{} `type:"structure"` +// SetStatementType sets the StatementType field's value. +func (s *QueryExecution) SetStatementType(v string) *QueryExecution { + s.StatementType = &v + return s +} - // The name of the prepared statement that was retrieved. - PreparedStatement *PreparedStatement `type:"structure"` +// SetStatistics sets the Statistics field's value. +func (s *QueryExecution) SetStatistics(v *QueryExecutionStatistics) *QueryExecution { + s.Statistics = v + return s } -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetPreparedStatementOutput) String() string { - return awsutil.Prettify(s) +// SetStatus sets the Status field's value. +func (s *QueryExecution) SetStatus(v *QueryExecutionStatus) *QueryExecution { + s.Status = v + return s } -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetPreparedStatementOutput) GoString() string { - return s.String() +// SetSubstatementType sets the SubstatementType field's value. +func (s *QueryExecution) SetSubstatementType(v string) *QueryExecution { + s.SubstatementType = &v + return s } -// SetPreparedStatement sets the PreparedStatement field's value. -func (s *GetPreparedStatementOutput) SetPreparedStatement(v *PreparedStatement) *GetPreparedStatementOutput { - s.PreparedStatement = v +// SetWorkGroup sets the WorkGroup field's value. +func (s *QueryExecution) SetWorkGroup(v string) *QueryExecution { + s.WorkGroup = &v return s } -type GetQueryExecutionInput struct { +// The database and data catalog context in which the query execution occurs. +type QueryExecutionContext struct { _ struct{} `type:"structure"` - // The unique ID of the query execution. - // - // QueryExecutionId is a required field - QueryExecutionId *string `type:"string" required:"true"` + // The name of the data catalog used in the query execution. + Catalog *string `min:"1" type:"string"` + + // The name of the database used in the query execution. The database must exist + // in the catalog. + Database *string `min:"1" type:"string"` } // String returns the string representation. @@ -5405,7 +14613,7 @@ type GetQueryExecutionInput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". 
-func (s GetQueryExecutionInput) String() string { +func (s QueryExecutionContext) String() string { return awsutil.Prettify(s) } @@ -5414,15 +14622,18 @@ func (s GetQueryExecutionInput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s GetQueryExecutionInput) GoString() string { +func (s QueryExecutionContext) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetQueryExecutionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetQueryExecutionInput"} - if s.QueryExecutionId == nil { - invalidParams.Add(request.NewErrParamRequired("QueryExecutionId")) +func (s *QueryExecutionContext) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "QueryExecutionContext"} + if s.Catalog != nil && len(*s.Catalog) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Catalog", 1)) + } + if s.Database != nil && len(*s.Database) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Database", 1)) } if invalidParams.Len() > 0 { @@ -5431,17 +14642,64 @@ func (s *GetQueryExecutionInput) Validate() error { return nil } -// SetQueryExecutionId sets the QueryExecutionId field's value. -func (s *GetQueryExecutionInput) SetQueryExecutionId(v string) *GetQueryExecutionInput { - s.QueryExecutionId = &v +// SetCatalog sets the Catalog field's value. +func (s *QueryExecutionContext) SetCatalog(v string) *QueryExecutionContext { + s.Catalog = &v return s } -type GetQueryExecutionOutput struct { +// SetDatabase sets the Database field's value. +func (s *QueryExecutionContext) SetDatabase(v string) *QueryExecutionContext { + s.Database = &v + return s +} + +// The amount of data scanned during the query execution and the amount of time +// that it took to execute, and the type of statement that was run. +type QueryExecutionStatistics struct { _ struct{} `type:"structure"` - // Information about the query execution. - QueryExecution *QueryExecution `type:"structure"` + // The location and file name of a data manifest file. The manifest file is + // saved to the Athena query results location in Amazon S3. The manifest file + // tracks files that the query wrote to Amazon S3. If the query fails, the manifest + // file also tracks files that the query intended to write. The manifest is + // useful for identifying orphaned files resulting from a failed query. For + // more information, see Working with Query Results, Output Files, and Query + // History (https://docs.aws.amazon.com/athena/latest/ug/querying.html) in the + // Amazon Athena User Guide. + DataManifestLocation *string `type:"string"` + + // The number of bytes in the data that was queried. + DataScannedInBytes *int64 `type:"long"` + + // The number of milliseconds that the query took to execute. + EngineExecutionTimeInMillis *int64 `type:"long"` + + // The number of milliseconds that Athena took to plan the query processing + // flow. This includes the time spent retrieving table partitions from the data + // source. Note that because the query engine performs the query planning, query + // planning time is a subset of engine processing time. + QueryPlanningTimeInMillis *int64 `type:"long"` + + // The number of milliseconds that the query was in your query queue waiting + // for resources. 
Note that if transient errors occur, Athena might automatically + // add the query back to the queue. + QueryQueueTimeInMillis *int64 `type:"long"` + + // Contains information about whether previous query results were reused for + // the query. + ResultReuseInformation *ResultReuseInformation `type:"structure"` + + // The number of milliseconds that Athena took to preprocess the query before + // submitting the query to the query engine. + ServicePreProcessingTimeInMillis *int64 `type:"long"` + + // The number of milliseconds that Athena took to finalize and publish the query + // results after the query engine finished running the query. + ServiceProcessingTimeInMillis *int64 `type:"long"` + + // The number of milliseconds that Athena took to run the query. + TotalExecutionTimeInMillis *int64 `type:"long"` } // String returns the string representation. @@ -5449,7 +14707,7 @@ type GetQueryExecutionOutput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s GetQueryExecutionOutput) String() string { +func (s QueryExecutionStatistics) String() string { return awsutil.Prettify(s) } @@ -5458,103 +14716,92 @@ func (s GetQueryExecutionOutput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s GetQueryExecutionOutput) GoString() string { +func (s QueryExecutionStatistics) GoString() string { return s.String() } -// SetQueryExecution sets the QueryExecution field's value. -func (s *GetQueryExecutionOutput) SetQueryExecution(v *QueryExecution) *GetQueryExecutionOutput { - s.QueryExecution = v +// SetDataManifestLocation sets the DataManifestLocation field's value. +func (s *QueryExecutionStatistics) SetDataManifestLocation(v string) *QueryExecutionStatistics { + s.DataManifestLocation = &v return s } -type GetQueryResultsInput struct { - _ struct{} `type:"structure"` - - // The maximum number of results (rows) to return in this request. - MaxResults *int64 `min:"1" type:"integer"` - - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `min:"1" type:"string"` - - // The unique ID of the query execution. - // - // QueryExecutionId is a required field - QueryExecutionId *string `type:"string" required:"true"` +// SetDataScannedInBytes sets the DataScannedInBytes field's value. +func (s *QueryExecutionStatistics) SetDataScannedInBytes(v int64) *QueryExecutionStatistics { + s.DataScannedInBytes = &v + return s } -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetQueryResultsInput) String() string { - return awsutil.Prettify(s) +// SetEngineExecutionTimeInMillis sets the EngineExecutionTimeInMillis field's value. +func (s *QueryExecutionStatistics) SetEngineExecutionTimeInMillis(v int64) *QueryExecutionStatistics { + s.EngineExecutionTimeInMillis = &v + return s } -// GoString returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetQueryResultsInput) GoString() string { - return s.String() +// SetQueryPlanningTimeInMillis sets the QueryPlanningTimeInMillis field's value. +func (s *QueryExecutionStatistics) SetQueryPlanningTimeInMillis(v int64) *QueryExecutionStatistics { + s.QueryPlanningTimeInMillis = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetQueryResultsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetQueryResultsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - if s.QueryExecutionId == nil { - invalidParams.Add(request.NewErrParamRequired("QueryExecutionId")) - } +// SetQueryQueueTimeInMillis sets the QueryQueueTimeInMillis field's value. +func (s *QueryExecutionStatistics) SetQueryQueueTimeInMillis(v int64) *QueryExecutionStatistics { + s.QueryQueueTimeInMillis = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetResultReuseInformation sets the ResultReuseInformation field's value. +func (s *QueryExecutionStatistics) SetResultReuseInformation(v *ResultReuseInformation) *QueryExecutionStatistics { + s.ResultReuseInformation = v + return s } -// SetMaxResults sets the MaxResults field's value. -func (s *GetQueryResultsInput) SetMaxResults(v int64) *GetQueryResultsInput { - s.MaxResults = &v +// SetServicePreProcessingTimeInMillis sets the ServicePreProcessingTimeInMillis field's value. +func (s *QueryExecutionStatistics) SetServicePreProcessingTimeInMillis(v int64) *QueryExecutionStatistics { + s.ServicePreProcessingTimeInMillis = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *GetQueryResultsInput) SetNextToken(v string) *GetQueryResultsInput { - s.NextToken = &v +// SetServiceProcessingTimeInMillis sets the ServiceProcessingTimeInMillis field's value. +func (s *QueryExecutionStatistics) SetServiceProcessingTimeInMillis(v int64) *QueryExecutionStatistics { + s.ServiceProcessingTimeInMillis = &v return s } -// SetQueryExecutionId sets the QueryExecutionId field's value. -func (s *GetQueryResultsInput) SetQueryExecutionId(v string) *GetQueryResultsInput { - s.QueryExecutionId = &v +// SetTotalExecutionTimeInMillis sets the TotalExecutionTimeInMillis field's value. +func (s *QueryExecutionStatistics) SetTotalExecutionTimeInMillis(v int64) *QueryExecutionStatistics { + s.TotalExecutionTimeInMillis = &v return s } -type GetQueryResultsOutput struct { +// The completion date, current state, submission time, and state change reason +// (if applicable) for the query execution. +type QueryExecutionStatus struct { _ struct{} `type:"structure"` - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `min:"1" type:"string"` + // Provides information about an Athena query error. + AthenaError *AthenaError `type:"structure"` - // The results of the query execution. 
- ResultSet *ResultSet `type:"structure"` + // The date and time that the query completed. + CompletionDateTime *time.Time `type:"timestamp"` - // The number of rows inserted with a CREATE TABLE AS SELECT statement. - UpdateCount *int64 `type:"long"` + // The state of query execution. QUEUED indicates that the query has been submitted + // to the service, and Athena will execute the query as soon as resources are + // available. RUNNING indicates that the query is in execution phase. SUCCEEDED + // indicates that the query completed without errors. FAILED indicates that + // the query experienced an error and did not complete processing. CANCELLED + // indicates that a user input interrupted query execution. + // + // Athena automatically retries your queries in cases of certain transient errors. + // As a result, you may see the query state transition from RUNNING or FAILED + // to QUEUED. + State *string `type:"string" enum:"QueryExecutionState"` + + // Further detail about the status of the query. + StateChangeReason *string `type:"string"` + + // The date and time that the query was submitted. + SubmissionDateTime *time.Time `type:"timestamp"` } // String returns the string representation. @@ -5562,7 +14809,7 @@ type GetQueryResultsOutput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s GetQueryResultsOutput) String() string { +func (s QueryExecutionStatus) String() string { return awsutil.Prettify(s) } @@ -5571,46 +14818,58 @@ func (s GetQueryResultsOutput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s GetQueryResultsOutput) GoString() string { +func (s QueryExecutionStatus) GoString() string { return s.String() } -// SetNextToken sets the NextToken field's value. -func (s *GetQueryResultsOutput) SetNextToken(v string) *GetQueryResultsOutput { - s.NextToken = &v +// SetAthenaError sets the AthenaError field's value. +func (s *QueryExecutionStatus) SetAthenaError(v *AthenaError) *QueryExecutionStatus { + s.AthenaError = v return s } -// SetResultSet sets the ResultSet field's value. -func (s *GetQueryResultsOutput) SetResultSet(v *ResultSet) *GetQueryResultsOutput { - s.ResultSet = v +// SetCompletionDateTime sets the CompletionDateTime field's value. +func (s *QueryExecutionStatus) SetCompletionDateTime(v time.Time) *QueryExecutionStatus { + s.CompletionDateTime = &v return s } -// SetUpdateCount sets the UpdateCount field's value. -func (s *GetQueryResultsOutput) SetUpdateCount(v int64) *GetQueryResultsOutput { - s.UpdateCount = &v +// SetState sets the State field's value. +func (s *QueryExecutionStatus) SetState(v string) *QueryExecutionStatus { + s.State = &v return s } -type GetTableMetadataInput struct { +// SetStateChangeReason sets the StateChangeReason field's value. +func (s *QueryExecutionStatus) SetStateChangeReason(v string) *QueryExecutionStatus { + s.StateChangeReason = &v + return s +} + +// SetSubmissionDateTime sets the SubmissionDateTime field's value. +func (s *QueryExecutionStatus) SetSubmissionDateTime(v time.Time) *QueryExecutionStatus { + s.SubmissionDateTime = &v + return s +} + +// Specifies whether Amazon S3 access grants are enabled for query results. 
+type QueryResultsS3AccessGrantsConfiguration struct { _ struct{} `type:"structure"` - // The name of the data catalog that contains the database and table metadata - // to return. + // The authentication type used for Amazon S3 access grants. Currently, only + // DIRECTORY_IDENTITY is supported. // - // CatalogName is a required field - CatalogName *string `min:"1" type:"string" required:"true"` + // AuthenticationType is a required field + AuthenticationType *string `type:"string" required:"true" enum:"AuthenticationType"` - // The name of the database that contains the table metadata to return. - // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` + // When enabled, appends the user ID as an Amazon S3 path prefix to the query + // result output location. + CreateUserLevelPrefix *bool `type:"boolean"` - // The name of the table for which metadata is returned. + // Specifies whether Amazon S3 access grants are enabled for query results. // - // TableName is a required field - TableName *string `min:"1" type:"string" required:"true"` + // EnableS3AccessGrants is a required field + EnableS3AccessGrants *bool `type:"boolean" required:"true"` } // String returns the string representation. @@ -5618,7 +14877,7 @@ type GetTableMetadataInput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s GetTableMetadataInput) String() string { +func (s QueryResultsS3AccessGrantsConfiguration) String() string { return awsutil.Prettify(s) } @@ -5627,30 +14886,18 @@ func (s GetTableMetadataInput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s GetTableMetadataInput) GoString() string { +func (s QueryResultsS3AccessGrantsConfiguration) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetTableMetadataInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetTableMetadataInput"} - if s.CatalogName == nil { - invalidParams.Add(request.NewErrParamRequired("CatalogName")) - } - if s.CatalogName != nil && len(*s.CatalogName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogName", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) +func (s *QueryResultsS3AccessGrantsConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "QueryResultsS3AccessGrantsConfiguration"} + if s.AuthenticationType == nil { + invalidParams.Add(request.NewErrParamRequired("AuthenticationType")) } - if s.TableName != nil && len(*s.TableName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) + if s.EnableS3AccessGrants == nil { + invalidParams.Add(request.NewErrParamRequired("EnableS3AccessGrants")) } if invalidParams.Len() > 0 { @@ -5659,29 +14906,41 @@ func (s *GetTableMetadataInput) Validate() error { return nil } -// SetCatalogName sets the CatalogName field's value. 
-func (s *GetTableMetadataInput) SetCatalogName(v string) *GetTableMetadataInput { - s.CatalogName = &v +// SetAuthenticationType sets the AuthenticationType field's value. +func (s *QueryResultsS3AccessGrantsConfiguration) SetAuthenticationType(v string) *QueryResultsS3AccessGrantsConfiguration { + s.AuthenticationType = &v return s } -// SetDatabaseName sets the DatabaseName field's value. -func (s *GetTableMetadataInput) SetDatabaseName(v string) *GetTableMetadataInput { - s.DatabaseName = &v +// SetCreateUserLevelPrefix sets the CreateUserLevelPrefix field's value. +func (s *QueryResultsS3AccessGrantsConfiguration) SetCreateUserLevelPrefix(v bool) *QueryResultsS3AccessGrantsConfiguration { + s.CreateUserLevelPrefix = &v return s } -// SetTableName sets the TableName field's value. -func (s *GetTableMetadataInput) SetTableName(v string) *GetTableMetadataInput { - s.TableName = &v +// SetEnableS3AccessGrants sets the EnableS3AccessGrants field's value. +func (s *QueryResultsS3AccessGrantsConfiguration) SetEnableS3AccessGrants(v bool) *QueryResultsS3AccessGrantsConfiguration { + s.EnableS3AccessGrants = &v return s } -type GetTableMetadataOutput struct { +// The query execution timeline, statistics on input and output rows and bytes, +// and the different query stages that form the query execution plan. +type QueryRuntimeStatistics struct { _ struct{} `type:"structure"` - // An object that contains table metadata. - TableMetadata *TableMetadata `type:"structure"` + // Stage statistics such as input and output rows and bytes, execution time, + // and stage state. This information also includes substages and the query stage + // plan. + OutputStage *QueryStage `type:"structure"` + + // Statistics such as input rows and bytes read by the query, rows and bytes + // output by the query, and the number of rows written by the query. + Rows *QueryRuntimeStatisticsRows `type:"structure"` + + // Timeline statistics such as query queue time, planning time, execution time, + // service processing time, and total execution time. + Timeline *QueryRuntimeStatisticsTimeline `type:"structure"` } // String returns the string representation. @@ -5689,7 +14948,7 @@ type GetTableMetadataOutput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s GetTableMetadataOutput) String() string { +func (s QueryRuntimeStatistics) String() string { return awsutil.Prettify(s) } @@ -5698,23 +14957,44 @@ func (s GetTableMetadataOutput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s GetTableMetadataOutput) GoString() string { +func (s QueryRuntimeStatistics) GoString() string { return s.String() } -// SetTableMetadata sets the TableMetadata field's value. -func (s *GetTableMetadataOutput) SetTableMetadata(v *TableMetadata) *GetTableMetadataOutput { - s.TableMetadata = v +// SetOutputStage sets the OutputStage field's value. +func (s *QueryRuntimeStatistics) SetOutputStage(v *QueryStage) *QueryRuntimeStatistics { + s.OutputStage = v return s } -type GetWorkGroupInput struct { +// SetRows sets the Rows field's value. 
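// The OutputStage field above is the root of a stage tree: each QueryStage
// carries its own SubStages, so runtime statistics are naturally consumed with
// a small recursive walk. A minimal sketch, assuming an *athena.Athena client
// and a placeholder query-execution ID; GetQueryRuntimeStatistics is part of
// this package's API surface but is not shown in this hunk:
//
//	func printStages(stage *athena.QueryStage, depth int) {
//	    if stage == nil {
//	        return
//	    }
//	    // Indent by depth so the plan tree is visible in the output.
//	    fmt.Printf("%*sstage %d: %d rows in, %d rows out\n", depth*2, "",
//	        aws.Int64Value(stage.StageId),
//	        aws.Int64Value(stage.InputRows),
//	        aws.Int64Value(stage.OutputRows))
//	    for _, sub := range stage.SubStages {
//	        printStages(sub, depth+1)
//	    }
//	}
//
//	out, err := client.GetQueryRuntimeStatistics(&athena.GetQueryRuntimeStatisticsInput{
//	    QueryExecutionId: aws.String("00000000-0000-0000-0000-000000000000"), // placeholder
//	})
//	if err == nil && out.QueryRuntimeStatistics != nil {
//	    printStages(out.QueryRuntimeStatistics.OutputStage, 0)
//	}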
+func (s *QueryRuntimeStatistics) SetRows(v *QueryRuntimeStatisticsRows) *QueryRuntimeStatistics { + s.Rows = v + return s +} + +// SetTimeline sets the Timeline field's value. +func (s *QueryRuntimeStatistics) SetTimeline(v *QueryRuntimeStatisticsTimeline) *QueryRuntimeStatistics { + s.Timeline = v + return s +} + +// Statistics such as input rows and bytes read by the query, rows and bytes +// output by the query, and the number of rows written by the query. +type QueryRuntimeStatisticsRows struct { _ struct{} `type:"structure"` - // The name of the workgroup. - // - // WorkGroup is a required field - WorkGroup *string `type:"string" required:"true"` + // The number of bytes read to execute the query. + InputBytes *int64 `type:"long"` + + // The number of rows read to execute the query. + InputRows *int64 `type:"long"` + + // The number of bytes returned by the query. + OutputBytes *int64 `type:"long"` + + // The number of rows returned by the query. + OutputRows *int64 `type:"long"` } // String returns the string representation. @@ -5722,7 +15002,7 @@ type GetWorkGroupInput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s GetWorkGroupInput) String() string { +func (s QueryRuntimeStatisticsRows) String() string { return awsutil.Prettify(s) } @@ -5731,67 +15011,63 @@ func (s GetWorkGroupInput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s GetWorkGroupInput) GoString() string { +func (s QueryRuntimeStatisticsRows) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetWorkGroupInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetWorkGroupInput"} - if s.WorkGroup == nil { - invalidParams.Add(request.NewErrParamRequired("WorkGroup")) - } +// SetInputBytes sets the InputBytes field's value. +func (s *QueryRuntimeStatisticsRows) SetInputBytes(v int64) *QueryRuntimeStatisticsRows { + s.InputBytes = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetInputRows sets the InputRows field's value. +func (s *QueryRuntimeStatisticsRows) SetInputRows(v int64) *QueryRuntimeStatisticsRows { + s.InputRows = &v + return s } -// SetWorkGroup sets the WorkGroup field's value. -func (s *GetWorkGroupInput) SetWorkGroup(v string) *GetWorkGroupInput { - s.WorkGroup = &v +// SetOutputBytes sets the OutputBytes field's value. +func (s *QueryRuntimeStatisticsRows) SetOutputBytes(v int64) *QueryRuntimeStatisticsRows { + s.OutputBytes = &v return s } -type GetWorkGroupOutput struct { +// SetOutputRows sets the OutputRows field's value. +func (s *QueryRuntimeStatisticsRows) SetOutputRows(v int64) *QueryRuntimeStatisticsRows { + s.OutputRows = &v + return s +} + +// Timeline statistics such as query queue time, planning time, execution time, +// service processing time, and total execution time. +type QueryRuntimeStatisticsTimeline struct { _ struct{} `type:"structure"` - // Information about the workgroup. - WorkGroup *WorkGroup `type:"structure"` -} + // The number of milliseconds that the query took to execute. + EngineExecutionTimeInMillis *int64 `type:"long"` -// String returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetWorkGroupOutput) String() string { - return awsutil.Prettify(s) -} + // The number of milliseconds that Athena took to plan the query processing + // flow. This includes the time spent retrieving table partitions from the data + // source. Note that because the query engine performs the query planning, query + // planning time is a subset of engine processing time. + QueryPlanningTimeInMillis *int64 `type:"long"` -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s GetWorkGroupOutput) GoString() string { - return s.String() -} + // The number of milliseconds that the query was in your query queue waiting + // for resources. Note that if transient errors occur, Athena might automatically + // add the query back to the queue. + QueryQueueTimeInMillis *int64 `type:"long"` -// SetWorkGroup sets the WorkGroup field's value. -func (s *GetWorkGroupOutput) SetWorkGroup(v *WorkGroup) *GetWorkGroupOutput { - s.WorkGroup = v - return s -} + // The number of milliseconds that Athena spends on preprocessing before it + // submits the query to the engine. + ServicePreProcessingTimeInMillis *int64 `type:"long"` -// Indicates a platform issue, which may be due to a transient condition or -// outage. -type InternalServerException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + // The number of milliseconds that Athena took to finalize and publish the query + // results after the query engine finished running the query. + ServiceProcessingTimeInMillis *int64 `type:"long"` - Message_ *string `locationName:"Message" type:"string"` + // The number of milliseconds that Athena took to run the query. + TotalExecutionTimeInMillis *int64 `type:"long"` } // String returns the string representation. @@ -5799,7 +15075,7 @@ type InternalServerException struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s InternalServerException) String() string { +func (s QueryRuntimeStatisticsTimeline) String() string { return awsutil.Prettify(s) } @@ -5808,59 +15084,78 @@ func (s InternalServerException) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s InternalServerException) GoString() string { +func (s QueryRuntimeStatisticsTimeline) GoString() string { return s.String() } -func newErrorInternalServerException(v protocol.ResponseMetadata) error { - return &InternalServerException{ - RespMetadata: v, - } +// SetEngineExecutionTimeInMillis sets the EngineExecutionTimeInMillis field's value. +func (s *QueryRuntimeStatisticsTimeline) SetEngineExecutionTimeInMillis(v int64) *QueryRuntimeStatisticsTimeline { + s.EngineExecutionTimeInMillis = &v + return s } -// Code returns the exception type name. 
-func (s *InternalServerException) Code() string { - return "InternalServerException" +// SetQueryPlanningTimeInMillis sets the QueryPlanningTimeInMillis field's value. +func (s *QueryRuntimeStatisticsTimeline) SetQueryPlanningTimeInMillis(v int64) *QueryRuntimeStatisticsTimeline { + s.QueryPlanningTimeInMillis = &v + return s } -// Message returns the exception's message. -func (s *InternalServerException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" +// SetQueryQueueTimeInMillis sets the QueryQueueTimeInMillis field's value. +func (s *QueryRuntimeStatisticsTimeline) SetQueryQueueTimeInMillis(v int64) *QueryRuntimeStatisticsTimeline { + s.QueryQueueTimeInMillis = &v + return s } -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *InternalServerException) OrigErr() error { - return nil +// SetServicePreProcessingTimeInMillis sets the ServicePreProcessingTimeInMillis field's value. +func (s *QueryRuntimeStatisticsTimeline) SetServicePreProcessingTimeInMillis(v int64) *QueryRuntimeStatisticsTimeline { + s.ServicePreProcessingTimeInMillis = &v + return s } -func (s *InternalServerException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +// SetServiceProcessingTimeInMillis sets the ServiceProcessingTimeInMillis field's value. +func (s *QueryRuntimeStatisticsTimeline) SetServiceProcessingTimeInMillis(v int64) *QueryRuntimeStatisticsTimeline { + s.ServiceProcessingTimeInMillis = &v + return s } -// Status code returns the HTTP status code for the request's response error. -func (s *InternalServerException) StatusCode() int { - return s.RespMetadata.StatusCode +// SetTotalExecutionTimeInMillis sets the TotalExecutionTimeInMillis field's value. +func (s *QueryRuntimeStatisticsTimeline) SetTotalExecutionTimeInMillis(v int64) *QueryRuntimeStatisticsTimeline { + s.TotalExecutionTimeInMillis = &v + return s } -// RequestID returns the service's response RequestID for request. -func (s *InternalServerException) RequestID() string { - return s.RespMetadata.RequestID -} +// Stage statistics such as input and output rows and bytes, execution time +// and stage state. This information also includes substages and the query stage +// plan. +type QueryStage struct { + _ struct{} `type:"structure"` -// Indicates that something is wrong with the input to the request. For example, -// a required parameter may be missing or out of range. -type InvalidRequestException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + // Time taken to execute this stage. + ExecutionTime *int64 `type:"long"` - // The error code returned when the query execution failed to process, or when - // the processing request for the named query failed. - AthenaErrorCode *string `min:"1" type:"string"` + // The number of bytes input into the stage for execution. + InputBytes *int64 `type:"long"` - Message_ *string `locationName:"Message" type:"string"` + // The number of rows input into the stage for execution. + InputRows *int64 `type:"long"` + + // The number of bytes output from the stage after execution. + OutputBytes *int64 `type:"long"` + + // The number of rows output from the stage after execution. + OutputRows *int64 `type:"long"` + + // Stage plan information such as name, identifier, sub plans, and source stages. + QueryStagePlan *QueryStagePlanNode `type:"structure"` + + // The identifier for a stage. + StageId *int64 `type:"long"` + + // State of the stage after query execution. 
+ State *string `type:"string"` + + // List of sub query stages that form this stage execution plan. + SubStages []*QueryStage `type:"list"` } // String returns the string representation. @@ -5868,7 +15163,7 @@ type InvalidRequestException struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s InvalidRequestException) String() string { +func (s QueryStage) String() string { return awsutil.Prettify(s) } @@ -5877,59 +15172,81 @@ func (s InvalidRequestException) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s InvalidRequestException) GoString() string { +func (s QueryStage) GoString() string { return s.String() } -func newErrorInvalidRequestException(v protocol.ResponseMetadata) error { - return &InvalidRequestException{ - RespMetadata: v, - } +// SetExecutionTime sets the ExecutionTime field's value. +func (s *QueryStage) SetExecutionTime(v int64) *QueryStage { + s.ExecutionTime = &v + return s } -// Code returns the exception type name. -func (s *InvalidRequestException) Code() string { - return "InvalidRequestException" +// SetInputBytes sets the InputBytes field's value. +func (s *QueryStage) SetInputBytes(v int64) *QueryStage { + s.InputBytes = &v + return s } -// Message returns the exception's message. -func (s *InvalidRequestException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" +// SetInputRows sets the InputRows field's value. +func (s *QueryStage) SetInputRows(v int64) *QueryStage { + s.InputRows = &v + return s } -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *InvalidRequestException) OrigErr() error { - return nil +// SetOutputBytes sets the OutputBytes field's value. +func (s *QueryStage) SetOutputBytes(v int64) *QueryStage { + s.OutputBytes = &v + return s } -func (s *InvalidRequestException) Error() string { - return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +// SetOutputRows sets the OutputRows field's value. +func (s *QueryStage) SetOutputRows(v int64) *QueryStage { + s.OutputRows = &v + return s } -// Status code returns the HTTP status code for the request's response error. -func (s *InvalidRequestException) StatusCode() int { - return s.RespMetadata.StatusCode +// SetQueryStagePlan sets the QueryStagePlan field's value. +func (s *QueryStage) SetQueryStagePlan(v *QueryStagePlanNode) *QueryStage { + s.QueryStagePlan = v + return s +} + +// SetStageId sets the StageId field's value. +func (s *QueryStage) SetStageId(v int64) *QueryStage { + s.StageId = &v + return s +} + +// SetState sets the State field's value. +func (s *QueryStage) SetState(v string) *QueryStage { + s.State = &v + return s } -// RequestID returns the service's response RequestID for request. -func (s *InvalidRequestException) RequestID() string { - return s.RespMetadata.RequestID +// SetSubStages sets the SubStages field's value. +func (s *QueryStage) SetSubStages(v []*QueryStage) *QueryStage { + s.SubStages = v + return s } -type ListDataCatalogsInput struct { +// Stage plan information such as name, identifier, sub plans, and remote sources. +type QueryStagePlanNode struct { _ struct{} `type:"structure"` - // Specifies the maximum number of data catalogs to return. 
- MaxResults *int64 `min:"2" type:"integer"`
+ // Stage plan information such as name, identifier, sub plans, and remote sources
+ // of child plan nodes.
+ Children []*QueryStagePlanNode `type:"list"`

- // A token generated by the Athena service that specifies where to continue
- // pagination if a previous request was truncated. To obtain the next set of
- // pages, pass in the NextToken from the response object of the previous page
- // call.
- NextToken *string `min:"1" type:"string"`
+ // Information about the operation this query stage plan node is performing.
+ Identifier *string `type:"string"`
+
+ // Name of the query stage plan that describes the operation this stage is performing
+ // as part of query execution.
+ Name *string `type:"string"`
+
+ // Source plan node IDs.
+ RemoteSources []*string `type:"list"`
}

// String returns the string representation.
@@ -5937,7 +15254,7 @@ type ListDataCatalogsInput struct {
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
-func (s ListDataCatalogsInput) String() string {
+func (s QueryStagePlanNode) String() string {
	return awsutil.Prettify(s)
}

@@ -5946,49 +15263,43 @@ func (s ListDataCatalogsInput) String() string {
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
-func (s ListDataCatalogsInput) GoString() string {
+func (s QueryStagePlanNode) GoString() string {
	return s.String()
}

-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ListDataCatalogsInput) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "ListDataCatalogsInput"}
-	if s.MaxResults != nil && *s.MaxResults < 2 {
-		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 2))
-	}
-	if s.NextToken != nil && len(*s.NextToken) < 1 {
-		invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
-	}
+// SetChildren sets the Children field's value.
+func (s *QueryStagePlanNode) SetChildren(v []*QueryStagePlanNode) *QueryStagePlanNode {
+	s.Children = v
+	return s
+}

-	if invalidParams.Len() > 0 {
-		return invalidParams
-	}
-	return nil
+// SetIdentifier sets the Identifier field's value.
+func (s *QueryStagePlanNode) SetIdentifier(v string) *QueryStagePlanNode {
+	s.Identifier = &v
+	return s
}

-// SetMaxResults sets the MaxResults field's value.
-func (s *ListDataCatalogsInput) SetMaxResults(v int64) *ListDataCatalogsInput {
-	s.MaxResults = &v
+// SetName sets the Name field's value.
+func (s *QueryStagePlanNode) SetName(v string) *QueryStagePlanNode {
+	s.Name = &v
	return s
}

-// SetNextToken sets the NextToken field's value.
-func (s *ListDataCatalogsInput) SetNextToken(v string) *ListDataCatalogsInput {
-	s.NextToken = &v
+// SetRemoteSources sets the RemoteSources field's value.
+func (s *QueryStagePlanNode) SetRemoteSources(v []*string) *QueryStagePlanNode {
+	s.RemoteSources = v
	return s
}

-type ListDataCatalogsOutput struct {
-	_ struct{} `type:"structure"`
+// A resource, such as a workgroup, was not found.
+type ResourceNotFoundException struct {
+	_ struct{} `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`

-	// A summary list of data catalogs.
- DataCatalogsSummary []*DataCatalogSummary `type:"list"` + Message_ *string `locationName:"Message" type:"string"` - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `min:"1" type:"string"` + // The name of the Amazon resource. + ResourceName *string `min:"1" type:"string"` } // String returns the string representation. @@ -5996,7 +15307,7 @@ type ListDataCatalogsOutput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ListDataCatalogsOutput) String() string { +func (s ResourceNotFoundException) String() string { return awsutil.Prettify(s) } @@ -6005,38 +15316,97 @@ func (s ListDataCatalogsOutput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ListDataCatalogsOutput) GoString() string { +func (s ResourceNotFoundException) GoString() string { return s.String() } -// SetDataCatalogsSummary sets the DataCatalogsSummary field's value. -func (s *ListDataCatalogsOutput) SetDataCatalogsSummary(v []*DataCatalogSummary) *ListDataCatalogsOutput { - s.DataCatalogsSummary = v - return s +func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { + return &ResourceNotFoundException{ + RespMetadata: v, + } } -// SetNextToken sets the NextToken field's value. -func (s *ListDataCatalogsOutput) SetNextToken(v string) *ListDataCatalogsOutput { - s.NextToken = &v - return s +// Code returns the exception type name. +func (s *ResourceNotFoundException) Code() string { + return "ResourceNotFoundException" } -type ListDatabasesInput struct { - _ struct{} `type:"structure"` +// Message returns the exception's message. +func (s *ResourceNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} - // The name of the data catalog that contains the databases to return. - // - // CatalogName is a required field - CatalogName *string `min:"1" type:"string" required:"true"` +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ResourceNotFoundException) OrigErr() error { + return nil +} - // Specifies the maximum number of results to return. - MaxResults *int64 `min:"1" type:"integer"` +func (s *ResourceNotFoundException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `min:"1" type:"string"` +// Status code returns the HTTP status code for the request's response error. +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The location in Amazon S3 where query and calculation results are stored +// and the encryption option, if any, used for query and calculation results. 
+// These are known as "client-side settings". If workgroup settings override
+// client-side settings, then the query uses the workgroup settings.
+type ResultConfiguration struct {
+	_ struct{} `type:"structure"`
+
+	// Indicates that an Amazon S3 canned ACL should be set to control ownership
+	// of stored query results. Currently the only supported canned ACL is BUCKET_OWNER_FULL_CONTROL.
+	// This is a client-side setting. If workgroup settings override client-side
+	// settings, then the query uses the ACL configuration that is specified for
+	// the workgroup, and also uses the location for storing query results specified
+	// in the workgroup. For more information, see WorkGroupConfiguration$EnforceWorkGroupConfiguration
+	// and Workgroup Settings Override Client-Side Settings (https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html).
+	AclConfiguration *AclConfiguration `type:"structure"`
+
+	// If query and calculation results are encrypted in Amazon S3, indicates the
+	// encryption option used (for example, SSE_KMS or CSE_KMS) and key information.
+	// This is a client-side setting. If workgroup settings override client-side
+	// settings, then the query uses the encryption configuration that is specified
+	// for the workgroup, and also uses the location for storing query results specified
+	// in the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration
+	// and Workgroup Settings Override Client-Side Settings (https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html).
+	EncryptionConfiguration *EncryptionConfiguration `type:"structure"`
+
+	// The Amazon Web Services account ID that you expect to be the owner of the
+	// Amazon S3 bucket specified by ResultConfiguration$OutputLocation. If set,
+	// Athena uses the value for ExpectedBucketOwner when it makes Amazon S3 calls
+	// to your specified output location. If the ExpectedBucketOwner Amazon Web
+	// Services account ID does not match the actual owner of the Amazon S3 bucket,
+	// the call fails with a permissions error.
+	//
+	// This is a client-side setting. If workgroup settings override client-side
+	// settings, then the query uses the ExpectedBucketOwner setting that is specified
+	// for the workgroup, and also uses the location for storing query results specified
+	// in the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration
+	// and Workgroup Settings Override Client-Side Settings (https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html).
+	ExpectedBucketOwner *string `min:"12" type:"string"`
+
+	// The location in Amazon S3 where your query and calculation results are stored,
+	// such as s3://path/to/query/bucket/. To run the query, you must specify the
+	// query results location in one of two ways: for individual queries, using this
+	// setting (client-side), or for all queries in a workgroup, using WorkGroupConfiguration.
+	// If neither is set, Athena issues an error that no output location is provided.
+	// For more information, see Working with query results, recent queries,
+	// and output files (https://docs.aws.amazon.com/athena/latest/ug/querying.html).
+	// If workgroup settings override client-side settings, then the query uses
+	// the settings specified for the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration.
+	OutputLocation *string `type:"string"`
}

// String returns the string representation.
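// ResultConfiguration is consumed through the same fluent-setter pattern used
// throughout this file, and its Validate method (below) enforces the min:"12"
// length constraint on ExpectedBucketOwner before any request is sent. A
// minimal usage sketch, assuming the standard aws-sdk-go v1 session helpers;
// the bucket name and SQL text are placeholders, not values from this patch:
//
//	import (
//	    "fmt"
//	    "log"
//
//	    "github.com/aws/aws-sdk-go/aws"
//	    "github.com/aws/aws-sdk-go/aws/session"
//	    "github.com/aws/aws-sdk-go/service/athena"
//	)
//
//	client := athena.New(session.Must(session.NewSession()))
//
//	// Chain the generated setters; each one returns the receiver.
//	resultConf := (&athena.ResultConfiguration{}).
//	    SetOutputLocation("s3://example-bucket/athena-results/") // placeholder bucket
//	if err := resultConf.Validate(); err != nil {
//	    log.Fatal(err) // e.g. an ExpectedBucketOwner shorter than 12 characters
//	}
//
//	out, err := client.StartQueryExecution(&athena.StartQueryExecutionInput{
//	    QueryString:         aws.String("SELECT 1"), // placeholder query
//	    ResultConfiguration: resultConf,
//	})
//	if err != nil {
//	    log.Fatal(err)
//	}
//	fmt.Println(aws.StringValue(out.QueryExecutionId))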
@@ -6044,7 +15414,7 @@ type ListDatabasesInput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ListDatabasesInput) String() string { +func (s ResultConfiguration) String() string { return awsutil.Prettify(s) } @@ -6053,24 +15423,25 @@ func (s ListDatabasesInput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ListDatabasesInput) GoString() string { +func (s ResultConfiguration) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListDatabasesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListDatabasesInput"} - if s.CatalogName == nil { - invalidParams.Add(request.NewErrParamRequired("CatalogName")) - } - if s.CatalogName != nil && len(*s.CatalogName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogName", 1)) +func (s *ResultConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResultConfiguration"} + if s.ExpectedBucketOwner != nil && len(*s.ExpectedBucketOwner) < 12 { + invalidParams.Add(request.NewErrParamMinLen("ExpectedBucketOwner", 12)) } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + if s.AclConfiguration != nil { + if err := s.AclConfiguration.Validate(); err != nil { + invalidParams.AddNested("AclConfiguration", err.(request.ErrInvalidParams)) + } } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + if s.EncryptionConfiguration != nil { + if err := s.EncryptionConfiguration.Validate(); err != nil { + invalidParams.AddNested("EncryptionConfiguration", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -6079,78 +15450,98 @@ func (s *ListDatabasesInput) Validate() error { return nil } -// SetCatalogName sets the CatalogName field's value. -func (s *ListDatabasesInput) SetCatalogName(v string) *ListDatabasesInput { - s.CatalogName = &v +// SetAclConfiguration sets the AclConfiguration field's value. +func (s *ResultConfiguration) SetAclConfiguration(v *AclConfiguration) *ResultConfiguration { + s.AclConfiguration = v return s } -// SetMaxResults sets the MaxResults field's value. -func (s *ListDatabasesInput) SetMaxResults(v int64) *ListDatabasesInput { - s.MaxResults = &v +// SetEncryptionConfiguration sets the EncryptionConfiguration field's value. +func (s *ResultConfiguration) SetEncryptionConfiguration(v *EncryptionConfiguration) *ResultConfiguration { + s.EncryptionConfiguration = v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListDatabasesInput) SetNextToken(v string) *ListDatabasesInput { - s.NextToken = &v +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ResultConfiguration) SetExpectedBucketOwner(v string) *ResultConfiguration { + s.ExpectedBucketOwner = &v return s } -type ListDatabasesOutput struct { - _ struct{} `type:"structure"` +// SetOutputLocation sets the OutputLocation field's value. +func (s *ResultConfiguration) SetOutputLocation(v string) *ResultConfiguration { + s.OutputLocation = &v + return s +} - // A list of databases from a data catalog. 
- DatabaseList []*Database `type:"list"` +// The information about the updates in the query results, such as output location +// and encryption configuration for the query results. +type ResultConfigurationUpdates struct { + _ struct{} `type:"structure"` - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `min:"1" type:"string"` -} + // The ACL configuration for the query results. + AclConfiguration *AclConfiguration `type:"structure"` -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListDatabasesOutput) String() string { - return awsutil.Prettify(s) -} + // The encryption configuration for query and calculation results. + EncryptionConfiguration *EncryptionConfiguration `type:"structure"` -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListDatabasesOutput) GoString() string { - return s.String() -} + // The Amazon Web Services account ID that you expect to be the owner of the + // Amazon S3 bucket specified by ResultConfiguration$OutputLocation. If set, + // Athena uses the value for ExpectedBucketOwner when it makes Amazon S3 calls + // to your specified output location. If the ExpectedBucketOwner Amazon Web + // Services account ID does not match the actual owner of the Amazon S3 bucket, + // the call fails with a permissions error. + // + // If workgroup settings override client-side settings, then the query uses + // the ExpectedBucketOwner setting that is specified for the workgroup, and + // also uses the location for storing query results specified in the workgroup. + // See WorkGroupConfiguration$EnforceWorkGroupConfiguration and Workgroup Settings + // Override Client-Side Settings (https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html). + ExpectedBucketOwner *string `min:"12" type:"string"` -// SetDatabaseList sets the DatabaseList field's value. -func (s *ListDatabasesOutput) SetDatabaseList(v []*Database) *ListDatabasesOutput { - s.DatabaseList = v - return s -} + // The location in Amazon S3 where your query and calculation results are stored, + // such as s3://path/to/query/bucket/. For more information, see Working with + // query results, recent queries, and output files (https://docs.aws.amazon.com/athena/latest/ug/querying.html). + // If workgroup settings override client-side settings, then the query uses + // the location for the query results and the encryption configuration that + // are specified for the workgroup. The "workgroup settings override" is specified + // in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. + // See WorkGroupConfiguration$EnforceWorkGroupConfiguration. + OutputLocation *string `type:"string"` -// SetNextToken sets the NextToken field's value. 
-func (s *ListDatabasesOutput) SetNextToken(v string) *ListDatabasesOutput { - s.NextToken = &v - return s -} + // If set to true, indicates that the previously-specified ACL configuration + // for queries in this workgroup should be ignored and set to null. If set to + // false or not set, and a value is present in the AclConfiguration of ResultConfigurationUpdates, + // the AclConfiguration in the workgroup's ResultConfiguration is updated with + // the new value. For more information, see Workgroup Settings Override Client-Side + // Settings (https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html). + RemoveAclConfiguration *bool `type:"boolean"` -type ListEngineVersionsInput struct { - _ struct{} `type:"structure"` + // If set to "true", indicates that the previously-specified encryption configuration + // (also known as the client-side setting) for queries in this workgroup should + // be ignored and set to null. If set to "false" or not set, and a value is + // present in the EncryptionConfiguration in ResultConfigurationUpdates (the + // client-side setting), the EncryptionConfiguration in the workgroup's ResultConfiguration + // will be updated with the new value. For more information, see Workgroup Settings + // Override Client-Side Settings (https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html). + RemoveEncryptionConfiguration *bool `type:"boolean"` - // The maximum number of engine versions to return in this request. - MaxResults *int64 `min:"1" type:"integer"` + // If set to "true", removes the Amazon Web Services account ID previously specified + // for ResultConfiguration$ExpectedBucketOwner. If set to "false" or not set, + // and a value is present in the ExpectedBucketOwner in ResultConfigurationUpdates + // (the client-side setting), the ExpectedBucketOwner in the workgroup's ResultConfiguration + // is updated with the new value. For more information, see Workgroup Settings + // Override Client-Side Settings (https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html). + RemoveExpectedBucketOwner *bool `type:"boolean"` - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `min:"1" type:"string"` + // If set to "true", indicates that the previously-specified query results location + // (also known as a client-side setting) for queries in this workgroup should + // be ignored and set to null. If set to "false" or not set, and a value is + // present in the OutputLocation in ResultConfigurationUpdates (the client-side + // setting), the OutputLocation in the workgroup's ResultConfiguration will + // be updated with the new value. For more information, see Workgroup Settings + // Override Client-Side Settings (https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html). + RemoveOutputLocation *bool `type:"boolean"` } // String returns the string representation. @@ -6158,7 +15549,7 @@ type ListEngineVersionsInput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". 
-func (s ListEngineVersionsInput) String() string { +func (s ResultConfigurationUpdates) String() string { return awsutil.Prettify(s) } @@ -6167,18 +15558,25 @@ func (s ListEngineVersionsInput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ListEngineVersionsInput) GoString() string { +func (s ResultConfigurationUpdates) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListEngineVersionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListEngineVersionsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) +func (s *ResultConfigurationUpdates) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResultConfigurationUpdates"} + if s.ExpectedBucketOwner != nil && len(*s.ExpectedBucketOwner) < 12 { + invalidParams.Add(request.NewErrParamMinLen("ExpectedBucketOwner", 12)) } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + if s.AclConfiguration != nil { + if err := s.AclConfiguration.Validate(); err != nil { + invalidParams.AddNested("AclConfiguration", err.(request.ErrInvalidParams)) + } + } + if s.EncryptionConfiguration != nil { + if err := s.EncryptionConfiguration.Validate(); err != nil { + invalidParams.AddNested("EncryptionConfiguration", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -6187,77 +15585,68 @@ func (s *ListEngineVersionsInput) Validate() error { return nil } -// SetMaxResults sets the MaxResults field's value. -func (s *ListEngineVersionsInput) SetMaxResults(v int64) *ListEngineVersionsInput { - s.MaxResults = &v +// SetAclConfiguration sets the AclConfiguration field's value. +func (s *ResultConfigurationUpdates) SetAclConfiguration(v *AclConfiguration) *ResultConfigurationUpdates { + s.AclConfiguration = v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListEngineVersionsInput) SetNextToken(v string) *ListEngineVersionsInput { - s.NextToken = &v +// SetEncryptionConfiguration sets the EncryptionConfiguration field's value. +func (s *ResultConfigurationUpdates) SetEncryptionConfiguration(v *EncryptionConfiguration) *ResultConfigurationUpdates { + s.EncryptionConfiguration = v return s } -type ListEngineVersionsOutput struct { - _ struct{} `type:"structure"` - - // A list of engine versions that are available to choose from. - EngineVersions []*EngineVersion `type:"list"` +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ResultConfigurationUpdates) SetExpectedBucketOwner(v string) *ResultConfigurationUpdates { + s.ExpectedBucketOwner = &v + return s +} - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `min:"1" type:"string"` +// SetOutputLocation sets the OutputLocation field's value. +func (s *ResultConfigurationUpdates) SetOutputLocation(v string) *ResultConfigurationUpdates { + s.OutputLocation = &v + return s } -// String returns the string representation. 
-// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListEngineVersionsOutput) String() string { - return awsutil.Prettify(s) +// SetRemoveAclConfiguration sets the RemoveAclConfiguration field's value. +func (s *ResultConfigurationUpdates) SetRemoveAclConfiguration(v bool) *ResultConfigurationUpdates { + s.RemoveAclConfiguration = &v + return s } -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListEngineVersionsOutput) GoString() string { - return s.String() +// SetRemoveEncryptionConfiguration sets the RemoveEncryptionConfiguration field's value. +func (s *ResultConfigurationUpdates) SetRemoveEncryptionConfiguration(v bool) *ResultConfigurationUpdates { + s.RemoveEncryptionConfiguration = &v + return s } -// SetEngineVersions sets the EngineVersions field's value. -func (s *ListEngineVersionsOutput) SetEngineVersions(v []*EngineVersion) *ListEngineVersionsOutput { - s.EngineVersions = v +// SetRemoveExpectedBucketOwner sets the RemoveExpectedBucketOwner field's value. +func (s *ResultConfigurationUpdates) SetRemoveExpectedBucketOwner(v bool) *ResultConfigurationUpdates { + s.RemoveExpectedBucketOwner = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListEngineVersionsOutput) SetNextToken(v string) *ListEngineVersionsOutput { - s.NextToken = &v +// SetRemoveOutputLocation sets the RemoveOutputLocation field's value. +func (s *ResultConfigurationUpdates) SetRemoveOutputLocation(v bool) *ResultConfigurationUpdates { + s.RemoveOutputLocation = &v return s } -type ListNamedQueriesInput struct { +// Specifies whether previous query results are reused, and if so, their maximum +// age. +type ResultReuseByAgeConfiguration struct { _ struct{} `type:"structure"` - // The maximum number of queries to return in this request. - MaxResults *int64 `type:"integer"` - - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `min:"1" type:"string"` + // True if previous query results can be reused when the query is run; otherwise, + // false. The default is false. + // + // Enabled is a required field + Enabled *bool `type:"boolean" required:"true"` - // The name of the workgroup from which the named queries are being returned. - // If a workgroup is not specified, the saved queries for the primary workgroup - // are returned. - WorkGroup *string `type:"string"` + // Specifies, in minutes, the maximum age of a previous query result that Athena + // should consider for reuse. The default is 60. + MaxAgeInMinutes *int64 `type:"integer"` } // String returns the string representation. @@ -6265,7 +15654,7 @@ type ListNamedQueriesInput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". 
-func (s ListNamedQueriesInput) String() string { +func (s ResultReuseByAgeConfiguration) String() string { return awsutil.Prettify(s) } @@ -6274,15 +15663,15 @@ func (s ListNamedQueriesInput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ListNamedQueriesInput) GoString() string { +func (s ResultReuseByAgeConfiguration) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListNamedQueriesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListNamedQueriesInput"} - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) +func (s *ResultReuseByAgeConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResultReuseByAgeConfiguration"} + if s.Enabled == nil { + invalidParams.Add(request.NewErrParamRequired("Enabled")) } if invalidParams.Len() > 0 { @@ -6291,35 +15680,25 @@ func (s *ListNamedQueriesInput) Validate() error { return nil } -// SetMaxResults sets the MaxResults field's value. -func (s *ListNamedQueriesInput) SetMaxResults(v int64) *ListNamedQueriesInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListNamedQueriesInput) SetNextToken(v string) *ListNamedQueriesInput { - s.NextToken = &v +// SetEnabled sets the Enabled field's value. +func (s *ResultReuseByAgeConfiguration) SetEnabled(v bool) *ResultReuseByAgeConfiguration { + s.Enabled = &v return s } -// SetWorkGroup sets the WorkGroup field's value. -func (s *ListNamedQueriesInput) SetWorkGroup(v string) *ListNamedQueriesInput { - s.WorkGroup = &v +// SetMaxAgeInMinutes sets the MaxAgeInMinutes field's value. +func (s *ResultReuseByAgeConfiguration) SetMaxAgeInMinutes(v int64) *ResultReuseByAgeConfiguration { + s.MaxAgeInMinutes = &v return s } -type ListNamedQueriesOutput struct { +// Specifies the query result reuse behavior for the query. +type ResultReuseConfiguration struct { _ struct{} `type:"structure"` - // The list of unique query IDs. - NamedQueryIds []*string `min:"1" type:"list"` - - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `min:"1" type:"string"` + // Specifies whether previous query results are reused, and if so, their maximum + // age. + ResultReuseByAgeConfiguration *ResultReuseByAgeConfiguration `type:"structure"` } // String returns the string representation. @@ -6327,7 +15706,7 @@ type ListNamedQueriesOutput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ListNamedQueriesOutput) String() string { +func (s ResultReuseConfiguration) String() string { return awsutil.Prettify(s) } @@ -6336,38 +15715,40 @@ func (s ListNamedQueriesOutput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". 
-func (s ListNamedQueriesOutput) GoString() string { +func (s ResultReuseConfiguration) GoString() string { return s.String() } -// SetNamedQueryIds sets the NamedQueryIds field's value. -func (s *ListNamedQueriesOutput) SetNamedQueryIds(v []*string) *ListNamedQueriesOutput { - s.NamedQueryIds = v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *ResultReuseConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResultReuseConfiguration"} + if s.ResultReuseByAgeConfiguration != nil { + if err := s.ResultReuseByAgeConfiguration.Validate(); err != nil { + invalidParams.AddNested("ResultReuseByAgeConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetNextToken sets the NextToken field's value. -func (s *ListNamedQueriesOutput) SetNextToken(v string) *ListNamedQueriesOutput { - s.NextToken = &v +// SetResultReuseByAgeConfiguration sets the ResultReuseByAgeConfiguration field's value. +func (s *ResultReuseConfiguration) SetResultReuseByAgeConfiguration(v *ResultReuseByAgeConfiguration) *ResultReuseConfiguration { + s.ResultReuseByAgeConfiguration = v return s } -type ListPreparedStatementsInput struct { +// Contains information about whether the result of a previous query was reused. +type ResultReuseInformation struct { _ struct{} `type:"structure"` - // The maximum number of results to return in this request. - MaxResults *int64 `min:"1" type:"integer"` - - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `min:"1" type:"string"` - - // The workgroup to list the prepared statements for. + // True if a previous query result was reused; false if the result was generated + // from a new run of the query. // - // WorkGroup is a required field - WorkGroup *string `type:"string" required:"true"` + // ReusedPreviousResult is a required field + ReusedPreviousResult *bool `type:"boolean" required:"true"` } // String returns the string representation. @@ -6375,7 +15756,7 @@ type ListPreparedStatementsInput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ListPreparedStatementsInput) String() string { +func (s ResultReuseInformation) String() string { return awsutil.Prettify(s) } @@ -6384,58 +15765,27 @@ func (s ListPreparedStatementsInput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ListPreparedStatementsInput) GoString() string { +func (s ResultReuseInformation) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. 
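// Usage sketch (illustrative): opting in to query result reuse with a
// 30-minute window via the types above. Enabled is the only required field;
// the age value is an example. Assumes the aws helpers from
// github.com/aws/aws-sdk-go/aws.
//
//	reuse := &athena.ResultReuseConfiguration{
//		ResultReuseByAgeConfiguration: &athena.ResultReuseByAgeConfiguration{
//			Enabled:         aws.Bool(true),
//			MaxAgeInMinutes: aws.Int64(30),
//		},
//	}
//	if err := reuse.Validate(); err != nil {
//		log.Fatal(err) // fails only if Enabled was left nil
//	}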
-func (s *ListPreparedStatementsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListPreparedStatementsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - if s.WorkGroup == nil { - invalidParams.Add(request.NewErrParamRequired("WorkGroup")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListPreparedStatementsInput) SetMaxResults(v int64) *ListPreparedStatementsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListPreparedStatementsInput) SetNextToken(v string) *ListPreparedStatementsInput { - s.NextToken = &v - return s -} - -// SetWorkGroup sets the WorkGroup field's value. -func (s *ListPreparedStatementsInput) SetWorkGroup(v string) *ListPreparedStatementsInput { - s.WorkGroup = &v +// SetReusedPreviousResult sets the ReusedPreviousResult field's value. +func (s *ResultReuseInformation) SetReusedPreviousResult(v bool) *ResultReuseInformation { + s.ReusedPreviousResult = &v return s } -type ListPreparedStatementsOutput struct { +// The metadata and rows that make up a query result set. The metadata describes +// the column structure and data types. To return a ResultSet object, use GetQueryResults. +type ResultSet struct { _ struct{} `type:"structure"` - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `min:"1" type:"string"` + // The metadata that describes the column structure and data types of a table + // of query results. + ResultSetMetadata *ResultSetMetadata `type:"structure"` - // The list of prepared statements for the workgroup. - PreparedStatements []*PreparedStatementSummary `type:"list"` + // The rows in the table. + Rows []*Row `type:"list"` } // String returns the string representation. @@ -6443,7 +15793,7 @@ type ListPreparedStatementsOutput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ListPreparedStatementsOutput) String() string { +func (s ResultSet) String() string { return awsutil.Prettify(s) } @@ -6452,38 +15802,29 @@ func (s ListPreparedStatementsOutput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ListPreparedStatementsOutput) GoString() string { +func (s ResultSet) GoString() string { return s.String() } -// SetNextToken sets the NextToken field's value. -func (s *ListPreparedStatementsOutput) SetNextToken(v string) *ListPreparedStatementsOutput { - s.NextToken = &v +// SetResultSetMetadata sets the ResultSetMetadata field's value. +func (s *ResultSet) SetResultSetMetadata(v *ResultSetMetadata) *ResultSet { + s.ResultSetMetadata = v return s } -// SetPreparedStatements sets the PreparedStatements field's value. 
-func (s *ListPreparedStatementsOutput) SetPreparedStatements(v []*PreparedStatementSummary) *ListPreparedStatementsOutput { - s.PreparedStatements = v +// SetRows sets the Rows field's value. +func (s *ResultSet) SetRows(v []*Row) *ResultSet { + s.Rows = v return s } -type ListQueryExecutionsInput struct { +// The metadata that describes the column structure and data types of a table +// of query results. To return a ResultSetMetadata object, use GetQueryResults. +type ResultSetMetadata struct { _ struct{} `type:"structure"` - // The maximum number of query executions to return in this request. - MaxResults *int64 `type:"integer"` - - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `min:"1" type:"string"` - - // The name of the workgroup from which queries are being returned. If a workgroup - // is not specified, a list of available query execution IDs for the queries - // in the primary workgroup is returned. - WorkGroup *string `type:"string"` + // Information about the columns returned in a query result metadata. + ColumnInfo []*ColumnInfo `type:"list"` } // String returns the string representation. @@ -6491,7 +15832,7 @@ type ListQueryExecutionsInput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ListQueryExecutionsInput) String() string { +func (s ResultSetMetadata) String() string { return awsutil.Prettify(s) } @@ -6500,49 +15841,22 @@ func (s ListQueryExecutionsInput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ListQueryExecutionsInput) GoString() string { +func (s ResultSetMetadata) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListQueryExecutionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListQueryExecutionsInput"} - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListQueryExecutionsInput) SetMaxResults(v int64) *ListQueryExecutionsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListQueryExecutionsInput) SetNextToken(v string) *ListQueryExecutionsInput { - s.NextToken = &v - return s -} - -// SetWorkGroup sets the WorkGroup field's value. -func (s *ListQueryExecutionsInput) SetWorkGroup(v string) *ListQueryExecutionsInput { - s.WorkGroup = &v +// SetColumnInfo sets the ColumnInfo field's value. +func (s *ResultSetMetadata) SetColumnInfo(v []*ColumnInfo) *ResultSetMetadata { + s.ColumnInfo = v return s } -type ListQueryExecutionsOutput struct { +// The rows that make up a query result table. +type Row struct { _ struct{} `type:"structure"` - // A token to be used by the next request if this request is truncated. - NextToken *string `min:"1" type:"string"` - - // The unique IDs of each query execution as an array of strings. 
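// Usage sketch (illustrative): walking a ResultSet as returned by
// GetQueryResults. Assumes rs is a *athena.ResultSet obtained elsewhere;
// Row.Data (defined just below) holds Datum values whose VarCharValue
// carries the cell text.
//
//	for _, col := range rs.ResultSetMetadata.ColumnInfo {
//		fmt.Printf("%s\t", aws.StringValue(col.Name))
//	}
//	fmt.Println()
//	for _, row := range rs.Rows {
//		for _, d := range row.Data {
//			fmt.Printf("%s\t", aws.StringValue(d.VarCharValue))
//		}
//		fmt.Println()
//	}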
- QueryExecutionIds []*string `min:"1" type:"list"` + // The data that populates a row in a query result table. + Data []*Datum `type:"list"` } // String returns the string representation. @@ -6550,7 +15864,7 @@ type ListQueryExecutionsOutput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ListQueryExecutionsOutput) String() string { +func (s Row) String() string { return awsutil.Prettify(s) } @@ -6559,47 +15873,22 @@ func (s ListQueryExecutionsOutput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ListQueryExecutionsOutput) GoString() string { +func (s Row) GoString() string { return s.String() } -// SetNextToken sets the NextToken field's value. -func (s *ListQueryExecutionsOutput) SetNextToken(v string) *ListQueryExecutionsOutput { - s.NextToken = &v - return s -} - -// SetQueryExecutionIds sets the QueryExecutionIds field's value. -func (s *ListQueryExecutionsOutput) SetQueryExecutionIds(v []*string) *ListQueryExecutionsOutput { - s.QueryExecutionIds = v +// SetData sets the Data field's value. +func (s *Row) SetData(v []*Datum) *Row { + s.Data = v return s } -type ListTableMetadataInput struct { - _ struct{} `type:"structure"` - - // The name of the data catalog for which table metadata should be returned. - // - // CatalogName is a required field - CatalogName *string `min:"1" type:"string" required:"true"` - - // The name of the database for which table metadata should be returned. - // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` - - // A regex filter that pattern-matches table names. If no expression is supplied, - // metadata for all tables are listed. - Expression *string `type:"string"` - - // Specifies the maximum number of results to return. - MaxResults *int64 `min:"1" type:"integer"` +// The specified session already exists. +type SessionAlreadyExistsException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `min:"1" type:"string"` + Message_ *string `locationName:"Message" type:"string"` } // String returns the string representation. @@ -6607,7 +15896,7 @@ type ListTableMetadataInput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ListTableMetadataInput) String() string { +func (s SessionAlreadyExistsException) String() string { return awsutil.Prettify(s) } @@ -6616,79 +15905,66 @@ func (s ListTableMetadataInput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". 
-func (s ListTableMetadataInput) GoString() string { +func (s SessionAlreadyExistsException) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListTableMetadataInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListTableMetadataInput"} - if s.CatalogName == nil { - invalidParams.Add(request.NewErrParamRequired("CatalogName")) - } - if s.CatalogName != nil && len(*s.CatalogName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CatalogName", 1)) - } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } - if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) - } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) +func newErrorSessionAlreadyExistsException(v protocol.ResponseMetadata) error { + return &SessionAlreadyExistsException{ + RespMetadata: v, } +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// Code returns the exception type name. +func (s *SessionAlreadyExistsException) Code() string { + return "SessionAlreadyExistsException" } -// SetCatalogName sets the CatalogName field's value. -func (s *ListTableMetadataInput) SetCatalogName(v string) *ListTableMetadataInput { - s.CatalogName = &v - return s +// Message returns the exception's message. +func (s *SessionAlreadyExistsException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" } -// SetDatabaseName sets the DatabaseName field's value. -func (s *ListTableMetadataInput) SetDatabaseName(v string) *ListTableMetadataInput { - s.DatabaseName = &v - return s +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *SessionAlreadyExistsException) OrigErr() error { + return nil } -// SetExpression sets the Expression field's value. -func (s *ListTableMetadataInput) SetExpression(v string) *ListTableMetadataInput { - s.Expression = &v - return s +func (s *SessionAlreadyExistsException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } -// SetMaxResults sets the MaxResults field's value. -func (s *ListTableMetadataInput) SetMaxResults(v int64) *ListTableMetadataInput { - s.MaxResults = &v - return s +// Status code returns the HTTP status code for the request's response error. +func (s *SessionAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } -// SetNextToken sets the NextToken field's value. -func (s *ListTableMetadataInput) SetNextToken(v string) *ListTableMetadataInput { - s.NextToken = &v - return s +// RequestID returns the service's response RequestID for request. +func (s *SessionAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } -type ListTableMetadataOutput struct { +// Contains session configuration information. +type SessionConfiguration struct { _ struct{} `type:"structure"` - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. 
- NextToken *string `min:"1" type:"string"` + // If query and calculation results are encrypted in Amazon S3, indicates the + // encryption option used (for example, SSE_KMS or CSE_KMS) and key information. + EncryptionConfiguration *EncryptionConfiguration `type:"structure"` - // A list of table metadata. - TableMetadataList []*TableMetadata `type:"list"` + // The ARN of the execution role used to access user resources for Spark sessions + // and Identity Center enabled workgroups. This property applies only to Spark + // enabled workgroups and Identity Center enabled workgroups. + ExecutionRole *string `min:"20" type:"string"` + + // The idle timeout in seconds for the session. + IdleTimeoutSeconds *int64 `type:"long"` + + // The Amazon S3 location that stores information for the notebook. + WorkingDirectory *string `type:"string"` } // String returns the string representation. @@ -6696,7 +15972,7 @@ type ListTableMetadataOutput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ListTableMetadataOutput) String() string { +func (s SessionConfiguration) String() string { return awsutil.Prettify(s) } @@ -6705,38 +15981,40 @@ func (s ListTableMetadataOutput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ListTableMetadataOutput) GoString() string { +func (s SessionConfiguration) GoString() string { return s.String() } -// SetNextToken sets the NextToken field's value. -func (s *ListTableMetadataOutput) SetNextToken(v string) *ListTableMetadataOutput { - s.NextToken = &v +// SetEncryptionConfiguration sets the EncryptionConfiguration field's value. +func (s *SessionConfiguration) SetEncryptionConfiguration(v *EncryptionConfiguration) *SessionConfiguration { + s.EncryptionConfiguration = v return s } -// SetTableMetadataList sets the TableMetadataList field's value. -func (s *ListTableMetadataOutput) SetTableMetadataList(v []*TableMetadata) *ListTableMetadataOutput { - s.TableMetadataList = v +// SetExecutionRole sets the ExecutionRole field's value. +func (s *SessionConfiguration) SetExecutionRole(v string) *SessionConfiguration { + s.ExecutionRole = &v return s } -type ListTagsForResourceInput struct { - _ struct{} `type:"structure"` +// SetIdleTimeoutSeconds sets the IdleTimeoutSeconds field's value. +func (s *SessionConfiguration) SetIdleTimeoutSeconds(v int64) *SessionConfiguration { + s.IdleTimeoutSeconds = &v + return s +} - // The maximum number of results to be returned per request that lists the tags - // for the resource. - MaxResults *int64 `min:"75" type:"integer"` +// SetWorkingDirectory sets the WorkingDirectory field's value. +func (s *SessionConfiguration) SetWorkingDirectory(v string) *SessionConfiguration { + s.WorkingDirectory = &v + return s +} - // The token for the next set of results, or null if there are no additional - // results for this request, where the request lists the tags for the resource - // with the specified ARN. - NextToken *string `min:"1" type:"string"` +// Contains statistics for a session. +type SessionStatistics struct { + _ struct{} `type:"structure"` - // Lists the tags for the resource with the specified ARN. 
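// Usage sketch (illustrative): reading the session configuration and
// statistics defined above. The out variable and its SessionConfiguration
// and Statistics fields are assumed here to come from a GetSession response;
// treat those field names as assumptions.
//
//	if sc := out.SessionConfiguration; sc != nil {
//		fmt.Println("idle timeout (s):", aws.Int64Value(sc.IdleTimeoutSeconds))
//		fmt.Println("working dir:", aws.StringValue(sc.WorkingDirectory))
//	}
//	if st := out.Statistics; st != nil {
//		fmt.Println("DPU execution (ms):", aws.Int64Value(st.DpuExecutionInMillis))
//	}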
- // - // ResourceARN is a required field - ResourceARN *string `min:"1" type:"string" required:"true"` + // The data processing unit execution time for a session in milliseconds. + DpuExecutionInMillis *int64 `type:"long"` } // String returns the string representation. @@ -6744,7 +16022,7 @@ type ListTagsForResourceInput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ListTagsForResourceInput) String() string { +func (s SessionStatistics) String() string { return awsutil.Prettify(s) } @@ -6753,58 +16031,55 @@ func (s ListTagsForResourceInput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ListTagsForResourceInput) GoString() string { +func (s SessionStatistics) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListTagsForResourceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} - if s.MaxResults != nil && *s.MaxResults < 75 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 75)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - if s.ResourceARN == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceARN")) - } - if s.ResourceARN != nil && len(*s.ResourceARN) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceARN", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListTagsForResourceInput) SetMaxResults(v int64) *ListTagsForResourceInput { - s.MaxResults = &v +// SetDpuExecutionInMillis sets the DpuExecutionInMillis field's value. +func (s *SessionStatistics) SetDpuExecutionInMillis(v int64) *SessionStatistics { + s.DpuExecutionInMillis = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListTagsForResourceInput) SetNextToken(v string) *ListTagsForResourceInput { - s.NextToken = &v - return s -} +// Contains information about the status of a session. +type SessionStatus struct { + _ struct{} `type:"structure"` -// SetResourceARN sets the ResourceARN field's value. -func (s *ListTagsForResourceInput) SetResourceARN(v string) *ListTagsForResourceInput { - s.ResourceARN = &v - return s -} + // The date and time that the session ended. + EndDateTime *time.Time `type:"timestamp"` -type ListTagsForResourceOutput struct { - _ struct{} `type:"structure"` + // The date and time starting at which the session became idle. Can be empty + // if the session is not currently idle. + IdleSinceDateTime *time.Time `type:"timestamp"` - // A token to be used by the next request if this request is truncated. - NextToken *string `min:"1" type:"string"` + // The most recent date and time that the session was modified. + LastModifiedDateTime *time.Time `type:"timestamp"` - // The list of tags associated with the specified resource. - Tags []*Tag `type:"list"` + // The date and time that the session started. + StartDateTime *time.Time `type:"timestamp"` + + // The state of the session. A description of each state follows. + // + // CREATING - The session is being started, including acquiring resources. 
+ // + // CREATED - The session has been started. + // + // IDLE - The session is able to accept a calculation. + // + // BUSY - The session is processing another task and is unable to accept a calculation. + // + // TERMINATING - The session is in the process of shutting down. + // + // TERMINATED - The session and its resources are no longer running. + // + // DEGRADED - The session has no healthy coordinators. + // + // FAILED - Due to a failure, the session and its resources are no longer running. + State *string `type:"string" enum:"SessionState"` + + // The reason for the session state change (for example, canceled because the + // session was terminated). + StateChangeReason *string `min:"1" type:"string"` } // String returns the string representation. @@ -6812,7 +16087,7 @@ type ListTagsForResourceOutput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ListTagsForResourceOutput) String() string { +func (s SessionStatus) String() string { return awsutil.Prettify(s) } @@ -6821,93 +16096,65 @@ func (s ListTagsForResourceOutput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ListTagsForResourceOutput) GoString() string { +func (s SessionStatus) GoString() string { return s.String() } -// SetNextToken sets the NextToken field's value. -func (s *ListTagsForResourceOutput) SetNextToken(v string) *ListTagsForResourceOutput { - s.NextToken = &v +// SetEndDateTime sets the EndDateTime field's value. +func (s *SessionStatus) SetEndDateTime(v time.Time) *SessionStatus { + s.EndDateTime = &v return s } -// SetTags sets the Tags field's value. -func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput { - s.Tags = v +// SetIdleSinceDateTime sets the IdleSinceDateTime field's value. +func (s *SessionStatus) SetIdleSinceDateTime(v time.Time) *SessionStatus { + s.IdleSinceDateTime = &v return s } -type ListWorkGroupsInput struct { - _ struct{} `type:"structure"` - - // The maximum number of workgroups to return in this request. - MaxResults *int64 `min:"1" type:"integer"` - - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `min:"1" type:"string"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListWorkGroupsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s ListWorkGroupsInput) GoString() string { - return s.String() +// SetLastModifiedDateTime sets the LastModifiedDateTime field's value. 
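// Usage sketch (illustrative): waiting for a session to reach IDLE, the
// state in the list above that accepts a calculation. Assumes svc is an
// existing *athena.Athena client, sessionID is a known session ID, and the
// time package is imported.
//
//	for {
//		out, err := svc.GetSessionStatus(&athena.GetSessionStatusInput{
//			SessionId: aws.String(sessionID),
//		})
//		if err != nil {
//			log.Fatal(err)
//		}
//		if aws.StringValue(out.Status.State) == athena.SessionStateIdle {
//			break
//		}
//		time.Sleep(2 * time.Second)
//	}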
+func (s *SessionStatus) SetLastModifiedDateTime(v time.Time) *SessionStatus { + s.LastModifiedDateTime = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListWorkGroupsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListWorkGroupsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetStartDateTime sets the StartDateTime field's value. +func (s *SessionStatus) SetStartDateTime(v time.Time) *SessionStatus { + s.StartDateTime = &v + return s } -// SetMaxResults sets the MaxResults field's value. -func (s *ListWorkGroupsInput) SetMaxResults(v int64) *ListWorkGroupsInput { - s.MaxResults = &v +// SetState sets the State field's value. +func (s *SessionStatus) SetState(v string) *SessionStatus { + s.State = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListWorkGroupsInput) SetNextToken(v string) *ListWorkGroupsInput { - s.NextToken = &v +// SetStateChangeReason sets the StateChangeReason field's value. +func (s *SessionStatus) SetStateChangeReason(v string) *SessionStatus { + s.StateChangeReason = &v return s } -type ListWorkGroupsOutput struct { +// Contains summary information about a session. +type SessionSummary struct { _ struct{} `type:"structure"` - // A token generated by the Athena service that specifies where to continue - // pagination if a previous request was truncated. To obtain the next set of - // pages, pass in the NextToken from the response object of the previous page - // call. - NextToken *string `min:"1" type:"string"` + // The session description. + Description *string `min:"1" type:"string"` - // A list of WorkGroupSummary objects that include the names, descriptions, - // creation times, and states for each workgroup. - WorkGroups []*WorkGroupSummary `type:"list"` + // The engine version used by the session (for example, PySpark engine version + // 3). + EngineVersion *EngineVersion `type:"structure"` + + // The notebook version. + NotebookVersion *string `min:"1" type:"string"` + + // The session ID. + SessionId *string `min:"1" type:"string"` + + // Contains information about the session status. + Status *SessionStatus `type:"structure"` } // String returns the string representation. @@ -6915,7 +16162,7 @@ type ListWorkGroupsOutput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ListWorkGroupsOutput) String() string { +func (s SessionSummary) String() string { return awsutil.Prettify(s) } @@ -6924,32 +16171,70 @@ func (s ListWorkGroupsOutput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ListWorkGroupsOutput) GoString() string { +func (s SessionSummary) GoString() string { return s.String() } -// SetNextToken sets the NextToken field's value. -func (s *ListWorkGroupsOutput) SetNextToken(v string) *ListWorkGroupsOutput { - s.NextToken = &v +// SetDescription sets the Description field's value. 
+func (s *SessionSummary) SetDescription(v string) *SessionSummary { + s.Description = &v return s } -// SetWorkGroups sets the WorkGroups field's value. -func (s *ListWorkGroupsOutput) SetWorkGroups(v []*WorkGroupSummary) *ListWorkGroupsOutput { - s.WorkGroups = v +// SetEngineVersion sets the EngineVersion field's value. +func (s *SessionSummary) SetEngineVersion(v *EngineVersion) *SessionSummary { + s.EngineVersion = v return s } -// An exception that Athena received when it called a custom metastore. Occurs -// if the error is not caused by user input (InvalidRequestException) or from -// the Athena platform (InternalServerException). For example, if a user-created -// Lambda function is missing permissions, the Lambda 4XX exception is returned -// in a MetadataException. -type MetadataException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` +// SetNotebookVersion sets the NotebookVersion field's value. +func (s *SessionSummary) SetNotebookVersion(v string) *SessionSummary { + s.NotebookVersion = &v + return s +} - Message_ *string `locationName:"Message" type:"string"` +// SetSessionId sets the SessionId field's value. +func (s *SessionSummary) SetSessionId(v string) *SessionSummary { + s.SessionId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *SessionSummary) SetStatus(v *SessionStatus) *SessionSummary { + s.Status = v + return s +} + +type StartCalculationExecutionInput struct { + _ struct{} `type:"structure"` + + // Contains configuration information for the calculation. + // + // Deprecated: Kepler Post GA Tasks : https://sim.amazon.com/issues/ATHENA-39828 + CalculationConfiguration *CalculationConfiguration `deprecated:"true" type:"structure"` + + // A unique case-sensitive string used to ensure the request to create the calculation + // is idempotent (executes only once). If another StartCalculationExecutionRequest + // is received, the same response is returned and another calculation is not + // created. If a parameter has changed, an error is returned. + // + // This token is listed as not required because Amazon Web Services SDKs (for + // example the Amazon Web Services SDK for Java) auto-generate the token for + // users. If you are not using the Amazon Web Services SDK or the Amazon Web + // Services CLI, you must provide this token or the action will fail. + ClientRequestToken *string `min:"32" type:"string"` + + // A string that contains the code of the calculation. Use this parameter instead + // of CalculationConfiguration$CodeBlock, which is deprecated. + CodeBlock *string `type:"string"` + + // A description of the calculation. + Description *string `min:"1" type:"string"` + + // The session ID. + // + // SessionId is a required field + SessionId *string `min:"1" type:"string" required:"true"` } // String returns the string representation. @@ -6957,7 +16242,7 @@ type MetadataException struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s MetadataException) String() string { +func (s StartCalculationExecutionInput) String() string { return awsutil.Prettify(s) } @@ -6966,76 +16251,86 @@ func (s MetadataException) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. 
The member name will be present, but the // value will be replaced with "sensitive". -func (s MetadataException) GoString() string { +func (s StartCalculationExecutionInput) GoString() string { return s.String() } -func newErrorMetadataException(v protocol.ResponseMetadata) error { - return &MetadataException{ - RespMetadata: v, +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartCalculationExecutionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartCalculationExecutionInput"} + if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 32 { + invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 32)) + } + if s.Description != nil && len(*s.Description) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Description", 1)) + } + if s.SessionId == nil { + invalidParams.Add(request.NewErrParamRequired("SessionId")) + } + if s.SessionId != nil && len(*s.SessionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SessionId", 1)) } -} -// Code returns the exception type name. -func (s *MetadataException) Code() string { - return "MetadataException" + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// Message returns the exception's message. -func (s *MetadataException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" +// SetCalculationConfiguration sets the CalculationConfiguration field's value. +func (s *StartCalculationExecutionInput) SetCalculationConfiguration(v *CalculationConfiguration) *StartCalculationExecutionInput { + s.CalculationConfiguration = v + return s } -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *MetadataException) OrigErr() error { - return nil +// SetClientRequestToken sets the ClientRequestToken field's value. +func (s *StartCalculationExecutionInput) SetClientRequestToken(v string) *StartCalculationExecutionInput { + s.ClientRequestToken = &v + return s } -func (s *MetadataException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +// SetCodeBlock sets the CodeBlock field's value. +func (s *StartCalculationExecutionInput) SetCodeBlock(v string) *StartCalculationExecutionInput { + s.CodeBlock = &v + return s } -// Status code returns the HTTP status code for the request's response error. -func (s *MetadataException) StatusCode() int { - return s.RespMetadata.StatusCode +// SetDescription sets the Description field's value. +func (s *StartCalculationExecutionInput) SetDescription(v string) *StartCalculationExecutionInput { + s.Description = &v + return s } -// RequestID returns the service's response RequestID for request. -func (s *MetadataException) RequestID() string { - return s.RespMetadata.RequestID +// SetSessionId sets the SessionId field's value. +func (s *StartCalculationExecutionInput) SetSessionId(v string) *StartCalculationExecutionInput { + s.SessionId = &v + return s } -// A query, where QueryString is the list of SQL query statements that comprise -// the query. -type NamedQuery struct { +type StartCalculationExecutionOutput struct { _ struct{} `type:"structure"` - // The database to which the query belongs. - // - // Database is a required field - Database *string `min:"1" type:"string" required:"true"` - - // The query description. - Description *string `min:"1" type:"string"` + // The calculation execution UUID. + CalculationExecutionId *string `min:"1" type:"string"` - // The query name. 
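// Usage sketch (illustrative): submitting a calculation through the
// non-deprecated CodeBlock field described above. svc and sessionID are
// assumed to exist in the caller's scope; the code string is an example.
//
//	out, err := svc.StartCalculationExecution(&athena.StartCalculationExecutionInput{
//		SessionId:   aws.String(sessionID),
//		CodeBlock:   aws.String("print('hello from Athena Spark')"),
//		Description: aws.String("example calculation"),
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println(aws.StringValue(out.CalculationExecutionId),
//		aws.StringValue(out.State))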
+ // CREATING - The calculation is in the process of being created. // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // The unique identifier of the query. - NamedQueryId *string `type:"string"` - - // The SQL query statements that comprise the query. + // CREATED - The calculation has been created and is ready to run. // - // QueryString is a required field - QueryString *string `min:"1" type:"string" required:"true"` - - // The name of the workgroup that contains the named query. - WorkGroup *string `type:"string"` + // QUEUED - The calculation has been queued for processing. + // + // RUNNING - The calculation is running. + // + // CANCELING - A request to cancel the calculation has been received and the + // system is working to stop it. + // + // CANCELED - The calculation is no longer running as the result of a cancel + // request. + // + // COMPLETED - The calculation has completed without error. + // + // FAILED - The calculation failed and is no longer running. + State *string `type:"string" enum:"CalculationExecutionState"` } // String returns the string representation. @@ -7043,7 +16338,7 @@ type NamedQuery struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s NamedQuery) String() string { +func (s StartCalculationExecutionOutput) String() string { return awsutil.Prettify(s) } @@ -7052,64 +16347,63 @@ func (s NamedQuery) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s NamedQuery) GoString() string { +func (s StartCalculationExecutionOutput) GoString() string { return s.String() } -// SetDatabase sets the Database field's value. -func (s *NamedQuery) SetDatabase(v string) *NamedQuery { - s.Database = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *NamedQuery) SetDescription(v string) *NamedQuery { - s.Description = &v - return s -} - -// SetName sets the Name field's value. -func (s *NamedQuery) SetName(v string) *NamedQuery { - s.Name = &v +// SetCalculationExecutionId sets the CalculationExecutionId field's value. +func (s *StartCalculationExecutionOutput) SetCalculationExecutionId(v string) *StartCalculationExecutionOutput { + s.CalculationExecutionId = &v return s } -// SetNamedQueryId sets the NamedQueryId field's value. -func (s *NamedQuery) SetNamedQueryId(v string) *NamedQuery { - s.NamedQueryId = &v +// SetState sets the State field's value. +func (s *StartCalculationExecutionOutput) SetState(v string) *StartCalculationExecutionOutput { + s.State = &v return s } -// SetQueryString sets the QueryString field's value. -func (s *NamedQuery) SetQueryString(v string) *NamedQuery { - s.QueryString = &v - return s -} +type StartQueryExecutionInput struct { + _ struct{} `type:"structure"` -// SetWorkGroup sets the WorkGroup field's value. -func (s *NamedQuery) SetWorkGroup(v string) *NamedQuery { - s.WorkGroup = &v - return s -} + // A unique case-sensitive string used to ensure the request to create the query + // is idempotent (executes only once). If another StartQueryExecution request + // is received, the same response is returned and another query is not created. + // An error is returned if a parameter, such as QueryString, has changed. 
A + // call to StartQueryExecution that uses a previous client request token returns + // the same QueryExecutionId even if the requester doesn't have permission on + // the tables specified in QueryString. + // + // This token is listed as not required because Amazon Web Services SDKs (for + // example the Amazon Web Services SDK for Java) auto-generate the token for + // users. If you are not using the Amazon Web Services SDK or the Amazon Web + // Services CLI, you must provide this token or the action will fail. + ClientRequestToken *string `min:"32" type:"string" idempotencyToken:"true"` -// A prepared SQL statement for use with Athena. -type PreparedStatement struct { - _ struct{} `type:"structure"` + // A list of values for the parameters in a query. The values are applied sequentially + // to the parameters in the query in the order in which the parameters occur. + ExecutionParameters []*string `min:"1" type:"list"` - // The description of the prepared statement. - Description *string `min:"1" type:"string"` + // The database within which the query executes. + QueryExecutionContext *QueryExecutionContext `type:"structure"` - // The last modified time of the prepared statement. - LastModifiedTime *time.Time `type:"timestamp"` + // The SQL query statements to be executed. + // + // QueryString is a required field + QueryString *string `min:"1" type:"string" required:"true"` - // The query string for the prepared statement. - QueryStatement *string `min:"1" type:"string"` + // Specifies information about where and how to save the results of the query + // execution. If the query runs in a workgroup, then workgroup's settings may + // override query settings. This affects the query results location. The workgroup + // settings override is specified in EnforceWorkGroupConfiguration (true/false) + // in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration. + ResultConfiguration *ResultConfiguration `type:"structure"` - // The name of the prepared statement. - StatementName *string `min:"1" type:"string"` + // Specifies the query result reuse behavior for the query. + ResultReuseConfiguration *ResultReuseConfiguration `type:"structure"` - // The name of the workgroup to which the prepared statement belongs. - WorkGroupName *string `type:"string"` + // The name of the workgroup in which the query is being started. + WorkGroup *string `type:"string"` } // String returns the string representation. @@ -7117,7 +16411,7 @@ type PreparedStatement struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s PreparedStatement) String() string { +func (s StartQueryExecutionInput) String() string { return awsutil.Prettify(s) } @@ -7126,49 +16420,94 @@ func (s PreparedStatement) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s PreparedStatement) GoString() string { +func (s StartQueryExecutionInput) GoString() string { return s.String() } -// SetDescription sets the Description field's value. -func (s *PreparedStatement) SetDescription(v string) *PreparedStatement { - s.Description = &v +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *StartQueryExecutionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartQueryExecutionInput"} + if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 32 { + invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 32)) + } + if s.ExecutionParameters != nil && len(s.ExecutionParameters) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ExecutionParameters", 1)) + } + if s.QueryString == nil { + invalidParams.Add(request.NewErrParamRequired("QueryString")) + } + if s.QueryString != nil && len(*s.QueryString) < 1 { + invalidParams.Add(request.NewErrParamMinLen("QueryString", 1)) + } + if s.QueryExecutionContext != nil { + if err := s.QueryExecutionContext.Validate(); err != nil { + invalidParams.AddNested("QueryExecutionContext", err.(request.ErrInvalidParams)) + } + } + if s.ResultConfiguration != nil { + if err := s.ResultConfiguration.Validate(); err != nil { + invalidParams.AddNested("ResultConfiguration", err.(request.ErrInvalidParams)) + } + } + if s.ResultReuseConfiguration != nil { + if err := s.ResultReuseConfiguration.Validate(); err != nil { + invalidParams.AddNested("ResultReuseConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientRequestToken sets the ClientRequestToken field's value. +func (s *StartQueryExecutionInput) SetClientRequestToken(v string) *StartQueryExecutionInput { + s.ClientRequestToken = &v + return s +} + +// SetExecutionParameters sets the ExecutionParameters field's value. +func (s *StartQueryExecutionInput) SetExecutionParameters(v []*string) *StartQueryExecutionInput { + s.ExecutionParameters = v + return s +} + +// SetQueryExecutionContext sets the QueryExecutionContext field's value. +func (s *StartQueryExecutionInput) SetQueryExecutionContext(v *QueryExecutionContext) *StartQueryExecutionInput { + s.QueryExecutionContext = v return s } -// SetLastModifiedTime sets the LastModifiedTime field's value. -func (s *PreparedStatement) SetLastModifiedTime(v time.Time) *PreparedStatement { - s.LastModifiedTime = &v +// SetQueryString sets the QueryString field's value. +func (s *StartQueryExecutionInput) SetQueryString(v string) *StartQueryExecutionInput { + s.QueryString = &v return s } -// SetQueryStatement sets the QueryStatement field's value. -func (s *PreparedStatement) SetQueryStatement(v string) *PreparedStatement { - s.QueryStatement = &v +// SetResultConfiguration sets the ResultConfiguration field's value. +func (s *StartQueryExecutionInput) SetResultConfiguration(v *ResultConfiguration) *StartQueryExecutionInput { + s.ResultConfiguration = v return s } -// SetStatementName sets the StatementName field's value. -func (s *PreparedStatement) SetStatementName(v string) *PreparedStatement { - s.StatementName = &v +// SetResultReuseConfiguration sets the ResultReuseConfiguration field's value. +func (s *StartQueryExecutionInput) SetResultReuseConfiguration(v *ResultReuseConfiguration) *StartQueryExecutionInput { + s.ResultReuseConfiguration = v return s } -// SetWorkGroupName sets the WorkGroupName field's value. -func (s *PreparedStatement) SetWorkGroupName(v string) *PreparedStatement { - s.WorkGroupName = &v +// SetWorkGroup sets the WorkGroup field's value. +func (s *StartQueryExecutionInput) SetWorkGroup(v string) *StartQueryExecutionInput { + s.WorkGroup = &v return s } -// The name and last modified time of the prepared statement. 
-type PreparedStatementSummary struct { +type StartQueryExecutionOutput struct { _ struct{} `type:"structure"` - // The last modified time of the prepared statement. - LastModifiedTime *time.Time `type:"timestamp"` - - // The name of the prepared statement. - StatementName *string `min:"1" type:"string"` + // The unique ID of the query that ran as a result of this request. + QueryExecutionId *string `min:"1" type:"string"` } // String returns the string representation. @@ -7176,7 +16515,7 @@ type PreparedStatementSummary struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s PreparedStatementSummary) String() string { +func (s StartQueryExecutionOutput) String() string { return awsutil.Prettify(s) } @@ -7185,62 +16524,53 @@ func (s PreparedStatementSummary) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s PreparedStatementSummary) GoString() string { +func (s StartQueryExecutionOutput) GoString() string { return s.String() } -// SetLastModifiedTime sets the LastModifiedTime field's value. -func (s *PreparedStatementSummary) SetLastModifiedTime(v time.Time) *PreparedStatementSummary { - s.LastModifiedTime = &v - return s -} - -// SetStatementName sets the StatementName field's value. -func (s *PreparedStatementSummary) SetStatementName(v string) *PreparedStatementSummary { - s.StatementName = &v +// SetQueryExecutionId sets the QueryExecutionId field's value. +func (s *StartQueryExecutionOutput) SetQueryExecutionId(v string) *StartQueryExecutionOutput { + s.QueryExecutionId = &v return s } -// Information about a single instance of a query execution. -type QueryExecution struct { +type StartSessionInput struct { _ struct{} `type:"structure"` - // The engine version that executed the query. - EngineVersion *EngineVersion `type:"structure"` - - // The SQL query statements which the query execution ran. - Query *string `min:"1" type:"string"` - - // The database in which the query execution occurred. - QueryExecutionContext *QueryExecutionContext `type:"structure"` - - // The unique identifier for each query execution. - QueryExecutionId *string `type:"string"` + // A unique case-sensitive string used to ensure the request to create the session + // is idempotent (executes only once). If another StartSessionRequest is received, + // the same response is returned and another session is not created. If a parameter + // has changed, an error is returned. + // + // This token is listed as not required because Amazon Web Services SDKs (for + // example the Amazon Web Services SDK for Java) auto-generate the token for + // users. If you are not using the Amazon Web Services SDK or the Amazon Web + // Services CLI, you must provide this token or the action will fail. + ClientRequestToken *string `min:"32" type:"string"` - // The location in Amazon S3 where query results were stored and the encryption - // option, if any, used for query results. These are known as "client-side settings". - // If workgroup settings override client-side settings, then the query uses - // the location for the query results and the encryption configuration that - // are specified for the workgroup. 
- ResultConfiguration *ResultConfiguration `type:"structure"` + // The session description. + Description *string `min:"1" type:"string"` - // The type of query statement that was run. DDL indicates DDL query statements. - // DML indicates DML (Data Manipulation Language) query statements, such as - // CREATE TABLE AS SELECT. UTILITY indicates query statements other than DDL - // and DML, such as SHOW CREATE TABLE, or DESCRIBE TABLE. - StatementType *string `type:"string" enum:"StatementType"` + // Contains engine data processing unit (DPU) configuration settings and parameter + // mappings. + // + // EngineConfiguration is a required field + EngineConfiguration *EngineConfiguration `type:"structure" required:"true"` - // Query execution statistics, such as the amount of data scanned, the amount - // of time that the query took to process, and the type of statement that was - // run. - Statistics *QueryExecutionStatistics `type:"structure"` + // The notebook version. This value is supplied automatically for notebook sessions + // in the Athena console and is not required for programmatic session access. + // The only valid notebook version is Athena notebook version 1. If you specify + // a value for NotebookVersion, you must also specify a value for NotebookId. + // See EngineConfiguration$AdditionalConfigs. + NotebookVersion *string `min:"1" type:"string"` - // The completion date, current state, submission time, and state change reason - // (if applicable) for the query execution. - Status *QueryExecutionStatus `type:"structure"` + // The idle timeout in minutes for the session. + SessionIdleTimeoutInMinutes *int64 `min:"1" type:"integer"` - // The name of the workgroup in which the query ran. - WorkGroup *string `type:"string"` + // The workgroup to which the session belongs. + // + // WorkGroup is a required field + WorkGroup *string `type:"string" required:"true"` } // String returns the string representation. @@ -7248,7 +16578,7 @@ type QueryExecution struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s QueryExecution) String() string { +func (s StartSessionInput) String() string { return awsutil.Prettify(s) } @@ -7257,74 +16587,142 @@ func (s QueryExecution) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s QueryExecution) GoString() string { +func (s StartSessionInput) GoString() string { return s.String() } -// SetEngineVersion sets the EngineVersion field's value. -func (s *QueryExecution) SetEngineVersion(v *EngineVersion) *QueryExecution { - s.EngineVersion = v - return s +// Validate inspects the fields of the type to determine if they are valid. 
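+//
+// For illustration, a sketch of the required-field checks (the workgroup
+// name below is hypothetical):
+//
+//	input := &StartSessionInput{}
+//	input.SetWorkGroup("my-workgroup")
+//	// EngineConfiguration is unset, so Validate returns ErrInvalidParams.
+//	err := input.Validate()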
+func (s *StartSessionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartSessionInput"} + if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 32 { + invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 32)) + } + if s.Description != nil && len(*s.Description) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Description", 1)) + } + if s.EngineConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("EngineConfiguration")) + } + if s.NotebookVersion != nil && len(*s.NotebookVersion) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NotebookVersion", 1)) + } + if s.SessionIdleTimeoutInMinutes != nil && *s.SessionIdleTimeoutInMinutes < 1 { + invalidParams.Add(request.NewErrParamMinValue("SessionIdleTimeoutInMinutes", 1)) + } + if s.WorkGroup == nil { + invalidParams.Add(request.NewErrParamRequired("WorkGroup")) + } + if s.EngineConfiguration != nil { + if err := s.EngineConfiguration.Validate(); err != nil { + invalidParams.AddNested("EngineConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetQuery sets the Query field's value. -func (s *QueryExecution) SetQuery(v string) *QueryExecution { - s.Query = &v +// SetClientRequestToken sets the ClientRequestToken field's value. +func (s *StartSessionInput) SetClientRequestToken(v string) *StartSessionInput { + s.ClientRequestToken = &v return s } -// SetQueryExecutionContext sets the QueryExecutionContext field's value. -func (s *QueryExecution) SetQueryExecutionContext(v *QueryExecutionContext) *QueryExecution { - s.QueryExecutionContext = v +// SetDescription sets the Description field's value. +func (s *StartSessionInput) SetDescription(v string) *StartSessionInput { + s.Description = &v return s } -// SetQueryExecutionId sets the QueryExecutionId field's value. -func (s *QueryExecution) SetQueryExecutionId(v string) *QueryExecution { - s.QueryExecutionId = &v +// SetEngineConfiguration sets the EngineConfiguration field's value. +func (s *StartSessionInput) SetEngineConfiguration(v *EngineConfiguration) *StartSessionInput { + s.EngineConfiguration = v return s } -// SetResultConfiguration sets the ResultConfiguration field's value. -func (s *QueryExecution) SetResultConfiguration(v *ResultConfiguration) *QueryExecution { - s.ResultConfiguration = v +// SetNotebookVersion sets the NotebookVersion field's value. +func (s *StartSessionInput) SetNotebookVersion(v string) *StartSessionInput { + s.NotebookVersion = &v return s } -// SetStatementType sets the StatementType field's value. -func (s *QueryExecution) SetStatementType(v string) *QueryExecution { - s.StatementType = &v +// SetSessionIdleTimeoutInMinutes sets the SessionIdleTimeoutInMinutes field's value. +func (s *StartSessionInput) SetSessionIdleTimeoutInMinutes(v int64) *StartSessionInput { + s.SessionIdleTimeoutInMinutes = &v return s } -// SetStatistics sets the Statistics field's value. -func (s *QueryExecution) SetStatistics(v *QueryExecutionStatistics) *QueryExecution { - s.Statistics = v +// SetWorkGroup sets the WorkGroup field's value. +func (s *StartSessionInput) SetWorkGroup(v string) *StartSessionInput { + s.WorkGroup = &v return s } -// SetStatus sets the Status field's value. -func (s *QueryExecution) SetStatus(v *QueryExecutionStatus) *QueryExecution { - s.Status = v +type StartSessionOutput struct { + _ struct{} `type:"structure"` + + // The session ID. 
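+	// Pass this value as the SessionId in subsequent calls, for example when
+	// calling TerminateSession.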
+ SessionId *string `min:"1" type:"string"` + + // The state of the session. A description of each state follows. + // + // CREATING - The session is being started, including acquiring resources. + // + // CREATED - The session has been started. + // + // IDLE - The session is able to accept a calculation. + // + // BUSY - The session is processing another task and is unable to accept a calculation. + // + // TERMINATING - The session is in the process of shutting down. + // + // TERMINATED - The session and its resources are no longer running. + // + // DEGRADED - The session has no healthy coordinators. + // + // FAILED - Due to a failure, the session and its resources are no longer running. + State *string `type:"string" enum:"SessionState"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartSessionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartSessionOutput) GoString() string { + return s.String() +} + +// SetSessionId sets the SessionId field's value. +func (s *StartSessionOutput) SetSessionId(v string) *StartSessionOutput { + s.SessionId = &v return s } -// SetWorkGroup sets the WorkGroup field's value. -func (s *QueryExecution) SetWorkGroup(v string) *QueryExecution { - s.WorkGroup = &v +// SetState sets the State field's value. +func (s *StartSessionOutput) SetState(v string) *StartSessionOutput { + s.State = &v return s } -// The database and data catalog context in which the query execution occurs. -type QueryExecutionContext struct { +type StopCalculationExecutionInput struct { _ struct{} `type:"structure"` - // The name of the data catalog used in the query execution. - Catalog *string `min:"1" type:"string"` - - // The name of the database used in the query execution. The database must exist - // in the catalog. - Database *string `min:"1" type:"string"` + // The calculation execution UUID. + // + // CalculationExecutionId is a required field + CalculationExecutionId *string `min:"1" type:"string" required:"true"` } // String returns the string representation. @@ -7332,7 +16730,7 @@ type QueryExecutionContext struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s QueryExecutionContext) String() string { +func (s StopCalculationExecutionInput) String() string { return awsutil.Prettify(s) } @@ -7341,18 +16739,18 @@ func (s QueryExecutionContext) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s QueryExecutionContext) GoString() string { +func (s StopCalculationExecutionInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *QueryExecutionContext) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "QueryExecutionContext"} - if s.Catalog != nil && len(*s.Catalog) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Catalog", 1)) +func (s *StopCalculationExecutionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StopCalculationExecutionInput"} + if s.CalculationExecutionId == nil { + invalidParams.Add(request.NewErrParamRequired("CalculationExecutionId")) } - if s.Database != nil && len(*s.Database) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Database", 1)) + if s.CalculationExecutionId != nil && len(*s.CalculationExecutionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CalculationExecutionId", 1)) } if invalidParams.Len() > 0 { @@ -7361,56 +16759,64 @@ func (s *QueryExecutionContext) Validate() error { return nil } -// SetCatalog sets the Catalog field's value. -func (s *QueryExecutionContext) SetCatalog(v string) *QueryExecutionContext { - s.Catalog = &v - return s -} - -// SetDatabase sets the Database field's value. -func (s *QueryExecutionContext) SetDatabase(v string) *QueryExecutionContext { - s.Database = &v +// SetCalculationExecutionId sets the CalculationExecutionId field's value. +func (s *StopCalculationExecutionInput) SetCalculationExecutionId(v string) *StopCalculationExecutionInput { + s.CalculationExecutionId = &v return s } -// The amount of data scanned during the query execution and the amount of time -// that it took to execute, and the type of statement that was run. -type QueryExecutionStatistics struct { +type StopCalculationExecutionOutput struct { _ struct{} `type:"structure"` - // The location and file name of a data manifest file. The manifest file is - // saved to the Athena query results location in Amazon S3. The manifest file - // tracks files that the query wrote to Amazon S3. If the query fails, the manifest - // file also tracks files that the query intended to write. The manifest is - // useful for identifying orphaned files resulting from a failed query. For - // more information, see Working with Query Results, Output Files, and Query - // History (https://docs.aws.amazon.com/athena/latest/ug/querying.html) in the - // Amazon Athena User Guide. - DataManifestLocation *string `type:"string"` - - // The number of bytes in the data that was queried. - DataScannedInBytes *int64 `type:"long"` + // CREATING - The calculation is in the process of being created. + // + // CREATED - The calculation has been created and is ready to run. + // + // QUEUED - The calculation has been queued for processing. + // + // RUNNING - The calculation is running. + // + // CANCELING - A request to cancel the calculation has been received and the + // system is working to stop it. + // + // CANCELED - The calculation is no longer running as the result of a cancel + // request. + // + // COMPLETED - The calculation has completed without error. + // + // FAILED - The calculation failed and is no longer running. + State *string `type:"string" enum:"CalculationExecutionState"` +} - // The number of milliseconds that the query took to execute. - EngineExecutionTimeInMillis *int64 `type:"long"` +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s StopCalculationExecutionOutput) String() string { + return awsutil.Prettify(s) +} - // The number of milliseconds that Athena took to plan the query processing - // flow. This includes the time spent retrieving table partitions from the data - // source. Note that because the query engine performs the query planning, query - // planning time is a subset of engine processing time. - QueryPlanningTimeInMillis *int64 `type:"long"` +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StopCalculationExecutionOutput) GoString() string { + return s.String() +} - // The number of milliseconds that the query was in your query queue waiting - // for resources. Note that if transient errors occur, Athena might automatically - // add the query back to the queue. - QueryQueueTimeInMillis *int64 `type:"long"` +// SetState sets the State field's value. +func (s *StopCalculationExecutionOutput) SetState(v string) *StopCalculationExecutionOutput { + s.State = &v + return s +} - // The number of milliseconds that Athena took to finalize and publish the query - // results after the query engine finished running the query. - ServiceProcessingTimeInMillis *int64 `type:"long"` +type StopQueryExecutionInput struct { + _ struct{} `type:"structure"` - // The number of milliseconds that Athena took to run the query. - TotalExecutionTimeInMillis *int64 `type:"long"` + // The unique ID of the query execution to stop. + QueryExecutionId *string `min:"1" type:"string" idempotencyToken:"true"` } // String returns the string representation. @@ -7418,7 +16824,7 @@ type QueryExecutionStatistics struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s QueryExecutionStatistics) String() string { +func (s StopQueryExecutionInput) String() string { return awsutil.Prettify(s) } @@ -7427,77 +16833,31 @@ func (s QueryExecutionStatistics) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s QueryExecutionStatistics) GoString() string { +func (s StopQueryExecutionInput) GoString() string { return s.String() } -// SetDataManifestLocation sets the DataManifestLocation field's value. -func (s *QueryExecutionStatistics) SetDataManifestLocation(v string) *QueryExecutionStatistics { - s.DataManifestLocation = &v - return s -} - -// SetDataScannedInBytes sets the DataScannedInBytes field's value. -func (s *QueryExecutionStatistics) SetDataScannedInBytes(v int64) *QueryExecutionStatistics { - s.DataScannedInBytes = &v - return s -} - -// SetEngineExecutionTimeInMillis sets the EngineExecutionTimeInMillis field's value. -func (s *QueryExecutionStatistics) SetEngineExecutionTimeInMillis(v int64) *QueryExecutionStatistics { - s.EngineExecutionTimeInMillis = &v - return s -} - -// SetQueryPlanningTimeInMillis sets the QueryPlanningTimeInMillis field's value. -func (s *QueryExecutionStatistics) SetQueryPlanningTimeInMillis(v int64) *QueryExecutionStatistics { - s.QueryPlanningTimeInMillis = &v - return s -} - -// SetQueryQueueTimeInMillis sets the QueryQueueTimeInMillis field's value. 
-func (s *QueryExecutionStatistics) SetQueryQueueTimeInMillis(v int64) *QueryExecutionStatistics { - s.QueryQueueTimeInMillis = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *StopQueryExecutionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StopQueryExecutionInput"} + if s.QueryExecutionId != nil && len(*s.QueryExecutionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("QueryExecutionId", 1)) + } -// SetServiceProcessingTimeInMillis sets the ServiceProcessingTimeInMillis field's value. -func (s *QueryExecutionStatistics) SetServiceProcessingTimeInMillis(v int64) *QueryExecutionStatistics { - s.ServiceProcessingTimeInMillis = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetTotalExecutionTimeInMillis sets the TotalExecutionTimeInMillis field's value. -func (s *QueryExecutionStatistics) SetTotalExecutionTimeInMillis(v int64) *QueryExecutionStatistics { - s.TotalExecutionTimeInMillis = &v +// SetQueryExecutionId sets the QueryExecutionId field's value. +func (s *StopQueryExecutionInput) SetQueryExecutionId(v string) *StopQueryExecutionInput { + s.QueryExecutionId = &v return s } -// The completion date, current state, submission time, and state change reason -// (if applicable) for the query execution. -type QueryExecutionStatus struct { +type StopQueryExecutionOutput struct { _ struct{} `type:"structure"` - - // The date and time that the query completed. - CompletionDateTime *time.Time `type:"timestamp"` - - // The state of query execution. QUEUED indicates that the query has been submitted - // to the service, and Athena will execute the query as soon as resources are - // available. RUNNING indicates that the query is in execution phase. SUCCEEDED - // indicates that the query completed without errors. FAILED indicates that - // the query experienced an error and did not complete processing. CANCELLED - // indicates that a user input interrupted query execution. - // - // Athena automatically retries your queries in cases of certain transient errors. - // As a result, you may see the query state transition from RUNNING or FAILED - // to QUEUED. - State *string `type:"string" enum:"QueryExecutionState"` - - // Further detail about the status of the query. - StateChangeReason *string `type:"string"` - - // The date and time that the query was submitted. - SubmissionDateTime *time.Time `type:"timestamp"` } // String returns the string representation. @@ -7505,7 +16865,7 @@ type QueryExecutionStatus struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s QueryExecutionStatus) String() string { +func (s StopQueryExecutionOutput) String() string { return awsutil.Prettify(s) } @@ -7514,42 +16874,36 @@ func (s QueryExecutionStatus) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s QueryExecutionStatus) GoString() string { +func (s StopQueryExecutionOutput) GoString() string { return s.String() } -// SetCompletionDateTime sets the CompletionDateTime field's value. 
-func (s *QueryExecutionStatus) SetCompletionDateTime(v time.Time) *QueryExecutionStatus { - s.CompletionDateTime = &v - return s -} +// Contains metadata for a table. +type TableMetadata struct { + _ struct{} `type:"structure"` -// SetState sets the State field's value. -func (s *QueryExecutionStatus) SetState(v string) *QueryExecutionStatus { - s.State = &v - return s -} + // A list of the columns in the table. + Columns []*Column `type:"list"` -// SetStateChangeReason sets the StateChangeReason field's value. -func (s *QueryExecutionStatus) SetStateChangeReason(v string) *QueryExecutionStatus { - s.StateChangeReason = &v - return s -} + // The time that the table was created. + CreateTime *time.Time `type:"timestamp"` -// SetSubmissionDateTime sets the SubmissionDateTime field's value. -func (s *QueryExecutionStatus) SetSubmissionDateTime(v time.Time) *QueryExecutionStatus { - s.SubmissionDateTime = &v - return s -} + // The last time the table was accessed. + LastAccessTime *time.Time `type:"timestamp"` -// A resource, such as a workgroup, was not found. -type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + // The name of the table. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` - Message_ *string `locationName:"Message" type:"string"` + // A set of custom key/value pairs for table properties. + Parameters map[string]*string `type:"map"` - ResourceName *string `min:"1" type:"string"` + // A list of the partition keys in the table. + PartitionKeys []*Column `type:"list"` + + // The type of table. In Athena, only EXTERNAL_TABLE is supported. + TableType *string `type:"string"` } // String returns the string representation. @@ -7557,7 +16911,7 @@ type ResourceNotFoundException struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ResourceNotFoundException) String() string { +func (s TableMetadata) String() string { return awsutil.Prettify(s) } @@ -7566,73 +16920,76 @@ func (s ResourceNotFoundException) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ResourceNotFoundException) GoString() string { +func (s TableMetadata) GoString() string { return s.String() } -func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { - return &ResourceNotFoundException{ - RespMetadata: v, - } +// SetColumns sets the Columns field's value. +func (s *TableMetadata) SetColumns(v []*Column) *TableMetadata { + s.Columns = v + return s } -// Code returns the exception type name. -func (s *ResourceNotFoundException) Code() string { - return "ResourceNotFoundException" +// SetCreateTime sets the CreateTime field's value. +func (s *TableMetadata) SetCreateTime(v time.Time) *TableMetadata { + s.CreateTime = &v + return s } -// Message returns the exception's message. -func (s *ResourceNotFoundException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" +// SetLastAccessTime sets the LastAccessTime field's value. +func (s *TableMetadata) SetLastAccessTime(v time.Time) *TableMetadata { + s.LastAccessTime = &v + return s } -// OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s *ResourceNotFoundException) OrigErr() error { - return nil +// SetName sets the Name field's value. +func (s *TableMetadata) SetName(v string) *TableMetadata { + s.Name = &v + return s } -func (s *ResourceNotFoundException) Error() string { - return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +// SetParameters sets the Parameters field's value. +func (s *TableMetadata) SetParameters(v map[string]*string) *TableMetadata { + s.Parameters = v + return s } -// Status code returns the HTTP status code for the request's response error. -func (s *ResourceNotFoundException) StatusCode() int { - return s.RespMetadata.StatusCode +// SetPartitionKeys sets the PartitionKeys field's value. +func (s *TableMetadata) SetPartitionKeys(v []*Column) *TableMetadata { + s.PartitionKeys = v + return s } -// RequestID returns the service's response RequestID for request. -func (s *ResourceNotFoundException) RequestID() string { - return s.RespMetadata.RequestID +// SetTableType sets the TableType field's value. +func (s *TableMetadata) SetTableType(v string) *TableMetadata { + s.TableType = &v + return s } -// The location in Amazon S3 where query results are stored and the encryption -// option, if any, used for query results. These are known as "client-side settings". -// If workgroup settings override client-side settings, then the query uses -// the workgroup settings. -type ResultConfiguration struct { +// A label that you assign to a resource. Athena resources include workgroups, +// data catalogs, and capacity reservations. Each tag consists of a key and +// an optional value, both of which you define. For example, you can use tags +// to categorize Athena resources by purpose, owner, or environment. Use a consistent +// set of tag keys to make it easier to search and filter the resources in your +// account. For best practices, see Tagging Best Practices (https://docs.aws.amazon.com/whitepapers/latest/tagging-best-practices/tagging-best-practices.html). +// Tag keys can be from 1 to 128 UTF-8 Unicode characters, and tag values can +// be from 0 to 256 UTF-8 Unicode characters. Tags can use letters and numbers +// representable in UTF-8, and the following characters: + - = . _ : / @. Tag +// keys and values are case-sensitive. Tag keys must be unique per resource. +// If you specify more than one tag, separate them by commas. +type Tag struct { _ struct{} `type:"structure"` - // If query results are encrypted in Amazon S3, indicates the encryption option - // used (for example, SSE-KMS or CSE-KMS) and key information. This is a client-side - // setting. If workgroup settings override client-side settings, then the query - // uses the encryption configuration that is specified for the workgroup, and - // also uses the location for storing query results specified in the workgroup. - // See WorkGroupConfiguration$EnforceWorkGroupConfiguration and Workgroup Settings - // Override Client-Side Settings (https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html). - EncryptionConfiguration *EncryptionConfiguration `type:"structure"` + // A tag key. The tag key length is from 1 to 128 Unicode characters in UTF-8. + // You can use letters and numbers representable in UTF-8, and the following + // characters: + - = . _ : / @. Tag keys are case-sensitive and must be unique + // per resource. + Key *string `min:"1" type:"string"` - // The location in Amazon S3 where your query results are stored, such as s3://path/to/query/bucket/. 
- // To run the query, you must specify the query results location using one of - // the ways: either for individual queries using either this setting (client-side), - // or in the workgroup, using WorkGroupConfiguration. If none of them is set, - // Athena issues an error that no output location is provided. For more information, - // see Query Results (https://docs.aws.amazon.com/athena/latest/ug/querying.html). - // If workgroup settings override client-side settings, then the query uses - // the settings specified for the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration. - OutputLocation *string `type:"string"` + // A tag value. The tag value length is from 0 to 256 Unicode characters in + // UTF-8. You can use letters and numbers representable in UTF-8, and the following + // characters: + - = . _ : / @. Tag values are case-sensitive. + Value *string `type:"string"` } // String returns the string representation. @@ -7640,7 +16997,7 @@ type ResultConfiguration struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ResultConfiguration) String() string { +func (s Tag) String() string { return awsutil.Prettify(s) } @@ -7649,17 +17006,15 @@ func (s ResultConfiguration) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ResultConfiguration) GoString() string { +func (s Tag) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ResultConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ResultConfiguration"} - if s.EncryptionConfiguration != nil { - if err := s.EncryptionConfiguration.Validate(); err != nil { - invalidParams.AddNested("EncryptionConfiguration", err.(request.ErrInvalidParams)) - } +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) } if invalidParams.Len() > 0 { @@ -7668,52 +17023,31 @@ func (s *ResultConfiguration) Validate() error { return nil } -// SetEncryptionConfiguration sets the EncryptionConfiguration field's value. -func (s *ResultConfiguration) SetEncryptionConfiguration(v *EncryptionConfiguration) *ResultConfiguration { - s.EncryptionConfiguration = v +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v return s } -// SetOutputLocation sets the OutputLocation field's value. -func (s *ResultConfiguration) SetOutputLocation(v string) *ResultConfiguration { - s.OutputLocation = &v +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v return s -} - -// The information about the updates in the query results, such as output location -// and encryption configuration for the query results. -type ResultConfigurationUpdates struct { - _ struct{} `type:"structure"` - - // The encryption configuration for the query results. - EncryptionConfiguration *EncryptionConfiguration `type:"structure"` - - // The location in Amazon S3 where your query results are stored, such as s3://path/to/query/bucket/. 
- // For more information, see Query Results (https://docs.aws.amazon.com/athena/latest/ug/querying.html) - // If workgroup settings override client-side settings, then the query uses - // the location for the query results and the encryption configuration that - // are specified for the workgroup. The "workgroup settings override" is specified - // in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. - // See WorkGroupConfiguration$EnforceWorkGroupConfiguration. - OutputLocation *string `type:"string"` - - // If set to "true", indicates that the previously-specified encryption configuration - // (also known as the client-side setting) for queries in this workgroup should - // be ignored and set to null. If set to "false" or not set, and a value is - // present in the EncryptionConfiguration in ResultConfigurationUpdates (the - // client-side setting), the EncryptionConfiguration in the workgroup's ResultConfiguration - // will be updated with the new value. For more information, see Workgroup Settings - // Override Client-Side Settings (https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html). - RemoveEncryptionConfiguration *bool `type:"boolean"` +} - // If set to "true", indicates that the previously-specified query results location - // (also known as a client-side setting) for queries in this workgroup should - // be ignored and set to null. If set to "false" or not set, and a value is - // present in the OutputLocation in ResultConfigurationUpdates (the client-side - // setting), the OutputLocation in the workgroup's ResultConfiguration will - // be updated with the new value. For more information, see Workgroup Settings - // Override Client-Side Settings (https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html). - RemoveOutputLocation *bool `type:"boolean"` +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // Specifies the ARN of the Athena resource to which tags are to be added. + // + // ResourceARN is a required field + ResourceARN *string `min:"1" type:"string" required:"true"` + + // A collection of one or more tags, separated by commas, to be added to an + // Athena resource. + // + // Tags is a required field + Tags []*Tag `type:"list" required:"true"` } // String returns the string representation. @@ -7721,7 +17055,7 @@ type ResultConfigurationUpdates struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ResultConfigurationUpdates) String() string { +func (s TagResourceInput) String() string { return awsutil.Prettify(s) } @@ -7730,16 +17064,30 @@ func (s ResultConfigurationUpdates) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ResultConfigurationUpdates) GoString() string { +func (s TagResourceInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *ResultConfigurationUpdates) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ResultConfigurationUpdates"} - if s.EncryptionConfiguration != nil { - if err := s.EncryptionConfiguration.Validate(); err != nil { - invalidParams.AddNested("EncryptionConfiguration", err.(request.ErrInvalidParams)) +func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceARN == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceARN")) + } + if s.ResourceARN != nil && len(*s.ResourceARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceARN", 1)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } } } @@ -7749,41 +17097,20 @@ func (s *ResultConfigurationUpdates) Validate() error { return nil } -// SetEncryptionConfiguration sets the EncryptionConfiguration field's value. -func (s *ResultConfigurationUpdates) SetEncryptionConfiguration(v *EncryptionConfiguration) *ResultConfigurationUpdates { - s.EncryptionConfiguration = v - return s -} - -// SetOutputLocation sets the OutputLocation field's value. -func (s *ResultConfigurationUpdates) SetOutputLocation(v string) *ResultConfigurationUpdates { - s.OutputLocation = &v - return s -} - -// SetRemoveEncryptionConfiguration sets the RemoveEncryptionConfiguration field's value. -func (s *ResultConfigurationUpdates) SetRemoveEncryptionConfiguration(v bool) *ResultConfigurationUpdates { - s.RemoveEncryptionConfiguration = &v +// SetResourceARN sets the ResourceARN field's value. +func (s *TagResourceInput) SetResourceARN(v string) *TagResourceInput { + s.ResourceARN = &v return s } -// SetRemoveOutputLocation sets the RemoveOutputLocation field's value. -func (s *ResultConfigurationUpdates) SetRemoveOutputLocation(v bool) *ResultConfigurationUpdates { - s.RemoveOutputLocation = &v +// SetTags sets the Tags field's value. +func (s *TagResourceInput) SetTags(v []*Tag) *TagResourceInput { + s.Tags = v return s } -// The metadata and rows that comprise a query result set. The metadata describes -// the column structure and data types. To return a ResultSet object, use GetQueryResults. -type ResultSet struct { +type TagResourceOutput struct { _ struct{} `type:"structure"` - - // The metadata that describes the column structure and data types of a table - // of query results. - ResultSetMetadata *ResultSetMetadata `type:"structure"` - - // The rows in the table. - Rows []*Row `type:"list"` } // String returns the string representation. @@ -7791,7 +17118,7 @@ type ResultSet struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ResultSet) String() string { +func (s TagResourceOutput) String() string { return awsutil.Prettify(s) } @@ -7800,29 +17127,17 @@ func (s ResultSet) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". 
-func (s ResultSet) GoString() string { +func (s TagResourceOutput) GoString() string { return s.String() } -// SetResultSetMetadata sets the ResultSetMetadata field's value. -func (s *ResultSet) SetResultSetMetadata(v *ResultSetMetadata) *ResultSet { - s.ResultSetMetadata = v - return s -} - -// SetRows sets the Rows field's value. -func (s *ResultSet) SetRows(v []*Row) *ResultSet { - s.Rows = v - return s -} - -// The metadata that describes the column structure and data types of a table -// of query results. To return a ResultSetMetadata object, use GetQueryResults. -type ResultSetMetadata struct { +type TerminateSessionInput struct { _ struct{} `type:"structure"` - // Information about the columns returned in a query result metadata. - ColumnInfo []*ColumnInfo `type:"list"` + // The session ID. + // + // SessionId is a required field + SessionId *string `min:"1" type:"string" required:"true"` } // String returns the string representation. @@ -7830,7 +17145,7 @@ type ResultSetMetadata struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ResultSetMetadata) String() string { +func (s TerminateSessionInput) String() string { return awsutil.Prettify(s) } @@ -7839,22 +17154,53 @@ func (s ResultSetMetadata) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ResultSetMetadata) GoString() string { +func (s TerminateSessionInput) GoString() string { return s.String() } -// SetColumnInfo sets the ColumnInfo field's value. -func (s *ResultSetMetadata) SetColumnInfo(v []*ColumnInfo) *ResultSetMetadata { - s.ColumnInfo = v +// Validate inspects the fields of the type to determine if they are valid. +func (s *TerminateSessionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TerminateSessionInput"} + if s.SessionId == nil { + invalidParams.Add(request.NewErrParamRequired("SessionId")) + } + if s.SessionId != nil && len(*s.SessionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SessionId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSessionId sets the SessionId field's value. +func (s *TerminateSessionInput) SetSessionId(v string) *TerminateSessionInput { + s.SessionId = &v return s } -// The rows that comprise a query result table. -type Row struct { +type TerminateSessionOutput struct { _ struct{} `type:"structure"` - // The data that populates a row in a query result table. - Data []*Datum `type:"list"` + // The state of the session. A description of each state follows. + // + // CREATING - The session is being started, including acquiring resources. + // + // CREATED - The session has been started. + // + // IDLE - The session is able to accept a calculation. + // + // BUSY - The session is processing another task and is unable to accept a calculation. + // + // TERMINATING - The session is in the process of shutting down. + // + // TERMINATED - The session and its resources are no longer running. + // + // DEGRADED - The session has no healthy coordinators. + // + // FAILED - Due to a failure, the session and its resources are no longer running. + State *string `type:"string" enum:"SessionState"` } // String returns the string representation. 
@@ -7862,7 +17208,7 @@ type Row struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s Row) String() string { +func (s TerminateSessionOutput) String() string { return awsutil.Prettify(s) } @@ -7871,47 +17217,26 @@ func (s Row) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s Row) GoString() string { +func (s TerminateSessionOutput) GoString() string { return s.String() } -// SetData sets the Data field's value. -func (s *Row) SetData(v []*Datum) *Row { - s.Data = v +// SetState sets the State field's value. +func (s *TerminateSessionOutput) SetState(v string) *TerminateSessionOutput { + s.State = &v return s } -type StartQueryExecutionInput struct { - _ struct{} `type:"structure"` - - // A unique case-sensitive string used to ensure the request to create the query - // is idempotent (executes only once). If another StartQueryExecution request - // is received, the same response is returned and another query is not created. - // If a parameter has changed, for example, the QueryString, an error is returned. - // - // This token is listed as not required because Amazon Web Services SDKs (for - // example the Amazon Web Services SDK for Java) auto-generate the token for - // users. If you are not using the Amazon Web Services SDK or the Amazon Web - // Services CLI, you must provide this token or the action will fail. - ClientRequestToken *string `min:"32" type:"string" idempotencyToken:"true"` - - // The database within which the query executes. - QueryExecutionContext *QueryExecutionContext `type:"structure"` - - // The SQL query statements to be executed. - // - // QueryString is a required field - QueryString *string `min:"1" type:"string" required:"true"` +// Indicates that the request was throttled. +type TooManyRequestsException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - // Specifies information about where and how to save the results of the query - // execution. If the query runs in a workgroup, then workgroup's settings may - // override query settings. This affects the query results location. The workgroup - // settings override is specified in EnforceWorkGroupConfiguration (true/false) - // in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration. - ResultConfiguration *ResultConfiguration `type:"structure"` + Message_ *string `locationName:"Message" type:"string"` - // The name of the workgroup in which the query is being started. - WorkGroup *string `type:"string"` + // The reason for the query throttling, for example, when it exceeds the concurrent + // query limit. + Reason *string `type:"string" enum:"ThrottleReason"` } // String returns the string representation. @@ -7919,7 +17244,7 @@ type StartQueryExecutionInput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". 
-func (s StartQueryExecutionInput) String() string { +func (s TooManyRequestsException) String() string { return awsutil.Prettify(s) } @@ -7928,74 +17253,62 @@ func (s StartQueryExecutionInput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s StartQueryExecutionInput) GoString() string { +func (s TooManyRequestsException) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *StartQueryExecutionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StartQueryExecutionInput"} - if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 32 { - invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 32)) - } - if s.QueryString == nil { - invalidParams.Add(request.NewErrParamRequired("QueryString")) - } - if s.QueryString != nil && len(*s.QueryString) < 1 { - invalidParams.Add(request.NewErrParamMinLen("QueryString", 1)) - } - if s.QueryExecutionContext != nil { - if err := s.QueryExecutionContext.Validate(); err != nil { - invalidParams.AddNested("QueryExecutionContext", err.(request.ErrInvalidParams)) - } - } - if s.ResultConfiguration != nil { - if err := s.ResultConfiguration.Validate(); err != nil { - invalidParams.AddNested("ResultConfiguration", err.(request.ErrInvalidParams)) - } +func newErrorTooManyRequestsException(v protocol.ResponseMetadata) error { + return &TooManyRequestsException{ + RespMetadata: v, } +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// Code returns the exception type name. +func (s *TooManyRequestsException) Code() string { + return "TooManyRequestsException" } -// SetClientRequestToken sets the ClientRequestToken field's value. -func (s *StartQueryExecutionInput) SetClientRequestToken(v string) *StartQueryExecutionInput { - s.ClientRequestToken = &v - return s +// Message returns the exception's message. +func (s *TooManyRequestsException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" } -// SetQueryExecutionContext sets the QueryExecutionContext field's value. -func (s *StartQueryExecutionInput) SetQueryExecutionContext(v *QueryExecutionContext) *StartQueryExecutionInput { - s.QueryExecutionContext = v - return s +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *TooManyRequestsException) OrigErr() error { + return nil } -// SetQueryString sets the QueryString field's value. -func (s *StartQueryExecutionInput) SetQueryString(v string) *StartQueryExecutionInput { - s.QueryString = &v - return s +func (s *TooManyRequestsException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } -// SetResultConfiguration sets the ResultConfiguration field's value. -func (s *StartQueryExecutionInput) SetResultConfiguration(v *ResultConfiguration) *StartQueryExecutionInput { - s.ResultConfiguration = v - return s +// Status code returns the HTTP status code for the request's response error. +func (s *TooManyRequestsException) StatusCode() int { + return s.RespMetadata.StatusCode } -// SetWorkGroup sets the WorkGroup field's value. -func (s *StartQueryExecutionInput) SetWorkGroup(v string) *StartQueryExecutionInput { - s.WorkGroup = &v - return s +// RequestID returns the service's response RequestID for request. 
+func (s *TooManyRequestsException) RequestID() string { + return s.RespMetadata.RequestID } -type StartQueryExecutionOutput struct { +// Information about a named query ID that could not be processed. +type UnprocessedNamedQueryId struct { _ struct{} `type:"structure"` - // The unique ID of the query that ran as a result of this request. - QueryExecutionId *string `type:"string"` + // The error code returned when the processing request for the named query failed, + // if applicable. + ErrorCode *string `min:"1" type:"string"` + + // The error message returned when the processing request for the named query + // failed, if applicable. + ErrorMessage *string `type:"string"` + + // The unique identifier of the named query. + NamedQueryId *string `min:"1" type:"string"` } // String returns the string representation. @@ -8003,7 +17316,7 @@ type StartQueryExecutionOutput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s StartQueryExecutionOutput) String() string { +func (s UnprocessedNamedQueryId) String() string { return awsutil.Prettify(s) } @@ -8012,21 +17325,50 @@ func (s StartQueryExecutionOutput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s StartQueryExecutionOutput) GoString() string { +func (s UnprocessedNamedQueryId) GoString() string { return s.String() } -// SetQueryExecutionId sets the QueryExecutionId field's value. -func (s *StartQueryExecutionOutput) SetQueryExecutionId(v string) *StartQueryExecutionOutput { - s.QueryExecutionId = &v +// SetErrorCode sets the ErrorCode field's value. +func (s *UnprocessedNamedQueryId) SetErrorCode(v string) *UnprocessedNamedQueryId { + s.ErrorCode = &v return s } -type StopQueryExecutionInput struct { +// SetErrorMessage sets the ErrorMessage field's value. +func (s *UnprocessedNamedQueryId) SetErrorMessage(v string) *UnprocessedNamedQueryId { + s.ErrorMessage = &v + return s +} + +// SetNamedQueryId sets the NamedQueryId field's value. +func (s *UnprocessedNamedQueryId) SetNamedQueryId(v string) *UnprocessedNamedQueryId { + s.NamedQueryId = &v + return s +} + +// The name of a prepared statement that could not be returned. +type UnprocessedPreparedStatementName struct { _ struct{} `type:"structure"` - // The unique ID of the query execution to stop. - QueryExecutionId *string `type:"string" idempotencyToken:"true"` + // The error code returned when the request for the prepared statement failed. + ErrorCode *string `min:"1" type:"string"` + + // The error message containing the reason why the prepared statement could + // not be returned. The following error messages are possible: + // + // * INVALID_INPUT - The name of the prepared statement that was provided + // is not valid (for example, the name is too long). + // + // * STATEMENT_NOT_FOUND - A prepared statement with the name provided could + // not be found. + // + // * UNAUTHORIZED - The requester does not have permission to access the + // workgroup that contains the prepared statement. + ErrorMessage *string `type:"string"` + + // The name of a prepared statement that could not be returned due to an error. + StatementName *string `min:"1" type:"string"` } // String returns the string representation. 
@@ -8034,7 +17376,7 @@ type StopQueryExecutionInput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s StopQueryExecutionInput) String() string { +func (s UnprocessedPreparedStatementName) String() string { return awsutil.Prettify(s) } @@ -8043,64 +17385,41 @@ func (s StopQueryExecutionInput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s StopQueryExecutionInput) GoString() string { +func (s UnprocessedPreparedStatementName) GoString() string { return s.String() } -// SetQueryExecutionId sets the QueryExecutionId field's value. -func (s *StopQueryExecutionInput) SetQueryExecutionId(v string) *StopQueryExecutionInput { - s.QueryExecutionId = &v +// SetErrorCode sets the ErrorCode field's value. +func (s *UnprocessedPreparedStatementName) SetErrorCode(v string) *UnprocessedPreparedStatementName { + s.ErrorCode = &v return s } -type StopQueryExecutionOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s StopQueryExecutionOutput) String() string { - return awsutil.Prettify(s) +// SetErrorMessage sets the ErrorMessage field's value. +func (s *UnprocessedPreparedStatementName) SetErrorMessage(v string) *UnprocessedPreparedStatementName { + s.ErrorMessage = &v + return s } -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s StopQueryExecutionOutput) GoString() string { - return s.String() +// SetStatementName sets the StatementName field's value. +func (s *UnprocessedPreparedStatementName) SetStatementName(v string) *UnprocessedPreparedStatementName { + s.StatementName = &v + return s } -// Contains metadata for a table. -type TableMetadata struct { +// Describes a query execution that failed to process. +type UnprocessedQueryExecutionId struct { _ struct{} `type:"structure"` - // A list of the columns in the table. - Columns []*Column `type:"list"` - - // The time that the table was created. - CreateTime *time.Time `type:"timestamp"` - - // The last time the table was accessed. - LastAccessTime *time.Time `type:"timestamp"` - - // The name of the table. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // A set of custom key/value pairs for table properties. - Parameters map[string]*string `type:"map"` + // The error code returned when the query execution failed to process, if applicable. + ErrorCode *string `min:"1" type:"string"` - // A list of the partition keys in the table. - PartitionKeys []*Column `type:"list"` + // The error message returned when the query execution failed to process, if + // applicable. + ErrorMessage *string `type:"string"` - // The type of table. In Athena, only EXTERNAL_TABLE is supported. - TableType *string `type:"string"` + // The unique identifier of the query execution. 
+ QueryExecutionId *string `min:"1" type:"string"` } // String returns the string representation. @@ -8108,7 +17427,7 @@ type TableMetadata struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s TableMetadata) String() string { +func (s UnprocessedQueryExecutionId) String() string { return awsutil.Prettify(s) } @@ -8117,76 +17436,41 @@ func (s TableMetadata) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s TableMetadata) GoString() string { +func (s UnprocessedQueryExecutionId) GoString() string { return s.String() } -// SetColumns sets the Columns field's value. -func (s *TableMetadata) SetColumns(v []*Column) *TableMetadata { - s.Columns = v - return s -} - -// SetCreateTime sets the CreateTime field's value. -func (s *TableMetadata) SetCreateTime(v time.Time) *TableMetadata { - s.CreateTime = &v - return s -} - -// SetLastAccessTime sets the LastAccessTime field's value. -func (s *TableMetadata) SetLastAccessTime(v time.Time) *TableMetadata { - s.LastAccessTime = &v - return s -} - -// SetName sets the Name field's value. -func (s *TableMetadata) SetName(v string) *TableMetadata { - s.Name = &v - return s -} - -// SetParameters sets the Parameters field's value. -func (s *TableMetadata) SetParameters(v map[string]*string) *TableMetadata { - s.Parameters = v +// SetErrorCode sets the ErrorCode field's value. +func (s *UnprocessedQueryExecutionId) SetErrorCode(v string) *UnprocessedQueryExecutionId { + s.ErrorCode = &v return s } -// SetPartitionKeys sets the PartitionKeys field's value. -func (s *TableMetadata) SetPartitionKeys(v []*Column) *TableMetadata { - s.PartitionKeys = v +// SetErrorMessage sets the ErrorMessage field's value. +func (s *UnprocessedQueryExecutionId) SetErrorMessage(v string) *UnprocessedQueryExecutionId { + s.ErrorMessage = &v return s } -// SetTableType sets the TableType field's value. -func (s *TableMetadata) SetTableType(v string) *TableMetadata { - s.TableType = &v +// SetQueryExecutionId sets the QueryExecutionId field's value. +func (s *UnprocessedQueryExecutionId) SetQueryExecutionId(v string) *UnprocessedQueryExecutionId { + s.QueryExecutionId = &v return s } -// A label that you assign to a resource. In Athena, a resource can be a workgroup -// or data catalog. Each tag consists of a key and an optional value, both of -// which you define. For example, you can use tags to categorize Athena workgroups -// or data catalogs by purpose, owner, or environment. Use a consistent set -// of tag keys to make it easier to search and filter workgroups or data catalogs -// in your account. For best practices, see Tagging Best Practices (https://aws.amazon.com/answers/account-management/aws-tagging-strategies/). -// Tag keys can be from 1 to 128 UTF-8 Unicode characters, and tag values can -// be from 0 to 256 UTF-8 Unicode characters. Tags can use letters and numbers -// representable in UTF-8, and the following characters: + - = . _ : / @. Tag -// keys and values are case-sensitive. Tag keys must be unique per resource. -// If you specify more than one tag, separate them by commas. -type Tag struct { +type UntagResourceInput struct { _ struct{} `type:"structure"` - // A tag key. 
The tag key length is from 1 to 128 Unicode characters in UTF-8. - // You can use letters and numbers representable in UTF-8, and the following - // characters: + - = . _ : / @. Tag keys are case-sensitive and must be unique - // per resource. - Key *string `min:"1" type:"string"` + // Specifies the ARN of the resource from which tags are to be removed. + // + // ResourceARN is a required field + ResourceARN *string `min:"1" type:"string" required:"true"` - // A tag value. The tag value length is from 0 to 256 Unicode characters in - // UTF-8. You can use letters and numbers representable in UTF-8, and the following - // characters: + - = . _ : / @. Tag values are case-sensitive. - Value *string `type:"string"` + // A comma-separated list of one or more tag keys whose tags are to be removed + // from the specified resource. + // + // TagKeys is a required field + TagKeys []*string `type:"list" required:"true"` } // String returns the string representation. @@ -8194,7 +17478,7 @@ type Tag struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s Tag) String() string { +func (s UntagResourceInput) String() string { return awsutil.Prettify(s) } @@ -8203,15 +17487,21 @@ func (s Tag) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s Tag) GoString() string { +func (s UntagResourceInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *Tag) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Tag"} - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) +func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.ResourceARN == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceARN")) + } + if s.ResourceARN != nil && len(*s.ResourceARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceARN", 1)) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) } if invalidParams.Len() > 0 { @@ -8220,32 +17510,52 @@ func (s *Tag) Validate() error { return nil } -// SetKey sets the Key field's value. -func (s *Tag) SetKey(v string) *Tag { - s.Key = &v +// SetResourceARN sets the ResourceARN field's value. +func (s *UntagResourceInput) SetResourceARN(v string) *UntagResourceInput { + s.ResourceARN = &v return s } -// SetValue sets the Value field's value. -func (s *Tag) SetValue(v string) *Tag { - s.Value = &v +// SetTagKeys sets the TagKeys field's value. +func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v return s } -type TagResourceInput struct { +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UntagResourceOutput) GoString() string { + return s.String() +} + +type UpdateCapacityReservationInput struct { _ struct{} `type:"structure"` - // Specifies the ARN of the Athena resource (workgroup or data catalog) to which - // tags are to be added. + // The name of the capacity reservation. // - // ResourceARN is a required field - ResourceARN *string `min:"1" type:"string" required:"true"` + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` - // A collection of one or more tags, separated by commas, to be added to an - // Athena workgroup or data catalog resource. + // The new number of requested data processing units. // - // Tags is a required field - Tags []*Tag `type:"list" required:"true"` + // TargetDpus is a required field + TargetDpus *int64 `min:"24" type:"integer" required:"true"` } // String returns the string representation. @@ -8253,7 +17563,7 @@ type TagResourceInput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s TagResourceInput) String() string { +func (s UpdateCapacityReservationInput) String() string { return awsutil.Prettify(s) } @@ -8262,52 +17572,45 @@ func (s TagResourceInput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s TagResourceInput) GoString() string { +func (s UpdateCapacityReservationInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *TagResourceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} - if s.ResourceARN == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceARN")) +func (s *UpdateCapacityReservationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateCapacityReservationInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) } - if s.ResourceARN != nil && len(*s.ResourceARN) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceARN", 1)) + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } - if s.Tags == nil { - invalidParams.Add(request.NewErrParamRequired("Tags")) + if s.TargetDpus == nil { + invalidParams.Add(request.NewErrParamRequired("TargetDpus")) } - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } + if s.TargetDpus != nil && *s.TargetDpus < 24 { + invalidParams.Add(request.NewErrParamMinValue("TargetDpus", 24)) } if invalidParams.Len() > 0 { return invalidParams } return nil -} - -// SetResourceARN sets the ResourceARN field's value. -func (s *TagResourceInput) SetResourceARN(v string) *TagResourceInput { - s.ResourceARN = &v +} + +// SetName sets the Name field's value. +func (s *UpdateCapacityReservationInput) SetName(v string) *UpdateCapacityReservationInput { + s.Name = &v return s } -// SetTags sets the Tags field's value. 
-func (s *TagResourceInput) SetTags(v []*Tag) *TagResourceInput { - s.Tags = v +// SetTargetDpus sets the TargetDpus field's value. +func (s *UpdateCapacityReservationInput) SetTargetDpus(v int64) *UpdateCapacityReservationInput { + s.TargetDpus = &v return s } -type TagResourceOutput struct { +type UpdateCapacityReservationOutput struct { _ struct{} `type:"structure"` } @@ -8316,7 +17619,7 @@ type TagResourceOutput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s TagResourceOutput) String() string { +func (s UpdateCapacityReservationOutput) String() string { return awsutil.Prettify(s) } @@ -8325,20 +17628,44 @@ func (s TagResourceOutput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s TagResourceOutput) GoString() string { +func (s UpdateCapacityReservationOutput) GoString() string { return s.String() } -// Indicates that the request was throttled. -type TooManyRequestsException struct { - _ struct{} `type:"structure"` - RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` +type UpdateDataCatalogInput struct { + _ struct{} `type:"structure"` - Message_ *string `locationName:"Message" type:"string"` + // New or modified text that describes the data catalog. + Description *string `min:"1" type:"string"` - // The reason for the query throttling, for example, when it exceeds the concurrent - // query limit. - Reason *string `type:"string" enum:"ThrottleReason"` + // The name of the data catalog to update. The catalog name must be unique for + // the Amazon Web Services account and can use a maximum of 127 alphanumeric, + // underscore, at sign, or hyphen characters. The remainder of the length constraint + // of 256 is reserved for use by Athena. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // Specifies the Lambda function or functions to use for updating the data catalog. + // This is a mapping whose values depend on the catalog type. + // + // * For the HIVE data catalog type, use the following syntax. The metadata-function + // parameter is required. The sdk-version parameter is optional and defaults + // to the currently supported version. metadata-function=lambda_arn, sdk-version=version_number + // + // * For the LAMBDA data catalog type, use one of the following sets of required + // parameters, but not both. If you have one Lambda function that processes + // metadata and another for reading the actual data, use the following syntax. + // Both parameters are required. metadata-function=lambda_arn, record-function=lambda_arn + // If you have a composite Lambda function that processes both metadata and + // data, use the following syntax to specify your Lambda function. function=lambda_arn + Parameters map[string]*string `type:"map"` + + // Specifies the type of data catalog to update. Specify LAMBDA for a federated + // catalog, HIVE for an external hive metastore, or GLUE for an Glue Data Catalog. + // + // Type is a required field + Type *string `type:"string" required:"true" enum:"DataCatalogType"` } // String returns the string representation. 
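The Parameters syntax documented on UpdateDataCatalogInput above is easier to see in code. A minimal sketch using the composite-function form for a LAMBDA catalog, reusing the client value from the previous example; the catalog name and Lambda ARN are placeholders, not values taken from this patch:

// Composite form ("function=lambda_arn"): one Lambda function serves both
// metadata and data, as described in the Parameters documentation above.
input := &athena.UpdateDataCatalogInput{
	Name: aws.String("my_federated_catalog"), // placeholder catalog name
	Type: aws.String(athena.DataCatalogTypeLambda),
	Parameters: map[string]*string{
		"function": aws.String("arn:aws:lambda:us-east-1:111122223333:function:my-connector"), // placeholder ARN
	},
	Description: aws.String("Federated catalog served by a single Lambda function"),
}

// Validate applies the client-side checks generated later in this file
// (required Name and Type, minimum lengths) before any request is sent.
if err := input.Validate(); err != nil {
	log.Fatal(err)
}
if _, err := client.UpdateDataCatalog(input); err != nil {
	log.Fatal(err)
}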
@@ -8346,7 +17673,7 @@ type TooManyRequestsException struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s TooManyRequestsException) String() string { +func (s UpdateDataCatalogInput) String() string { return awsutil.Prettify(s) } @@ -8355,62 +17682,98 @@ func (s TooManyRequestsException) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s TooManyRequestsException) GoString() string { +func (s UpdateDataCatalogInput) GoString() string { return s.String() } -func newErrorTooManyRequestsException(v protocol.ResponseMetadata) error { - return &TooManyRequestsException{ - RespMetadata: v, +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateDataCatalogInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateDataCatalogInput"} + if s.Description != nil && len(*s.Description) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Description", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams } + return nil } -// Code returns the exception type name. -func (s *TooManyRequestsException) Code() string { - return "TooManyRequestsException" +// SetDescription sets the Description field's value. +func (s *UpdateDataCatalogInput) SetDescription(v string) *UpdateDataCatalogInput { + s.Description = &v + return s } -// Message returns the exception's message. -func (s *TooManyRequestsException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" +// SetName sets the Name field's value. +func (s *UpdateDataCatalogInput) SetName(v string) *UpdateDataCatalogInput { + s.Name = &v + return s } -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s *TooManyRequestsException) OrigErr() error { - return nil +// SetParameters sets the Parameters field's value. +func (s *UpdateDataCatalogInput) SetParameters(v map[string]*string) *UpdateDataCatalogInput { + s.Parameters = v + return s } -func (s *TooManyRequestsException) Error() string { - return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +// SetType sets the Type field's value. +func (s *UpdateDataCatalogInput) SetType(v string) *UpdateDataCatalogInput { + s.Type = &v + return s } -// Status code returns the HTTP status code for the request's response error. -func (s *TooManyRequestsException) StatusCode() int { - return s.RespMetadata.StatusCode +type UpdateDataCatalogOutput struct { + _ struct{} `type:"structure"` } -// RequestID returns the service's response RequestID for request. -func (s *TooManyRequestsException) RequestID() string { - return s.RespMetadata.RequestID +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s UpdateDataCatalogOutput) String() string { + return awsutil.Prettify(s) } -// Information about a named query ID that could not be processed. -type UnprocessedNamedQueryId struct { +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateDataCatalogOutput) GoString() string { + return s.String() +} + +type UpdateNamedQueryInput struct { _ struct{} `type:"structure"` - // The error code returned when the processing request for the named query failed, - // if applicable. - ErrorCode *string `min:"1" type:"string"` + // The query description. + Description *string `type:"string"` - // The error message returned when the processing request for the named query - // failed, if applicable. - ErrorMessage *string `type:"string"` + // The name of the query. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` - // The unique identifier of the named query. - NamedQueryId *string `type:"string"` + // The unique identifier (UUID) of the query. + // + // NamedQueryId is a required field + NamedQueryId *string `min:"1" type:"string" required:"true"` + + // The contents of the query with all query statements. + // + // QueryString is a required field + QueryString *string `min:"1" type:"string" required:"true"` } // String returns the string representation. @@ -8418,7 +17781,7 @@ type UnprocessedNamedQueryId struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s UnprocessedNamedQueryId) String() string { +func (s UpdateNamedQueryInput) String() string { return awsutil.Prettify(s) } @@ -8427,41 +17790,64 @@ func (s UnprocessedNamedQueryId) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s UnprocessedNamedQueryId) GoString() string { +func (s UpdateNamedQueryInput) GoString() string { return s.String() } -// SetErrorCode sets the ErrorCode field's value. -func (s *UnprocessedNamedQueryId) SetErrorCode(v string) *UnprocessedNamedQueryId { - s.ErrorCode = &v +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateNamedQueryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateNamedQueryInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.NamedQueryId == nil { + invalidParams.Add(request.NewErrParamRequired("NamedQueryId")) + } + if s.NamedQueryId != nil && len(*s.NamedQueryId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NamedQueryId", 1)) + } + if s.QueryString == nil { + invalidParams.Add(request.NewErrParamRequired("QueryString")) + } + if s.QueryString != nil && len(*s.QueryString) < 1 { + invalidParams.Add(request.NewErrParamMinLen("QueryString", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. 
+func (s *UpdateNamedQueryInput) SetDescription(v string) *UpdateNamedQueryInput { + s.Description = &v return s } -// SetErrorMessage sets the ErrorMessage field's value. -func (s *UnprocessedNamedQueryId) SetErrorMessage(v string) *UnprocessedNamedQueryId { - s.ErrorMessage = &v +// SetName sets the Name field's value. +func (s *UpdateNamedQueryInput) SetName(v string) *UpdateNamedQueryInput { + s.Name = &v return s } // SetNamedQueryId sets the NamedQueryId field's value. -func (s *UnprocessedNamedQueryId) SetNamedQueryId(v string) *UnprocessedNamedQueryId { +func (s *UpdateNamedQueryInput) SetNamedQueryId(v string) *UpdateNamedQueryInput { s.NamedQueryId = &v return s } -// Describes a query execution that failed to process. -type UnprocessedQueryExecutionId struct { - _ struct{} `type:"structure"` - - // The error code returned when the query execution failed to process, if applicable. - ErrorCode *string `min:"1" type:"string"` - - // The error message returned when the query execution failed to process, if - // applicable. - ErrorMessage *string `type:"string"` +// SetQueryString sets the QueryString field's value. +func (s *UpdateNamedQueryInput) SetQueryString(v string) *UpdateNamedQueryInput { + s.QueryString = &v + return s +} - // The unique identifier of the query execution. - QueryExecutionId *string `type:"string"` +type UpdateNamedQueryOutput struct { + _ struct{} `type:"structure"` } // String returns the string representation. @@ -8469,7 +17855,7 @@ type UnprocessedQueryExecutionId struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s UnprocessedQueryExecutionId) String() string { +func (s UpdateNamedQueryOutput) String() string { return awsutil.Prettify(s) } @@ -8478,41 +17864,39 @@ func (s UnprocessedQueryExecutionId) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s UnprocessedQueryExecutionId) GoString() string { +func (s UpdateNamedQueryOutput) GoString() string { return s.String() } -// SetErrorCode sets the ErrorCode field's value. -func (s *UnprocessedQueryExecutionId) SetErrorCode(v string) *UnprocessedQueryExecutionId { - s.ErrorCode = &v - return s -} - -// SetErrorMessage sets the ErrorMessage field's value. -func (s *UnprocessedQueryExecutionId) SetErrorMessage(v string) *UnprocessedQueryExecutionId { - s.ErrorMessage = &v - return s -} +type UpdateNotebookInput struct { + _ struct{} `type:"structure"` -// SetQueryExecutionId sets the QueryExecutionId field's value. -func (s *UnprocessedQueryExecutionId) SetQueryExecutionId(v string) *UnprocessedQueryExecutionId { - s.QueryExecutionId = &v - return s -} + // A unique case-sensitive string used to ensure the request to create the notebook + // is idempotent (executes only once). + // + // This token is listed as not required because Amazon Web Services SDKs (for + // example the Amazon Web Services SDK for Java) auto-generate the token for + // you. If you are not using the Amazon Web Services SDK or the Amazon Web Services + // CLI, you must provide this token or the action will fail. + ClientRequestToken *string `min:"1" type:"string"` -type UntagResourceInput struct { - _ struct{} `type:"structure"` + // The ID of the notebook to update. 
+ // + // NotebookId is a required field + NotebookId *string `min:"1" type:"string" required:"true"` - // Specifies the ARN of the resource from which tags are to be removed. + // The updated content for the notebook. // - // ResourceARN is a required field - ResourceARN *string `min:"1" type:"string" required:"true"` + // Payload is a required field + Payload *string `min:"1" type:"string" required:"true"` - // A comma-separated list of one or more tag keys whose tags are to be removed - // from the specified resource. + // The active notebook session ID. Required if the notebook has an active session. + SessionId *string `min:"1" type:"string"` + + // The notebook content type. Currently, the only valid type is IPYNB. // - // TagKeys is a required field - TagKeys []*string `type:"list" required:"true"` + // Type is a required field + Type *string `type:"string" required:"true" enum:"NotebookType"` } // String returns the string representation. @@ -8520,7 +17904,7 @@ type UntagResourceInput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s UntagResourceInput) String() string { +func (s UpdateNotebookInput) String() string { return awsutil.Prettify(s) } @@ -8529,21 +17913,33 @@ func (s UntagResourceInput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s UntagResourceInput) GoString() string { +func (s UpdateNotebookInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *UntagResourceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} - if s.ResourceARN == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceARN")) +func (s *UpdateNotebookInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateNotebookInput"} + if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1)) } - if s.ResourceARN != nil && len(*s.ResourceARN) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceARN", 1)) + if s.NotebookId == nil { + invalidParams.Add(request.NewErrParamRequired("NotebookId")) } - if s.TagKeys == nil { - invalidParams.Add(request.NewErrParamRequired("TagKeys")) + if s.NotebookId != nil && len(*s.NotebookId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NotebookId", 1)) + } + if s.Payload == nil { + invalidParams.Add(request.NewErrParamRequired("Payload")) + } + if s.Payload != nil && len(*s.Payload) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Payload", 1)) + } + if s.SessionId != nil && len(*s.SessionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SessionId", 1)) + } + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) } if invalidParams.Len() > 0 { @@ -8552,73 +17948,57 @@ func (s *UntagResourceInput) Validate() error { return nil } -// SetResourceARN sets the ResourceARN field's value. -func (s *UntagResourceInput) SetResourceARN(v string) *UntagResourceInput { - s.ResourceARN = &v +// SetClientRequestToken sets the ClientRequestToken field's value. 
+func (s *UpdateNotebookInput) SetClientRequestToken(v string) *UpdateNotebookInput { + s.ClientRequestToken = &v return s } -// SetTagKeys sets the TagKeys field's value. -func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { - s.TagKeys = v +// SetNotebookId sets the NotebookId field's value. +func (s *UpdateNotebookInput) SetNotebookId(v string) *UpdateNotebookInput { + s.NotebookId = &v return s } -type UntagResourceOutput struct { - _ struct{} `type:"structure"` +// SetPayload sets the Payload field's value. +func (s *UpdateNotebookInput) SetPayload(v string) *UpdateNotebookInput { + s.Payload = &v + return s } -// String returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UntagResourceOutput) String() string { - return awsutil.Prettify(s) +// SetSessionId sets the SessionId field's value. +func (s *UpdateNotebookInput) SetSessionId(v string) *UpdateNotebookInput { + s.SessionId = &v + return s } -// GoString returns the string representation. -// -// API parameter values that are decorated as "sensitive" in the API will not -// be included in the string output. The member name will be present, but the -// value will be replaced with "sensitive". -func (s UntagResourceOutput) GoString() string { - return s.String() +// SetType sets the Type field's value. +func (s *UpdateNotebookInput) SetType(v string) *UpdateNotebookInput { + s.Type = &v + return s } -type UpdateDataCatalogInput struct { +type UpdateNotebookMetadataInput struct { _ struct{} `type:"structure"` - // New or modified text that describes the data catalog. - Description *string `min:"1" type:"string"` + // A unique case-sensitive string used to ensure the request to create the notebook + // is idempotent (executes only once). + // + // This token is listed as not required because Amazon Web Services SDKs (for + // example the Amazon Web Services SDK for Java) auto-generate the token for + // you. If you are not using the Amazon Web Services SDK or the Amazon Web Services + // CLI, you must provide this token or the action will fail. + ClientRequestToken *string `min:"1" type:"string"` - // The name of the data catalog to update. The catalog name must be unique for - // the Amazon Web Services account and can use a maximum of 128 alphanumeric, - // underscore, at sign, or hyphen characters. + // The name to update the notebook to. // // Name is a required field Name *string `min:"1" type:"string" required:"true"` - // Specifies the Lambda function or functions to use for updating the data catalog. - // This is a mapping whose values depend on the catalog type. - // - // * For the HIVE data catalog type, use the following syntax. The metadata-function - // parameter is required. The sdk-version parameter is optional and defaults - // to the currently supported version. metadata-function=lambda_arn, sdk-version=version_number - // - // * For the LAMBDA data catalog type, use one of the following sets of required - // parameters, but not both. If you have one Lambda function that processes - // metadata and another for reading the actual data, use the following syntax. - // Both parameters are required. metadata-function=lambda_arn, record-function=lambda_arn - // If you have a composite Lambda function that processes both metadata and - // data, use the following syntax to specify your Lambda function. 
function=lambda_arn - Parameters map[string]*string `type:"map"` - - // Specifies the type of data catalog to update. Specify LAMBDA for a federated - // catalog, HIVE for an external hive metastore, or GLUE for an Glue Data Catalog. + // The ID of the notebook to update the metadata for. // - // Type is a required field - Type *string `type:"string" required:"true" enum:"DataCatalogType"` + // NotebookId is a required field + NotebookId *string `min:"1" type:"string" required:"true"` } // String returns the string representation. @@ -8626,7 +18006,7 @@ type UpdateDataCatalogInput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s UpdateDataCatalogInput) String() string { +func (s UpdateNotebookMetadataInput) String() string { return awsutil.Prettify(s) } @@ -8635,15 +18015,15 @@ func (s UpdateDataCatalogInput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s UpdateDataCatalogInput) GoString() string { +func (s UpdateNotebookMetadataInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateDataCatalogInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateDataCatalogInput"} - if s.Description != nil && len(*s.Description) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Description", 1)) +func (s *UpdateNotebookMetadataInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateNotebookMetadataInput"} + if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1)) } if s.Name == nil { invalidParams.Add(request.NewErrParamRequired("Name")) @@ -8651,8 +18031,11 @@ func (s *UpdateDataCatalogInput) Validate() error { if s.Name != nil && len(*s.Name) < 1 { invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } - if s.Type == nil { - invalidParams.Add(request.NewErrParamRequired("Type")) + if s.NotebookId == nil { + invalidParams.Add(request.NewErrParamRequired("NotebookId")) + } + if s.NotebookId != nil && len(*s.NotebookId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NotebookId", 1)) } if invalidParams.Len() > 0 { @@ -8661,31 +18044,47 @@ func (s *UpdateDataCatalogInput) Validate() error { return nil } -// SetDescription sets the Description field's value. -func (s *UpdateDataCatalogInput) SetDescription(v string) *UpdateDataCatalogInput { - s.Description = &v +// SetClientRequestToken sets the ClientRequestToken field's value. +func (s *UpdateNotebookMetadataInput) SetClientRequestToken(v string) *UpdateNotebookMetadataInput { + s.ClientRequestToken = &v return s } // SetName sets the Name field's value. -func (s *UpdateDataCatalogInput) SetName(v string) *UpdateDataCatalogInput { +func (s *UpdateNotebookMetadataInput) SetName(v string) *UpdateNotebookMetadataInput { s.Name = &v return s } -// SetParameters sets the Parameters field's value. -func (s *UpdateDataCatalogInput) SetParameters(v map[string]*string) *UpdateDataCatalogInput { - s.Parameters = v +// SetNotebookId sets the NotebookId field's value. 
+func (s *UpdateNotebookMetadataInput) SetNotebookId(v string) *UpdateNotebookMetadataInput { + s.NotebookId = &v return s } -// SetType sets the Type field's value. -func (s *UpdateDataCatalogInput) SetType(v string) *UpdateDataCatalogInput { - s.Type = &v - return s +type UpdateNotebookMetadataOutput struct { + _ struct{} `type:"structure"` } -type UpdateDataCatalogOutput struct { +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateNotebookMetadataOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateNotebookMetadataOutput) GoString() string { + return s.String() +} + +type UpdateNotebookOutput struct { _ struct{} `type:"structure"` } @@ -8694,7 +18093,7 @@ type UpdateDataCatalogOutput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s UpdateDataCatalogOutput) String() string { +func (s UpdateNotebookOutput) String() string { return awsutil.Prettify(s) } @@ -8703,7 +18102,7 @@ func (s UpdateDataCatalogOutput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s UpdateDataCatalogOutput) GoString() string { +func (s UpdateNotebookOutput) GoString() string { return s.String() } @@ -8824,7 +18223,7 @@ func (s UpdatePreparedStatementOutput) GoString() string { type UpdateWorkGroupInput struct { _ struct{} `type:"structure"` - // The workgroup configuration that will be updated for the given workgroup. + // Contains configuration updates for an Athena SQL workgroup. ConfigurationUpdates *WorkGroupConfigurationUpdates `type:"structure"` // The workgroup description. @@ -8934,12 +18333,13 @@ type WorkGroup struct { _ struct{} `type:"structure"` // The configuration of the workgroup, which includes the location in Amazon - // S3 where query results are stored, the encryption configuration, if any, - // used for query results; whether the Amazon CloudWatch Metrics are enabled - // for the workgroup; whether workgroup settings override client-side settings; - // and the data usage limits for the amount of data scanned per query or per - // workgroup. The workgroup settings override is specified in EnforceWorkGroupConfiguration - // (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration. + // S3 where query and calculation results are stored, the encryption configuration, + // if any, used for query and calculation results; whether the Amazon CloudWatch + // Metrics are enabled for the workgroup; whether workgroup settings override + // client-side settings; and the data usage limits for the amount of data scanned + // per query or per workgroup. The workgroup settings override is specified + // in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. + // See WorkGroupConfiguration$EnforceWorkGroupConfiguration. 
Configuration *WorkGroupConfiguration `type:"structure"` // The date and time the workgroup was created. @@ -8948,6 +18348,10 @@ type WorkGroup struct { // The workgroup description. Description *string `type:"string"` + // The ARN of the IAM Identity Center enabled application associated with the + // workgroup. + IdentityCenterApplicationArn *string `type:"string"` + // The workgroup name. // // Name is a required field @@ -8993,6 +18397,12 @@ func (s *WorkGroup) SetDescription(v string) *WorkGroup { return s } +// SetIdentityCenterApplicationArn sets the IdentityCenterApplicationArn field's value. +func (s *WorkGroup) SetIdentityCenterApplicationArn(v string) *WorkGroup { + s.IdentityCenterApplicationArn = &v + return s +} + // SetName sets the Name field's value. func (s *WorkGroup) SetName(v string) *WorkGroup { s.Name = &v @@ -9006,19 +18416,37 @@ func (s *WorkGroup) SetState(v string) *WorkGroup { } // The configuration of the workgroup, which includes the location in Amazon -// S3 where query results are stored, the encryption option, if any, used for -// query results, whether the Amazon CloudWatch Metrics are enabled for the -// workgroup and whether workgroup settings override query settings, and the -// data usage limits for the amount of data scanned per query or per workgroup. -// The workgroup settings override is specified in EnforceWorkGroupConfiguration -// (true/false) in the WorkGroupConfiguration. See WorkGroupConfiguration$EnforceWorkGroupConfiguration. +// S3 where query and calculation results are stored, the encryption option, +// if any, used for query and calculation results, whether the Amazon CloudWatch +// Metrics are enabled for the workgroup and whether workgroup settings override +// query settings, and the data usage limits for the amount of data scanned +// per query or per workgroup. The workgroup settings override is specified +// in EnforceWorkGroupConfiguration (true/false) in the WorkGroupConfiguration. +// See WorkGroupConfiguration$EnforceWorkGroupConfiguration. type WorkGroupConfiguration struct { _ struct{} `type:"structure"` + // Specifies a user defined JSON string that is passed to the notebook engine. + AdditionalConfiguration *string `min:"1" type:"string"` + // The upper data usage limit (cutoff) for the amount of bytes a single query // in a workgroup is allowed to scan. BytesScannedCutoffPerQuery *int64 `min:"1e+07" type:"long"` + // Specifies the KMS key that is used to encrypt the user's data stores in Athena. + // This setting does not apply to Athena SQL workgroups. + CustomerContentEncryptionConfiguration *CustomerContentEncryptionConfiguration `type:"structure"` + + // Enforces a minimal level of encryption for the workgroup for query and calculation + // results that are written to Amazon S3. When enabled, workgroup users can + // set encryption only to the minimum level set by the administrator or higher + // when they submit queries. + // + // The EnforceWorkGroupConfiguration setting takes precedence over the EnableMinimumEncryptionConfiguration + // flag. This means that if EnforceWorkGroupConfiguration is true, the EnableMinimumEncryptionConfiguration + // flag is ignored, and the workgroup configuration for encryption is used. + EnableMinimumEncryptionConfiguration *bool `type:"boolean"` + // If set to "true", the settings for the workgroup override client-side settings. // If set to "false", client-side settings are used. 
For more information, see // Workgroup Settings Override Client-Side Settings (https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html). @@ -9029,9 +18457,21 @@ type WorkGroupConfiguration struct { // regardless of this setting. EngineVersion *EngineVersion `type:"structure"` + // The ARN of the execution role used to access user resources for Spark sessions + // and IAM Identity Center enabled workgroups. This property applies only to + // Spark enabled workgroups and IAM Identity Center enabled workgroups. The + // property is required for IAM Identity Center enabled workgroups. + ExecutionRole *string `min:"20" type:"string"` + + // Specifies whether the workgroup is IAM Identity Center supported. + IdentityCenterConfiguration *IdentityCenterConfiguration `type:"structure"` + // Indicates that the Amazon CloudWatch metrics are enabled for the workgroup. PublishCloudWatchMetricsEnabled *bool `type:"boolean"` + // Specifies whether Amazon S3 access grants are enabled for query results. + QueryResultsS3AccessGrantsConfiguration *QueryResultsS3AccessGrantsConfiguration `type:"structure"` + // If set to true, allows members assigned to a workgroup to reference Amazon // S3 Requester Pays buckets in queries. If set to false, workgroup members // cannot query data from Requester Pays buckets, and queries that retrieve @@ -9042,12 +18482,13 @@ type WorkGroupConfiguration struct { RequesterPaysEnabled *bool `type:"boolean"` // The configuration for the workgroup, which includes the location in Amazon - // S3 where query results are stored and the encryption option, if any, used - // for query results. To run the query, you must specify the query results location - // using one of the ways: either in the workgroup using this setting, or for - // individual queries (client-side), using ResultConfiguration$OutputLocation. + // S3 where query and calculation results are stored and the encryption option, + // if any, used for query and calculation results. To run the query, you must + // specify the query results location using one of the ways: either in the workgroup + // using this setting, or for individual queries (client-side), using ResultConfiguration$OutputLocation. // If none of them is set, Athena issues an error that no output location is - // provided. For more information, see Query Results (https://docs.aws.amazon.com/athena/latest/ug/querying.html). + // provided. For more information, see Working with query results, recent queries, + // and output files (https://docs.aws.amazon.com/athena/latest/ug/querying.html). ResultConfiguration *ResultConfiguration `type:"structure"` } @@ -9072,14 +18513,30 @@ func (s WorkGroupConfiguration) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
func (s *WorkGroupConfiguration) Validate() error { invalidParams := request.ErrInvalidParams{Context: "WorkGroupConfiguration"} + if s.AdditionalConfiguration != nil && len(*s.AdditionalConfiguration) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AdditionalConfiguration", 1)) + } if s.BytesScannedCutoffPerQuery != nil && *s.BytesScannedCutoffPerQuery < 1e+07 { invalidParams.Add(request.NewErrParamMinValue("BytesScannedCutoffPerQuery", 1e+07)) } + if s.ExecutionRole != nil && len(*s.ExecutionRole) < 20 { + invalidParams.Add(request.NewErrParamMinLen("ExecutionRole", 20)) + } + if s.CustomerContentEncryptionConfiguration != nil { + if err := s.CustomerContentEncryptionConfiguration.Validate(); err != nil { + invalidParams.AddNested("CustomerContentEncryptionConfiguration", err.(request.ErrInvalidParams)) + } + } if s.EngineVersion != nil { if err := s.EngineVersion.Validate(); err != nil { invalidParams.AddNested("EngineVersion", err.(request.ErrInvalidParams)) } } + if s.QueryResultsS3AccessGrantsConfiguration != nil { + if err := s.QueryResultsS3AccessGrantsConfiguration.Validate(); err != nil { + invalidParams.AddNested("QueryResultsS3AccessGrantsConfiguration", err.(request.ErrInvalidParams)) + } + } if s.ResultConfiguration != nil { if err := s.ResultConfiguration.Validate(); err != nil { invalidParams.AddNested("ResultConfiguration", err.(request.ErrInvalidParams)) @@ -9092,12 +18549,30 @@ func (s *WorkGroupConfiguration) Validate() error { return nil } +// SetAdditionalConfiguration sets the AdditionalConfiguration field's value. +func (s *WorkGroupConfiguration) SetAdditionalConfiguration(v string) *WorkGroupConfiguration { + s.AdditionalConfiguration = &v + return s +} + // SetBytesScannedCutoffPerQuery sets the BytesScannedCutoffPerQuery field's value. func (s *WorkGroupConfiguration) SetBytesScannedCutoffPerQuery(v int64) *WorkGroupConfiguration { s.BytesScannedCutoffPerQuery = &v return s } +// SetCustomerContentEncryptionConfiguration sets the CustomerContentEncryptionConfiguration field's value. +func (s *WorkGroupConfiguration) SetCustomerContentEncryptionConfiguration(v *CustomerContentEncryptionConfiguration) *WorkGroupConfiguration { + s.CustomerContentEncryptionConfiguration = v + return s +} + +// SetEnableMinimumEncryptionConfiguration sets the EnableMinimumEncryptionConfiguration field's value. +func (s *WorkGroupConfiguration) SetEnableMinimumEncryptionConfiguration(v bool) *WorkGroupConfiguration { + s.EnableMinimumEncryptionConfiguration = &v + return s +} + // SetEnforceWorkGroupConfiguration sets the EnforceWorkGroupConfiguration field's value. func (s *WorkGroupConfiguration) SetEnforceWorkGroupConfiguration(v bool) *WorkGroupConfiguration { s.EnforceWorkGroupConfiguration = &v @@ -9110,12 +18585,30 @@ func (s *WorkGroupConfiguration) SetEngineVersion(v *EngineVersion) *WorkGroupCo return s } +// SetExecutionRole sets the ExecutionRole field's value. +func (s *WorkGroupConfiguration) SetExecutionRole(v string) *WorkGroupConfiguration { + s.ExecutionRole = &v + return s +} + +// SetIdentityCenterConfiguration sets the IdentityCenterConfiguration field's value. +func (s *WorkGroupConfiguration) SetIdentityCenterConfiguration(v *IdentityCenterConfiguration) *WorkGroupConfiguration { + s.IdentityCenterConfiguration = v + return s +} + // SetPublishCloudWatchMetricsEnabled sets the PublishCloudWatchMetricsEnabled field's value. 
func (s *WorkGroupConfiguration) SetPublishCloudWatchMetricsEnabled(v bool) *WorkGroupConfiguration { s.PublishCloudWatchMetricsEnabled = &v return s } +// SetQueryResultsS3AccessGrantsConfiguration sets the QueryResultsS3AccessGrantsConfiguration field's value. +func (s *WorkGroupConfiguration) SetQueryResultsS3AccessGrantsConfiguration(v *QueryResultsS3AccessGrantsConfiguration) *WorkGroupConfiguration { + s.QueryResultsS3AccessGrantsConfiguration = v + return s +} + // SetRequesterPaysEnabled sets the RequesterPaysEnabled field's value. func (s *WorkGroupConfiguration) SetRequesterPaysEnabled(v bool) *WorkGroupConfiguration { s.RequesterPaysEnabled = &v @@ -9129,18 +18622,36 @@ func (s *WorkGroupConfiguration) SetResultConfiguration(v *ResultConfiguration) } // The configuration information that will be updated for this workgroup, which -// includes the location in Amazon S3 where query results are stored, the encryption -// option, if any, used for query results, whether the Amazon CloudWatch Metrics -// are enabled for the workgroup, whether the workgroup settings override the -// client-side settings, and the data usage limit for the amount of bytes scanned -// per query, if it is specified. +// includes the location in Amazon S3 where query and calculation results are +// stored, the encryption option, if any, used for query results, whether the +// Amazon CloudWatch Metrics are enabled for the workgroup, whether the workgroup +// settings override the client-side settings, and the data usage limit for +// the amount of bytes scanned per query, if it is specified. type WorkGroupConfigurationUpdates struct { _ struct{} `type:"structure"` + // Contains a user defined string in JSON format for a Spark-enabled workgroup. + AdditionalConfiguration *string `min:"1" type:"string"` + // The upper limit (cutoff) for the amount of bytes a single query in a workgroup // is allowed to scan. BytesScannedCutoffPerQuery *int64 `min:"1e+07" type:"long"` + // Specifies the customer managed KMS key that is used to encrypt the user's + // data stores in Athena. When an Amazon Web Services managed key is used, this + // value is null. This setting does not apply to Athena SQL workgroups. + CustomerContentEncryptionConfiguration *CustomerContentEncryptionConfiguration `type:"structure"` + + // Enforces a minimal level of encryption for the workgroup for query and calculation + // results that are written to Amazon S3. When enabled, workgroup users can + // set encryption only to the minimum level set by the administrator or higher + // when they submit queries. This setting does not apply to Spark-enabled workgroups. + // + // The EnforceWorkGroupConfiguration setting takes precedence over the EnableMinimumEncryptionConfiguration + // flag. This means that if EnforceWorkGroupConfiguration is true, the EnableMinimumEncryptionConfiguration + // flag is ignored, and the workgroup configuration for encryption is used. + EnableMinimumEncryptionConfiguration *bool `type:"boolean"` + // If set to "true", the settings for the workgroup override client-side settings. // If set to "false" client-side settings are used. For more information, see // Workgroup Settings Override Client-Side Settings (https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html). @@ -9152,12 +18663,24 @@ type WorkGroupConfigurationUpdates struct { // workgroup run on the preview engine regardless of this setting. 
EngineVersion *EngineVersion `type:"structure"` + // The ARN of the execution role used to access user resources for Spark sessions + // and Identity Center enabled workgroups. This property applies only to Spark + // enabled workgroups and Identity Center enabled workgroups. + ExecutionRole *string `min:"20" type:"string"` + // Indicates whether this workgroup enables publishing metrics to Amazon CloudWatch. PublishCloudWatchMetricsEnabled *bool `type:"boolean"` + // Specifies whether Amazon S3 access grants are enabled for query results. + QueryResultsS3AccessGrantsConfiguration *QueryResultsS3AccessGrantsConfiguration `type:"structure"` + // Indicates that the data usage control limit per query is removed. WorkGroupConfiguration$BytesScannedCutoffPerQuery RemoveBytesScannedCutoffPerQuery *bool `type:"boolean"` + // Removes content encryption configuration from an Apache Spark-enabled Athena + // workgroup. + RemoveCustomerContentEncryptionConfiguration *bool `type:"boolean"` + // If set to true, allows members assigned to a workgroup to specify Amazon // S3 Requester Pays buckets in queries. If set to false, workgroup members // cannot query data from Requester Pays buckets, and queries that retrieve @@ -9194,14 +18717,30 @@ func (s WorkGroupConfigurationUpdates) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *WorkGroupConfigurationUpdates) Validate() error { invalidParams := request.ErrInvalidParams{Context: "WorkGroupConfigurationUpdates"} + if s.AdditionalConfiguration != nil && len(*s.AdditionalConfiguration) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AdditionalConfiguration", 1)) + } if s.BytesScannedCutoffPerQuery != nil && *s.BytesScannedCutoffPerQuery < 1e+07 { invalidParams.Add(request.NewErrParamMinValue("BytesScannedCutoffPerQuery", 1e+07)) } + if s.ExecutionRole != nil && len(*s.ExecutionRole) < 20 { + invalidParams.Add(request.NewErrParamMinLen("ExecutionRole", 20)) + } + if s.CustomerContentEncryptionConfiguration != nil { + if err := s.CustomerContentEncryptionConfiguration.Validate(); err != nil { + invalidParams.AddNested("CustomerContentEncryptionConfiguration", err.(request.ErrInvalidParams)) + } + } if s.EngineVersion != nil { if err := s.EngineVersion.Validate(); err != nil { invalidParams.AddNested("EngineVersion", err.(request.ErrInvalidParams)) } } + if s.QueryResultsS3AccessGrantsConfiguration != nil { + if err := s.QueryResultsS3AccessGrantsConfiguration.Validate(); err != nil { + invalidParams.AddNested("QueryResultsS3AccessGrantsConfiguration", err.(request.ErrInvalidParams)) + } + } if s.ResultConfigurationUpdates != nil { if err := s.ResultConfigurationUpdates.Validate(); err != nil { invalidParams.AddNested("ResultConfigurationUpdates", err.(request.ErrInvalidParams)) @@ -9214,12 +18753,30 @@ func (s *WorkGroupConfigurationUpdates) Validate() error { return nil } +// SetAdditionalConfiguration sets the AdditionalConfiguration field's value. +func (s *WorkGroupConfigurationUpdates) SetAdditionalConfiguration(v string) *WorkGroupConfigurationUpdates { + s.AdditionalConfiguration = &v + return s +} + // SetBytesScannedCutoffPerQuery sets the BytesScannedCutoffPerQuery field's value. func (s *WorkGroupConfigurationUpdates) SetBytesScannedCutoffPerQuery(v int64) *WorkGroupConfigurationUpdates { s.BytesScannedCutoffPerQuery = &v return s } +// SetCustomerContentEncryptionConfiguration sets the CustomerContentEncryptionConfiguration field's value. 
+func (s *WorkGroupConfigurationUpdates) SetCustomerContentEncryptionConfiguration(v *CustomerContentEncryptionConfiguration) *WorkGroupConfigurationUpdates { + s.CustomerContentEncryptionConfiguration = v + return s +} + +// SetEnableMinimumEncryptionConfiguration sets the EnableMinimumEncryptionConfiguration field's value. +func (s *WorkGroupConfigurationUpdates) SetEnableMinimumEncryptionConfiguration(v bool) *WorkGroupConfigurationUpdates { + s.EnableMinimumEncryptionConfiguration = &v + return s +} + // SetEnforceWorkGroupConfiguration sets the EnforceWorkGroupConfiguration field's value. func (s *WorkGroupConfigurationUpdates) SetEnforceWorkGroupConfiguration(v bool) *WorkGroupConfigurationUpdates { s.EnforceWorkGroupConfiguration = &v @@ -9232,18 +18789,36 @@ func (s *WorkGroupConfigurationUpdates) SetEngineVersion(v *EngineVersion) *Work return s } +// SetExecutionRole sets the ExecutionRole field's value. +func (s *WorkGroupConfigurationUpdates) SetExecutionRole(v string) *WorkGroupConfigurationUpdates { + s.ExecutionRole = &v + return s +} + // SetPublishCloudWatchMetricsEnabled sets the PublishCloudWatchMetricsEnabled field's value. func (s *WorkGroupConfigurationUpdates) SetPublishCloudWatchMetricsEnabled(v bool) *WorkGroupConfigurationUpdates { s.PublishCloudWatchMetricsEnabled = &v return s } +// SetQueryResultsS3AccessGrantsConfiguration sets the QueryResultsS3AccessGrantsConfiguration field's value. +func (s *WorkGroupConfigurationUpdates) SetQueryResultsS3AccessGrantsConfiguration(v *QueryResultsS3AccessGrantsConfiguration) *WorkGroupConfigurationUpdates { + s.QueryResultsS3AccessGrantsConfiguration = v + return s +} + // SetRemoveBytesScannedCutoffPerQuery sets the RemoveBytesScannedCutoffPerQuery field's value. func (s *WorkGroupConfigurationUpdates) SetRemoveBytesScannedCutoffPerQuery(v bool) *WorkGroupConfigurationUpdates { s.RemoveBytesScannedCutoffPerQuery = &v return s } +// SetRemoveCustomerContentEncryptionConfiguration sets the RemoveCustomerContentEncryptionConfiguration field's value. +func (s *WorkGroupConfigurationUpdates) SetRemoveCustomerContentEncryptionConfiguration(v bool) *WorkGroupConfigurationUpdates { + s.RemoveCustomerContentEncryptionConfiguration = &v + return s +} + // SetRequesterPaysEnabled sets the RequesterPaysEnabled field's value. func (s *WorkGroupConfigurationUpdates) SetRequesterPaysEnabled(v bool) *WorkGroupConfigurationUpdates { s.RequesterPaysEnabled = &v @@ -9272,6 +18847,10 @@ type WorkGroupSummary struct { // of this setting. EngineVersion *EngineVersion `type:"structure"` + // The ARN of the IAM Identity Center enabled application associated with the + // workgroup. + IdentityCenterApplicationArn *string `type:"string"` + // The name of the workgroup. Name *string `type:"string"` @@ -9315,6 +18894,12 @@ func (s *WorkGroupSummary) SetEngineVersion(v *EngineVersion) *WorkGroupSummary return s } +// SetIdentityCenterApplicationArn sets the IdentityCenterApplicationArn field's value. +func (s *WorkGroupSummary) SetIdentityCenterApplicationArn(v string) *WorkGroupSummary { + s.IdentityCenterApplicationArn = &v + return s +} + // SetName sets the Name field's value. 
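//
// A short sketch of the Remove* flag pattern defined above (illustrative, not
// generated SDK code). Updates are sparse, so clearing a previously configured
// value requires an explicit removal flag rather than simply omitting the field:
//
//	updates := &athena.WorkGroupConfigurationUpdates{}
//	updates.SetRemoveBytesScannedCutoffPerQuery(true)             // drop the per-query data-usage limit
//	updates.SetRemoveCustomerContentEncryptionConfiguration(true) // drop Spark content encryption
//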
func (s *WorkGroupSummary) SetName(v string) *WorkGroupSummary { s.Name = &v @@ -9327,6 +18912,110 @@ func (s *WorkGroupSummary) SetState(v string) *WorkGroupSummary { return s } +const ( + // AuthenticationTypeDirectoryIdentity is a AuthenticationType enum value + AuthenticationTypeDirectoryIdentity = "DIRECTORY_IDENTITY" +) + +// AuthenticationType_Values returns all elements of the AuthenticationType enum +func AuthenticationType_Values() []string { + return []string{ + AuthenticationTypeDirectoryIdentity, + } +} + +const ( + // CalculationExecutionStateCreating is a CalculationExecutionState enum value + CalculationExecutionStateCreating = "CREATING" + + // CalculationExecutionStateCreated is a CalculationExecutionState enum value + CalculationExecutionStateCreated = "CREATED" + + // CalculationExecutionStateQueued is a CalculationExecutionState enum value + CalculationExecutionStateQueued = "QUEUED" + + // CalculationExecutionStateRunning is a CalculationExecutionState enum value + CalculationExecutionStateRunning = "RUNNING" + + // CalculationExecutionStateCanceling is a CalculationExecutionState enum value + CalculationExecutionStateCanceling = "CANCELING" + + // CalculationExecutionStateCanceled is a CalculationExecutionState enum value + CalculationExecutionStateCanceled = "CANCELED" + + // CalculationExecutionStateCompleted is a CalculationExecutionState enum value + CalculationExecutionStateCompleted = "COMPLETED" + + // CalculationExecutionStateFailed is a CalculationExecutionState enum value + CalculationExecutionStateFailed = "FAILED" +) + +// CalculationExecutionState_Values returns all elements of the CalculationExecutionState enum +func CalculationExecutionState_Values() []string { + return []string{ + CalculationExecutionStateCreating, + CalculationExecutionStateCreated, + CalculationExecutionStateQueued, + CalculationExecutionStateRunning, + CalculationExecutionStateCanceling, + CalculationExecutionStateCanceled, + CalculationExecutionStateCompleted, + CalculationExecutionStateFailed, + } +} + +const ( + // CapacityAllocationStatusPending is a CapacityAllocationStatus enum value + CapacityAllocationStatusPending = "PENDING" + + // CapacityAllocationStatusSucceeded is a CapacityAllocationStatus enum value + CapacityAllocationStatusSucceeded = "SUCCEEDED" + + // CapacityAllocationStatusFailed is a CapacityAllocationStatus enum value + CapacityAllocationStatusFailed = "FAILED" +) + +// CapacityAllocationStatus_Values returns all elements of the CapacityAllocationStatus enum +func CapacityAllocationStatus_Values() []string { + return []string{ + CapacityAllocationStatusPending, + CapacityAllocationStatusSucceeded, + CapacityAllocationStatusFailed, + } +} + +const ( + // CapacityReservationStatusPending is a CapacityReservationStatus enum value + CapacityReservationStatusPending = "PENDING" + + // CapacityReservationStatusActive is a CapacityReservationStatus enum value + CapacityReservationStatusActive = "ACTIVE" + + // CapacityReservationStatusCancelling is a CapacityReservationStatus enum value + CapacityReservationStatusCancelling = "CANCELLING" + + // CapacityReservationStatusCancelled is a CapacityReservationStatus enum value + CapacityReservationStatusCancelled = "CANCELLED" + + // CapacityReservationStatusFailed is a CapacityReservationStatus enum value + CapacityReservationStatusFailed = "FAILED" + + // CapacityReservationStatusUpdatePending is a CapacityReservationStatus enum value + CapacityReservationStatusUpdatePending = "UPDATE_PENDING" +) + +// 
CapacityReservationStatus_Values returns all elements of the CapacityReservationStatus enum +func CapacityReservationStatus_Values() []string { + return []string{ + CapacityReservationStatusPending, + CapacityReservationStatusActive, + CapacityReservationStatusCancelling, + CapacityReservationStatusCancelled, + CapacityReservationStatusFailed, + CapacityReservationStatusUpdatePending, + } +} + const ( // ColumnNullableNotNull is a ColumnNullable enum value ColumnNullableNotNull = "NOT_NULL" @@ -9387,6 +19076,70 @@ func EncryptionOption_Values() []string { } } +const ( + // ExecutorStateCreating is a ExecutorState enum value + ExecutorStateCreating = "CREATING" + + // ExecutorStateCreated is a ExecutorState enum value + ExecutorStateCreated = "CREATED" + + // ExecutorStateRegistered is a ExecutorState enum value + ExecutorStateRegistered = "REGISTERED" + + // ExecutorStateTerminating is a ExecutorState enum value + ExecutorStateTerminating = "TERMINATING" + + // ExecutorStateTerminated is a ExecutorState enum value + ExecutorStateTerminated = "TERMINATED" + + // ExecutorStateFailed is a ExecutorState enum value + ExecutorStateFailed = "FAILED" +) + +// ExecutorState_Values returns all elements of the ExecutorState enum +func ExecutorState_Values() []string { + return []string{ + ExecutorStateCreating, + ExecutorStateCreated, + ExecutorStateRegistered, + ExecutorStateTerminating, + ExecutorStateTerminated, + ExecutorStateFailed, + } +} + +const ( + // ExecutorTypeCoordinator is a ExecutorType enum value + ExecutorTypeCoordinator = "COORDINATOR" + + // ExecutorTypeGateway is a ExecutorType enum value + ExecutorTypeGateway = "GATEWAY" + + // ExecutorTypeWorker is a ExecutorType enum value + ExecutorTypeWorker = "WORKER" +) + +// ExecutorType_Values returns all elements of the ExecutorType enum +func ExecutorType_Values() []string { + return []string{ + ExecutorTypeCoordinator, + ExecutorTypeGateway, + ExecutorTypeWorker, + } +} + +const ( + // NotebookTypeIpynb is a NotebookType enum value + NotebookTypeIpynb = "IPYNB" +) + +// NotebookType_Values returns all elements of the NotebookType enum +func NotebookType_Values() []string { + return []string{ + NotebookTypeIpynb, + } +} + const ( // QueryExecutionStateQueued is a QueryExecutionState enum value QueryExecutionStateQueued = "QUEUED" @@ -9415,6 +19168,58 @@ func QueryExecutionState_Values() []string { } } +const ( + // S3AclOptionBucketOwnerFullControl is a S3AclOption enum value + S3AclOptionBucketOwnerFullControl = "BUCKET_OWNER_FULL_CONTROL" +) + +// S3AclOption_Values returns all elements of the S3AclOption enum +func S3AclOption_Values() []string { + return []string{ + S3AclOptionBucketOwnerFullControl, + } +} + +const ( + // SessionStateCreating is a SessionState enum value + SessionStateCreating = "CREATING" + + // SessionStateCreated is a SessionState enum value + SessionStateCreated = "CREATED" + + // SessionStateIdle is a SessionState enum value + SessionStateIdle = "IDLE" + + // SessionStateBusy is a SessionState enum value + SessionStateBusy = "BUSY" + + // SessionStateTerminating is a SessionState enum value + SessionStateTerminating = "TERMINATING" + + // SessionStateTerminated is a SessionState enum value + SessionStateTerminated = "TERMINATED" + + // SessionStateDegraded is a SessionState enum value + SessionStateDegraded = "DEGRADED" + + // SessionStateFailed is a SessionState enum value + SessionStateFailed = "FAILED" +) + +// SessionState_Values returns all elements of the SessionState enum +func SessionState_Values() 
[]string { + return []string{ + SessionStateCreating, + SessionStateCreated, + SessionStateIdle, + SessionStateBusy, + SessionStateTerminating, + SessionStateTerminated, + SessionStateDegraded, + SessionStateFailed, + } +} + const ( // StatementTypeDdl is a StatementType enum value StatementTypeDdl = "DDL" diff --git a/vendor/github.com/aws/aws-sdk-go/service/athena/athenaiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/athena/athenaiface/interface.go index 5c973467..69eae3c0 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/athena/athenaiface/interface.go +++ b/vendor/github.com/aws/aws-sdk-go/service/athena/athenaiface/interface.go @@ -23,37 +23,37 @@ import ( // can be stubbed out for unit testing your code with the SDK without needing // to inject custom request handlers into the SDK's request pipeline. // -// // myFunc uses an SDK service client to make a request to -// // Amazon Athena. -// func myFunc(svc athenaiface.AthenaAPI) bool { -// // Make svc.BatchGetNamedQuery request -// } +// // myFunc uses an SDK service client to make a request to +// // Amazon Athena. +// func myFunc(svc athenaiface.AthenaAPI) bool { +// // Make svc.BatchGetNamedQuery request +// } // -// func main() { -// sess := session.New() -// svc := athena.New(sess) +// func main() { +// sess := session.New() +// svc := athena.New(sess) // -// myFunc(svc) -// } +// myFunc(svc) +// } // // In your _test.go file: // -// // Define a mock struct to be used in your unit tests of myFunc. -// type mockAthenaClient struct { -// athenaiface.AthenaAPI -// } -// func (m *mockAthenaClient) BatchGetNamedQuery(input *athena.BatchGetNamedQueryInput) (*athena.BatchGetNamedQueryOutput, error) { -// // mock response/functionality -// } +// // Define a mock struct to be used in your unit tests of myFunc. 
+// type mockAthenaClient struct { +// athenaiface.AthenaAPI +// } +// func (m *mockAthenaClient) BatchGetNamedQuery(input *athena.BatchGetNamedQueryInput) (*athena.BatchGetNamedQueryOutput, error) { +// // mock response/functionality +// } // -// func TestMyFunc(t *testing.T) { -// // Setup Test -// mockSvc := &mockAthenaClient{} +// func TestMyFunc(t *testing.T) { +// // Setup Test +// mockSvc := &mockAthenaClient{} // -// myfunc(mockSvc) +// myfunc(mockSvc) // -// // Verify myFunc's functionality -// } +// // Verify myFunc's functionality +// } // // It is important to note that this interface will have breaking changes // when the service model is updated and adds new API operations, paginators, @@ -64,10 +64,22 @@ type AthenaAPI interface { BatchGetNamedQueryWithContext(aws.Context, *athena.BatchGetNamedQueryInput, ...request.Option) (*athena.BatchGetNamedQueryOutput, error) BatchGetNamedQueryRequest(*athena.BatchGetNamedQueryInput) (*request.Request, *athena.BatchGetNamedQueryOutput) + BatchGetPreparedStatement(*athena.BatchGetPreparedStatementInput) (*athena.BatchGetPreparedStatementOutput, error) + BatchGetPreparedStatementWithContext(aws.Context, *athena.BatchGetPreparedStatementInput, ...request.Option) (*athena.BatchGetPreparedStatementOutput, error) + BatchGetPreparedStatementRequest(*athena.BatchGetPreparedStatementInput) (*request.Request, *athena.BatchGetPreparedStatementOutput) + BatchGetQueryExecution(*athena.BatchGetQueryExecutionInput) (*athena.BatchGetQueryExecutionOutput, error) BatchGetQueryExecutionWithContext(aws.Context, *athena.BatchGetQueryExecutionInput, ...request.Option) (*athena.BatchGetQueryExecutionOutput, error) BatchGetQueryExecutionRequest(*athena.BatchGetQueryExecutionInput) (*request.Request, *athena.BatchGetQueryExecutionOutput) + CancelCapacityReservation(*athena.CancelCapacityReservationInput) (*athena.CancelCapacityReservationOutput, error) + CancelCapacityReservationWithContext(aws.Context, *athena.CancelCapacityReservationInput, ...request.Option) (*athena.CancelCapacityReservationOutput, error) + CancelCapacityReservationRequest(*athena.CancelCapacityReservationInput) (*request.Request, *athena.CancelCapacityReservationOutput) + + CreateCapacityReservation(*athena.CreateCapacityReservationInput) (*athena.CreateCapacityReservationOutput, error) + CreateCapacityReservationWithContext(aws.Context, *athena.CreateCapacityReservationInput, ...request.Option) (*athena.CreateCapacityReservationOutput, error) + CreateCapacityReservationRequest(*athena.CreateCapacityReservationInput) (*request.Request, *athena.CreateCapacityReservationOutput) + CreateDataCatalog(*athena.CreateDataCatalogInput) (*athena.CreateDataCatalogOutput, error) CreateDataCatalogWithContext(aws.Context, *athena.CreateDataCatalogInput, ...request.Option) (*athena.CreateDataCatalogOutput, error) CreateDataCatalogRequest(*athena.CreateDataCatalogInput) (*request.Request, *athena.CreateDataCatalogOutput) @@ -76,14 +88,26 @@ type AthenaAPI interface { CreateNamedQueryWithContext(aws.Context, *athena.CreateNamedQueryInput, ...request.Option) (*athena.CreateNamedQueryOutput, error) CreateNamedQueryRequest(*athena.CreateNamedQueryInput) (*request.Request, *athena.CreateNamedQueryOutput) + CreateNotebook(*athena.CreateNotebookInput) (*athena.CreateNotebookOutput, error) + CreateNotebookWithContext(aws.Context, *athena.CreateNotebookInput, ...request.Option) (*athena.CreateNotebookOutput, error) + CreateNotebookRequest(*athena.CreateNotebookInput) (*request.Request, 
*athena.CreateNotebookOutput) + CreatePreparedStatement(*athena.CreatePreparedStatementInput) (*athena.CreatePreparedStatementOutput, error) CreatePreparedStatementWithContext(aws.Context, *athena.CreatePreparedStatementInput, ...request.Option) (*athena.CreatePreparedStatementOutput, error) CreatePreparedStatementRequest(*athena.CreatePreparedStatementInput) (*request.Request, *athena.CreatePreparedStatementOutput) + CreatePresignedNotebookUrl(*athena.CreatePresignedNotebookUrlInput) (*athena.CreatePresignedNotebookUrlOutput, error) + CreatePresignedNotebookUrlWithContext(aws.Context, *athena.CreatePresignedNotebookUrlInput, ...request.Option) (*athena.CreatePresignedNotebookUrlOutput, error) + CreatePresignedNotebookUrlRequest(*athena.CreatePresignedNotebookUrlInput) (*request.Request, *athena.CreatePresignedNotebookUrlOutput) + CreateWorkGroup(*athena.CreateWorkGroupInput) (*athena.CreateWorkGroupOutput, error) CreateWorkGroupWithContext(aws.Context, *athena.CreateWorkGroupInput, ...request.Option) (*athena.CreateWorkGroupOutput, error) CreateWorkGroupRequest(*athena.CreateWorkGroupInput) (*request.Request, *athena.CreateWorkGroupOutput) + DeleteCapacityReservation(*athena.DeleteCapacityReservationInput) (*athena.DeleteCapacityReservationOutput, error) + DeleteCapacityReservationWithContext(aws.Context, *athena.DeleteCapacityReservationInput, ...request.Option) (*athena.DeleteCapacityReservationOutput, error) + DeleteCapacityReservationRequest(*athena.DeleteCapacityReservationInput) (*request.Request, *athena.DeleteCapacityReservationOutput) + DeleteDataCatalog(*athena.DeleteDataCatalogInput) (*athena.DeleteDataCatalogOutput, error) DeleteDataCatalogWithContext(aws.Context, *athena.DeleteDataCatalogInput, ...request.Option) (*athena.DeleteDataCatalogOutput, error) DeleteDataCatalogRequest(*athena.DeleteDataCatalogInput) (*request.Request, *athena.DeleteDataCatalogOutput) @@ -92,6 +116,10 @@ type AthenaAPI interface { DeleteNamedQueryWithContext(aws.Context, *athena.DeleteNamedQueryInput, ...request.Option) (*athena.DeleteNamedQueryOutput, error) DeleteNamedQueryRequest(*athena.DeleteNamedQueryInput) (*request.Request, *athena.DeleteNamedQueryOutput) + DeleteNotebook(*athena.DeleteNotebookInput) (*athena.DeleteNotebookOutput, error) + DeleteNotebookWithContext(aws.Context, *athena.DeleteNotebookInput, ...request.Option) (*athena.DeleteNotebookOutput, error) + DeleteNotebookRequest(*athena.DeleteNotebookInput) (*request.Request, *athena.DeleteNotebookOutput) + DeletePreparedStatement(*athena.DeletePreparedStatementInput) (*athena.DeletePreparedStatementOutput, error) DeletePreparedStatementWithContext(aws.Context, *athena.DeletePreparedStatementInput, ...request.Option) (*athena.DeletePreparedStatementOutput, error) DeletePreparedStatementRequest(*athena.DeletePreparedStatementInput) (*request.Request, *athena.DeletePreparedStatementOutput) @@ -100,6 +128,30 @@ type AthenaAPI interface { DeleteWorkGroupWithContext(aws.Context, *athena.DeleteWorkGroupInput, ...request.Option) (*athena.DeleteWorkGroupOutput, error) DeleteWorkGroupRequest(*athena.DeleteWorkGroupInput) (*request.Request, *athena.DeleteWorkGroupOutput) + ExportNotebook(*athena.ExportNotebookInput) (*athena.ExportNotebookOutput, error) + ExportNotebookWithContext(aws.Context, *athena.ExportNotebookInput, ...request.Option) (*athena.ExportNotebookOutput, error) + ExportNotebookRequest(*athena.ExportNotebookInput) (*request.Request, *athena.ExportNotebookOutput) + + GetCalculationExecution(*athena.GetCalculationExecutionInput) 
(*athena.GetCalculationExecutionOutput, error) + GetCalculationExecutionWithContext(aws.Context, *athena.GetCalculationExecutionInput, ...request.Option) (*athena.GetCalculationExecutionOutput, error) + GetCalculationExecutionRequest(*athena.GetCalculationExecutionInput) (*request.Request, *athena.GetCalculationExecutionOutput) + + GetCalculationExecutionCode(*athena.GetCalculationExecutionCodeInput) (*athena.GetCalculationExecutionCodeOutput, error) + GetCalculationExecutionCodeWithContext(aws.Context, *athena.GetCalculationExecutionCodeInput, ...request.Option) (*athena.GetCalculationExecutionCodeOutput, error) + GetCalculationExecutionCodeRequest(*athena.GetCalculationExecutionCodeInput) (*request.Request, *athena.GetCalculationExecutionCodeOutput) + + GetCalculationExecutionStatus(*athena.GetCalculationExecutionStatusInput) (*athena.GetCalculationExecutionStatusOutput, error) + GetCalculationExecutionStatusWithContext(aws.Context, *athena.GetCalculationExecutionStatusInput, ...request.Option) (*athena.GetCalculationExecutionStatusOutput, error) + GetCalculationExecutionStatusRequest(*athena.GetCalculationExecutionStatusInput) (*request.Request, *athena.GetCalculationExecutionStatusOutput) + + GetCapacityAssignmentConfiguration(*athena.GetCapacityAssignmentConfigurationInput) (*athena.GetCapacityAssignmentConfigurationOutput, error) + GetCapacityAssignmentConfigurationWithContext(aws.Context, *athena.GetCapacityAssignmentConfigurationInput, ...request.Option) (*athena.GetCapacityAssignmentConfigurationOutput, error) + GetCapacityAssignmentConfigurationRequest(*athena.GetCapacityAssignmentConfigurationInput) (*request.Request, *athena.GetCapacityAssignmentConfigurationOutput) + + GetCapacityReservation(*athena.GetCapacityReservationInput) (*athena.GetCapacityReservationOutput, error) + GetCapacityReservationWithContext(aws.Context, *athena.GetCapacityReservationInput, ...request.Option) (*athena.GetCapacityReservationOutput, error) + GetCapacityReservationRequest(*athena.GetCapacityReservationInput) (*request.Request, *athena.GetCapacityReservationOutput) + GetDataCatalog(*athena.GetDataCatalogInput) (*athena.GetDataCatalogOutput, error) GetDataCatalogWithContext(aws.Context, *athena.GetDataCatalogInput, ...request.Option) (*athena.GetDataCatalogOutput, error) GetDataCatalogRequest(*athena.GetDataCatalogInput) (*request.Request, *athena.GetDataCatalogOutput) @@ -112,6 +164,10 @@ type AthenaAPI interface { GetNamedQueryWithContext(aws.Context, *athena.GetNamedQueryInput, ...request.Option) (*athena.GetNamedQueryOutput, error) GetNamedQueryRequest(*athena.GetNamedQueryInput) (*request.Request, *athena.GetNamedQueryOutput) + GetNotebookMetadata(*athena.GetNotebookMetadataInput) (*athena.GetNotebookMetadataOutput, error) + GetNotebookMetadataWithContext(aws.Context, *athena.GetNotebookMetadataInput, ...request.Option) (*athena.GetNotebookMetadataOutput, error) + GetNotebookMetadataRequest(*athena.GetNotebookMetadataInput) (*request.Request, *athena.GetNotebookMetadataOutput) + GetPreparedStatement(*athena.GetPreparedStatementInput) (*athena.GetPreparedStatementOutput, error) GetPreparedStatementWithContext(aws.Context, *athena.GetPreparedStatementInput, ...request.Option) (*athena.GetPreparedStatementOutput, error) GetPreparedStatementRequest(*athena.GetPreparedStatementInput) (*request.Request, *athena.GetPreparedStatementOutput) @@ -127,6 +183,18 @@ type AthenaAPI interface { GetQueryResultsPages(*athena.GetQueryResultsInput, func(*athena.GetQueryResultsOutput, bool) bool) error 
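	// A sketch of the polling loop these methods are typically combined into
	// (illustrative, not generated SDK code; the query text and workgroup name
	// are assumptions): start a query, wait for a terminal QueryExecutionState,
	// then page through the results.
	//
	//	out, err := svc.StartQueryExecution(&athena.StartQueryExecutionInput{
	//		QueryString: aws.String("SELECT 1"),
	//		WorkGroup:   aws.String("primary"),
	//	})
	//	if err != nil {
	//		log.Fatal(err)
	//	}
	//	for {
	//		qe, err := svc.GetQueryExecution(&athena.GetQueryExecutionInput{
	//			QueryExecutionId: out.QueryExecutionId,
	//		})
	//		if err != nil {
	//			log.Fatal(err)
	//		}
	//		state := aws.StringValue(qe.QueryExecution.Status.State)
	//		if state != athena.QueryExecutionStateQueued && state != athena.QueryExecutionStateRunning {
	//			break
	//		}
	//		time.Sleep(time.Second)
	//	}
	//	err = svc.GetQueryResultsPages(&athena.GetQueryResultsInput{
	//		QueryExecutionId: out.QueryExecutionId,
	//	}, func(page *athena.GetQueryResultsOutput, lastPage bool) bool {
	//		fmt.Println(len(page.ResultSet.Rows))
	//		return true // keep paging
	//	})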
GetQueryResultsPagesWithContext(aws.Context, *athena.GetQueryResultsInput, func(*athena.GetQueryResultsOutput, bool) bool, ...request.Option) error + GetQueryRuntimeStatistics(*athena.GetQueryRuntimeStatisticsInput) (*athena.GetQueryRuntimeStatisticsOutput, error) + GetQueryRuntimeStatisticsWithContext(aws.Context, *athena.GetQueryRuntimeStatisticsInput, ...request.Option) (*athena.GetQueryRuntimeStatisticsOutput, error) + GetQueryRuntimeStatisticsRequest(*athena.GetQueryRuntimeStatisticsInput) (*request.Request, *athena.GetQueryRuntimeStatisticsOutput) + + GetSession(*athena.GetSessionInput) (*athena.GetSessionOutput, error) + GetSessionWithContext(aws.Context, *athena.GetSessionInput, ...request.Option) (*athena.GetSessionOutput, error) + GetSessionRequest(*athena.GetSessionInput) (*request.Request, *athena.GetSessionOutput) + + GetSessionStatus(*athena.GetSessionStatusInput) (*athena.GetSessionStatusOutput, error) + GetSessionStatusWithContext(aws.Context, *athena.GetSessionStatusInput, ...request.Option) (*athena.GetSessionStatusOutput, error) + GetSessionStatusRequest(*athena.GetSessionStatusInput) (*request.Request, *athena.GetSessionStatusOutput) + GetTableMetadata(*athena.GetTableMetadataInput) (*athena.GetTableMetadataOutput, error) GetTableMetadataWithContext(aws.Context, *athena.GetTableMetadataInput, ...request.Option) (*athena.GetTableMetadataOutput, error) GetTableMetadataRequest(*athena.GetTableMetadataInput) (*request.Request, *athena.GetTableMetadataOutput) @@ -135,6 +203,31 @@ type AthenaAPI interface { GetWorkGroupWithContext(aws.Context, *athena.GetWorkGroupInput, ...request.Option) (*athena.GetWorkGroupOutput, error) GetWorkGroupRequest(*athena.GetWorkGroupInput) (*request.Request, *athena.GetWorkGroupOutput) + ImportNotebook(*athena.ImportNotebookInput) (*athena.ImportNotebookOutput, error) + ImportNotebookWithContext(aws.Context, *athena.ImportNotebookInput, ...request.Option) (*athena.ImportNotebookOutput, error) + ImportNotebookRequest(*athena.ImportNotebookInput) (*request.Request, *athena.ImportNotebookOutput) + + ListApplicationDPUSizes(*athena.ListApplicationDPUSizesInput) (*athena.ListApplicationDPUSizesOutput, error) + ListApplicationDPUSizesWithContext(aws.Context, *athena.ListApplicationDPUSizesInput, ...request.Option) (*athena.ListApplicationDPUSizesOutput, error) + ListApplicationDPUSizesRequest(*athena.ListApplicationDPUSizesInput) (*request.Request, *athena.ListApplicationDPUSizesOutput) + + ListApplicationDPUSizesPages(*athena.ListApplicationDPUSizesInput, func(*athena.ListApplicationDPUSizesOutput, bool) bool) error + ListApplicationDPUSizesPagesWithContext(aws.Context, *athena.ListApplicationDPUSizesInput, func(*athena.ListApplicationDPUSizesOutput, bool) bool, ...request.Option) error + + ListCalculationExecutions(*athena.ListCalculationExecutionsInput) (*athena.ListCalculationExecutionsOutput, error) + ListCalculationExecutionsWithContext(aws.Context, *athena.ListCalculationExecutionsInput, ...request.Option) (*athena.ListCalculationExecutionsOutput, error) + ListCalculationExecutionsRequest(*athena.ListCalculationExecutionsInput) (*request.Request, *athena.ListCalculationExecutionsOutput) + + ListCalculationExecutionsPages(*athena.ListCalculationExecutionsInput, func(*athena.ListCalculationExecutionsOutput, bool) bool) error + ListCalculationExecutionsPagesWithContext(aws.Context, *athena.ListCalculationExecutionsInput, func(*athena.ListCalculationExecutionsOutput, bool) bool, ...request.Option) error + + 
ListCapacityReservations(*athena.ListCapacityReservationsInput) (*athena.ListCapacityReservationsOutput, error) + ListCapacityReservationsWithContext(aws.Context, *athena.ListCapacityReservationsInput, ...request.Option) (*athena.ListCapacityReservationsOutput, error) + ListCapacityReservationsRequest(*athena.ListCapacityReservationsInput) (*request.Request, *athena.ListCapacityReservationsOutput) + + ListCapacityReservationsPages(*athena.ListCapacityReservationsInput, func(*athena.ListCapacityReservationsOutput, bool) bool) error + ListCapacityReservationsPagesWithContext(aws.Context, *athena.ListCapacityReservationsInput, func(*athena.ListCapacityReservationsOutput, bool) bool, ...request.Option) error + ListDataCatalogs(*athena.ListDataCatalogsInput) (*athena.ListDataCatalogsOutput, error) ListDataCatalogsWithContext(aws.Context, *athena.ListDataCatalogsInput, ...request.Option) (*athena.ListDataCatalogsOutput, error) ListDataCatalogsRequest(*athena.ListDataCatalogsInput) (*request.Request, *athena.ListDataCatalogsOutput) @@ -153,6 +246,16 @@ type AthenaAPI interface { ListEngineVersionsWithContext(aws.Context, *athena.ListEngineVersionsInput, ...request.Option) (*athena.ListEngineVersionsOutput, error) ListEngineVersionsRequest(*athena.ListEngineVersionsInput) (*request.Request, *athena.ListEngineVersionsOutput) + ListEngineVersionsPages(*athena.ListEngineVersionsInput, func(*athena.ListEngineVersionsOutput, bool) bool) error + ListEngineVersionsPagesWithContext(aws.Context, *athena.ListEngineVersionsInput, func(*athena.ListEngineVersionsOutput, bool) bool, ...request.Option) error + + ListExecutors(*athena.ListExecutorsInput) (*athena.ListExecutorsOutput, error) + ListExecutorsWithContext(aws.Context, *athena.ListExecutorsInput, ...request.Option) (*athena.ListExecutorsOutput, error) + ListExecutorsRequest(*athena.ListExecutorsInput) (*request.Request, *athena.ListExecutorsOutput) + + ListExecutorsPages(*athena.ListExecutorsInput, func(*athena.ListExecutorsOutput, bool) bool) error + ListExecutorsPagesWithContext(aws.Context, *athena.ListExecutorsInput, func(*athena.ListExecutorsOutput, bool) bool, ...request.Option) error + ListNamedQueries(*athena.ListNamedQueriesInput) (*athena.ListNamedQueriesOutput, error) ListNamedQueriesWithContext(aws.Context, *athena.ListNamedQueriesInput, ...request.Option) (*athena.ListNamedQueriesOutput, error) ListNamedQueriesRequest(*athena.ListNamedQueriesInput) (*request.Request, *athena.ListNamedQueriesOutput) @@ -160,6 +263,14 @@ type AthenaAPI interface { ListNamedQueriesPages(*athena.ListNamedQueriesInput, func(*athena.ListNamedQueriesOutput, bool) bool) error ListNamedQueriesPagesWithContext(aws.Context, *athena.ListNamedQueriesInput, func(*athena.ListNamedQueriesOutput, bool) bool, ...request.Option) error + ListNotebookMetadata(*athena.ListNotebookMetadataInput) (*athena.ListNotebookMetadataOutput, error) + ListNotebookMetadataWithContext(aws.Context, *athena.ListNotebookMetadataInput, ...request.Option) (*athena.ListNotebookMetadataOutput, error) + ListNotebookMetadataRequest(*athena.ListNotebookMetadataInput) (*request.Request, *athena.ListNotebookMetadataOutput) + + ListNotebookSessions(*athena.ListNotebookSessionsInput) (*athena.ListNotebookSessionsOutput, error) + ListNotebookSessionsWithContext(aws.Context, *athena.ListNotebookSessionsInput, ...request.Option) (*athena.ListNotebookSessionsOutput, error) + ListNotebookSessionsRequest(*athena.ListNotebookSessionsInput) (*request.Request, *athena.ListNotebookSessionsOutput) + 
ListPreparedStatements(*athena.ListPreparedStatementsInput) (*athena.ListPreparedStatementsOutput, error) ListPreparedStatementsWithContext(aws.Context, *athena.ListPreparedStatementsInput, ...request.Option) (*athena.ListPreparedStatementsOutput, error) ListPreparedStatementsRequest(*athena.ListPreparedStatementsInput) (*request.Request, *athena.ListPreparedStatementsOutput) @@ -174,6 +285,13 @@ type AthenaAPI interface { ListQueryExecutionsPages(*athena.ListQueryExecutionsInput, func(*athena.ListQueryExecutionsOutput, bool) bool) error ListQueryExecutionsPagesWithContext(aws.Context, *athena.ListQueryExecutionsInput, func(*athena.ListQueryExecutionsOutput, bool) bool, ...request.Option) error + ListSessions(*athena.ListSessionsInput) (*athena.ListSessionsOutput, error) + ListSessionsWithContext(aws.Context, *athena.ListSessionsInput, ...request.Option) (*athena.ListSessionsOutput, error) + ListSessionsRequest(*athena.ListSessionsInput) (*request.Request, *athena.ListSessionsOutput) + + ListSessionsPages(*athena.ListSessionsInput, func(*athena.ListSessionsOutput, bool) bool) error + ListSessionsPagesWithContext(aws.Context, *athena.ListSessionsInput, func(*athena.ListSessionsOutput, bool) bool, ...request.Option) error + ListTableMetadata(*athena.ListTableMetadataInput) (*athena.ListTableMetadataOutput, error) ListTableMetadataWithContext(aws.Context, *athena.ListTableMetadataInput, ...request.Option) (*athena.ListTableMetadataOutput, error) ListTableMetadataRequest(*athena.ListTableMetadataInput) (*request.Request, *athena.ListTableMetadataOutput) @@ -195,10 +313,26 @@ type AthenaAPI interface { ListWorkGroupsPages(*athena.ListWorkGroupsInput, func(*athena.ListWorkGroupsOutput, bool) bool) error ListWorkGroupsPagesWithContext(aws.Context, *athena.ListWorkGroupsInput, func(*athena.ListWorkGroupsOutput, bool) bool, ...request.Option) error + PutCapacityAssignmentConfiguration(*athena.PutCapacityAssignmentConfigurationInput) (*athena.PutCapacityAssignmentConfigurationOutput, error) + PutCapacityAssignmentConfigurationWithContext(aws.Context, *athena.PutCapacityAssignmentConfigurationInput, ...request.Option) (*athena.PutCapacityAssignmentConfigurationOutput, error) + PutCapacityAssignmentConfigurationRequest(*athena.PutCapacityAssignmentConfigurationInput) (*request.Request, *athena.PutCapacityAssignmentConfigurationOutput) + + StartCalculationExecution(*athena.StartCalculationExecutionInput) (*athena.StartCalculationExecutionOutput, error) + StartCalculationExecutionWithContext(aws.Context, *athena.StartCalculationExecutionInput, ...request.Option) (*athena.StartCalculationExecutionOutput, error) + StartCalculationExecutionRequest(*athena.StartCalculationExecutionInput) (*request.Request, *athena.StartCalculationExecutionOutput) + StartQueryExecution(*athena.StartQueryExecutionInput) (*athena.StartQueryExecutionOutput, error) StartQueryExecutionWithContext(aws.Context, *athena.StartQueryExecutionInput, ...request.Option) (*athena.StartQueryExecutionOutput, error) StartQueryExecutionRequest(*athena.StartQueryExecutionInput) (*request.Request, *athena.StartQueryExecutionOutput) + StartSession(*athena.StartSessionInput) (*athena.StartSessionOutput, error) + StartSessionWithContext(aws.Context, *athena.StartSessionInput, ...request.Option) (*athena.StartSessionOutput, error) + StartSessionRequest(*athena.StartSessionInput) (*request.Request, *athena.StartSessionOutput) + + StopCalculationExecution(*athena.StopCalculationExecutionInput) (*athena.StopCalculationExecutionOutput, error) + 
StopCalculationExecutionWithContext(aws.Context, *athena.StopCalculationExecutionInput, ...request.Option) (*athena.StopCalculationExecutionOutput, error) + StopCalculationExecutionRequest(*athena.StopCalculationExecutionInput) (*request.Request, *athena.StopCalculationExecutionOutput) + StopQueryExecution(*athena.StopQueryExecutionInput) (*athena.StopQueryExecutionOutput, error) StopQueryExecutionWithContext(aws.Context, *athena.StopQueryExecutionInput, ...request.Option) (*athena.StopQueryExecutionOutput, error) StopQueryExecutionRequest(*athena.StopQueryExecutionInput) (*request.Request, *athena.StopQueryExecutionOutput) @@ -207,14 +341,34 @@ type AthenaAPI interface { TagResourceWithContext(aws.Context, *athena.TagResourceInput, ...request.Option) (*athena.TagResourceOutput, error) TagResourceRequest(*athena.TagResourceInput) (*request.Request, *athena.TagResourceOutput) + TerminateSession(*athena.TerminateSessionInput) (*athena.TerminateSessionOutput, error) + TerminateSessionWithContext(aws.Context, *athena.TerminateSessionInput, ...request.Option) (*athena.TerminateSessionOutput, error) + TerminateSessionRequest(*athena.TerminateSessionInput) (*request.Request, *athena.TerminateSessionOutput) + UntagResource(*athena.UntagResourceInput) (*athena.UntagResourceOutput, error) UntagResourceWithContext(aws.Context, *athena.UntagResourceInput, ...request.Option) (*athena.UntagResourceOutput, error) UntagResourceRequest(*athena.UntagResourceInput) (*request.Request, *athena.UntagResourceOutput) + UpdateCapacityReservation(*athena.UpdateCapacityReservationInput) (*athena.UpdateCapacityReservationOutput, error) + UpdateCapacityReservationWithContext(aws.Context, *athena.UpdateCapacityReservationInput, ...request.Option) (*athena.UpdateCapacityReservationOutput, error) + UpdateCapacityReservationRequest(*athena.UpdateCapacityReservationInput) (*request.Request, *athena.UpdateCapacityReservationOutput) + UpdateDataCatalog(*athena.UpdateDataCatalogInput) (*athena.UpdateDataCatalogOutput, error) UpdateDataCatalogWithContext(aws.Context, *athena.UpdateDataCatalogInput, ...request.Option) (*athena.UpdateDataCatalogOutput, error) UpdateDataCatalogRequest(*athena.UpdateDataCatalogInput) (*request.Request, *athena.UpdateDataCatalogOutput) + UpdateNamedQuery(*athena.UpdateNamedQueryInput) (*athena.UpdateNamedQueryOutput, error) + UpdateNamedQueryWithContext(aws.Context, *athena.UpdateNamedQueryInput, ...request.Option) (*athena.UpdateNamedQueryOutput, error) + UpdateNamedQueryRequest(*athena.UpdateNamedQueryInput) (*request.Request, *athena.UpdateNamedQueryOutput) + + UpdateNotebook(*athena.UpdateNotebookInput) (*athena.UpdateNotebookOutput, error) + UpdateNotebookWithContext(aws.Context, *athena.UpdateNotebookInput, ...request.Option) (*athena.UpdateNotebookOutput, error) + UpdateNotebookRequest(*athena.UpdateNotebookInput) (*request.Request, *athena.UpdateNotebookOutput) + + UpdateNotebookMetadata(*athena.UpdateNotebookMetadataInput) (*athena.UpdateNotebookMetadataOutput, error) + UpdateNotebookMetadataWithContext(aws.Context, *athena.UpdateNotebookMetadataInput, ...request.Option) (*athena.UpdateNotebookMetadataOutput, error) + UpdateNotebookMetadataRequest(*athena.UpdateNotebookMetadataInput) (*request.Request, *athena.UpdateNotebookMetadataOutput) + UpdatePreparedStatement(*athena.UpdatePreparedStatementInput) (*athena.UpdatePreparedStatementOutput, error) UpdatePreparedStatementWithContext(aws.Context, *athena.UpdatePreparedStatementInput, ...request.Option) 
(*athena.UpdatePreparedStatementOutput, error) UpdatePreparedStatementRequest(*athena.UpdatePreparedStatementInput) (*request.Request, *athena.UpdatePreparedStatementOutput) diff --git a/vendor/github.com/aws/aws-sdk-go/service/athena/doc.go b/vendor/github.com/aws/aws-sdk-go/service/athena/doc.go index c130729c..eda201e0 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/athena/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/athena/doc.go @@ -17,16 +17,12 @@ // support the API. For more information and to download the driver, see Accessing // Amazon Athena with JDBC (https://docs.aws.amazon.com/athena/latest/ug/connect-with-jdbc.html). // -// For code samples using the Amazon Web Services SDK for Java, see Examples -// and Code Samples (https://docs.aws.amazon.com/athena/latest/ug/code-samples.html) -// in the Amazon Athena User Guide. -// // See https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18 for more information on this service. // // See athena package documentation for more information. // https://docs.aws.amazon.com/sdk-for-go/api/service/athena/ // -// Using the Client +// # Using the Client // // To contact Amazon Athena with the SDK use the New function to create // a new service client. With that client you can make API requests to the service. diff --git a/vendor/github.com/aws/aws-sdk-go/service/athena/errors.go b/vendor/github.com/aws/aws-sdk-go/service/athena/errors.go index a0939ed6..1685e153 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/athena/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/athena/errors.go @@ -38,6 +38,12 @@ const ( // A resource, such as a workgroup, was not found. ErrCodeResourceNotFoundException = "ResourceNotFoundException" + // ErrCodeSessionAlreadyExistsException for service response error code + // "SessionAlreadyExistsException". + // + // The specified session already exists. + ErrCodeSessionAlreadyExistsException = "SessionAlreadyExistsException" + // ErrCodeTooManyRequestsException for service response error code // "TooManyRequestsException". // @@ -46,9 +52,10 @@ const ( ) var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ - "InternalServerException": newErrorInternalServerException, - "InvalidRequestException": newErrorInvalidRequestException, - "MetadataException": newErrorMetadataException, - "ResourceNotFoundException": newErrorResourceNotFoundException, - "TooManyRequestsException": newErrorTooManyRequestsException, + "InternalServerException": newErrorInternalServerException, + "InvalidRequestException": newErrorInvalidRequestException, + "MetadataException": newErrorMetadataException, + "ResourceNotFoundException": newErrorResourceNotFoundException, + "SessionAlreadyExistsException": newErrorSessionAlreadyExistsException, + "TooManyRequestsException": newErrorTooManyRequestsException, } diff --git a/vendor/github.com/aws/aws-sdk-go/service/athena/service.go b/vendor/github.com/aws/aws-sdk-go/service/athena/service.go index 3b624a85..2991beaf 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/athena/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/athena/service.go @@ -40,33 +40,39 @@ const ( // aws.Config parameter to add your extra config. // // Example: -// mySession := session.Must(session.NewSession()) // -// // Create a Athena client from just a session. 
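//
// A sketch of handling the new SessionAlreadyExistsException error code via
// awserr, the usual aws-sdk-go v1 pattern (illustrative, not generated SDK
// code; the workgroup name and DPU count are assumptions):
//
//	so, err := svc.StartSession(&athena.StartSessionInput{
//		WorkGroup: aws.String("spark-workgroup"),
//		EngineConfiguration: &athena.EngineConfiguration{
//			MaxConcurrentDpus: aws.Int64(4),
//		},
//	})
//	if aerr, ok := err.(awserr.Error); ok &&
//		aerr.Code() == athena.ErrCodeSessionAlreadyExistsException {
//		// A session already exists for this workgroup; reuse it instead of failing.
//	} else if err == nil {
//		fmt.Println(aws.StringValue(so.SessionId))
//	}
//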
-// svc := athena.New(mySession) +// mySession := session.Must(session.NewSession()) // -// // Create a Athena client with additional configuration -// svc := athena.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +// // Create a Athena client from just a session. +// svc := athena.New(mySession) +// +// // Create a Athena client with additional configuration +// svc := athena.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Athena { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) + if c.SigningNameDerived || len(c.SigningName) == 0 { + c.SigningName = EndpointsID + // No Fallback + } + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Athena { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *Athena { svc := &Athena{ Client: client.New( cfg, metadata.ClientInfo{ - ServiceName: ServiceName, - ServiceID: ServiceID, - SigningName: signingName, - SigningRegion: signingRegion, - PartitionID: partitionID, - Endpoint: endpoint, - APIVersion: "2017-05-18", - JSONVersion: "1.1", - TargetPrefix: "AmazonAthena", + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2017-05-18", + ResolvedRegion: resolvedRegion, + JSONVersion: "1.1", + TargetPrefix: "AmazonAthena", }, handlers, ), diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/api.go b/vendor/github.com/aws/aws-sdk-go/service/sso/api.go index e3711e64..b8f590f7 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sso/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sso/api.go @@ -29,14 +29,13 @@ const opGetRoleCredentials = "GetRoleCredentials" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the GetRoleCredentialsRequest method. +// req, resp := client.GetRoleCredentialsRequest(params) // -// // Example sending a request using the GetRoleCredentialsRequest method. -// req, resp := client.GetRoleCredentialsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/GetRoleCredentials func (c *SSO) GetRoleCredentialsRequest(input *GetRoleCredentialsInput) (req *request.Request, output *GetRoleCredentialsOutput) { @@ -69,20 +68,21 @@ func (c *SSO) GetRoleCredentialsRequest(input *GetRoleCredentialsInput) (req *re // API operation GetRoleCredentials for usage and error information. // // Returned Error Types: -// * InvalidRequestException -// Indicates that a problem occurred with the input to the request. For example, -// a required parameter might be missing or out of range. // -// * UnauthorizedException -// Indicates that the request is not authorized. This can happen due to an invalid -// access token in the request. 
+// - InvalidRequestException +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// - UnauthorizedException +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. // -// * TooManyRequestsException -// Indicates that the request is being made too frequently and is more than -// what the server can handle. +// - TooManyRequestsException +// Indicates that the request is being made too frequently and is more than +// what the server can handle. // -// * ResourceNotFoundException -// The specified resource doesn't exist. +// - ResourceNotFoundException +// The specified resource doesn't exist. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/GetRoleCredentials func (c *SSO) GetRoleCredentials(input *GetRoleCredentialsInput) (*GetRoleCredentialsOutput, error) { @@ -122,14 +122,13 @@ const opListAccountRoles = "ListAccountRoles" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the ListAccountRolesRequest method. +// req, resp := client.ListAccountRolesRequest(params) // -// // Example sending a request using the ListAccountRolesRequest method. -// req, resp := client.ListAccountRolesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccountRoles func (c *SSO) ListAccountRolesRequest(input *ListAccountRolesInput) (req *request.Request, output *ListAccountRolesOutput) { @@ -167,20 +166,21 @@ func (c *SSO) ListAccountRolesRequest(input *ListAccountRolesInput) (req *reques // API operation ListAccountRoles for usage and error information. // // Returned Error Types: -// * InvalidRequestException -// Indicates that a problem occurred with the input to the request. For example, -// a required parameter might be missing or out of range. // -// * UnauthorizedException -// Indicates that the request is not authorized. This can happen due to an invalid -// access token in the request. +// - InvalidRequestException +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// - UnauthorizedException +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. // -// * TooManyRequestsException -// Indicates that the request is being made too frequently and is more than -// what the server can handle. +// - TooManyRequestsException +// Indicates that the request is being made too frequently and is more than +// what the server can handle. // -// * ResourceNotFoundException -// The specified resource doesn't exist. +// - ResourceNotFoundException +// The specified resource doesn't exist. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccountRoles func (c *SSO) ListAccountRoles(input *ListAccountRolesInput) (*ListAccountRolesOutput, error) { @@ -212,15 +212,14 @@ func (c *SSO) ListAccountRolesWithContext(ctx aws.Context, input *ListAccountRol // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListAccountRoles operation. 
-// pageNum := 0 -// err := client.ListAccountRolesPages(params, -// func(page *sso.ListAccountRolesOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// +// // Example iterating over at most 3 pages of a ListAccountRoles operation. +// pageNum := 0 +// err := client.ListAccountRolesPages(params, +// func(page *sso.ListAccountRolesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) func (c *SSO) ListAccountRolesPages(input *ListAccountRolesInput, fn func(*ListAccountRolesOutput, bool) bool) error { return c.ListAccountRolesPagesWithContext(aws.BackgroundContext(), input, fn) } @@ -272,14 +271,13 @@ const opListAccounts = "ListAccounts" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the ListAccountsRequest method. +// req, resp := client.ListAccountsRequest(params) // -// // Example sending a request using the ListAccountsRequest method. -// req, resp := client.ListAccountsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccounts func (c *SSO) ListAccountsRequest(input *ListAccountsInput) (req *request.Request, output *ListAccountsOutput) { @@ -310,7 +308,8 @@ func (c *SSO) ListAccountsRequest(input *ListAccountsInput) (req *request.Reques // Lists all AWS accounts assigned to the user. These AWS accounts are assigned // by the administrator of the account. For more information, see Assign User // Access (https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers) -// in the AWS SSO User Guide. This operation returns a paginated response. +// in the IAM Identity Center User Guide. This operation returns a paginated +// response. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -320,20 +319,21 @@ func (c *SSO) ListAccountsRequest(input *ListAccountsInput) (req *request.Reques // API operation ListAccounts for usage and error information. // // Returned Error Types: -// * InvalidRequestException -// Indicates that a problem occurred with the input to the request. For example, -// a required parameter might be missing or out of range. // -// * UnauthorizedException -// Indicates that the request is not authorized. This can happen due to an invalid -// access token in the request. +// - InvalidRequestException +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. // -// * TooManyRequestsException -// Indicates that the request is being made too frequently and is more than -// what the server can handle. +// - UnauthorizedException +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. // -// * ResourceNotFoundException -// The specified resource doesn't exist. +// - TooManyRequestsException +// Indicates that the request is being made too frequently and is more than +// what the server can handle. +// +// - ResourceNotFoundException +// The specified resource doesn't exist. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccounts func (c *SSO) ListAccounts(input *ListAccountsInput) (*ListAccountsOutput, error) { @@ -365,15 +365,14 @@ func (c *SSO) ListAccountsWithContext(ctx aws.Context, input *ListAccountsInput, // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListAccounts operation. -// pageNum := 0 -// err := client.ListAccountsPages(params, -// func(page *sso.ListAccountsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// +// // Example iterating over at most 3 pages of a ListAccounts operation. +// pageNum := 0 +// err := client.ListAccountsPages(params, +// func(page *sso.ListAccountsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) func (c *SSO) ListAccountsPages(input *ListAccountsInput, fn func(*ListAccountsOutput, bool) bool) error { return c.ListAccountsPagesWithContext(aws.BackgroundContext(), input, fn) } @@ -425,14 +424,13 @@ const opLogout = "Logout" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the LogoutRequest method. +// req, resp := client.LogoutRequest(params) // -// // Example sending a request using the LogoutRequest method. -// req, resp := client.LogoutRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/Logout func (c *SSO) LogoutRequest(input *LogoutInput) (req *request.Request, output *LogoutOutput) { @@ -455,7 +453,21 @@ func (c *SSO) LogoutRequest(input *LogoutInput) (req *request.Request, output *L // Logout API operation for AWS Single Sign-On. // -// Removes the client- and server-side session that is associated with the user. +// Removes the locally stored SSO tokens from the client-side cache and sends +// an API call to the IAM Identity Center service to invalidate the corresponding +// server-side IAM Identity Center sign in session. +// +// If a user uses IAM Identity Center to access the AWS CLI, the user’s IAM +// Identity Center sign in session is used to obtain an IAM session, as specified +// in the corresponding IAM Identity Center permission set. More specifically, +// IAM Identity Center assumes an IAM role in the target account on behalf of +// the user, and the corresponding temporary AWS credentials are returned to +// the client. +// +// After user logout, any existing IAM role sessions that were created by using +// IAM Identity Center permission sets continue based on the duration configured +// in the permission set. For more information, see User authentications (https://docs.aws.amazon.com/singlesignon/latest/userguide/authconcept.html) +// in the IAM Identity Center User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -465,17 +477,18 @@ func (c *SSO) LogoutRequest(input *LogoutInput) (req *request.Request, output *L // API operation Logout for usage and error information. // // Returned Error Types: -// * InvalidRequestException -// Indicates that a problem occurred with the input to the request. 
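//
// A sketch of the Logout semantics described above (illustrative, not
// generated SDK code; the accessToken variable is an assumption). The call
// invalidates the IAM Identity Center sign-in session, while IAM role
// sessions already vended through permission sets continue until their
// configured duration expires:
//
//	svc := sso.New(session.Must(session.NewSession()))
//	_, err := svc.Logout(&sso.LogoutInput{
//		AccessToken: aws.String(accessToken),
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	// Credentials previously returned by GetRoleCredentials are not revoked
//	// here; they simply expire on their own schedule.
//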
For example, -// a required parameter might be missing or out of range. // -// * UnauthorizedException -// Indicates that the request is not authorized. This can happen due to an invalid -// access token in the request. +// - InvalidRequestException +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// - UnauthorizedException +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. // -// * TooManyRequestsException -// Indicates that the request is being made too frequently and is more than -// what the server can handle. +// - TooManyRequestsException +// Indicates that the request is being made too frequently and is more than +// what the server can handle. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/Logout func (c *SSO) Logout(input *LogoutInput) (*LogoutOutput, error) { @@ -550,11 +563,11 @@ func (s *AccountInfo) SetEmailAddress(v string) *AccountInfo { } type GetRoleCredentialsInput struct { - _ struct{} `type:"structure"` + _ struct{} `type:"structure" nopayload:"true"` // The token issued by the CreateToken API call. For more information, see CreateToken // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the AWS SSO OIDC API Reference Guide. + // in the IAM Identity Center OIDC API Reference Guide. // // AccessToken is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by GetRoleCredentialsInput's @@ -726,11 +739,11 @@ func (s *InvalidRequestException) RequestID() string { } type ListAccountRolesInput struct { - _ struct{} `type:"structure"` + _ struct{} `type:"structure" nopayload:"true"` // The token issued by the CreateToken API call. For more information, see CreateToken // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the AWS SSO OIDC API Reference Guide. + // in the IAM Identity Center OIDC API Reference Guide. // // AccessToken is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by ListAccountRolesInput's @@ -855,11 +868,11 @@ func (s *ListAccountRolesOutput) SetRoleList(v []*RoleInfo) *ListAccountRolesOut } type ListAccountsInput struct { - _ struct{} `type:"structure"` + _ struct{} `type:"structure" nopayload:"true"` // The token issued by the CreateToken API call. For more information, see CreateToken // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the AWS SSO OIDC API Reference Guide. + // in the IAM Identity Center OIDC API Reference Guide. // // AccessToken is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by ListAccountsInput's @@ -970,11 +983,11 @@ func (s *ListAccountsOutput) SetNextToken(v string) *ListAccountsOutput { } type LogoutInput struct { - _ struct{} `type:"structure"` + _ struct{} `type:"structure" nopayload:"true"` // The token issued by the CreateToken API call. For more information, see CreateToken // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) - // in the AWS SSO OIDC API Reference Guide. + // in the IAM Identity Center OIDC API Reference Guide. 
//
// AccessToken is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by LogoutInput's
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go
index 92d82b2a..15e61a32 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go
@@ -3,30 +3,31 @@
// Package sso provides the client and types for making API
// requests to AWS Single Sign-On.
//
-// AWS Single Sign-On Portal is a web service that makes it easy for you to
-// assign user access to AWS SSO resources such as the user portal. Users can
-// get AWS account applications and roles assigned to them and get federated
-// into the application.
+// AWS IAM Identity Center (successor to AWS Single Sign-On) Portal is a web
+// service that makes it easy for you to assign user access to IAM Identity
+// Center resources such as the AWS access portal. Users can get AWS account
+// applications and roles assigned to them and get federated into the application.
//
-// For general information about AWS SSO, see What is AWS Single Sign-On? (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html)
-// in the AWS SSO User Guide.
+// Although AWS Single Sign-On was renamed, the sso and identitystore API namespaces
+// will continue to retain their original name for backward compatibility purposes.
+// For more information, see IAM Identity Center rename (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed).
//
-// This API reference guide describes the AWS SSO Portal operations that you
-// can call programatically and includes detailed information on data types
-// and errors.
+// This reference guide describes the IAM Identity Center Portal operations
+// that you can call programmatically and includes detailed information on data
+// types and errors.
//
// AWS provides SDKs that consist of libraries and sample code for various programming
// languages and platforms, such as Java, Ruby, .Net, iOS, or Android. The SDKs
-// provide a convenient way to create programmatic access to AWS SSO and other
-// AWS services. For more information about the AWS SDKs, including how to download
-// and install them, see Tools for Amazon Web Services (http://aws.amazon.com/tools/).
+// provide a convenient way to create programmatic access to IAM Identity Center
+// and other AWS services. For more information about the AWS SDKs, including
+// how to download and install them, see Tools for Amazon Web Services (http://aws.amazon.com/tools/).
//
// See https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10 for more information on this service.
//
// See sso package documentation for more information.
// https://docs.aws.amazon.com/sdk-for-go/api/service/sso/
//
-// Using the Client
+// # Using the Client
//
// To contact AWS Single Sign-On with the SDK use the New function to create
// a new service client. With that client you can make API requests to the service.
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/service.go b/vendor/github.com/aws/aws-sdk-go/service/sso/service.go
index 35175331..7094cfe4 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/sso/service.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/sso/service.go
@@ -40,34 +40,36 @@ const (
// aws.Config parameter to add your extra config.
//
// Example:
-// mySession := session.Must(session.NewSession())
//
-// // Create a SSO client from just a session.
-// svc := sso.New(mySession) +// mySession := session.Must(session.NewSession()) // -// // Create a SSO client with additional configuration -// svc := sso.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +// // Create a SSO client from just a session. +// svc := sso.New(mySession) +// +// // Create a SSO client with additional configuration +// svc := sso.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *SSO { c := p.ClientConfig(EndpointsID, cfgs...) if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "awsssoportal" } - return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SSO { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *SSO { svc := &SSO{ Client: client.New( cfg, metadata.ClientInfo{ - ServiceName: ServiceName, - ServiceID: ServiceID, - SigningName: signingName, - SigningRegion: signingRegion, - PartitionID: partitionID, - Endpoint: endpoint, - APIVersion: "2019-06-10", + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2019-06-10", + ResolvedRegion: resolvedRegion, }, handlers, ), diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go index 4cac247c..818cab7c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go @@ -23,37 +23,37 @@ import ( // can be stubbed out for unit testing your code with the SDK without needing // to inject custom request handlers into the SDK's request pipeline. // -// // myFunc uses an SDK service client to make a request to -// // AWS Single Sign-On. -// func myFunc(svc ssoiface.SSOAPI) bool { -// // Make svc.GetRoleCredentials request -// } +// // myFunc uses an SDK service client to make a request to +// // AWS Single Sign-On. +// func myFunc(svc ssoiface.SSOAPI) bool { +// // Make svc.GetRoleCredentials request +// } // -// func main() { -// sess := session.New() -// svc := sso.New(sess) +// func main() { +// sess := session.New() +// svc := sso.New(sess) // -// myFunc(svc) -// } +// myFunc(svc) +// } // // In your _test.go file: // -// // Define a mock struct to be used in your unit tests of myFunc. -// type mockSSOClient struct { -// ssoiface.SSOAPI -// } -// func (m *mockSSOClient) GetRoleCredentials(input *sso.GetRoleCredentialsInput) (*sso.GetRoleCredentialsOutput, error) { -// // mock response/functionality -// } +// // Define a mock struct to be used in your unit tests of myFunc. 
+// type mockSSOClient struct { +// ssoiface.SSOAPI +// } +// func (m *mockSSOClient) GetRoleCredentials(input *sso.GetRoleCredentialsInput) (*sso.GetRoleCredentialsOutput, error) { +// // mock response/functionality +// } // -// func TestMyFunc(t *testing.T) { -// // Setup Test -// mockSvc := &mockSSOClient{} +// func TestMyFunc(t *testing.T) { +// // Setup Test +// mockSvc := &mockSSOClient{} // -// myfunc(mockSvc) +// myfunc(mockSvc) // -// // Verify myFunc's functionality -// } +// // Verify myFunc's functionality +// } // // It is important to note that this interface will have breaking changes // when the service model is updated and adds new API operations, paginators, diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go new file mode 100644 index 00000000..04f6c811 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go @@ -0,0 +1,2252 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package ssooidc + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" +) + +const opCreateToken = "CreateToken" + +// CreateTokenRequest generates a "aws/request.Request" representing the +// client's request for the CreateToken operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateToken for more information on using the CreateToken +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CreateTokenRequest method. +// req, resp := client.CreateTokenRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/CreateToken +func (c *SSOOIDC) CreateTokenRequest(input *CreateTokenInput) (req *request.Request, output *CreateTokenOutput) { + op := &request.Operation{ + Name: opCreateToken, + HTTPMethod: "POST", + HTTPPath: "/token", + } + + if input == nil { + input = &CreateTokenInput{} + } + + output = &CreateTokenOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// CreateToken API operation for AWS SSO OIDC. +// +// Creates and returns access and refresh tokens for clients that are authenticated +// using client secrets. The access token can be used to fetch short-term credentials +// for the assigned AWS accounts or to access application APIs using bearer +// authentication. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS SSO OIDC's +// API operation CreateToken for usage and error information. +// +// Returned Error Types: +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. 
For example, +// a required parameter might be missing or out of range. +// +// - InvalidClientException +// Indicates that the clientId or clientSecret in the request is invalid. For +// example, this can occur when a client sends an incorrect clientId or an expired +// clientSecret. +// +// - InvalidGrantException +// Indicates that a request contains an invalid grant. This can occur if a client +// makes a CreateToken request with an invalid grant type. +// +// - UnauthorizedClientException +// Indicates that the client is not currently authorized to make the request. +// This can happen when a clientId is not issued for a public client. +// +// - UnsupportedGrantTypeException +// Indicates that the grant type in the request is not supported by the service. +// +// - InvalidScopeException +// Indicates that the scope provided in the request is invalid. +// +// - AuthorizationPendingException +// Indicates that a request to authorize a client with an access user session +// token is pending. +// +// - SlowDownException +// Indicates that the client is making the request too frequently and is more +// than the service can handle. +// +// - AccessDeniedException +// You do not have sufficient access to perform this action. +// +// - ExpiredTokenException +// Indicates that the token issued by the service is expired and is no longer +// valid. +// +// - InternalServerException +// Indicates that an error from the service occurred while trying to process +// a request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/CreateToken +func (c *SSOOIDC) CreateToken(input *CreateTokenInput) (*CreateTokenOutput, error) { + req, out := c.CreateTokenRequest(input) + return out, req.Send() +} + +// CreateTokenWithContext is the same as CreateToken with the addition of +// the ability to pass a context and additional request options. +// +// See CreateToken for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSOOIDC) CreateTokenWithContext(ctx aws.Context, input *CreateTokenInput, opts ...request.Option) (*CreateTokenOutput, error) { + req, out := c.CreateTokenRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateTokenWithIAM = "CreateTokenWithIAM" + +// CreateTokenWithIAMRequest generates a "aws/request.Request" representing the +// client's request for the CreateTokenWithIAM operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateTokenWithIAM for more information on using the CreateTokenWithIAM +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CreateTokenWithIAMRequest method. 
+// req, resp := client.CreateTokenWithIAMRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/CreateTokenWithIAM +func (c *SSOOIDC) CreateTokenWithIAMRequest(input *CreateTokenWithIAMInput) (req *request.Request, output *CreateTokenWithIAMOutput) { + op := &request.Operation{ + Name: opCreateTokenWithIAM, + HTTPMethod: "POST", + HTTPPath: "/token?aws_iam=t", + } + + if input == nil { + input = &CreateTokenWithIAMInput{} + } + + output = &CreateTokenWithIAMOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateTokenWithIAM API operation for AWS SSO OIDC. +// +// Creates and returns access and refresh tokens for clients and applications +// that are authenticated using IAM entities. The access token can be used to +// fetch short-term credentials for the assigned AWS accounts or to access application +// APIs using bearer authentication. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS SSO OIDC's +// API operation CreateTokenWithIAM for usage and error information. +// +// Returned Error Types: +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// - InvalidClientException +// Indicates that the clientId or clientSecret in the request is invalid. For +// example, this can occur when a client sends an incorrect clientId or an expired +// clientSecret. +// +// - InvalidGrantException +// Indicates that a request contains an invalid grant. This can occur if a client +// makes a CreateToken request with an invalid grant type. +// +// - UnauthorizedClientException +// Indicates that the client is not currently authorized to make the request. +// This can happen when a clientId is not issued for a public client. +// +// - UnsupportedGrantTypeException +// Indicates that the grant type in the request is not supported by the service. +// +// - InvalidScopeException +// Indicates that the scope provided in the request is invalid. +// +// - AuthorizationPendingException +// Indicates that a request to authorize a client with an access user session +// token is pending. +// +// - SlowDownException +// Indicates that the client is making the request too frequently and is more +// than the service can handle. +// +// - AccessDeniedException +// You do not have sufficient access to perform this action. +// +// - ExpiredTokenException +// Indicates that the token issued by the service is expired and is no longer +// valid. +// +// - InternalServerException +// Indicates that an error from the service occurred while trying to process +// a request. +// +// - InvalidRequestRegionException +// Indicates that a token provided as input to the request was issued by and +// is only usable by calling IAM Identity Center endpoints in another region. 
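+//
+// A minimal usage sketch (illustrative, not part of the generated file):
+// exchanging a refresh token using the Refresh Token grant type documented
+// below. The svc, appArn, and oldRefreshToken identifiers are assumptions
+// for this example.
+//
+//	out, err := svc.CreateTokenWithIAM(&ssooidc.CreateTokenWithIAMInput{
+//		ClientId:     aws.String(appArn), // application ARN with OAuth grants configured
+//		GrantType:    aws.String("refresh_token"),
+//		RefreshToken: aws.String(oldRefreshToken),
+//	})
+//	if err == nil {
+//		fmt.Println(aws.StringValue(out.TokenType)) // "Bearer"
+//	}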
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/CreateTokenWithIAM +func (c *SSOOIDC) CreateTokenWithIAM(input *CreateTokenWithIAMInput) (*CreateTokenWithIAMOutput, error) { + req, out := c.CreateTokenWithIAMRequest(input) + return out, req.Send() +} + +// CreateTokenWithIAMWithContext is the same as CreateTokenWithIAM with the addition of +// the ability to pass a context and additional request options. +// +// See CreateTokenWithIAM for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSOOIDC) CreateTokenWithIAMWithContext(ctx aws.Context, input *CreateTokenWithIAMInput, opts ...request.Option) (*CreateTokenWithIAMOutput, error) { + req, out := c.CreateTokenWithIAMRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRegisterClient = "RegisterClient" + +// RegisterClientRequest generates a "aws/request.Request" representing the +// client's request for the RegisterClient operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RegisterClient for more information on using the RegisterClient +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the RegisterClientRequest method. +// req, resp := client.RegisterClientRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/RegisterClient +func (c *SSOOIDC) RegisterClientRequest(input *RegisterClientInput) (req *request.Request, output *RegisterClientOutput) { + op := &request.Operation{ + Name: opRegisterClient, + HTTPMethod: "POST", + HTTPPath: "/client/register", + } + + if input == nil { + input = &RegisterClientInput{} + } + + output = &RegisterClientOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// RegisterClient API operation for AWS SSO OIDC. +// +// Registers a client with IAM Identity Center. This allows clients to initiate +// device authorization. The output should be persisted for reuse through many +// authentication requests. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS SSO OIDC's +// API operation RegisterClient for usage and error information. +// +// Returned Error Types: +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// - InvalidScopeException +// Indicates that the scope provided in the request is invalid. 
+// +// - InvalidClientMetadataException +// Indicates that the client information sent in the request during registration +// is invalid. +// +// - InternalServerException +// Indicates that an error from the service occurred while trying to process +// a request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/RegisterClient +func (c *SSOOIDC) RegisterClient(input *RegisterClientInput) (*RegisterClientOutput, error) { + req, out := c.RegisterClientRequest(input) + return out, req.Send() +} + +// RegisterClientWithContext is the same as RegisterClient with the addition of +// the ability to pass a context and additional request options. +// +// See RegisterClient for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSOOIDC) RegisterClientWithContext(ctx aws.Context, input *RegisterClientInput, opts ...request.Option) (*RegisterClientOutput, error) { + req, out := c.RegisterClientRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStartDeviceAuthorization = "StartDeviceAuthorization" + +// StartDeviceAuthorizationRequest generates a "aws/request.Request" representing the +// client's request for the StartDeviceAuthorization operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartDeviceAuthorization for more information on using the StartDeviceAuthorization +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the StartDeviceAuthorizationRequest method. +// req, resp := client.StartDeviceAuthorizationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/StartDeviceAuthorization +func (c *SSOOIDC) StartDeviceAuthorizationRequest(input *StartDeviceAuthorizationInput) (req *request.Request, output *StartDeviceAuthorizationOutput) { + op := &request.Operation{ + Name: opStartDeviceAuthorization, + HTTPMethod: "POST", + HTTPPath: "/device_authorization", + } + + if input == nil { + input = &StartDeviceAuthorizationInput{} + } + + output = &StartDeviceAuthorizationOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// StartDeviceAuthorization API operation for AWS SSO OIDC. +// +// Initiates device authorization by requesting a pair of verification codes +// from the authorization service. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS SSO OIDC's +// API operation StartDeviceAuthorization for usage and error information. 
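+//
+// A minimal usage sketch (illustrative, not part of the generated file) of
+// the device flow this operation starts. The clientID and clientSecret values
+// are assumed to come from a persisted RegisterClient result, and startURL is
+// an assumption; field names follow the service's device authorization API.
+//
+//	auth, err := svc.StartDeviceAuthorization(&ssooidc.StartDeviceAuthorizationInput{
+//		ClientId:     clientID,
+//		ClientSecret: clientSecret,
+//		StartUrl:     aws.String(startURL),
+//	})
+//	if err == nil {
+//		// Send the user to this URL, then poll CreateToken with the
+//		// device_code grant until authorization completes.
+//		fmt.Println(aws.StringValue(auth.VerificationUriComplete))
+//	}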
+// +// Returned Error Types: +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// - InvalidClientException +// Indicates that the clientId or clientSecret in the request is invalid. For +// example, this can occur when a client sends an incorrect clientId or an expired +// clientSecret. +// +// - UnauthorizedClientException +// Indicates that the client is not currently authorized to make the request. +// This can happen when a clientId is not issued for a public client. +// +// - SlowDownException +// Indicates that the client is making the request too frequently and is more +// than the service can handle. +// +// - InternalServerException +// Indicates that an error from the service occurred while trying to process +// a request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/StartDeviceAuthorization +func (c *SSOOIDC) StartDeviceAuthorization(input *StartDeviceAuthorizationInput) (*StartDeviceAuthorizationOutput, error) { + req, out := c.StartDeviceAuthorizationRequest(input) + return out, req.Send() +} + +// StartDeviceAuthorizationWithContext is the same as StartDeviceAuthorization with the addition of +// the ability to pass a context and additional request options. +// +// See StartDeviceAuthorization for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSOOIDC) StartDeviceAuthorizationWithContext(ctx aws.Context, input *StartDeviceAuthorizationInput, opts ...request.Option) (*StartDeviceAuthorizationOutput, error) { + req, out := c.StartDeviceAuthorizationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// You do not have sufficient access to perform this action. +type AccessDeniedException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be access_denied. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AccessDeniedException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AccessDeniedException) GoString() string { + return s.String() +} + +func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { + return &AccessDeniedException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. 
+func (s *AccessDeniedException) Code() string { + return "AccessDeniedException" +} + +// Message returns the exception's message. +func (s *AccessDeniedException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *AccessDeniedException) OrigErr() error { + return nil +} + +func (s *AccessDeniedException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *AccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *AccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that a request to authorize a client with an access user session +// token is pending. +type AuthorizationPendingException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be authorization_pending. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AuthorizationPendingException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AuthorizationPendingException) GoString() string { + return s.String() +} + +func newErrorAuthorizationPendingException(v protocol.ResponseMetadata) error { + return &AuthorizationPendingException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *AuthorizationPendingException) Code() string { + return "AuthorizationPendingException" +} + +// Message returns the exception's message. +func (s *AuthorizationPendingException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *AuthorizationPendingException) OrigErr() error { + return nil +} + +func (s *AuthorizationPendingException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *AuthorizationPendingException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *AuthorizationPendingException) RequestID() string { + return s.RespMetadata.RequestID +} + +type CreateTokenInput struct { + _ struct{} `type:"structure"` + + // The unique identifier string for the client or application. This value comes + // from the result of the RegisterClient API. 
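+	//
+	// A minimal sketch (illustrative, not part of the generated file) showing
+	// how persisted RegisterClient and StartDeviceAuthorization results feed a
+	// device-code CreateToken call; reg and auth are assumed outputs of those
+	// two calls:
+	//
+	//	tok, err := svc.CreateToken(&ssooidc.CreateTokenInput{
+	//		ClientId:     reg.ClientId,
+	//		ClientSecret: reg.ClientSecret,
+	//		GrantType:    aws.String("urn:ietf:params:oauth:grant-type:device_code"),
+	//		DeviceCode:   auth.DeviceCode,
+	//	})
+	//	if err == nil {
+	//		fmt.Println(aws.StringValue(tok.TokenType))
+	//	}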
+ // + // ClientId is a required field + ClientId *string `locationName:"clientId" type:"string" required:"true"` + + // A secret string generated for the client. This value should come from the + // persisted result of the RegisterClient API. + // + // ClientSecret is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenInput's + // String and GoString methods. + // + // ClientSecret is a required field + ClientSecret *string `locationName:"clientSecret" type:"string" required:"true" sensitive:"true"` + + // Used only when calling this API for the Authorization Code grant type. The + // short-term code is used to identify this authorization request. This grant + // type is currently unsupported for the CreateToken API. + Code *string `locationName:"code" type:"string"` + + // Used only when calling this API for the Device Code grant type. This short-term + // code is used to identify this authorization request. This comes from the + // result of the StartDeviceAuthorization API. + DeviceCode *string `locationName:"deviceCode" type:"string"` + + // Supports the following OAuth grant types: Device Code and Refresh Token. + // Specify either of the following values, depending on the grant type that + // you want: + // + // * Device Code - urn:ietf:params:oauth:grant-type:device_code + // + // * Refresh Token - refresh_token + // + // For information about how to obtain the device code, see the StartDeviceAuthorization + // topic. + // + // GrantType is a required field + GrantType *string `locationName:"grantType" type:"string" required:"true"` + + // Used only when calling this API for the Authorization Code grant type. This + // value specifies the location of the client or application that has registered + // to receive the authorization code. + RedirectUri *string `locationName:"redirectUri" type:"string"` + + // Used only when calling this API for the Refresh Token grant type. This token + // is used to refresh short-term tokens, such as the access token, that might + // expire. + // + // For more information about the features and limitations of the current IAM + // Identity Center OIDC implementation, see Considerations for Using this Guide + // in the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // + // RefreshToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenInput's + // String and GoString methods. + RefreshToken *string `locationName:"refreshToken" type:"string" sensitive:"true"` + + // The list of scopes for which authorization is requested. The access token + // that is issued is limited to the scopes that are granted. If this value is + // not specified, IAM Identity Center authorizes all scopes that are configured + // for the client during the call to RegisterClient. + Scope []*string `locationName:"scope" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTokenInput"} + if s.ClientId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientId")) + } + if s.ClientSecret == nil { + invalidParams.Add(request.NewErrParamRequired("ClientSecret")) + } + if s.GrantType == nil { + invalidParams.Add(request.NewErrParamRequired("GrantType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientId sets the ClientId field's value. +func (s *CreateTokenInput) SetClientId(v string) *CreateTokenInput { + s.ClientId = &v + return s +} + +// SetClientSecret sets the ClientSecret field's value. +func (s *CreateTokenInput) SetClientSecret(v string) *CreateTokenInput { + s.ClientSecret = &v + return s +} + +// SetCode sets the Code field's value. +func (s *CreateTokenInput) SetCode(v string) *CreateTokenInput { + s.Code = &v + return s +} + +// SetDeviceCode sets the DeviceCode field's value. +func (s *CreateTokenInput) SetDeviceCode(v string) *CreateTokenInput { + s.DeviceCode = &v + return s +} + +// SetGrantType sets the GrantType field's value. +func (s *CreateTokenInput) SetGrantType(v string) *CreateTokenInput { + s.GrantType = &v + return s +} + +// SetRedirectUri sets the RedirectUri field's value. +func (s *CreateTokenInput) SetRedirectUri(v string) *CreateTokenInput { + s.RedirectUri = &v + return s +} + +// SetRefreshToken sets the RefreshToken field's value. +func (s *CreateTokenInput) SetRefreshToken(v string) *CreateTokenInput { + s.RefreshToken = &v + return s +} + +// SetScope sets the Scope field's value. +func (s *CreateTokenInput) SetScope(v []*string) *CreateTokenInput { + s.Scope = v + return s +} + +type CreateTokenOutput struct { + _ struct{} `type:"structure"` + + // A bearer token to access AWS accounts and applications assigned to a user. + // + // AccessToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenOutput's + // String and GoString methods. + AccessToken *string `locationName:"accessToken" type:"string" sensitive:"true"` + + // Indicates the time in seconds when an access token will expire. + ExpiresIn *int64 `locationName:"expiresIn" type:"integer"` + + // The idToken is not implemented or supported. For more information about the + // features and limitations of the current IAM Identity Center OIDC implementation, + // see Considerations for Using this Guide in the IAM Identity Center OIDC API + // Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // + // A JSON Web Token (JWT) that identifies who is associated with the issued + // access token. + // + // IdToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenOutput's + // String and GoString methods. + IdToken *string `locationName:"idToken" type:"string" sensitive:"true"` + + // A token that, if present, can be used to refresh a previously issued access + // token that might have expired. 
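+	//
+	// A minimal sketch (illustrative, not part of the generated file):
+	// deriving an absolute expiry from ExpiresIn so a cached token can be
+	// refreshed before it lapses; tok is an assumed *CreateTokenOutput.
+	//
+	//	expiresAt := time.Now().Add(time.Duration(aws.Int64Value(tok.ExpiresIn)) * time.Second)
+	//	if time.Until(expiresAt) < time.Minute && tok.RefreshToken != nil {
+	//		// re-run CreateToken with the refresh_token grant type
+	//	}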
+ // + // For more information about the features and limitations of the current IAM + // Identity Center OIDC implementation, see Considerations for Using this Guide + // in the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // + // RefreshToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenOutput's + // String and GoString methods. + RefreshToken *string `locationName:"refreshToken" type:"string" sensitive:"true"` + + // Used to notify the client that the returned token is an access token. The + // supported token type is Bearer. + TokenType *string `locationName:"tokenType" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenOutput) GoString() string { + return s.String() +} + +// SetAccessToken sets the AccessToken field's value. +func (s *CreateTokenOutput) SetAccessToken(v string) *CreateTokenOutput { + s.AccessToken = &v + return s +} + +// SetExpiresIn sets the ExpiresIn field's value. +func (s *CreateTokenOutput) SetExpiresIn(v int64) *CreateTokenOutput { + s.ExpiresIn = &v + return s +} + +// SetIdToken sets the IdToken field's value. +func (s *CreateTokenOutput) SetIdToken(v string) *CreateTokenOutput { + s.IdToken = &v + return s +} + +// SetRefreshToken sets the RefreshToken field's value. +func (s *CreateTokenOutput) SetRefreshToken(v string) *CreateTokenOutput { + s.RefreshToken = &v + return s +} + +// SetTokenType sets the TokenType field's value. +func (s *CreateTokenOutput) SetTokenType(v string) *CreateTokenOutput { + s.TokenType = &v + return s +} + +type CreateTokenWithIAMInput struct { + _ struct{} `type:"structure"` + + // Used only when calling this API for the JWT Bearer grant type. This value + // specifies the JSON Web Token (JWT) issued by a trusted token issuer. To authorize + // a trusted token issuer, configure the JWT Bearer GrantOptions for the application. + // + // Assertion is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMInput's + // String and GoString methods. + Assertion *string `locationName:"assertion" type:"string" sensitive:"true"` + + // The unique identifier string for the client or application. This value is + // an application ARN that has OAuth grants configured. + // + // ClientId is a required field + ClientId *string `locationName:"clientId" type:"string" required:"true"` + + // Used only when calling this API for the Authorization Code grant type. This + // short-term code is used to identify this authorization request. The code + // is obtained through a redirect from IAM Identity Center to a redirect URI + // persisted in the Authorization Code GrantOptions for the application. + Code *string `locationName:"code" type:"string"` + + // Supports the following OAuth grant types: Authorization Code, Refresh Token, + // JWT Bearer, and Token Exchange. 
Specify one of the following values, depending + // on the grant type that you want: + // + // * Authorization Code - authorization_code + // + // * Refresh Token - refresh_token + // + // * JWT Bearer - urn:ietf:params:oauth:grant-type:jwt-bearer + // + // * Token Exchange - urn:ietf:params:oauth:grant-type:token-exchange + // + // GrantType is a required field + GrantType *string `locationName:"grantType" type:"string" required:"true"` + + // Used only when calling this API for the Authorization Code grant type. This + // value specifies the location of the client or application that has registered + // to receive the authorization code. + RedirectUri *string `locationName:"redirectUri" type:"string"` + + // Used only when calling this API for the Refresh Token grant type. This token + // is used to refresh short-term tokens, such as the access token, that might + // expire. + // + // For more information about the features and limitations of the current IAM + // Identity Center OIDC implementation, see Considerations for Using this Guide + // in the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // + // RefreshToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMInput's + // String and GoString methods. + RefreshToken *string `locationName:"refreshToken" type:"string" sensitive:"true"` + + // Used only when calling this API for the Token Exchange grant type. This value + // specifies the type of token that the requester can receive. The following + // values are supported: + // + // * Access Token - urn:ietf:params:oauth:token-type:access_token + // + // * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token + RequestedTokenType *string `locationName:"requestedTokenType" type:"string"` + + // The list of scopes for which authorization is requested. The access token + // that is issued is limited to the scopes that are granted. If the value is + // not specified, IAM Identity Center authorizes all scopes configured for the + // application, including the following default scopes: openid, aws, sts:identity_context. + Scope []*string `locationName:"scope" type:"list"` + + // Used only when calling this API for the Token Exchange grant type. This value + // specifies the subject of the exchange. The value of the subject token must + // be an access token issued by IAM Identity Center to a different client or + // application. The access token must have authorized scopes that indicate the + // requested application as a target audience. + // + // SubjectToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMInput's + // String and GoString methods. + SubjectToken *string `locationName:"subjectToken" type:"string" sensitive:"true"` + + // Used only when calling this API for the Token Exchange grant type. This value + // specifies the type of token that is passed as the subject of the exchange. + // The following value is supported: + // + // * Access Token - urn:ietf:params:oauth:token-type:access_token + SubjectTokenType *string `locationName:"subjectTokenType" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s CreateTokenWithIAMInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenWithIAMInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateTokenWithIAMInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTokenWithIAMInput"} + if s.ClientId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientId")) + } + if s.GrantType == nil { + invalidParams.Add(request.NewErrParamRequired("GrantType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAssertion sets the Assertion field's value. +func (s *CreateTokenWithIAMInput) SetAssertion(v string) *CreateTokenWithIAMInput { + s.Assertion = &v + return s +} + +// SetClientId sets the ClientId field's value. +func (s *CreateTokenWithIAMInput) SetClientId(v string) *CreateTokenWithIAMInput { + s.ClientId = &v + return s +} + +// SetCode sets the Code field's value. +func (s *CreateTokenWithIAMInput) SetCode(v string) *CreateTokenWithIAMInput { + s.Code = &v + return s +} + +// SetGrantType sets the GrantType field's value. +func (s *CreateTokenWithIAMInput) SetGrantType(v string) *CreateTokenWithIAMInput { + s.GrantType = &v + return s +} + +// SetRedirectUri sets the RedirectUri field's value. +func (s *CreateTokenWithIAMInput) SetRedirectUri(v string) *CreateTokenWithIAMInput { + s.RedirectUri = &v + return s +} + +// SetRefreshToken sets the RefreshToken field's value. +func (s *CreateTokenWithIAMInput) SetRefreshToken(v string) *CreateTokenWithIAMInput { + s.RefreshToken = &v + return s +} + +// SetRequestedTokenType sets the RequestedTokenType field's value. +func (s *CreateTokenWithIAMInput) SetRequestedTokenType(v string) *CreateTokenWithIAMInput { + s.RequestedTokenType = &v + return s +} + +// SetScope sets the Scope field's value. +func (s *CreateTokenWithIAMInput) SetScope(v []*string) *CreateTokenWithIAMInput { + s.Scope = v + return s +} + +// SetSubjectToken sets the SubjectToken field's value. +func (s *CreateTokenWithIAMInput) SetSubjectToken(v string) *CreateTokenWithIAMInput { + s.SubjectToken = &v + return s +} + +// SetSubjectTokenType sets the SubjectTokenType field's value. +func (s *CreateTokenWithIAMInput) SetSubjectTokenType(v string) *CreateTokenWithIAMInput { + s.SubjectTokenType = &v + return s +} + +type CreateTokenWithIAMOutput struct { + _ struct{} `type:"structure"` + + // A bearer token to access AWS accounts and applications assigned to a user. + // + // AccessToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMOutput's + // String and GoString methods. + AccessToken *string `locationName:"accessToken" type:"string" sensitive:"true"` + + // Indicates the time in seconds when an access token will expire. + ExpiresIn *int64 `locationName:"expiresIn" type:"integer"` + + // A JSON Web Token (JWT) that identifies the user associated with the issued + // access token. + // + // IdToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMOutput's + // String and GoString methods. 
+ IdToken *string `locationName:"idToken" type:"string" sensitive:"true"` + + // Indicates the type of tokens that are issued by IAM Identity Center. The + // following values are supported: + // + // * Access Token - urn:ietf:params:oauth:token-type:access_token + // + // * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token + IssuedTokenType *string `locationName:"issuedTokenType" type:"string"` + + // A token that, if present, can be used to refresh a previously issued access + // token that might have expired. + // + // For more information about the features and limitations of the current IAM + // Identity Center OIDC implementation, see Considerations for Using this Guide + // in the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // + // RefreshToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMOutput's + // String and GoString methods. + RefreshToken *string `locationName:"refreshToken" type:"string" sensitive:"true"` + + // The list of scopes for which authorization is granted. The access token that + // is issued is limited to the scopes that are granted. + Scope []*string `locationName:"scope" type:"list"` + + // Used to notify the requester that the returned token is an access token. + // The supported token type is Bearer. + TokenType *string `locationName:"tokenType" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenWithIAMOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenWithIAMOutput) GoString() string { + return s.String() +} + +// SetAccessToken sets the AccessToken field's value. +func (s *CreateTokenWithIAMOutput) SetAccessToken(v string) *CreateTokenWithIAMOutput { + s.AccessToken = &v + return s +} + +// SetExpiresIn sets the ExpiresIn field's value. +func (s *CreateTokenWithIAMOutput) SetExpiresIn(v int64) *CreateTokenWithIAMOutput { + s.ExpiresIn = &v + return s +} + +// SetIdToken sets the IdToken field's value. +func (s *CreateTokenWithIAMOutput) SetIdToken(v string) *CreateTokenWithIAMOutput { + s.IdToken = &v + return s +} + +// SetIssuedTokenType sets the IssuedTokenType field's value. +func (s *CreateTokenWithIAMOutput) SetIssuedTokenType(v string) *CreateTokenWithIAMOutput { + s.IssuedTokenType = &v + return s +} + +// SetRefreshToken sets the RefreshToken field's value. +func (s *CreateTokenWithIAMOutput) SetRefreshToken(v string) *CreateTokenWithIAMOutput { + s.RefreshToken = &v + return s +} + +// SetScope sets the Scope field's value. +func (s *CreateTokenWithIAMOutput) SetScope(v []*string) *CreateTokenWithIAMOutput { + s.Scope = v + return s +} + +// SetTokenType sets the TokenType field's value. +func (s *CreateTokenWithIAMOutput) SetTokenType(v string) *CreateTokenWithIAMOutput { + s.TokenType = &v + return s +} + +// Indicates that the token issued by the service is expired and is no longer +// valid. 
+type ExpiredTokenException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be expired_token. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExpiredTokenException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExpiredTokenException) GoString() string { + return s.String() +} + +func newErrorExpiredTokenException(v protocol.ResponseMetadata) error { + return &ExpiredTokenException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ExpiredTokenException) Code() string { + return "ExpiredTokenException" +} + +// Message returns the exception's message. +func (s *ExpiredTokenException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ExpiredTokenException) OrigErr() error { + return nil +} + +func (s *ExpiredTokenException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ExpiredTokenException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ExpiredTokenException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that an error from the service occurred while trying to process +// a request. +type InternalServerException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be server_error. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InternalServerException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s InternalServerException) GoString() string { + return s.String() +} + +func newErrorInternalServerException(v protocol.ResponseMetadata) error { + return &InternalServerException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InternalServerException) Code() string { + return "InternalServerException" +} + +// Message returns the exception's message. +func (s *InternalServerException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InternalServerException) OrigErr() error { + return nil +} + +func (s *InternalServerException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InternalServerException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InternalServerException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that the clientId or clientSecret in the request is invalid. For +// example, this can occur when a client sends an incorrect clientId or an expired +// clientSecret. +type InvalidClientException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be invalid_client. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidClientException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidClientException) GoString() string { + return s.String() +} + +func newErrorInvalidClientException(v protocol.ResponseMetadata) error { + return &InvalidClientException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidClientException) Code() string { + return "InvalidClientException" +} + +// Message returns the exception's message. +func (s *InvalidClientException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidClientException) OrigErr() error { + return nil +} + +func (s *InvalidClientException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidClientException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. 
+func (s *InvalidClientException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that the client information sent in the request during registration +// is invalid. +type InvalidClientMetadataException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be invalid_client_metadata. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidClientMetadataException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidClientMetadataException) GoString() string { + return s.String() +} + +func newErrorInvalidClientMetadataException(v protocol.ResponseMetadata) error { + return &InvalidClientMetadataException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidClientMetadataException) Code() string { + return "InvalidClientMetadataException" +} + +// Message returns the exception's message. +func (s *InvalidClientMetadataException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidClientMetadataException) OrigErr() error { + return nil +} + +func (s *InvalidClientMetadataException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidClientMetadataException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidClientMetadataException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that a request contains an invalid grant. This can occur if a client +// makes a CreateToken request with an invalid grant type. +type InvalidGrantException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be invalid_grant. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s InvalidGrantException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidGrantException) GoString() string { + return s.String() +} + +func newErrorInvalidGrantException(v protocol.ResponseMetadata) error { + return &InvalidGrantException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidGrantException) Code() string { + return "InvalidGrantException" +} + +// Message returns the exception's message. +func (s *InvalidGrantException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidGrantException) OrigErr() error { + return nil +} + +func (s *InvalidGrantException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidGrantException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidGrantException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that something is wrong with the input to the request. For example, +// a required parameter might be missing or out of range. +type InvalidRequestException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be invalid_request. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRequestException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRequestException) GoString() string { + return s.String() +} + +func newErrorInvalidRequestException(v protocol.ResponseMetadata) error { + return &InvalidRequestException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidRequestException) Code() string { + return "InvalidRequestException" +} + +// Message returns the exception's message. +func (s *InvalidRequestException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. 
+func (s *InvalidRequestException) OrigErr() error { + return nil +} + +func (s *InvalidRequestException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidRequestException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidRequestException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that a token provided as input to the request was issued by and +// is only usable by calling IAM Identity Center endpoints in another region. +type InvalidRequestRegionException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Indicates the IAM Identity Center endpoint which the requester may call with + // this token. + Endpoint *string `locationName:"endpoint" type:"string"` + + // Single error code. For this exception the value will be invalid_request. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` + + // Indicates the region which the requester may call with this token. + Region *string `locationName:"region" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRequestRegionException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRequestRegionException) GoString() string { + return s.String() +} + +func newErrorInvalidRequestRegionException(v protocol.ResponseMetadata) error { + return &InvalidRequestRegionException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidRequestRegionException) Code() string { + return "InvalidRequestRegionException" +} + +// Message returns the exception's message. +func (s *InvalidRequestRegionException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidRequestRegionException) OrigErr() error { + return nil +} + +func (s *InvalidRequestRegionException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidRequestRegionException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidRequestRegionException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that the scope provided in the request is invalid. +type InvalidScopeException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. 
For this exception the value will be invalid_scope. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidScopeException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidScopeException) GoString() string { + return s.String() +} + +func newErrorInvalidScopeException(v protocol.ResponseMetadata) error { + return &InvalidScopeException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidScopeException) Code() string { + return "InvalidScopeException" +} + +// Message returns the exception's message. +func (s *InvalidScopeException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidScopeException) OrigErr() error { + return nil +} + +func (s *InvalidScopeException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidScopeException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidScopeException) RequestID() string { + return s.RespMetadata.RequestID +} + +type RegisterClientInput struct { + _ struct{} `type:"structure"` + + // The friendly name of the client. + // + // ClientName is a required field + ClientName *string `locationName:"clientName" type:"string" required:"true"` + + // The type of client. The service supports only public as a client type. Anything + // other than public will be rejected by the service. + // + // ClientType is a required field + ClientType *string `locationName:"clientType" type:"string" required:"true"` + + // The list of scopes that are defined by the client. Upon authorization, this + // list is used to restrict permissions when granting an access token. + Scopes []*string `locationName:"scopes" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RegisterClientInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RegisterClientInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *RegisterClientInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RegisterClientInput"} + if s.ClientName == nil { + invalidParams.Add(request.NewErrParamRequired("ClientName")) + } + if s.ClientType == nil { + invalidParams.Add(request.NewErrParamRequired("ClientType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientName sets the ClientName field's value. +func (s *RegisterClientInput) SetClientName(v string) *RegisterClientInput { + s.ClientName = &v + return s +} + +// SetClientType sets the ClientType field's value. +func (s *RegisterClientInput) SetClientType(v string) *RegisterClientInput { + s.ClientType = &v + return s +} + +// SetScopes sets the Scopes field's value. +func (s *RegisterClientInput) SetScopes(v []*string) *RegisterClientInput { + s.Scopes = v + return s +} + +type RegisterClientOutput struct { + _ struct{} `type:"structure"` + + // An endpoint that the client can use to request authorization. + AuthorizationEndpoint *string `locationName:"authorizationEndpoint" type:"string"` + + // The unique identifier string for each client. This client uses this identifier + // to get authenticated by the service in subsequent calls. + ClientId *string `locationName:"clientId" type:"string"` + + // Indicates the time at which the clientId and clientSecret were issued. + ClientIdIssuedAt *int64 `locationName:"clientIdIssuedAt" type:"long"` + + // A secret string generated for the client. The client will use this string + // to get authenticated by the service in subsequent calls. + // + // ClientSecret is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by RegisterClientOutput's + // String and GoString methods. + ClientSecret *string `locationName:"clientSecret" type:"string" sensitive:"true"` + + // Indicates the time at which the clientId and clientSecret will become invalid. + ClientSecretExpiresAt *int64 `locationName:"clientSecretExpiresAt" type:"long"` + + // An endpoint that the client can use to create tokens. + TokenEndpoint *string `locationName:"tokenEndpoint" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RegisterClientOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RegisterClientOutput) GoString() string { + return s.String() +} + +// SetAuthorizationEndpoint sets the AuthorizationEndpoint field's value. +func (s *RegisterClientOutput) SetAuthorizationEndpoint(v string) *RegisterClientOutput { + s.AuthorizationEndpoint = &v + return s +} + +// SetClientId sets the ClientId field's value. +func (s *RegisterClientOutput) SetClientId(v string) *RegisterClientOutput { + s.ClientId = &v + return s +} + +// SetClientIdIssuedAt sets the ClientIdIssuedAt field's value. +func (s *RegisterClientOutput) SetClientIdIssuedAt(v int64) *RegisterClientOutput { + s.ClientIdIssuedAt = &v + return s +} + +// SetClientSecret sets the ClientSecret field's value. 
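Editorial aside, not part of the patch: the RegisterClientInput and RegisterClientOutput types above are the first step of the device-authorization flow this package implements. The following is a minimal, hedged sketch of calling this vendored v1 API; the session setup, region, and client name are illustrative assumptions rather than anything this change prescribes.

// Sketch: registering a public OIDC client with the vendored v1 SDK.
// The region and client name below are placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssooidc"
)

func main() {
	// Assumes credentials/region resolve from the default config chain.
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := ssooidc.New(sess)

	out, err := svc.RegisterClient(&ssooidc.RegisterClientInput{
		ClientName: aws.String("example-device-client"), // hypothetical friendly name
		ClientType: aws.String("public"),                // the only type the service accepts
	})
	if err != nil {
		log.Fatal(err)
	}
	// Persist ClientId and ClientSecret (valid until ClientSecretExpiresAt)
	// for the later StartDeviceAuthorization and CreateToken calls.
	fmt.Println(aws.StringValue(out.ClientId))
}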
+func (s *RegisterClientOutput) SetClientSecret(v string) *RegisterClientOutput { + s.ClientSecret = &v + return s +} + +// SetClientSecretExpiresAt sets the ClientSecretExpiresAt field's value. +func (s *RegisterClientOutput) SetClientSecretExpiresAt(v int64) *RegisterClientOutput { + s.ClientSecretExpiresAt = &v + return s +} + +// SetTokenEndpoint sets the TokenEndpoint field's value. +func (s *RegisterClientOutput) SetTokenEndpoint(v string) *RegisterClientOutput { + s.TokenEndpoint = &v + return s +} + +// Indicates that the client is making the request too frequently and is more +// than the service can handle. +type SlowDownException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be slow_down. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SlowDownException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SlowDownException) GoString() string { + return s.String() +} + +func newErrorSlowDownException(v protocol.ResponseMetadata) error { + return &SlowDownException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *SlowDownException) Code() string { + return "SlowDownException" +} + +// Message returns the exception's message. +func (s *SlowDownException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *SlowDownException) OrigErr() error { + return nil +} + +func (s *SlowDownException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *SlowDownException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *SlowDownException) RequestID() string { + return s.RespMetadata.RequestID +} + +type StartDeviceAuthorizationInput struct { + _ struct{} `type:"structure"` + + // The unique identifier string for the client that is registered with IAM Identity + // Center. This value should come from the persisted result of the RegisterClient + // API operation. + // + // ClientId is a required field + ClientId *string `locationName:"clientId" type:"string" required:"true"` + + // A secret string that is generated for the client. This value should come + // from the persisted result of the RegisterClient API operation. + // + // ClientSecret is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by StartDeviceAuthorizationInput's + // String and GoString methods. 
+ // + // ClientSecret is a required field + ClientSecret *string `locationName:"clientSecret" type:"string" required:"true" sensitive:"true"` + + // The URL for the Amazon Web Services access portal. For more information, + // see Using the Amazon Web Services access portal (https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html) + // in the IAM Identity Center User Guide. + // + // StartUrl is a required field + StartUrl *string `locationName:"startUrl" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartDeviceAuthorizationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartDeviceAuthorizationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartDeviceAuthorizationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartDeviceAuthorizationInput"} + if s.ClientId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientId")) + } + if s.ClientSecret == nil { + invalidParams.Add(request.NewErrParamRequired("ClientSecret")) + } + if s.StartUrl == nil { + invalidParams.Add(request.NewErrParamRequired("StartUrl")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientId sets the ClientId field's value. +func (s *StartDeviceAuthorizationInput) SetClientId(v string) *StartDeviceAuthorizationInput { + s.ClientId = &v + return s +} + +// SetClientSecret sets the ClientSecret field's value. +func (s *StartDeviceAuthorizationInput) SetClientSecret(v string) *StartDeviceAuthorizationInput { + s.ClientSecret = &v + return s +} + +// SetStartUrl sets the StartUrl field's value. +func (s *StartDeviceAuthorizationInput) SetStartUrl(v string) *StartDeviceAuthorizationInput { + s.StartUrl = &v + return s +} + +type StartDeviceAuthorizationOutput struct { + _ struct{} `type:"structure"` + + // The short-lived code that is used by the device when polling for a session + // token. + DeviceCode *string `locationName:"deviceCode" type:"string"` + + // Indicates the number of seconds in which the verification code will become + // invalid. + ExpiresIn *int64 `locationName:"expiresIn" type:"integer"` + + // Indicates the number of seconds the client must wait between attempts when + // polling for a session. + Interval *int64 `locationName:"interval" type:"integer"` + + // A one-time user verification code. This is needed to authorize an in-use + // device. + UserCode *string `locationName:"userCode" type:"string"` + + // The URI of the verification page that takes the userCode to authorize the + // device. + VerificationUri *string `locationName:"verificationUri" type:"string"` + + // An alternate URL that the client can use to automatically launch a browser. + // This process skips the manual step in which the user visits the verification + // page and enters their code. 
+ VerificationUriComplete *string `locationName:"verificationUriComplete" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartDeviceAuthorizationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartDeviceAuthorizationOutput) GoString() string { + return s.String() +} + +// SetDeviceCode sets the DeviceCode field's value. +func (s *StartDeviceAuthorizationOutput) SetDeviceCode(v string) *StartDeviceAuthorizationOutput { + s.DeviceCode = &v + return s +} + +// SetExpiresIn sets the ExpiresIn field's value. +func (s *StartDeviceAuthorizationOutput) SetExpiresIn(v int64) *StartDeviceAuthorizationOutput { + s.ExpiresIn = &v + return s +} + +// SetInterval sets the Interval field's value. +func (s *StartDeviceAuthorizationOutput) SetInterval(v int64) *StartDeviceAuthorizationOutput { + s.Interval = &v + return s +} + +// SetUserCode sets the UserCode field's value. +func (s *StartDeviceAuthorizationOutput) SetUserCode(v string) *StartDeviceAuthorizationOutput { + s.UserCode = &v + return s +} + +// SetVerificationUri sets the VerificationUri field's value. +func (s *StartDeviceAuthorizationOutput) SetVerificationUri(v string) *StartDeviceAuthorizationOutput { + s.VerificationUri = &v + return s +} + +// SetVerificationUriComplete sets the VerificationUriComplete field's value. +func (s *StartDeviceAuthorizationOutput) SetVerificationUriComplete(v string) *StartDeviceAuthorizationOutput { + s.VerificationUriComplete = &v + return s +} + +// Indicates that the client is not currently authorized to make the request. +// This can happen when a clientId is not issued for a public client. +type UnauthorizedClientException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be unauthorized_client. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UnauthorizedClientException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UnauthorizedClientException) GoString() string { + return s.String() +} + +func newErrorUnauthorizedClientException(v protocol.ResponseMetadata) error { + return &UnauthorizedClientException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. 
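Editorial aside, not part of the patch: a hedged sketch of how the StartDeviceAuthorization output above feeds the RFC 8628 polling loop. Here svc, clientID, and clientSecret are assumed to exist from the registration sketch earlier, the start URL is a placeholder, and imports of time, fmt, log, aws, awserr, and ssooidc are assumed.

// Sketch: device-authorization polling loop (illustrative only).
auth, err := svc.StartDeviceAuthorization(&ssooidc.StartDeviceAuthorizationInput{
	ClientId:     clientID,
	ClientSecret: clientSecret,
	StartUrl:     aws.String("https://example.awsapps.com/start"), // placeholder portal URL
})
if err != nil {
	log.Fatal(err)
}
fmt.Println("To authorize, visit:", aws.StringValue(auth.VerificationUriComplete))

for {
	// Interval is the minimum number of seconds to wait between polls.
	time.Sleep(time.Duration(aws.Int64Value(auth.Interval)) * time.Second)

	tok, err := svc.CreateToken(&ssooidc.CreateTokenInput{
		ClientId:     clientID,
		ClientSecret: clientSecret,
		DeviceCode:   auth.DeviceCode,
		GrantType:    aws.String("urn:ietf:params:oauth:grant-type:device_code"),
	})
	if err == nil {
		fmt.Println("token expires in", aws.Int64Value(tok.ExpiresIn), "seconds")
		break
	}
	// AuthorizationPending just means the user has not finished the browser
	// step yet; any other error is treated as fatal in this sketch.
	if aerr, ok := err.(awserr.Error); !ok || aerr.Code() != ssooidc.ErrCodeAuthorizationPendingException {
		log.Fatal(err)
	}
}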
+func (s *UnauthorizedClientException) Code() string { + return "UnauthorizedClientException" +} + +// Message returns the exception's message. +func (s *UnauthorizedClientException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *UnauthorizedClientException) OrigErr() error { + return nil +} + +func (s *UnauthorizedClientException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *UnauthorizedClientException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *UnauthorizedClientException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that the grant type in the request is not supported by the service. +type UnsupportedGrantTypeException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be unsupported_grant_type. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UnsupportedGrantTypeException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UnsupportedGrantTypeException) GoString() string { + return s.String() +} + +func newErrorUnsupportedGrantTypeException(v protocol.ResponseMetadata) error { + return &UnsupportedGrantTypeException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *UnsupportedGrantTypeException) Code() string { + return "UnsupportedGrantTypeException" +} + +// Message returns the exception's message. +func (s *UnsupportedGrantTypeException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *UnsupportedGrantTypeException) OrigErr() error { + return nil +} + +func (s *UnsupportedGrantTypeException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *UnsupportedGrantTypeException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. 
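Editorial aside, not part of the patch: every exception type in this generated file carries the same awserr.Error implementation plus the StatusCode and RequestID accessors shown above. Callers typically reach those through the awserr.RequestFailure interface rather than asserting a concrete type; a brief sketch, where err is assumed to come from any ssooidc call:

// Sketch: recovering the HTTP status and request ID from a service error.
// All of the generated exception types satisfy awserr.RequestFailure.
if reqErr, ok := err.(awserr.RequestFailure); ok {
	log.Printf("code=%s status=%d request-id=%s",
		reqErr.Code(), reqErr.StatusCode(), reqErr.RequestID())
}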
+func (s *UnsupportedGrantTypeException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go
new file mode 100644
index 00000000..083568c6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go
@@ -0,0 +1,67 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package ssooidc provides the client and types for making API
+// requests to AWS SSO OIDC.
+//
+// IAM Identity Center OpenID Connect (OIDC) is a web service that enables a
+// client (such as CLI or a native application) to register with IAM Identity
+// Center. The service also enables the client to fetch the user’s access
+// token upon successful authentication and authorization with IAM Identity
+// Center.
+//
+// IAM Identity Center uses the sso and identitystore API namespaces.
+//
+// # Considerations for Using This Guide
+//
+// Before you begin using this guide, we recommend that you first review the
+// following important information about how the IAM Identity Center OIDC service
+// works.
+//
+// - The IAM Identity Center OIDC service currently implements only the portions
+// of the OAuth 2.0 Device Authorization Grant standard (https://tools.ietf.org/html/rfc8628
+// (https://tools.ietf.org/html/rfc8628)) that are necessary to enable single
+// sign-on authentication with the CLI.
+//
+// - With older versions of the CLI, the service only emits OIDC access tokens,
+// so to obtain a new token, users must explicitly re-authenticate. To access
+// the OIDC flow that supports token refresh and doesn’t require re-authentication,
+// update to the latest CLI version (1.27.10 for CLI V1 and 2.9.0 for CLI
+// V2) with support for OIDC token refresh and configurable IAM Identity
+// Center session durations. For more information, see Configure Amazon Web
+// Services access portal session duration (https://docs.aws.amazon.com/singlesignon/latest/userguide/configure-user-session.html).
+//
+// - The access tokens provided by this service grant access to all Amazon
+// Web Services account entitlements assigned to an IAM Identity Center user,
+// not just a particular application.
+//
+// - The documentation in this guide does not describe the mechanism to convert
+// the access token into Amazon Web Services Auth (“sigv4”) credentials
+// for use with IAM-protected Amazon Web Services service endpoints. For
+// more information, see GetRoleCredentials (https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html)
+// in the IAM Identity Center Portal API Reference Guide.
+//
+// For general information about IAM Identity Center, see What is IAM Identity
+// Center? (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html)
+// in the IAM Identity Center User Guide.
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10 for more information on this service.
+//
+// See ssooidc package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/ssooidc/
+//
+// # Using the Client
+//
+// To contact AWS SSO OIDC with the SDK use the New function to create
+// a new service client. With that client you can make API requests to the service.
+// These clients are safe to use concurrently.
+//
+// See the SDK's documentation for more information on how to use the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the AWS SSO OIDC client SSOOIDC for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/ssooidc/#New +package ssooidc diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go new file mode 100644 index 00000000..e6242e49 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go @@ -0,0 +1,115 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package ssooidc + +import ( + "github.com/aws/aws-sdk-go/private/protocol" +) + +const ( + + // ErrCodeAccessDeniedException for service response error code + // "AccessDeniedException". + // + // You do not have sufficient access to perform this action. + ErrCodeAccessDeniedException = "AccessDeniedException" + + // ErrCodeAuthorizationPendingException for service response error code + // "AuthorizationPendingException". + // + // Indicates that a request to authorize a client with an access user session + // token is pending. + ErrCodeAuthorizationPendingException = "AuthorizationPendingException" + + // ErrCodeExpiredTokenException for service response error code + // "ExpiredTokenException". + // + // Indicates that the token issued by the service is expired and is no longer + // valid. + ErrCodeExpiredTokenException = "ExpiredTokenException" + + // ErrCodeInternalServerException for service response error code + // "InternalServerException". + // + // Indicates that an error from the service occurred while trying to process + // a request. + ErrCodeInternalServerException = "InternalServerException" + + // ErrCodeInvalidClientException for service response error code + // "InvalidClientException". + // + // Indicates that the clientId or clientSecret in the request is invalid. For + // example, this can occur when a client sends an incorrect clientId or an expired + // clientSecret. + ErrCodeInvalidClientException = "InvalidClientException" + + // ErrCodeInvalidClientMetadataException for service response error code + // "InvalidClientMetadataException". + // + // Indicates that the client information sent in the request during registration + // is invalid. + ErrCodeInvalidClientMetadataException = "InvalidClientMetadataException" + + // ErrCodeInvalidGrantException for service response error code + // "InvalidGrantException". + // + // Indicates that a request contains an invalid grant. This can occur if a client + // makes a CreateToken request with an invalid grant type. + ErrCodeInvalidGrantException = "InvalidGrantException" + + // ErrCodeInvalidRequestException for service response error code + // "InvalidRequestException". + // + // Indicates that something is wrong with the input to the request. For example, + // a required parameter might be missing or out of range. + ErrCodeInvalidRequestException = "InvalidRequestException" + + // ErrCodeInvalidRequestRegionException for service response error code + // "InvalidRequestRegionException". + // + // Indicates that a token provided as input to the request was issued by and + // is only usable by calling IAM Identity Center endpoints in another region. 
+	ErrCodeInvalidRequestRegionException = "InvalidRequestRegionException"
+
+	// ErrCodeInvalidScopeException for service response error code
+	// "InvalidScopeException".
+	//
+	// Indicates that the scope provided in the request is invalid.
+	ErrCodeInvalidScopeException = "InvalidScopeException"
+
+	// ErrCodeSlowDownException for service response error code
+	// "SlowDownException".
+	//
+	// Indicates that the client is making the request too frequently and is more
+	// than the service can handle.
+	ErrCodeSlowDownException = "SlowDownException"
+
+	// ErrCodeUnauthorizedClientException for service response error code
+	// "UnauthorizedClientException".
+	//
+	// Indicates that the client is not currently authorized to make the request.
+	// This can happen when a clientId is not issued for a public client.
+	ErrCodeUnauthorizedClientException = "UnauthorizedClientException"
+
+	// ErrCodeUnsupportedGrantTypeException for service response error code
+	// "UnsupportedGrantTypeException".
+	//
+	// Indicates that the grant type in the request is not supported by the service.
+	ErrCodeUnsupportedGrantTypeException = "UnsupportedGrantTypeException"
+)
+
+var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{
+	"AccessDeniedException":          newErrorAccessDeniedException,
+	"AuthorizationPendingException":  newErrorAuthorizationPendingException,
+	"ExpiredTokenException":          newErrorExpiredTokenException,
+	"InternalServerException":        newErrorInternalServerException,
+	"InvalidClientException":         newErrorInvalidClientException,
+	"InvalidClientMetadataException": newErrorInvalidClientMetadataException,
+	"InvalidGrantException":          newErrorInvalidGrantException,
+	"InvalidRequestException":        newErrorInvalidRequestException,
+	"InvalidRequestRegionException":  newErrorInvalidRequestRegionException,
+	"InvalidScopeException":          newErrorInvalidScopeException,
+	"SlowDownException":              newErrorSlowDownException,
+	"UnauthorizedClientException":    newErrorUnauthorizedClientException,
+	"UnsupportedGrantTypeException":  newErrorUnsupportedGrantTypeException,
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go
new file mode 100644
index 00000000..782bae36
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go
@@ -0,0 +1,106 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package ssooidc
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol"
+	"github.com/aws/aws-sdk-go/private/protocol/restjson"
+)
+
+// SSOOIDC provides the API operation methods for making requests to
+// AWS SSO OIDC. See this package's package overview docs
+// for details on the service.
+//
+// SSOOIDC methods are safe to use concurrently. It is not safe to
+// modify any of the struct's properties though.
+type SSOOIDC struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+	ServiceName = "SSO OIDC" // Name of service.
+	EndpointsID = "oidc"     // ID to lookup a service endpoint with.
+	ServiceID   = "SSO OIDC" // ServiceID is a unique identifier of a specific service.
+) + +// New creates a new instance of the SSOOIDC client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// +// mySession := session.Must(session.NewSession()) +// +// // Create a SSOOIDC client from just a session. +// svc := ssooidc.New(mySession) +// +// // Create a SSOOIDC client with additional configuration +// svc := ssooidc.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *SSOOIDC { + c := p.ClientConfig(EndpointsID, cfgs...) + if c.SigningNameDerived || len(c.SigningName) == 0 { + c.SigningName = "sso-oauth" + } + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *SSOOIDC { + svc := &SSOOIDC{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2019-06-10", + ResolvedRegion: resolvedRegion, + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed( + protocol.NewUnmarshalErrorHandler(restjson.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(), + ) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a SSOOIDC operation and runs any +// custom request initialization. +func (c *SSOOIDC) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go index a1a8a095..2c395f5f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -28,14 +28,13 @@ const opAssumeRole = "AssumeRole" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the AssumeRoleRequest method. +// req, resp := client.AssumeRoleRequest(params) // -// // Example sending a request using the AssumeRoleRequest method. -// req, resp := client.AssumeRoleRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) { @@ -57,58 +56,65 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // AssumeRole API operation for AWS Security Token Service. 
// // Returns a set of temporary security credentials that you can use to access -// Amazon Web Services resources that you might not normally have access to. -// These temporary credentials consist of an access key ID, a secret access -// key, and a security token. Typically, you use AssumeRole within your account -// or for cross-account access. For a comparison of AssumeRole with other API -// operations that produce temporary credentials, see Requesting Temporary Security -// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// Amazon Web Services resources. These temporary credentials consist of an +// access key ID, a secret access key, and a security token. Typically, you +// use AssumeRole within your account or for cross-account access. For a comparison +// of AssumeRole with other API operations that produce temporary credentials, +// see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // -// Permissions +// # Permissions // // The temporary security credentials created by AssumeRole can be used to make // API calls to any Amazon Web Services service with the following exception: -// You cannot call the STS GetFederationToken or GetSessionToken API operations. +// You cannot call the Amazon Web Services STS GetFederationToken or GetSessionToken +// API operations. // // (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plaintext that you use for both inline -// and managed session policies can't exceed 2,048 characters. Passing policies -// to this operation returns new temporary credentials. The resulting session's -// permissions are the intersection of the role's identity-based policy and -// the session policies. You can use the role's temporary credentials in subsequent -// Amazon Web Services API calls to access resources in the account that owns -// the role. You cannot use session policies to grant more permissions than -// those allowed by the identity-based policy of the role that is being assumed. -// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// inline session policy. You can also specify up to 10 managed policy Amazon +// Resource Names (ARNs) to use as managed session policies. The plaintext that +// you use for both inline and managed session policies can't exceed 2,048 characters. +// Passing policies to this operation returns new temporary credentials. The +// resulting session's permissions are the intersection of the role's identity-based +// policy and the session policies. You can use the role's temporary credentials +// in subsequent Amazon Web Services API calls to access resources in the account +// that owns the role. 
You cannot use session policies to grant more permissions +// than those allowed by the identity-based policy of the role that is being +// assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // -// To assume a role from a different account, your account must be trusted by -// the role. The trust relationship is defined in the role's trust policy when -// the role is created. That trust policy states which accounts are allowed -// to delegate that access to users in the account. +// When you create a role, you create two policies: a role trust policy that +// specifies who can assume the role, and a permissions policy that specifies +// what can be done with the role. You specify the trusted principal that is +// allowed to assume the role in the role trust policy. +// +// To assume a role from a different account, your Amazon Web Services account +// must be trusted by the role. The trust relationship is defined in the role's +// trust policy when the role is created. That trust policy states which accounts +// are allowed to delegate that access to users in the account. // // A user who wants to access a role in a different account must also have permissions -// that are delegated from the user account administrator. The administrator -// must attach a policy that allows the user to call AssumeRole for the ARN -// of the role in the other account. If the user is in the same account as the -// role, then you can do either of the following: +// that are delegated from the account administrator. The administrator must +// attach a policy that allows the user to call AssumeRole for the ARN of the +// role in the other account. +// +// To allow a user to assume a role in the same account, you can do either of +// the following: // -// * Attach a policy to the user (identical to the previous user in a different -// account). +// - Attach a policy to the user that allows the user to call AssumeRole +// (as long as the role's trust policy trusts the account). // -// * Add the user as a principal directly in the role's trust policy. +// - Add the user as a principal directly in the role's trust policy. // -// In this case, the trust policy acts as an IAM resource-based policy. Users -// in the same account as the role do not need explicit permission to assume -// the role. For more information about trust policies and resource-based policies, -// see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) +// You can do either because the role’s trust policy acts as an IAM resource-based +// policy. When a resource-based policy grants access to a principal in the +// same account, no additional identity-based policy is required. For more information +// about trust policies and resource-based policies, see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) // in the IAM User Guide. // -// Tags +// # Tags // // (Optional) You can pass tag key-value pairs to your session. These tags are // called session tags. For more information about session tags, see Passing @@ -126,7 +132,7 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) // in the IAM User Guide. 
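Editorial aside, not part of the patch: the reworded session-policy text above is dense, so here is a hedged sketch of the mechanics with the v1 STS client. The role ARN and policy document are placeholders, and stsSvc is assumed to be an *sts.STS built with sts.New from an existing session.

// Sketch: scoping AssumeRole credentials with an inline session policy.
// Effective permissions are the intersection of the role's identity-based
// policy and this session policy, as the docs above describe.
policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:GetObject","Resource":"arn:aws:s3:::example-bucket/*"}]}` // placeholder
out, err := stsSvc.AssumeRole(&sts.AssumeRoleInput{
	RoleArn:         aws.String("arn:aws:iam::123456789012:role/example-role"), // placeholder
	RoleSessionName: aws.String("scoped-session"),
	Policy:          aws.String(policy), // plaintext limit: 2,048 characters
})
if err != nil {
	log.Fatal(err)
}
_ = out // out.Credentials now carries the scoped temporary credentials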
// -// Using MFA with AssumeRole +// # Using MFA with AssumeRole // // (Optional) You can include multi-factor authentication (MFA) information // when you call AssumeRole. This is useful for cross-account scenarios to ensure @@ -155,35 +161,36 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // API operation AssumeRole for usage and error information. // // Returned Error Codes: -// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" -// The request was rejected because the policy document was malformed. The error -// message describes the specific error. -// -// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the total packed size of the session policies -// and session tags combined was too large. An Amazon Web Services conversion -// compresses the session policy document, session policy ARNs, and session -// tags into a packed binary format that has a separate limit. The error message -// indicates by percentage how close the policies and tags are to the upper -// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// You could receive this error even though you meet other defined session policy -// and session tag limits. For more information, see IAM and STS Entity Character -// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// * ErrCodeRegionDisabledException "RegionDisabledException" -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating Amazon Web Services STS in an Amazon Web Services Region -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// * ErrCodeExpiredTokenException "ExpiredTokenException" -// The web identity token that was passed is expired or is not valid. Get a -// new identity token from the identity provider and then retry the request. +// +// - ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// - ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An Amazon Web Services conversion +// compresses the session policy document, session policy ARNs, and session +// tags into a packed binary format that has a separate limit. The error message +// indicates by percentage how close the policies and tags are to the upper +// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) +// in the IAM User Guide. 
+// +// - ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating Amazon Web Services STS in an Amazon Web Services Region +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// - ErrCodeExpiredTokenException "ExpiredTokenException" +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) { @@ -223,14 +230,13 @@ const opAssumeRoleWithSAML = "AssumeRoleWithSAML" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the AssumeRoleWithSAMLRequest method. +// req, resp := client.AssumeRoleWithSAMLRequest(params) // -// // Example sending a request using the AssumeRoleWithSAMLRequest method. -// req, resp := client.AssumeRoleWithSAMLRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) { @@ -258,7 +264,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // Services access without user-specific credentials or configuration. For a // comparison of AssumeRoleWithSAML with the other API operations that produce // temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // // The temporary security credentials returned by this operation consist of @@ -266,7 +272,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // can use these temporary security credentials to sign calls to Amazon Web // Services services. // -// Session Duration +// # Session Duration // // By default, the temporary security credentials created by AssumeRoleWithSAML // last for one hour. However, you can use the optional DurationSeconds parameter @@ -292,7 +298,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // a role using role chaining and provide a DurationSeconds parameter value // greater than one hour, the operation fails. 
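Editorial aside, not part of the patch: the DurationSeconds and MFA behavior described in the surrounding documentation is easiest to see in code. A hedged sketch, reusing the assumed stsSvc from the previous note; the ARNs and token code are illustrative.

// Sketch: a 2-hour, MFA-protected AssumeRole call. DurationSeconds must not
// exceed the role's configured maximum session duration; SerialNumber and
// TokenCode apply only when the role's trust policy requires MFA.
out, err := stsSvc.AssumeRole(&sts.AssumeRoleInput{
	RoleArn:         aws.String("arn:aws:iam::123456789012:role/example-role"), // placeholder
	RoleSessionName: aws.String("mfa-session"),
	DurationSeconds: aws.Int64(7200),
	SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/example-user"), // placeholder
	TokenCode:       aws.String("123456"), // one-time code from the MFA device
})
if err != nil {
	log.Fatal(err)
}
_ = out // out.Credentials holds the temporary access key, secret, and token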
// -// Permissions +// # Permissions // // The temporary security credentials created by AssumeRoleWithSAML can be used // to make API calls to any Amazon Web Services service with the following exception: @@ -300,16 +306,16 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // // (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plaintext that you use for both inline -// and managed session policies can't exceed 2,048 characters. Passing policies -// to this operation returns new temporary credentials. The resulting session's -// permissions are the intersection of the role's identity-based policy and -// the session policies. You can use the role's temporary credentials in subsequent -// Amazon Web Services API calls to access resources in the account that owns -// the role. You cannot use session policies to grant more permissions than -// those allowed by the identity-based policy of the role that is being assumed. -// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// inline session policy. You can also specify up to 10 managed policy Amazon +// Resource Names (ARNs) to use as managed session policies. The plaintext that +// you use for both inline and managed session policies can't exceed 2,048 characters. +// Passing policies to this operation returns new temporary credentials. The +// resulting session's permissions are the intersection of the role's identity-based +// policy and the session policies. You can use the role's temporary credentials +// in subsequent Amazon Web Services API calls to access resources in the account +// that owns the role. You cannot use session policies to grant more permissions +// than those allowed by the identity-based policy of the role that is being +// assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // // Calling AssumeRoleWithSAML does not require the use of Amazon Web Services @@ -323,7 +329,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // identifiable information (PII). For example, you could instead use the persistent // identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). // -// Tags +// # Tags // // (Optional) You can configure your IdP to pass attributes into your SAML assertion // as session tags. Each session tag consists of a key name and an associated @@ -336,11 +342,12 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. // -// An Amazon Web Services conversion compresses the passed session policies -// and session tags into a packed binary format that has a separate limit. Your -// request can fail for this limit even if your plaintext meets the other requirements. -// The PackedPolicySize response element indicates by percentage how close the -// policies and tags for your request are to the upper size limit. 
+// An Amazon Web Services conversion compresses the passed inline session policy, +// managed policy ARNs, and session tags into a packed binary format that has +// a separate limit. Your request can fail for this limit even if your plaintext +// meets the other requirements. The PackedPolicySize response element indicates +// by percentage how close the policies and tags for your request are to the +// upper size limit. // // You can pass a session tag with the same key as a tag that is attached to // the role. When you do, session tags override the role's tags with the same @@ -357,7 +364,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) // in the IAM User Guide. // -// SAML Configuration +// # SAML Configuration // // Before your application can call AssumeRoleWithSAML, you must configure your // SAML identity provider (IdP) to issue the claims required by Amazon Web Services. @@ -368,17 +375,17 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // // For more information, see the following resources: // -// * About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) -// in the IAM User Guide. +// - About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) +// in the IAM User Guide. // -// * Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) -// in the IAM User Guide. +// - Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) +// in the IAM User Guide. // -// * Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) -// in the IAM User Guide. +// - Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) +// in the IAM User Guide. // -// * Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) -// in the IAM User Guide. +// - Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) +// in the IAM User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -388,47 +395,48 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // API operation AssumeRoleWithSAML for usage and error information. // // Returned Error Codes: -// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" -// The request was rejected because the policy document was malformed. The error -// message describes the specific error. -// -// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the total packed size of the session policies -// and session tags combined was too large. An Amazon Web Services conversion -// compresses the session policy document, session policy ARNs, and session -// tags into a packed binary format that has a separate limit. The error message -// indicates by percentage how close the policies and tags are to the upper -// size limit. 
For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// You could receive this error even though you meet other defined session policy -// and session tag limits. For more information, see IAM and STS Entity Character -// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" -// The identity provider (IdP) reported that authentication failed. This might -// be because the claim is invalid. -// -// If this error is returned for the AssumeRoleWithWebIdentity operation, it -// can also mean that the claim has expired or has been explicitly revoked. -// -// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" -// The web identity token that was passed could not be validated by Amazon Web -// Services. Get a new identity token from the identity provider and then retry -// the request. -// -// * ErrCodeExpiredTokenException "ExpiredTokenException" -// The web identity token that was passed is expired or is not valid. Get a -// new identity token from the identity provider and then retry the request. -// -// * ErrCodeRegionDisabledException "RegionDisabledException" -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating Amazon Web Services STS in an Amazon Web Services Region -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. +// +// - ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// - ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An Amazon Web Services conversion +// compresses the session policy document, session policy ARNs, and session +// tags into a packed binary format that has a separate limit. The error message +// indicates by percentage how close the policies and tags are to the upper +// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// - ErrCodeIDPRejectedClaimException "IDPRejectedClaim" +// The identity provider (IdP) reported that authentication failed. This might +// be because the claim is invalid. +// +// If this error is returned for the AssumeRoleWithWebIdentity operation, it +// can also mean that the claim has expired or has been explicitly revoked. +// +// - ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" +// The web identity token that was passed could not be validated by Amazon Web +// Services. Get a new identity token from the identity provider and then retry +// the request. 
+// +// - ErrCodeExpiredTokenException "ExpiredTokenException" +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. +// +// - ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating Amazon Web Services STS in an Amazon Web Services Region +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) { @@ -468,14 +476,13 @@ const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the AssumeRoleWithWebIdentityRequest method. +// req, resp := client.AssumeRoleWithWebIdentityRequest(params) // -// // Example sending a request using the AssumeRoleWithWebIdentityRequest method. -// req, resp := client.AssumeRoleWithWebIdentityRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) { @@ -499,8 +506,9 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // // Returns a set of temporary security credentials for users who have been authenticated // in a mobile or web application with a web identity provider. Example providers -// include Amazon Cognito, Login with Amazon, Facebook, Google, or any OpenID -// Connect-compatible identity provider. +// include the OAuth 2.0 providers Login with Amazon and Facebook, or any OpenID +// Connect-compatible identity provider such as Google or Amazon Cognito federated +// identities (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html). // // For mobile applications, we recommend that you use Amazon Cognito. You can // use Amazon Cognito with the Amazon Web Services SDK for iOS Developer Guide @@ -509,10 +517,8 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // a user. You can also supply the user with a consistent identity throughout // the lifetime of an application. // -// To learn more about Amazon Cognito, see Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840) -// in Amazon Web Services SDK for Android Developer Guide and Amazon Cognito -// Overview (https://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664) -// in the Amazon Web Services SDK for iOS Developer Guide. +// To learn more about Amazon Cognito, see Amazon Cognito identity pools (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html) +// in the Amazon Cognito Developer Guide.
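To make the reworked AssumeRoleWithWebIdentity wording concrete, a minimal sketch of the call: as the surrounding hunks note, it requires no Amazon Web Services security credentials, so the client is built with anonymous credentials. The role ARN and the environment-variable token source below are assumptions for illustration only.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	// The operation needs no Amazon Web Services security credentials, so the
	// client uses anonymous credentials.
	sess := session.Must(session.NewSession(aws.NewConfig().
		WithRegion("us-east-1").
		WithCredentials(credentials.AnonymousCredentials)))
	svc := sts.New(sess)

	out, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
		RoleArn:          aws.String("arn:aws:iam::123456789012:role/web-identity-role"), // placeholder ARN
		RoleSessionName:  aws.String("app-user-42"),
		WebIdentityToken: aws.String(os.Getenv("OIDC_TOKEN")), // token from the IdP; env sourcing is illustrative
		DurationSeconds:  aws.Int64(3600),
	})
	if err != nil {
		// e.g. InvalidIdentityToken, ExpiredTokenException, or IDPRejectedClaim.
		log.Fatal(err)
	}
	fmt.Println("assumed role user:", aws.StringValue(out.AssumedRoleUser.Arn))
}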
// // Calling AssumeRoleWithWebIdentity does not require the use of Amazon Web // Services security credentials. Therefore, you can distribute an application @@ -523,7 +529,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // by using a token from the web identity provider. For a comparison of AssumeRoleWithWebIdentity // with the other API operations that produce temporary credentials, see Requesting // Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // // The temporary security credentials returned by this API consist of an access @@ -531,7 +537,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // temporary security credentials to sign calls to Amazon Web Services service // API operations. // -// Session Duration +// # Session Duration // // By default, the temporary security credentials created by AssumeRoleWithWebIdentity // last for one hour. However, you can use the optional DurationSeconds parameter @@ -546,7 +552,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) // in the IAM User Guide. // -// Permissions +// # Permissions // // The temporary security credentials created by AssumeRoleWithWebIdentity can // be used to make API calls to any Amazon Web Services service with the following @@ -555,19 +561,19 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // // (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plaintext that you use for both inline -// and managed session policies can't exceed 2,048 characters. Passing policies -// to this operation returns new temporary credentials. The resulting session's -// permissions are the intersection of the role's identity-based policy and -// the session policies. You can use the role's temporary credentials in subsequent -// Amazon Web Services API calls to access resources in the account that owns -// the role. You cannot use session policies to grant more permissions than -// those allowed by the identity-based policy of the role that is being assumed. -// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// inline session policy. You can also specify up to 10 managed policy Amazon +// Resource Names (ARNs) to use as managed session policies. The plaintext that +// you use for both inline and managed session policies can't exceed 2,048 characters. +// Passing policies to this operation returns new temporary credentials. The +// resulting session's permissions are the intersection of the role's identity-based +// policy and the session policies. 
You can use the role's temporary credentials +// in subsequent Amazon Web Services API calls to access resources in the account +// that owns the role. You cannot use session policies to grant more permissions +// than those allowed by the identity-based policy of the role that is being +// assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // -// Tags +// # Tags // // (Optional) You can configure your IdP to pass attributes into your web identity // token as session tags. Each session tag consists of a key name and an associated @@ -580,11 +586,12 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. // -// An Amazon Web Services conversion compresses the passed session policies -// and session tags into a packed binary format that has a separate limit. Your -// request can fail for this limit even if your plaintext meets the other requirements. -// The PackedPolicySize response element indicates by percentage how close the -// policies and tags for your request are to the upper size limit. +// An Amazon Web Services conversion compresses the passed inline session policy, +// managed policy ARNs, and session tags into a packed binary format that has +// a separate limit. Your request can fail for this limit even if your plaintext +// meets the other requirements. The PackedPolicySize response element indicates +// by percentage how close the policies and tags for your request are to the +// upper size limit. // // You can pass a session tag with the same key as a tag that is attached to // the role. When you do, the session tag overrides the role tag with the same @@ -601,7 +608,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) // in the IAM User Guide. // -// Identities +// # Identities // // Before your application can call AssumeRoleWithWebIdentity, you must have // an identity token from a supported identity provider and create a role that @@ -619,24 +626,24 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // For more information about how to use web identity federation and the AssumeRoleWithWebIdentity // API, see the following resources: // -// * Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) -// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). +// - Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) +// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). // -// * Web Identity Federation Playground (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/). 
-// Walk through the process of authenticating through Login with Amazon, -// Facebook, or Google, getting temporary security credentials, and then -// using those credentials to make a request to Amazon Web Services. +// - Web Identity Federation Playground (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/). +// Walk through the process of authenticating through Login with Amazon, +// Facebook, or Google, getting temporary security credentials, and then +// using those credentials to make a request to Amazon Web Services. // -// * Amazon Web Services SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) -// and Amazon Web Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/). -// These toolkits contain sample apps that show how to invoke the identity -// providers. The toolkits then show how to use the information from these -// providers to get and use temporary security credentials. +// - Amazon Web Services SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) +// and Amazon Web Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/). +// These toolkits contain sample apps that show how to invoke the identity +// providers. The toolkits then show how to use the information from these +// providers to get and use temporary security credentials. // -// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications). -// This article discusses web identity federation and shows an example of -// how to use web identity federation to get access to content in Amazon -// S3. +// - Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications). +// This article discusses web identity federation and shows an example of +// how to use web identity federation to get access to content in Amazon +// S3. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -646,54 +653,55 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // API operation AssumeRoleWithWebIdentity for usage and error information. // // Returned Error Codes: -// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" -// The request was rejected because the policy document was malformed. The error -// message describes the specific error. -// -// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the total packed size of the session policies -// and session tags combined was too large. An Amazon Web Services conversion -// compresses the session policy document, session policy ARNs, and session -// tags into a packed binary format that has a separate limit. The error message -// indicates by percentage how close the policies and tags are to the upper -// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// You could receive this error even though you meet other defined session policy -// and session tag limits. For more information, see IAM and STS Entity Character -// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. 
-// -// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" -// The identity provider (IdP) reported that authentication failed. This might -// be because the claim is invalid. -// -// If this error is returned for the AssumeRoleWithWebIdentity operation, it -// can also mean that the claim has expired or has been explicitly revoked. -// -// * ErrCodeIDPCommunicationErrorException "IDPCommunicationError" -// The request could not be fulfilled because the identity provider (IDP) that -// was asked to verify the incoming identity token could not be reached. This -// is often a transient error caused by network conditions. Retry the request -// a limited number of times so that you don't exceed the request rate. If the -// error persists, the identity provider might be down or not responding. -// -// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" -// The web identity token that was passed could not be validated by Amazon Web -// Services. Get a new identity token from the identity provider and then retry -// the request. -// -// * ErrCodeExpiredTokenException "ExpiredTokenException" -// The web identity token that was passed is expired or is not valid. Get a -// new identity token from the identity provider and then retry the request. -// -// * ErrCodeRegionDisabledException "RegionDisabledException" -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating Amazon Web Services STS in an Amazon Web Services Region -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. +// +// - ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// - ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An Amazon Web Services conversion +// compresses the session policy document, session policy ARNs, and session +// tags into a packed binary format that has a separate limit. The error message +// indicates by percentage how close the policies and tags are to the upper +// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// - ErrCodeIDPRejectedClaimException "IDPRejectedClaim" +// The identity provider (IdP) reported that authentication failed. This might +// be because the claim is invalid. +// +// If this error is returned for the AssumeRoleWithWebIdentity operation, it +// can also mean that the claim has expired or has been explicitly revoked. +// +// - ErrCodeIDPCommunicationErrorException "IDPCommunicationError" +// The request could not be fulfilled because the identity provider (IDP) that +// was asked to verify the incoming identity token could not be reached. 
This +// is often a transient error caused by network conditions. Retry the request +// a limited number of times so that you don't exceed the request rate. If the +// error persists, the identity provider might be down or not responding. +// +// - ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" +// The web identity token that was passed could not be validated by Amazon Web +// Services. Get a new identity token from the identity provider and then retry +// the request. +// +// - ErrCodeExpiredTokenException "ExpiredTokenException" +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. +// +// - ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating Amazon Web Services STS in an Amazon Web Services Region +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) { @@ -733,14 +741,13 @@ const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the DecodeAuthorizationMessageRequest method. +// req, resp := client.DecodeAuthorizationMessageRequest(params) // -// // Example sending a request using the DecodeAuthorizationMessageRequest method. -// req, resp := client.DecodeAuthorizationMessageRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) { @@ -776,25 +783,26 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag // code. // // The message is encoded because the details of the authorization status can -// constitute privileged information that the user who requested the operation +// contain privileged information that the user who requested the operation // should not see. To decode an authorization status message, a user must be -// granted permissions via an IAM policy to request the DecodeAuthorizationMessage -// (sts:DecodeAuthorizationMessage) action. +// granted permissions through an IAM policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) +// to request the DecodeAuthorizationMessage (sts:DecodeAuthorizationMessage) +// action. // // The decoded message includes the following type of information: // -// * Whether the request was denied due to an explicit deny or due to the -// absence of an explicit allow. 
For more information, see Determining Whether -// a Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) -// in the IAM User Guide. +// - Whether the request was denied due to an explicit deny or due to the +// absence of an explicit allow. For more information, see Determining Whether +// a Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) +// in the IAM User Guide. // -// * The principal who made the request. +// - The principal who made the request. // -// * The requested action. +// - The requested action. // -// * The requested resource. +// - The requested resource. // -// * The values of condition keys in the context of the user's request. +// - The values of condition keys in the context of the user's request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -804,10 +812,10 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag // API operation DecodeAuthorizationMessage for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException" -// The error returned if the message passed to DecodeAuthorizationMessage was -// invalid. This can happen if the token contains invalid characters, such as -// linebreaks. +// - ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException" +// The error returned if the message passed to DecodeAuthorizationMessage was +// invalid. This can happen if the token contains invalid characters, such as +// linebreaks. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) { @@ -847,14 +855,13 @@ const opGetAccessKeyInfo = "GetAccessKeyInfo" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the GetAccessKeyInfoRequest method. +// req, resp := client.GetAccessKeyInfoRequest(params) // -// // Example sending a request using the GetAccessKeyInfoRequest method. -// req, resp := client.GetAccessKeyInfoRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo func (c *STS) GetAccessKeyInfoRequest(input *GetAccessKeyInfoInput) (req *request.Request, output *GetAccessKeyInfoOutput) { @@ -944,14 +951,13 @@ const opGetCallerIdentity = "GetCallerIdentity" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the GetCallerIdentityRequest method. +// req, resp := client.GetCallerIdentityRequest(params) // -// // Example sending a request using the GetCallerIdentityRequest method. 
-// req, resp := client.GetCallerIdentityRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) { @@ -976,11 +982,11 @@ func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *requ // call the operation. // // No permissions are required to perform this operation. If an administrator -// adds a policy to your IAM user or role that explicitly denies access to the -// sts:GetCallerIdentity action, you can still perform this operation. Permissions -// are not required because the same information is returned when an IAM user -// or role is denied access. To view an example response, see I Am Not Authorized -// to Perform: iam:DeleteVirtualMFADevice (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa) +// attaches a policy to your identity that explicitly denies access to the sts:GetCallerIdentity +// action, you can still perform this operation. Permissions are not required +// because the same information is returned when access is denied. To view an +// example response, see I Am Not Authorized to Perform: iam:DeleteVirtualMFADevice +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa) // in the IAM User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1027,14 +1033,13 @@ const opGetFederationToken = "GetFederationToken" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the GetFederationTokenRequest method. +// req, resp := client.GetFederationTokenRequest(params) // -// // Example sending a request using the GetFederationTokenRequest method. -// req, resp := client.GetFederationTokenRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) { @@ -1056,16 +1061,24 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // GetFederationToken API operation for AWS Security Token Service. // // Returns a set of temporary security credentials (consisting of an access -// key ID, a secret access key, and a security token) for a federated user. -// A typical use is in a proxy application that gets temporary security credentials -// on behalf of distributed applications inside a corporate network. You must -// call the GetFederationToken operation using the long-term security credentials -// of an IAM user. As a result, this call is appropriate in contexts where those -// credentials can be safely stored, usually in a server-based application. +// key ID, a secret access key, and a security token) for a user. 
A typical +// use is in a proxy application that gets temporary security credentials on +// behalf of distributed applications inside a corporate network. +// +// You must call the GetFederationToken operation using the long-term security +// credentials of an IAM user. As a result, this call is appropriate in contexts +// where those credentials can be safeguarded, usually in a server-based application. // For a comparison of GetFederationToken with the other API operations that // produce temporary credentials, see Requesting Temporary Security Credentials // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// Although it is possible to call GetFederationToken using the security credentials +// of an Amazon Web Services account root user rather than an IAM user that +// you create for the purpose of a proxy application, we do not recommend it. +// For more information, see Safeguard your root user credentials and don't +// use them for everyday tasks (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials) // in the IAM User Guide. // // You can create a mobile-based or browser-based app that can authenticate @@ -1076,37 +1089,31 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) // in the IAM User Guide. // -// You can also call GetFederationToken using the security credentials of an -// Amazon Web Services account root user, but we do not recommend it. Instead, -// we recommend that you create an IAM user for the purpose of the proxy application. -// Then attach a policy to the IAM user that limits federated users to only -// the actions and resources that they need to access. For more information, -// see IAM Best Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) -// in the IAM User Guide. -// -// Session duration +// # Session duration // // The temporary credentials are valid for the specified duration, from 900 // seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default -// session duration is 43,200 seconds (12 hours). Temporary credentials that -// are obtained by using Amazon Web Services account root user credentials have -// a maximum duration of 3,600 seconds (1 hour). +// session duration is 43,200 seconds (12 hours). Temporary credentials obtained +// by using the root user credentials have a maximum duration of 3,600 seconds +// (1 hour). // -// Permissions +// # Permissions // // You can use the temporary credentials created by GetFederationToken in any -// Amazon Web Services service except the following: +// Amazon Web Services service with the following exceptions: +// +// - You cannot call any IAM operations using the CLI or the Amazon Web Services +// API. This limitation does not apply to console sessions. // -// * You cannot call any IAM operations using the CLI or the Amazon Web Services -// API. +// - You cannot call any STS operations except GetCallerIdentity. // -// * You cannot call any STS operations except GetCallerIdentity. 
+// You can use temporary credentials for single sign-on (SSO) to the console. // // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plaintext that you use for both inline -// and managed session policies can't exceed 2,048 characters. +// inline session policy. You can also specify up to 10 managed policy Amazon +// Resource Names (ARNs) to use as managed session policies. The plaintext that +// you use for both inline and managed session policies can't exceed 2,048 characters. // // Though the session policy parameters are optional, if you do not pass a policy, // then the resulting federated user session has no permissions. When you pass @@ -1126,7 +1133,7 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // by the policy. These permissions are granted in addition to the permissions // granted by the session policies. // -// Tags +// # Tags // // (Optional) You can pass tag key-value pairs to your session. These are called // session tags. For more information about session tags, see Passing Session @@ -1141,63 +1148,6 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) // in the IAM User Guide. // -// You can also call GetFederationToken using the security credentials of an -// Amazon Web Services account root user, but we do not recommend it. Instead, -// we recommend that you create an IAM user for the purpose of the proxy application. -// Then attach a policy to the IAM user that limits federated users to only -// the actions and resources that they need to access. For more information, -// see IAM Best Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) -// in the IAM User Guide. -// -// Session duration -// -// The temporary credentials are valid for the specified duration, from 900 -// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default -// session duration is 43,200 seconds (12 hours). Temporary credentials that -// are obtained by using Amazon Web Services account root user credentials have -// a maximum duration of 3,600 seconds (1 hour). -// -// Permissions -// -// You can use the temporary credentials created by GetFederationToken in any -// Amazon Web Services service except the following: -// -// * You cannot call any IAM operations using the CLI or the Amazon Web Services -// API. -// -// * You cannot call any STS operations except GetCallerIdentity. -// -// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// to this operation. You can pass a single JSON policy document to use as an -// inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plain text that you use for both inline -// and managed session policies can't exceed 2,048 characters. -// -// Though the session policy parameters are optional, if you do not pass a policy, -// then the resulting federated user session has no permissions. 
When you pass -// session policies, the session permissions are the intersection of the IAM -// user policies and the session policies that you pass. This gives you a way -// to further restrict the permissions for a federated user. You cannot use -// session policies to grant more permissions than those that are defined in -// the permissions policy of the IAM user. For more information, see Session -// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) -// in the IAM User Guide. For information about using GetFederationToken to -// create temporary security credentials, see GetFederationToken—Federation -// Through a Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). -// -// You can use the credentials to access a resource that has a resource-based -// policy. If that policy specifically references the federated user session -// in the Principal element of the policy, the session has the permissions allowed -// by the policy. These permissions are granted in addition to the permissions -// granted by the session policies. -// -// Tags -// -// (Optional) You can pass tag key-value pairs to your session. These are called -// session tags. For more information about session tags, see Passing Session -// Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// // An administrator must grant you the permissions necessary to pass session // tags. The administrator can also create granular permissions to allow you // to pass only specific session tags. For more information, see Tutorial: Using @@ -1219,31 +1169,32 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // API operation GetFederationToken for usage and error information. // // Returned Error Codes: -// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" -// The request was rejected because the policy document was malformed. The error -// message describes the specific error. -// -// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" -// The request was rejected because the total packed size of the session policies -// and session tags combined was too large. An Amazon Web Services conversion -// compresses the session policy document, session policy ARNs, and session -// tags into a packed binary format that has a separate limit. The error message -// indicates by percentage how close the policies and tags are to the upper -// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) -// in the IAM User Guide. -// -// You could receive this error even though you meet other defined session policy -// and session tag limits. For more information, see IAM and STS Entity Character -// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// * ErrCodeRegionDisabledException "RegionDisabledException" -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating Amazon Web Services STS in an Amazon Web Services Region -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. 
+// +// - ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// - ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An Amazon Web Services conversion +// compresses the session policy document, session policy ARNs, and session +// tags into a packed binary format that has a separate limit. The error message +// indicates by percentage how close the policies and tags are to the upper +// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// - ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating Amazon Web Services STS in an Amazon Web Services Region +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) { @@ -1283,14 +1234,13 @@ const opGetSessionToken = "GetSessionToken" // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // +// // Example sending a request using the GetSessionTokenRequest method. +// req, resp := client.GetSessionTokenRequest(params) // -// // Example sending a request using the GetSessionTokenRequest method. -// req, resp := client.GetSessionTokenRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) { @@ -1315,52 +1265,58 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request. // or IAM user. The credentials consist of an access key ID, a secret access // key, and a security token. Typically, you use GetSessionToken if you want // to use MFA to protect programmatic calls to specific Amazon Web Services -// API operations like Amazon EC2 StopInstances. MFA-enabled IAM users would -// need to call GetSessionToken and submit an MFA code that is associated with -// their MFA device. Using the temporary security credentials that are returned -// from the call, IAM users can then make programmatic calls to API operations -// that require MFA authentication. If you do not supply a correct MFA code, -// then the API returns an access denied error. 
For a comparison of GetSessionToken +// API operations like Amazon EC2 StopInstances. +// +// MFA-enabled IAM users must call GetSessionToken and submit an MFA code that +// is associated with their MFA device. Using the temporary security credentials +// that the call returns, IAM users can then make programmatic calls to API +// operations that require MFA authentication. An incorrect MFA code causes +// the API to return an access denied error. For a comparison of GetSessionToken // with the other API operations that produce temporary credentials, see Requesting // Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// No permissions are required for users to perform this operation. The purpose +// of the sts:GetSessionToken operation is to authenticate the user using MFA. +// You cannot use policies to control authentication operations. For more information, +// see Permissions for GetSessionToken (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html) // in the IAM User Guide. // -// Session Duration +// # Session Duration // // The GetSessionToken operation must be called by using the long-term Amazon -// Web Services security credentials of the Amazon Web Services account root -// user or an IAM user. Credentials that are created by IAM users are valid -// for the duration that you specify. This duration can range from 900 seconds -// (15 minutes) up to a maximum of 129,600 seconds (36 hours), with a default -// of 43,200 seconds (12 hours). Credentials based on account credentials can -// range from 900 seconds (15 minutes) up to 3,600 seconds (1 hour), with a -// default of 1 hour. +// Web Services security credentials of an IAM user. Credentials that are created +// by IAM users are valid for the duration that you specify. This duration can +// range from 900 seconds (15 minutes) up to a maximum of 129,600 seconds (36 +// hours), with a default of 43,200 seconds (12 hours). Credentials based on +// account credentials can range from 900 seconds (15 minutes) up to 3,600 seconds +// (1 hour), with a default of 1 hour. // -// Permissions +// # Permissions // // The temporary security credentials created by GetSessionToken can be used // to make API calls to any Amazon Web Services service with the following exceptions: // -// * You cannot call any IAM API operations unless MFA authentication information -// is included in the request. +// - You cannot call any IAM API operations unless MFA authentication information +// is included in the request. // -// * You cannot call any STS API except AssumeRole or GetCallerIdentity. +// - You cannot call any STS API except AssumeRole or GetCallerIdentity. // -// We recommend that you do not call GetSessionToken with Amazon Web Services -// account root user credentials. Instead, follow our best practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users) -// by creating one or more IAM users, giving them the necessary permissions, -// and using IAM users for everyday interaction with Amazon Web Services. 
+// +// The credentials that GetSessionToken returns are based on permissions associated +// with the IAM user whose credentials were used to call the operation. The +// temporary credentials have the same permissions as the IAM user. // -// The credentials that are returned by GetSessionToken are based on permissions -// associated with the user whose credentials were used to call the operation. -// If GetSessionToken is called using Amazon Web Services account root user -// credentials, the temporary credentials have root user permissions. Similarly, -// if GetSessionToken is called using the credentials of an IAM user, the temporary -// credentials have the same permissions as the IAM user. +// Although it is possible to call GetSessionToken using the security credentials +// of an Amazon Web Services account root user rather than an IAM user, we do +// not recommend it. If GetSessionToken is called using root user credentials, +// the temporary credentials have root user permissions. For more information, +// see Safeguard your root user credentials and don't use them for everyday +// tasks (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials) +// in the IAM User Guide. // // For more information about using GetSessionToken to create temporary credentials, -// go to Temporary Credentials for Users in Untrusted Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) +// see Temporary Credentials for Users in Untrusted Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) // in the IAM User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1371,13 +1327,13 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request. // API operation GetSessionToken for usage and error information. // // Returned Error Codes: -// * ErrCodeRegionDisabledException "RegionDisabledException" -// STS is not activated in the requested region for the account that is being -// asked to generate credentials. The account administrator must use the IAM -// console to activate STS in that region. For more information, see Activating -// and Deactivating Amazon Web Services STS in an Amazon Web Services Region -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. +// - ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating Amazon Web Services STS in an Amazon Web Services Region +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) { @@ -1404,15 +1360,23 @@ func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionToken type AssumeRoleInput struct { _ struct{} `type:"structure"` - // The duration, in seconds, of the role session. The value specified can can - // range from 900 seconds (15 minutes) up to the maximum session duration that - // is set for the role.
The maximum session duration setting can have a value - // from 1 hour to 12 hours. If you specify a value higher than this setting - // or the administrator setting (whichever is lower), the operation fails. For - // example, if you specify a session duration of 12 hours, but your administrator - // set the maximum session duration to 6 hours, your operation fails. To learn - // how to view the maximum value for your role, see View the Maximum Session - // Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // The duration, in seconds, of the role session. The value specified can range + // from 900 seconds (15 minutes) up to the maximum session duration set for + // the role. The maximum session duration setting can have a value from 1 hour + // to 12 hours. If you specify a value higher than this setting or the administrator + // setting (whichever is lower), the operation fails. For example, if you specify + // a session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. + // + // Role chaining limits your Amazon Web Services CLI or Amazon Web Services + // API role session to a maximum of one hour. When you use the AssumeRole API + // operation to assume a role, you can specify the duration of your role session + // with the DurationSeconds parameter. You can specify a parameter value of + // up to 43200 seconds (12 hours), depending on the maximum session duration + // setting for your role. However, if you assume a role using role chaining + // and provide a DurationSeconds parameter value greater than one hour, the + // operation fails. To learn how to view the maximum value for your role, see + // View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) // in the IAM User Guide. // // By default, the value is set to 3600 seconds. @@ -1422,7 +1386,7 @@ type AssumeRoleInput struct { // to the federation endpoint for a console sign-in token takes a SessionDuration // parameter that specifies the maximum length of the console session. For more // information, see Creating a URL that Enables Federated Users to Access the - // Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) // in the IAM User Guide. DurationSeconds *int64 `min:"900" type:"integer"` @@ -1461,11 +1425,12 @@ type AssumeRoleInput struct { // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage // return (\u000D) characters. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. 
The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -1478,11 +1443,12 @@ type AssumeRoleInput struct { // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the Amazon Web Services General Reference. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. // // Passing policies to this operation returns new temporary credentials. The // resulting session's permissions are the intersection of the role's identity-based @@ -1494,6 +1460,17 @@ type AssumeRoleInput struct { // in the IAM User Guide. PolicyArns []*PolicyDescriptorType `type:"list"` + // A list of previously acquired trusted context assertions in the format of + // a JSON array. The trusted context assertion is signed and encrypted by Amazon + // Web Services STS. + // + // The following is an example of a ProvidedContext value that includes a single + // trusted context assertion and the ARN of the context provider from which + // the trusted context assertion was generated. + // + // [{"ProviderArn":"arn:aws:iam::aws:contextProvider/IdentityCenter","ContextAssertion":"trusted-context-assertion"}] + ProvidedContexts []*ProvidedContext `type:"list"` + // The Amazon Resource Name (ARN) of the role to assume. // // RoleArn is a required field @@ -1548,7 +1525,7 @@ type AssumeRoleInput struct { // A list of session tags that you want to pass. Each session tag consists of // a key name and an associated value. For more information about session tags, - // see Tagging STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // see Tagging Amazon Web Services STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) // in the IAM User Guide. // // This parameter is optional. You can pass up to 50 session tags. The plaintext @@ -1557,11 +1534,12 @@ type AssumeRoleInput struct { // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. 
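A sketch of wiring the new ProvidedContexts field shown above; the role ARN and session name are placeholders, and the assertion pair mirrors the documented example value:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sts"
)

// assumeRoleWithProvidedContexts passes a trusted context assertion along
// with the role to assume. All ARNs and names are placeholders.
func assumeRoleWithProvidedContexts(svc *sts.STS) (*sts.AssumeRoleOutput, error) {
	return svc.AssumeRole(&sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example-role"),
		RoleSessionName: aws.String("example-session"),
		ProvidedContexts: []*sts.ProvidedContext{{
			ProviderArn:      aws.String("arn:aws:iam::aws:contextProvider/IdentityCenter"),
			ContextAssertion: aws.String("trusted-context-assertion"),
		}},
	})
}
```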
+ // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. // // You can pass a session tag with the same key as a tag that is already attached // to the role. When you do, session tags override a role tag with the same @@ -1577,7 +1555,7 @@ type AssumeRoleInput struct { // the new session inherits any transitive session tags from the calling session. // If you pass a session tag with the same key as an inherited tag, the operation // fails. To view the inherited tags for a session, see the CloudTrail logs. - // For more information, see Viewing Session Tags in CloudTrail (https://docs.aws.amazon.com/IAM/latest/UserGuide/session-tags.html#id_session-tags_ctlogs) + // For more information, see Viewing Session Tags in CloudTrail (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_ctlogs) // in the IAM User Guide. Tags []*Tag `type:"list"` @@ -1666,6 +1644,16 @@ func (s *AssumeRoleInput) Validate() error { } } } + if s.ProvidedContexts != nil { + for i, v := range s.ProvidedContexts { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ProvidedContexts", i), err.(request.ErrInvalidParams)) + } + } + } if s.Tags != nil { for i, v := range s.Tags { if v == nil { @@ -1707,6 +1695,12 @@ func (s *AssumeRoleInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleIn return s } +// SetProvidedContexts sets the ProvidedContexts field's value. +func (s *AssumeRoleInput) SetProvidedContexts(v []*ProvidedContext) *AssumeRoleInput { + s.ProvidedContexts = v + return s +} + // SetRoleArn sets the RoleArn field's value. func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput { s.RoleArn = &v @@ -1858,7 +1852,7 @@ type AssumeRoleWithSAMLInput struct { // to the federation endpoint for a console sign-in token takes a SessionDuration // parameter that specifies the maximum length of the console session. For more // information, see Creating a URL that Enables Federated Users to Access the - // Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) // in the IAM User Guide. DurationSeconds *int64 `min:"900" type:"integer"` @@ -1880,11 +1874,12 @@ type AssumeRoleWithSAMLInput struct { // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage // return (\u000D) characters. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. 
Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -1897,11 +1892,12 @@ type AssumeRoleWithSAMLInput struct { // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the Amazon Web Services General Reference. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. // // Passing policies to this operation returns new temporary credentials. The // resulting session's permissions are the intersection of the role's identity-based @@ -1929,8 +1925,12 @@ type AssumeRoleWithSAMLInput struct { // For more information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) // in the IAM User Guide. // + // SAMLAssertion is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by AssumeRoleWithSAMLInput's + // String and GoString methods. + // // SAMLAssertion is a required field - SAMLAssertion *string `min:"4" type:"string" required:"true"` + SAMLAssertion *string `min:"4" type:"string" required:"true" sensitive:"true"` } // String returns the string representation. @@ -2065,7 +2065,7 @@ type AssumeRoleWithSAMLOutput struct { // IAM. // // The combination of NameQualifier and Subject can be used to uniquely identify - // a federated user. + // a user. // // The following pseudocode shows how the hash value is calculated: // @@ -2205,7 +2205,7 @@ type AssumeRoleWithWebIdentityInput struct { // to the federation endpoint for a console sign-in token takes a SessionDuration // parameter that specifies the maximum length of the console session. For more // information, see Creating a URL that Enables Federated Users to Access the - // Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) // in the IAM User Guide. DurationSeconds *int64 `min:"900" type:"integer"` @@ -2227,11 +2227,12 @@ type AssumeRoleWithWebIdentityInput struct { // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage // return (\u000D) characters. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. 
Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -2244,11 +2245,12 @@ type AssumeRoleWithWebIdentityInput struct { // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the Amazon Web Services General Reference. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. // // Passing policies to this operation returns new temporary credentials. The // resulting session's permissions are the intersection of the role's identity-based @@ -2260,11 +2262,12 @@ type AssumeRoleWithWebIdentityInput struct { // in the IAM User Guide. PolicyArns []*PolicyDescriptorType `type:"list"` - // The fully qualified host component of the domain name of the identity provider. + // The fully qualified host component of the domain name of the OAuth 2.0 identity + // provider. Do not specify this value for an OpenID Connect identity provider. // - // Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com - // and graph.facebook.com are the only supported identity providers for OAuth - // 2.0 access tokens. Do not include URL schemes and port numbers. + // Currently www.amazon.com and graph.facebook.com are the only supported identity + // providers for OAuth 2.0 access tokens. Do not include URL schemes and port + // numbers. // // Do not specify this value for OpenID Connect ID tokens. ProviderId *string `min:"4" type:"string"` @@ -2290,10 +2293,15 @@ type AssumeRoleWithWebIdentityInput struct { // The OAuth 2.0 access token or OpenID Connect ID token that is provided by // the identity provider. Your application must get this token by authenticating // the user who is using your application with a web identity provider before - // the application makes an AssumeRoleWithWebIdentity call. + // the application makes an AssumeRoleWithWebIdentity call. Only tokens with + // RSA algorithms (RS256) are supported. 
+ // + // WebIdentityToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by AssumeRoleWithWebIdentityInput's + // String and GoString methods. // // WebIdentityToken is a required field - WebIdentityToken *string `min:"4" type:"string" required:"true"` + WebIdentityToken *string `min:"4" type:"string" required:"true" sensitive:"true"` } // String returns the string representation. @@ -2599,8 +2607,12 @@ type Credentials struct { // The secret access key that can be used to sign requests. // + // SecretAccessKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by Credentials's + // String and GoString methods. + // // SecretAccessKey is a required field - SecretAccessKey *string `type:"string" required:"true"` + SecretAccessKey *string `type:"string" required:"true" sensitive:"true"` // The token that users must pass to the service API to use the temporary credentials. // @@ -2705,7 +2717,7 @@ func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAut type DecodeAuthorizationMessageOutput struct { _ struct{} `type:"structure"` - // An XML document that contains the decoded message. + // The API returns a response with the decoded message. DecodedMessage *string `type:"string"` } @@ -2948,10 +2960,9 @@ type GetFederationTokenInput struct { // The duration, in seconds, that the session should last. Acceptable durations // for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds // (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained - // using Amazon Web Services account root user credentials are restricted to - // a maximum of 3,600 seconds (one hour). If the specified duration is longer - // than one hour, the session obtained by using root user credentials defaults - // to one hour. + // using root user credentials are restricted to a maximum of 3,600 seconds + // (one hour). If the specified duration is longer than one hour, the session + // obtained by using root user credentials defaults to one hour. DurationSeconds *int64 `min:"900" type:"integer"` // The name of the federated user. The name is used as an identifier for the @@ -2970,8 +2981,8 @@ type GetFederationTokenInput struct { // // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an - // inline session policy. You can also specify up to 10 managed policies to - // use as managed session policies. + // inline session policy. You can also specify up to 10 managed policy Amazon + // Resource Names (ARNs) to use as managed session policies. // // This parameter is optional. However, if you do not pass any session policies, // then the resulting federated user session has no permissions. @@ -2996,11 +3007,12 @@ type GetFederationTokenInput struct { // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage // return (\u000D) characters. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. 
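For the federation input documented in this stretch, a sketch of the corresponding call; the federated user name and single-statement inline policy are illustrative only:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sts"
)

// getFederationToken requests credentials for a federated user. Without a
// session policy the resulting session would have no permissions, so a
// minimal illustrative policy is attached here.
func getFederationToken(svc *sts.STS) (*sts.GetFederationTokenOutput, error) {
	policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow",` +
		`"Action":"s3:ListAllMyBuckets","Resource":"*"}]}`
	return svc.GetFederationToken(&sts.GetFederationTokenInput{
		Name:            aws.String("example-federated-user"),
		DurationSeconds: aws.Int64(3600),
		Policy:          aws.String(policy),
	})
}
```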
+ // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -3009,11 +3021,12 @@ type GetFederationTokenInput struct { // // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an - // inline session policy. You can also specify up to 10 managed policies to - // use as managed session policies. The plaintext that you use for both inline - // and managed session policies can't exceed 2,048 characters. You can provide - // up to 10 managed policy ARNs. For more information about ARNs, see Amazon - // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // inline session policy. You can also specify up to 10 managed policy Amazon + // Resource Names (ARNs) to use as managed session policies. The plaintext that + // you use for both inline and managed session policies can't exceed 2,048 characters. + // You can provide up to 10 managed policy ARNs. For more information about + // ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces + // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the Amazon Web Services General Reference. // // This parameter is optional. However, if you do not pass any session policies, @@ -3033,11 +3046,12 @@ type GetFederationTokenInput struct { // by the policy. These permissions are granted in addition to the permissions // that are granted by the session policies. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. - // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. PolicyArns []*PolicyDescriptorType `type:"list"` // A list of session tags. Each session tag consists of a key name and an associated @@ -3051,11 +3065,12 @@ type GetFederationTokenInput struct { // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. // - // An Amazon Web Services conversion compresses the passed session policies - // and session tags into a packed binary format that has a separate limit. Your - // request can fail for this limit even if your plaintext meets the other requirements. 
- // The PackedPolicySize response element indicates by percentage how close the - // policies and tags for your request are to the upper size limit. + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. // // You can pass a session tag with the same key as a tag that is already attached // to the user you are federating. When you do, session tags override a user @@ -3237,8 +3252,8 @@ type GetSessionTokenInput struct { // user has a policy that requires MFA authentication. The value is either the // serial number for a hardware device (such as GAHT12345678) or an Amazon Resource // Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). - // You can find the device for an IAM user by going to the Management Console - // and viewing the user's security credentials. + // You can find the device for an IAM user by going to the Amazon Web Services + // Management Console and viewing the user's security credentials. // // The regex used to validate this parameter is a string of characters consisting // of upper- and lower-case alphanumeric characters with no spaces. You can @@ -3398,11 +3413,72 @@ func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType { return s } +// Contains information about the provided context. This includes the signed +// and encrypted trusted context assertion and the context provider ARN from +// which the trusted context assertion was generated. +type ProvidedContext struct { + _ struct{} `type:"structure"` + + // The signed and encrypted trusted context assertion generated by the context + // provider. The trusted context assertion is signed and encrypted by Amazon + // Web Services STS. + ContextAssertion *string `min:"4" type:"string"` + + // The context provider ARN from which the trusted context assertion was generated. + ProviderArn *string `min:"20" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ProvidedContext) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ProvidedContext) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ProvidedContext) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ProvidedContext"} + if s.ContextAssertion != nil && len(*s.ContextAssertion) < 4 { + invalidParams.Add(request.NewErrParamMinLen("ContextAssertion", 4)) + } + if s.ProviderArn != nil && len(*s.ProviderArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("ProviderArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetContextAssertion sets the ContextAssertion field's value. 
+func (s *ProvidedContext) SetContextAssertion(v string) *ProvidedContext { + s.ContextAssertion = &v + return s +} + +// SetProviderArn sets the ProviderArn field's value. +func (s *ProvidedContext) SetProviderArn(v string) *ProvidedContext { + s.ProviderArn = &v + return s +} + // You can pass custom key-value pair attributes when you assume a role or federate // a user. These are called session tags. You can then use the session tags -// to control access to resources. For more information, see Tagging STS Sessions -// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in -// the IAM User Guide. +// to control access to resources. For more information, see Tagging Amazon +// Web Services STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. type Tag struct { _ struct{} `type:"structure"` diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go index 2d98d923..ea1d9eb0 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go @@ -4,17 +4,16 @@ // requests to AWS Security Token Service. // // Security Token Service (STS) enables you to request temporary, limited-privilege -// credentials for Identity and Access Management (IAM) users or for users that -// you authenticate (federated users). This guide provides descriptions of the -// STS API. For more information about using this service, see Temporary Security -// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). +// credentials for users. This guide provides descriptions of the STS API. For +// more information about using this service, see Temporary Security Credentials +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). // // See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service. // // See sts package documentation for more information. // https://docs.aws.amazon.com/sdk-for-go/api/service/sts/ // -// Using the Client +// # Using the Client // // To contact AWS Security Token Service with the SDK use the New function to create // a new service client. With that client you can make API requests to the service. diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go index 7897d70c..b680bbd5 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go @@ -67,7 +67,7 @@ const ( // // You could receive this error even though you meet other defined session policy // and session tag limits. For more information, see IAM and STS Entity Character - // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) // in the IAM User Guide. ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge" diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go index d34a6855..12327d05 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go @@ -39,31 +39,37 @@ const ( // aws.Config parameter to add your extra config. 
// // Example: -// mySession := session.Must(session.NewSession()) // -// // Create a STS client from just a session. -// svc := sts.New(mySession) +// mySession := session.Must(session.NewSession()) // -// // Create a STS client with additional configuration -// svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +// // Create a STS client from just a session. +// svc := sts.New(mySession) +// +// // Create a STS client with additional configuration +// svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) + if c.SigningNameDerived || len(c.SigningName) == 0 { + c.SigningName = EndpointsID + // No Fallback + } + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *STS { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *STS { svc := &STS{ Client: client.New( cfg, metadata.ClientInfo{ - ServiceName: ServiceName, - ServiceID: ServiceID, - SigningName: signingName, - SigningRegion: signingRegion, - PartitionID: partitionID, - Endpoint: endpoint, - APIVersion: "2011-06-15", + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2011-06-15", + ResolvedRegion: resolvedRegion, }, handlers, ), diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go index e2e1d6ef..bf06b2e7 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go @@ -23,37 +23,37 @@ import ( // can be stubbed out for unit testing your code with the SDK without needing // to inject custom request handlers into the SDK's request pipeline. // -// // myFunc uses an SDK service client to make a request to -// // AWS Security Token Service. -// func myFunc(svc stsiface.STSAPI) bool { -// // Make svc.AssumeRole request -// } +// // myFunc uses an SDK service client to make a request to +// // AWS Security Token Service. +// func myFunc(svc stsiface.STSAPI) bool { +// // Make svc.AssumeRole request +// } // -// func main() { -// sess := session.New() -// svc := sts.New(sess) +// func main() { +// sess := session.New() +// svc := sts.New(sess) // -// myFunc(svc) -// } +// myFunc(svc) +// } // // In your _test.go file: // -// // Define a mock struct to be used in your unit tests of myFunc. -// type mockSTSClient struct { -// stsiface.STSAPI -// } -// func (m *mockSTSClient) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) { -// // mock response/functionality -// } +// // Define a mock struct to be used in your unit tests of myFunc. 
+// type mockSTSClient struct { +// stsiface.STSAPI +// } +// func (m *mockSTSClient) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) { +// // mock response/functionality +// } // -// func TestMyFunc(t *testing.T) { -// // Setup Test -// mockSvc := &mockSTSClient{} +// func TestMyFunc(t *testing.T) { +// // Setup Test +// mockSvc := &mockSTSClient{} // -// myfunc(mockSvc) +// myfunc(mockSvc) // -// // Verify myFunc's functionality -// } +// // Verify myFunc's functionality +// } // // It is important to note that this interface will have breaking changes // when the service model is updated and adds new API operations, paginators, diff --git a/vendor/github.com/aws/smithy-go/.gitignore b/vendor/github.com/aws/smithy-go/.gitignore index c01141aa..c92d6105 100644 --- a/vendor/github.com/aws/smithy-go/.gitignore +++ b/vendor/github.com/aws/smithy-go/.gitignore @@ -20,3 +20,7 @@ target/ build/ */out/ */*/out/ + +# VS Code +bin/ +.vscode/ diff --git a/vendor/github.com/aws/smithy-go/CHANGELOG.md b/vendor/github.com/aws/smithy-go/CHANGELOG.md index 1e23bf95..46b11508 100644 --- a/vendor/github.com/aws/smithy-go/CHANGELOG.md +++ b/vendor/github.com/aws/smithy-go/CHANGELOG.md @@ -1,3 +1,52 @@ +# Release (2023-12-07) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.19.0 + * **Feature**: Support modeled request compression. + +# Release (2023-11-30) + +* No change notes available for this release. + +# Release (2023-11-29) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.18.0 + * **Feature**: Expose Options() method on generated service clients. + +# Release (2023-11-15) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.17.0 + * **Feature**: Support identity/auth components of client reference architecture. + +# Release (2023-10-31) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.16.0 + * **Feature**: **LANG**: Bump minimum go version to 1.19. + +# Release (2023-10-06) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.15.0 + * **Feature**: Add `http.WithHeaderComment` middleware. + +# Release (2023-08-18) + +* No change notes available for this release. + +# Release (2023-08-07) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.14.1 + * **Bug Fix**: Prevent duplicated error returns in EndpointResolverV2 default implementation. + +# Release (2023-07-31) + +## General Highlights +* **Feature**: Adds support for smithy-modeled endpoint resolution. + # Release (2022-12-02) * No change notes available for this release. diff --git a/vendor/github.com/aws/smithy-go/README.md b/vendor/github.com/aws/smithy-go/README.md index a4bb43fb..c374f692 100644 --- a/vendor/github.com/aws/smithy-go/README.md +++ b/vendor/github.com/aws/smithy-go/README.md @@ -6,6 +6,21 @@ **WARNING: All interfaces are subject to change.** +## Can I use this? + +To generate a usable smithy client you must provide a [protocol definition](https://github.com/aws/smithy-go/blob/main/codegen/smithy-go-codegen/src/main/java/software/amazon/smithy/go/codegen/integration/ProtocolGenerator.java), +such as [AWS restJson1](https://smithy.io/2.0/aws/protocols/aws-restjson1-protocol.html), +so that transport mechanisms and serialization/deserialization +code ("serde") can be generated accordingly. + +The code generator does not currently support any protocols out of the box, +therefore the usability of this project on its own is currently limited.
+Support for all [AWS protocols](https://smithy.io/2.0/aws/protocols/index.html) +exists in [aws-sdk-go-v2](https://github.com/aws/aws-sdk-go-v2). We are +tracking the movement of those out of the SDK into smithy-go in +[#458](https://github.com/aws/smithy-go/issues/458), but there's currently no +timeline for doing so. + ## License This project is licensed under the Apache-2.0 License. diff --git a/vendor/github.com/aws/smithy-go/auth/auth.go b/vendor/github.com/aws/smithy-go/auth/auth.go new file mode 100644 index 00000000..5bdb70c9 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/auth/auth.go @@ -0,0 +1,3 @@ +// Package auth defines protocol-agnostic authentication types for smithy +// clients. +package auth diff --git a/vendor/github.com/aws/smithy-go/auth/identity.go b/vendor/github.com/aws/smithy-go/auth/identity.go new file mode 100644 index 00000000..ba8cf70d --- /dev/null +++ b/vendor/github.com/aws/smithy-go/auth/identity.go @@ -0,0 +1,47 @@ +package auth + +import ( + "context" + "time" + + "github.com/aws/smithy-go" +) + +// Identity contains information that identifies who the user making the +// request is. +type Identity interface { + Expiration() time.Time +} + +// IdentityResolver defines the interface through which an Identity is +// retrieved. +type IdentityResolver interface { + GetIdentity(context.Context, smithy.Properties) (Identity, error) +} + +// IdentityResolverOptions defines the interface through which an entity can be +// queried to retrieve an IdentityResolver for a given auth scheme. +type IdentityResolverOptions interface { + GetIdentityResolver(schemeID string) IdentityResolver +} + +// AnonymousIdentity is a sentinel to indicate no identity. +type AnonymousIdentity struct{} + +var _ Identity = (*AnonymousIdentity)(nil) + +// Expiration returns the zero value for time, as anonymous identity never +// expires. +func (*AnonymousIdentity) Expiration() time.Time { + return time.Time{} +} + +// AnonymousIdentityResolver returns AnonymousIdentity. +type AnonymousIdentityResolver struct{} + +var _ IdentityResolver = (*AnonymousIdentityResolver)(nil) + +// GetIdentity returns AnonymousIdentity. +func (*AnonymousIdentityResolver) GetIdentity(_ context.Context, _ smithy.Properties) (Identity, error) { + return &AnonymousIdentity{}, nil +} diff --git a/vendor/github.com/aws/smithy-go/auth/option.go b/vendor/github.com/aws/smithy-go/auth/option.go new file mode 100644 index 00000000..d5dabff0 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/auth/option.go @@ -0,0 +1,25 @@ +package auth + +import "github.com/aws/smithy-go" + +type ( + authOptionsKey struct{} +) + +// Option represents a possible authentication method for an operation. +type Option struct { + SchemeID string + IdentityProperties smithy.Properties + SignerProperties smithy.Properties +} + +// GetAuthOptions gets auth Options from Properties. +func GetAuthOptions(p *smithy.Properties) ([]*Option, bool) { + v, ok := p.Get(authOptionsKey{}).([]*Option) + return v, ok +} + +// SetAuthOptions sets auth Options on Properties. 
+func SetAuthOptions(p *smithy.Properties, options []*Option) { + p.Set(authOptionsKey{}, options) +} diff --git a/vendor/github.com/aws/smithy-go/auth/scheme_id.go b/vendor/github.com/aws/smithy-go/auth/scheme_id.go new file mode 100644 index 00000000..fb6a57c6 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/auth/scheme_id.go @@ -0,0 +1,20 @@ +package auth + +// Anonymous +const ( + SchemeIDAnonymous = "smithy.api#noAuth" +) + +// HTTP auth schemes +const ( + SchemeIDHTTPBasic = "smithy.api#httpBasicAuth" + SchemeIDHTTPDigest = "smithy.api#httpDigestAuth" + SchemeIDHTTPBearer = "smithy.api#httpBearerAuth" + SchemeIDHTTPAPIKey = "smithy.api#httpApiKeyAuth" +) + +// AWS auth schemes +const ( + SchemeIDSigV4 = "aws.auth#sigv4" + SchemeIDSigV4A = "aws.auth#sigv4a" +) diff --git a/vendor/github.com/aws/smithy-go/container/private/cache/cache.go b/vendor/github.com/aws/smithy-go/container/private/cache/cache.go new file mode 100644 index 00000000..69af8775 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/container/private/cache/cache.go @@ -0,0 +1,19 @@ +// Package cache defines the interface for a key-based data store. +// +// This package is designated as private and is intended for use only by the +// smithy client runtime. The exported API therein is not considered stable and +// is subject to breaking changes without notice. +package cache + +// Cache defines the interface for an opaquely-typed, key-based data store. +// +// The thread-safety of this interface is undefined and is dictated by +// implementations. +type Cache interface { + // Retrieve the value associated with the given key. The returned boolean + // indicates whether the cache held a value for the given key. + Get(k interface{}) (interface{}, bool) + + // Store a value under the given key. + Put(k interface{}, v interface{}) +} diff --git a/vendor/github.com/aws/smithy-go/container/private/cache/lru/lru.go b/vendor/github.com/aws/smithy-go/container/private/cache/lru/lru.go new file mode 100644 index 00000000..02ecb0a3 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/container/private/cache/lru/lru.go @@ -0,0 +1,63 @@ +// Package lru implements [cache.Cache] with an LRU eviction policy. +// +// This implementation is NOT thread-safe. +// +// This package is designated as private and is intended for use only by the +// smithy client runtime. The exported API therein is not considered stable and +// is subject to breaking changes without notice. +package lru + +import ( + "container/list" + + "github.com/aws/smithy-go/container/private/cache" +) + +// New creates a new LRU cache with the given capacity. 
+func New(cap int) cache.Cache { + return &lru{ + entries: make(map[interface{}]*list.Element, cap), + cap: cap, + mru: list.New(), + } +} + +type lru struct { + entries map[interface{}]*list.Element + cap int + + mru *list.List // least-recently used is at the back +} + +type element struct { + key interface{} + value interface{} +} + +func (l *lru) Get(k interface{}) (interface{}, bool) { + e, ok := l.entries[k] + if !ok { + return nil, false + } + + l.mru.MoveToFront(e) + return e.Value.(*element).value, true +} + +func (l *lru) Put(k interface{}, v interface{}) { + if len(l.entries) == l.cap { + l.evict() + } + + ev := &element{ + key: k, + value: v, + } + e := l.mru.PushFront(ev) + l.entries[k] = e +} + +func (l *lru) evict() { + e := l.mru.Remove(l.mru.Back()) + delete(l.entries, e.(*element).key) +} diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go index 96abd073..543e7cf0 100644 --- a/vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go +++ b/vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go @@ -26,10 +26,17 @@ type Encoder struct { header http.Header } -// NewEncoder creates a new encoder from the passed in request. All query and +// NewEncoder creates a new encoder from the passed in request. It assumes that +// raw path contains no valuable information at this point, so it passes in path +// as path and raw path for subsequent transformations. +func NewEncoder(path, query string, headers http.Header) (*Encoder, error) { + return NewEncoderWithRawPath(path, path, query, headers) +} + +// NewEncoderWithRawPath creates a new encoder from the passed in request. All query and // header values will be added on top of the request's existing values. Overwriting // duplicate values. -func NewEncoder(path, query string, headers http.Header) (*Encoder, error) { +func NewEncoderWithRawPath(path, rawPath, query string, headers http.Header) (*Encoder, error) { parseQuery, err := url.ParseQuery(query) if err != nil { return nil, fmt.Errorf("failed to parse query string: %w", err) @@ -37,7 +44,7 @@ func NewEncoder(path, query string, headers http.Header) (*Encoder, error) { e := &Encoder{ path: []byte(path), - rawPath: []byte(path), + rawPath: []byte(rawPath), query: parseQuery, header: headers.Clone(), } diff --git a/vendor/github.com/aws/smithy-go/endpoints/endpoint.go b/vendor/github.com/aws/smithy-go/endpoints/endpoint.go new file mode 100644 index 00000000..a9352839 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/endpoints/endpoint.go @@ -0,0 +1,23 @@ +package transport + +import ( + "net/http" + "net/url" + + "github.com/aws/smithy-go" +) + +// Endpoint is the endpoint object returned by Endpoint resolution V2. +type Endpoint struct { + // The complete URL minimally specifying the scheme and host. + // May optionally specify the port and base path component. + URI url.URL + + // An optional set of headers to be sent using transport layer headers. + Headers http.Header + + // A grab-bag property map of endpoint attributes. The + // values present here are subject to change, or being added/removed at any + // time.
+ Properties smithy.Properties +} diff --git a/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/doc.go b/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/doc.go new file mode 100644 index 00000000..e24e190d --- /dev/null +++ b/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/doc.go @@ -0,0 +1,4 @@ +// Package rulesfn provides endpoint rule functions for evaluating endpoint +// resolution rules. + +package rulesfn diff --git a/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/strings.go b/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/strings.go new file mode 100644 index 00000000..5cf4a7b0 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/strings.go @@ -0,0 +1,25 @@ +package rulesfn + +// SubString returns the substring of the input provided. If the start or stop +// indexes are not valid for the input, or the input contains non-ASCII +// characters, nil will be returned. +func SubString(input string, start, stop int, reverse bool) *string { + if start < 0 || stop < 1 || start >= stop || len(input) < stop { + return nil + } + + for _, r := range input { + if r > 127 { + return nil + } + } + + if !reverse { + v := input[start:stop] + return &v + } + + rStart := len(input) - stop + rStop := len(input) - start + return SubString(input, rStart, rStop, false) +} diff --git a/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/uri.go b/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/uri.go new file mode 100644 index 00000000..0c115412 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/uri.go @@ -0,0 +1,130 @@ +package rulesfn + +import ( + "fmt" + "net" + "net/url" + "strings" + + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// IsValidHostLabel returns whether the input is a single valid [RFC 1123] host +// label. If allowSubDomains is true, validation also allows nested host +// labels separated by dots. Returns false if the input is not a valid host +// label. +// +// [RFC 1123]: https://www.ietf.org/rfc/rfc1123.txt +func IsValidHostLabel(input string, allowSubDomains bool) bool { + var labels []string + if allowSubDomains { + labels = strings.Split(input, ".") + } else { + labels = []string{input} + } + + for _, label := range labels { + if !smithyhttp.ValidHostLabel(label) { + return false + } + } + + return true +} + +// ParseURL returns a [URL] if the provided string could be parsed. Returns nil +// if the string could not be parsed, if it contains a query string, or if it +// uses a scheme other than http or https. +// +// If the input URL string contains an IP6 address with a zone index, the +// returned [URL.Authority] value will contain the percent escaped (%) +// zone index separator. +func ParseURL(input string) *URL { + u, err := url.Parse(input) + if err != nil { + return nil + } + + if u.RawQuery != "" { + return nil + } + + if u.Scheme != "http" && u.Scheme != "https" { + return nil + } + + normalizedPath := u.Path + if !strings.HasPrefix(normalizedPath, "/") { + normalizedPath = "/" + normalizedPath + } + if !strings.HasSuffix(normalizedPath, "/") { + normalizedPath = normalizedPath + "/" + } + + // IP6 hosts may have zone indexes that need to be escaped to be valid in a + // URI. The Go URL parser will unescape the `%25` into `%`. This needs to + // be reverted since the returned URL will be used in string builders.
+ authority := strings.ReplaceAll(u.Host, "%", "%25") + + return &URL{ + Scheme: u.Scheme, + Authority: authority, + Path: u.Path, + NormalizedPath: normalizedPath, + IsIp: net.ParseIP(hostnameWithoutZone(u)) != nil, + } +} + +// URL provides the structure describing the parts of a parsed URL returned by +// [ParseURL]. +type URL struct { + Scheme string // https://www.rfc-editor.org/rfc/rfc3986#section-3.1 + Authority string // https://www.rfc-editor.org/rfc/rfc3986#section-3.2 + Path string // https://www.rfc-editor.org/rfc/rfc3986#section-3.3 + NormalizedPath string // https://www.rfc-editor.org/rfc/rfc3986#section-6.2.3 + IsIp bool +} + +// URIEncode returns a percent-encoded [RFC3986 section 2.1] version of the +// input string. +// +// [RFC3986 section 2.1]: https://www.rfc-editor.org/rfc/rfc3986#section-2.1 +func URIEncode(input string) string { + var output strings.Builder + for _, c := range []byte(input) { + if validPercentEncodedChar(c) { + output.WriteByte(c) + continue + } + + fmt.Fprintf(&output, "%%%X", c) + } + + return output.String() +} + +func validPercentEncodedChar(c byte) bool { + return (c >= 'a' && c <= 'z') || + (c >= 'A' && c <= 'Z') || + (c >= '0' && c <= '9') || + c == '-' || c == '_' || c == '.' || c == '~' +} + +// hostnameWithoutZone implements u.Hostname() but strips the ipv6 zone ID (if present) +// such that net.ParseIP can still recognize IPv6 addresses with zone IDs. +// +// FUTURE(10/2023): netip.ParseAddr handles this natively but we can't take +// that package as a dependency yet due to our min go version (1.15, netip +// starts in 1.18). When we align with go runtime deprecation policy in +// 10/2023, we can remove this. +func hostnameWithoutZone(u *url.URL) string { + full := u.Hostname() + + // this more or less mimics the internals of net/ (see unexported + // splitHostZone in that source) but throws the zone away because we don't + // need it + if i := strings.LastIndex(full, "%"); i > -1 { + return full[:i] + } + return full +} diff --git a/vendor/github.com/aws/smithy-go/go_module_metadata.go b/vendor/github.com/aws/smithy-go/go_module_metadata.go index 8eaac41e..cd6f7fa4 100644 --- a/vendor/github.com/aws/smithy-go/go_module_metadata.go +++ b/vendor/github.com/aws/smithy-go/go_module_metadata.go @@ -3,4 +3,4 @@ package smithy // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.13.5" +const goModuleVersion = "1.19.0" diff --git a/vendor/github.com/aws/smithy-go/properties.go b/vendor/github.com/aws/smithy-go/properties.go new file mode 100644 index 00000000..c9af66c0 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/properties.go @@ -0,0 +1,62 @@ +package smithy + +// PropertiesReader provides an interface for reading metadata from the +// underlying metadata container. +type PropertiesReader interface { + Get(key interface{}) interface{} +} + +// Properties provides storing and reading metadata values. Keys may be any +// comparable value type. Get and Set will panic if a key is not comparable. +// +// The zero value for a Properties instance is ready for reads/writes without +// any additional initialization. +type Properties struct { + values map[interface{}]interface{} +} + +// Get attempts to retrieve the value the key points to. Returns nil if the +// key was not found. +// +// Panics if key type is not comparable. +func (m *Properties) Get(key interface{}) interface{} { + m.lazyInit() + return m.values[key] +} + +// Set stores the value pointed to by the key.
If a value already exists at +// that key it will be replaced with the new value. +// +// Panics if the key type is not comparable. +func (m *Properties) Set(key, value interface{}) { + m.lazyInit() + m.values[key] = value +} + +// Has returns whether the key exists in the metadata. +// +// Panics if the key type is not comparable. +func (m *Properties) Has(key interface{}) bool { + m.lazyInit() + _, ok := m.values[key] + return ok +} + +// SetAll accepts all of the given Properties into the receiver, overwriting +// any existing keys in the case of conflicts. +func (m *Properties) SetAll(other *Properties) { + if other.values == nil { + return + } + + m.lazyInit() + for k, v := range other.values { + m.values[k] = v + } +} + +func (m *Properties) lazyInit() { + if m.values == nil { + m.values = map[interface{}]interface{}{} + } +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/auth.go b/vendor/github.com/aws/smithy-go/transport/http/auth.go new file mode 100644 index 00000000..58e1ab5e --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/auth.go @@ -0,0 +1,21 @@ +package http + +import ( + "context" + + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/auth" +) + +// AuthScheme defines an HTTP authentication scheme. +type AuthScheme interface { + SchemeID() string + IdentityResolver(auth.IdentityResolverOptions) auth.IdentityResolver + Signer() Signer +} + +// Signer defines the interface through which HTTP requests are supplemented +// with an Identity. +type Signer interface { + SignRequest(context.Context, *Request, auth.Identity, smithy.Properties) error +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/auth_schemes.go b/vendor/github.com/aws/smithy-go/transport/http/auth_schemes.go new file mode 100644 index 00000000..d60cf2a6 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/auth_schemes.go @@ -0,0 +1,45 @@ +package http + +import ( + "context" + + smithy "github.com/aws/smithy-go" + "github.com/aws/smithy-go/auth" +) + +// NewAnonymousScheme returns the anonymous HTTP auth scheme. +func NewAnonymousScheme() AuthScheme { + return &authScheme{ + schemeID: auth.SchemeIDAnonymous, + signer: &nopSigner{}, + } +} + +// authScheme is parameterized to generically implement the exported AuthScheme +// interface +type authScheme struct { + schemeID string + signer Signer +} + +var _ AuthScheme = (*authScheme)(nil) + +func (s *authScheme) SchemeID() string { + return s.schemeID +} + +func (s *authScheme) IdentityResolver(o auth.IdentityResolverOptions) auth.IdentityResolver { + return o.GetIdentityResolver(s.schemeID) +} + +func (s *authScheme) Signer() Signer { + return s.signer +} + +type nopSigner struct{} + +var _ Signer = (*nopSigner)(nil) + +func (*nopSigner) SignRequest(context.Context, *Request, auth.Identity, smithy.Properties) error { + return nil +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_header_comment.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_header_comment.go new file mode 100644 index 00000000..855c2272 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_header_comment.go @@ -0,0 +1,81 @@ +package http + +import ( + "context" + "fmt" + "net/http" + + "github.com/aws/smithy-go/middleware" +) + +// WithHeaderComment instruments a middleware stack to append an HTTP field +// comment to the given header as specified in RFC 9110 +// (https://www.rfc-editor.org/rfc/rfc9110#name-comments). +// +// The header is case-insensitive. 
If the provided header exists when the +// middleware runs, the content will be inserted as-is enclosed in parentheses. +// +// Note that per the HTTP specification, comments are only allowed in fields +// containing "comment" as part of their field value definition, but this API +// will NOT verify whether the provided header is one of them. +// +// WithHeaderComment MAY be applied more than once to a middleware stack and/or +// more than once per header. +func WithHeaderComment(header, content string) func(*middleware.Stack) error { + return func(s *middleware.Stack) error { + m, err := getOrAddHeaderComment(s) + if err != nil { + return fmt.Errorf("get or add header comment: %v", err) + } + + m.values.Add(header, content) + return nil + } +} + +type headerCommentMiddleware struct { + values http.Header // hijack case-insensitive access APIs +} + +func (*headerCommentMiddleware) ID() string { + return "headerComment" +} + +func (m *headerCommentMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + r, ok := in.Request.(*Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + for h, contents := range m.values { + for _, c := range contents { + if existing := r.Header.Get(h); existing != "" { + r.Header.Set(h, fmt.Sprintf("%s (%s)", existing, c)) + } + } + } + + return next.HandleBuild(ctx, in) +} + +func getOrAddHeaderComment(s *middleware.Stack) (*headerCommentMiddleware, error) { + id := (*headerCommentMiddleware)(nil).ID() + m, ok := s.Build.Get(id) + if !ok { + m := &headerCommentMiddleware{values: http.Header{}} + if err := s.Build.Add(m, middleware.After); err != nil { + return nil, fmt.Errorf("add build: %v", err) + } + + return m, nil + } + + hc, ok := m.(*headerCommentMiddleware) + if !ok { + return nil, fmt.Errorf("existing middleware w/ id %s is not *headerCommentMiddleware", id) + } + + return hc, nil +} diff --git a/vendor/github.com/aws/smithy-go/transport/http/properties.go b/vendor/github.com/aws/smithy-go/transport/http/properties.go new file mode 100644 index 00000000..c65aa393 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/properties.go @@ -0,0 +1,80 @@ +package http + +import smithy "github.com/aws/smithy-go" + +type ( + sigV4SigningNameKey struct{} + sigV4SigningRegionKey struct{} + + sigV4ASigningNameKey struct{} + sigV4ASigningRegionsKey struct{} + + isUnsignedPayloadKey struct{} + disableDoubleEncodingKey struct{} +) + +// GetSigV4SigningName gets the signing name from Properties. +func GetSigV4SigningName(p *smithy.Properties) (string, bool) { + v, ok := p.Get(sigV4SigningNameKey{}).(string) + return v, ok +} + +// SetSigV4SigningName sets the signing name on Properties. +func SetSigV4SigningName(p *smithy.Properties, name string) { + p.Set(sigV4SigningNameKey{}, name) +} + +// GetSigV4SigningRegion gets the signing region from Properties. +func GetSigV4SigningRegion(p *smithy.Properties) (string, bool) { + v, ok := p.Get(sigV4SigningRegionKey{}).(string) + return v, ok +} + +// SetSigV4SigningRegion sets the signing region on Properties. +func SetSigV4SigningRegion(p *smithy.Properties, region string) { + p.Set(sigV4SigningRegionKey{}, region) +} + +// GetSigV4ASigningName gets the v4a signing name from Properties. 
+func GetSigV4ASigningName(p *smithy.Properties) (string, bool) { + v, ok := p.Get(sigV4ASigningNameKey{}).(string) + return v, ok +} + +// SetSigV4ASigningName sets the signing name on Properties. +func SetSigV4ASigningName(p *smithy.Properties, name string) { + p.Set(sigV4ASigningNameKey{}, name) +} + +// GetSigV4ASigningRegion gets the v4a signing region set from Properties. +func GetSigV4ASigningRegions(p *smithy.Properties) ([]string, bool) { + v, ok := p.Get(sigV4ASigningRegionsKey{}).([]string) + return v, ok +} + +// SetSigV4ASigningRegions sets the v4a signing region set on Properties. +func SetSigV4ASigningRegions(p *smithy.Properties, regions []string) { + p.Set(sigV4ASigningRegionsKey{}, regions) +} + +// GetIsUnsignedPayload gets whether the payload is unsigned from Properties. +func GetIsUnsignedPayload(p *smithy.Properties) (bool, bool) { + v, ok := p.Get(isUnsignedPayloadKey{}).(bool) + return v, ok +} + +// SetIsUnsignedPayload sets whether the payload is unsigned on Properties. +func SetIsUnsignedPayload(p *smithy.Properties, isUnsignedPayload bool) { + p.Set(isUnsignedPayloadKey{}, isUnsignedPayload) +} + +// GetDisableDoubleEncoding gets whether the payload is unsigned from Properties. +func GetDisableDoubleEncoding(p *smithy.Properties) (bool, bool) { + v, ok := p.Get(disableDoubleEncodingKey{}).(bool) + return v, ok +} + +// SetDisableDoubleEncoding sets whether the payload is unsigned on Properties. +func SetDisableDoubleEncoding(p *smithy.Properties, disableDoubleEncoding bool) { + p.Set(disableDoubleEncodingKey{}, disableDoubleEncoding) +} diff --git a/vendor/github.com/danieljoos/wincred/README.md b/vendor/github.com/danieljoos/wincred/README.md index 14f28622..8a879b0c 100644 --- a/vendor/github.com/danieljoos/wincred/README.md +++ b/vendor/github.com/danieljoos/wincred/README.md @@ -4,7 +4,7 @@ wincred Go wrapper around the Windows Credential Manager API functions. 
[![GitHub release](https://img.shields.io/github/release/danieljoos/wincred.svg?style=flat-square)](https://github.com/danieljoos/wincred/releases/latest) -[![Test Status](https://img.shields.io/github/workflow/status/danieljoos/wincred/test?label=test&logo=github&style=flat-square)](https://github.com/danieljoos/wincred/actions?query=workflow%3Atest) +[![Test Status](https://img.shields.io/github/actions/workflow/status/danieljoos/wincred/test.yml?label=test&logo=github&style=flat-square)](https://github.com/danieljoos/wincred/actions?query=workflow%3Atest) [![Go Report Card](https://goreportcard.com/badge/github.com/danieljoos/wincred)](https://goreportcard.com/report/github.com/danieljoos/wincred) [![Codecov](https://img.shields.io/codecov/c/github/danieljoos/wincred?logo=codecov&style=flat-square)](https://codecov.io/gh/danieljoos/wincred) [![PkgGoDev](https://img.shields.io/badge/go.dev-docs-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/github.com/danieljoos/wincred) diff --git a/vendor/github.com/danieljoos/wincred/sys.go b/vendor/github.com/danieljoos/wincred/sys.go index 033d3c4f..fb8a6ac0 100644 --- a/vendor/github.com/danieljoos/wincred/sys.go +++ b/vendor/github.com/danieljoos/wincred/sys.go @@ -1,21 +1,23 @@ +//go:build windows // +build windows package wincred import ( "reflect" + "syscall" "unsafe" - syscall "golang.org/x/sys/windows" + "golang.org/x/sys/windows" ) var ( - modadvapi32 = syscall.NewLazyDLL("advapi32.dll") - procCredRead proc = modadvapi32.NewProc("CredReadW") + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + procCredRead = modadvapi32.NewProc("CredReadW") procCredWrite proc = modadvapi32.NewProc("CredWriteW") procCredDelete proc = modadvapi32.NewProc("CredDeleteW") procCredFree proc = modadvapi32.NewProc("CredFree") - procCredEnumerate proc = modadvapi32.NewProc("CredEnumerateW") + procCredEnumerate = modadvapi32.NewProc("CredEnumerateW") ) // Interface for syscall.Proc: helps testing @@ -29,7 +31,7 @@ type sysCREDENTIAL struct { Type uint32 TargetName *uint16 Comment *uint16 - LastWritten syscall.Filetime + LastWritten windows.Filetime CredentialBlobSize uint32 CredentialBlob uintptr Persist uint32 @@ -59,15 +61,17 @@ const ( sysCRED_TYPE_DOMAIN_EXTENDED sysCRED_TYPE = 0x6 // https://docs.microsoft.com/en-us/windows/desktop/Debug/system-error-codes - sysERROR_NOT_FOUND = syscall.Errno(1168) - sysERROR_INVALID_PARAMETER = syscall.Errno(87) + sysERROR_NOT_FOUND = windows.Errno(1168) + sysERROR_INVALID_PARAMETER = windows.Errno(87) + sysERROR_BAD_USERNAME = windows.Errno(2202) ) // https://docs.microsoft.com/en-us/windows/desktop/api/wincred/nf-wincred-credreadw func sysCredRead(targetName string, typ sysCRED_TYPE) (*Credential, error) { var pcred *sysCREDENTIAL - targetNamePtr, _ := syscall.UTF16PtrFromString(targetName) - ret, _, err := procCredRead.Call( + targetNamePtr, _ := windows.UTF16PtrFromString(targetName) + ret, _, err := syscall.SyscallN( + procCredRead.Addr(), uintptr(unsafe.Pointer(targetNamePtr)), uintptr(typ), 0, @@ -98,7 +102,7 @@ func sysCredWrite(cred *Credential, typ sysCRED_TYPE) error { // https://docs.microsoft.com/en-us/windows/desktop/api/wincred/nf-wincred-creddeletew func sysCredDelete(cred *Credential, typ sysCRED_TYPE) error { - targetNamePtr, _ := syscall.UTF16PtrFromString(cred.TargetName) + targetNamePtr, _ := windows.UTF16PtrFromString(cred.TargetName) ret, _, err := procCredDelete.Call( uintptr(unsafe.Pointer(targetNamePtr)), uintptr(typ), @@ -117,9 +121,10 @@ func sysCredEnumerate(filter 
string, all bool) ([]*Credential, error) {
 	var pcreds uintptr
 	var filterPtr *uint16
 	if !all {
-		filterPtr, _ = syscall.UTF16PtrFromString(filter)
+		filterPtr, _ = windows.UTF16PtrFromString(filter)
 	}
-	ret, _, err := procCredEnumerate.Call(
+	ret, _, err := syscall.SyscallN(
+		procCredEnumerate.Addr(),
 		uintptr(unsafe.Pointer(filterPtr)),
 		0,
 		uintptr(unsafe.Pointer(&count)),
diff --git a/vendor/github.com/danieljoos/wincred/wincred.go b/vendor/github.com/danieljoos/wincred/wincred.go
index 998de7b5..5632ee90 100644
--- a/vendor/github.com/danieljoos/wincred/wincred.go
+++ b/vendor/github.com/danieljoos/wincred/wincred.go
@@ -16,6 +16,9 @@ const (
 	// This error constant can be used to check if the given function parameters were invalid.
 	// For example when trying to create a new generic credential with an empty target name.
 	ErrInvalidParameter = sysERROR_INVALID_PARAMETER
+
+	// ErrBadUsername is returned when the credential's username is invalid.
+	ErrBadUsername = sysERROR_BAD_USERNAME
 )
 
 // GetGenericCredential fetches the generic credential with the given name from Windows credential manager.
diff --git a/vendor/github.com/dvsekhvalnov/jose2go/README.md b/vendor/github.com/dvsekhvalnov/jose2go/README.md
index 9df801c5..bbf0ef75 100644
--- a/vendor/github.com/dvsekhvalnov/jose2go/README.md
+++ b/vendor/github.com/dvsekhvalnov/jose2go/README.md
@@ -12,9 +12,11 @@ Extensively unit tested and cross tested (100+ tests) for compatibility with [jo
 
 ## Status
-Used in production. GA ready. Current version is 1.5.
+Used in production. GA ready. Current version is 1.6.
 
 ## Important
+v1.6 security tuning options
+
 v1.5 bug fix release
 
 v1.4 changes default behavior of inserting `typ=JWT` header if not overridden. As of 1.4 no
@@ -250,7 +252,7 @@ func main() {
 	    //go use token
 	    fmt.Printf("\ntoken = %v\n",token)
 	}
-}
+}
 ```
 
 #### AES Key Wrap key management family of algorithms
@@ -330,7 +332,7 @@ func main() {
 	    //go use token
 	    fmt.Printf("\ntoken = %v\n",token)
 	}
-}
+}
 ```
 
 #### PBES2 using HMAC SHA with AES Key Wrap key management family of algorithms
@@ -482,7 +484,7 @@ func main() {
 	    //and/or use headers
 	    fmt.Printf("\nheaders = %v\n",headers)
 	}
-}
+}
 ```
 
 **RSA-OAEP-256**, **RSA-OAEP** and **RSA1_5** key management algorithms expecting `*rsa.PrivateKey` private key of corresponding length:
@@ -522,7 +524,7 @@ func main() {
 	    //and/or use headers
 	    fmt.Printf("\nheaders = %v\n",headers)
 	}
-}
+}
 ```
 
 **PBES2-HS256+A128KW, PBES2-HS384+A192KW, PBES2-HS512+A256KW** key management algorithms expect a `string` passphrase as a key
@@ -679,6 +681,8 @@ func main() {
 }
 ```
 
+Two-phase validation can be used for implementing additional things like strict `alg` or `enc` validation, see [Customizing library for security](#customizing-library-for-security) for more information.
+
 ### Working with binary payload
 In addition to working with string payloads (typical use-case) `jose2go` supports encoding and decoding of raw binary data. `jose.DecodeBytes`, `jose.SignBytes`
@@ -776,7 +780,7 @@ func main() {
 	    //go use token
 	    fmt.Printf("\ntoken = %v\n",token)
 	}
-}
+}
 ```
 
 ### Dealing with keys
 **jose2go** provides several helper methods to simplify loading & importing of elliptic and rsa keys. Import `jose2go/keys/rsa` or `jose2go/keys/ecc` respectively:
@@ -925,7 +929,88 @@ func main() {
 
 ### More examples
 Checkout `jose_test.go` for more examples.
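+
+For instance, a minimal precheck sketch (a hedged illustration rather than library code:
+it assumes a `token` string and a `publicKey` already loaded via `Rsa.ReadPublic` as in
+the examples above) that rejects unexpected `alg` values before any signature work is
+attempted:
+
+```Go
+payload, headers, err := jose.Decode(token, func(headers map[string]interface{}, payload string) interface{} {
+	// first phase: inspect headers only; returning an error aborts decoding
+	alg, _ := headers["alg"].(string)
+	if alg != "RS256" {
+		return errors.New("unexpected alg, aborting")
+	}
+
+	// second phase: return the verification key to continue decoding
+	return publicKey
+})
+```
+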
+## Customizing library for security
+In response to ever increasing attacks on various JWT implementations, `jose2go` as of version v1.6 introduced a number of additional security controls to limit the potential attack surface on services and projects using the library.
+
+### Deregister algorithm implementations
+One can use the following methods to deregister any signing, encryption, key management or compression algorithm from the runtime suite that is considered unsafe or simply not expected by the service.
+
+- `func DeregisterJwa(alg string) JwaAlgorithm`
+- `func DeregisterJwe(alg string) JweEncryption`
+- `func DeregisterJws(alg string) JwsAlgorithm`
+- `func DeregisterJwc(alg string) JwcAlgorithm`
+
+All of them expect an alg name matching the `jose` constants and return the implementation that has been deregistered.
+
+### Strict validation
+Sometimes it is desirable to verify that the `alg` or `enc` values match the expected ones before attempting to decode the actual payload.
+`jose2go` provides helper matchers to be used within a [Two-phase validation](#two-phase-validation) precheck:
+
+- `jose.Alg(key, alg)` - to match the alg header
+- `jose.Enc(key, alg, enc)` - to match both the alg and enc headers
+
+```Go
+	token := "eyJhbGciOiJSUzI1NiIsImN0eSI6InRleHRcL3BsYWluIn0.eyJoZWxsbyI6ICJ3b3JsZCJ9.NL_dfVpZkhNn4bZpCyMq5TmnXbT4yiyecuB6Kax_lV8Yq2dG8wLfea-T4UKnrjLOwxlbwLwuKzffWcnWv3LVAWfeBxhGTa0c4_0TX_wzLnsgLuU6s9M2GBkAIuSMHY6UTFumJlEeRBeiqZNrlqvmAzQ9ppJHfWWkW4stcgLCLMAZbTqvRSppC1SMxnvPXnZSWn_Fk_q3oGKWw6Nf0-j-aOhK0S0Lcr0PV69ZE4xBYM9PUS1MpMe2zF5J3Tqlc1VBcJ94fjDj1F7y8twmMT3H1PI9RozO-21R0SiXZ_a93fxhE_l_dj5drgOek7jUN9uBDjkXUwJPAyp9YPehrjyLdw"
+
+	key := Rsa.ReadPublic(....)
+
+	// we expect the 'RS256' alg here; if it matches, continue to decode with the key
+	payload, header, err := jose.Decode(token, Alg(key, "RS256"))
+
+	// or match both alg and enc for decrypting scenarios
+	payload, header, err := jose.Decode(token, Enc(key, "RSA-OAEP-256", "A192CBC-HS384"))
+```
+
+### Customizing PBKDF2
+As it is quite easy to abuse the PBES2 family of algorithms by forging a header with an extra large p2c value, `jose2go` introduced iteration count limits in v1.6 to reduce runtime exposure.
+
+By default, maxIterations is set according to the [OWASP PBKDF2](https://cheatsheetseries.owasp.org/cheatsheets/Password_Storage_Cheat_Sheet.html#pbkdf2) recommendations:
+
+```
+PBES2-HS256+A128KW: 1300000
+PBES2-HS384+A192KW: 950000
+PBES2-HS512+A256KW: 600000
+```
+
+while minIterations is kept at 0 for backward compatibility.
+
+If it is desired to implement different limits, register a new implementation with new parameters:
+
+```Go
+	jose.RegisterJwa(NewPbse2HmacAesKWAlg(128, 1300000, 1300000))
+	jose.RegisterJwa(NewPbse2HmacAesKWAlg(192, 950000, 950000))
+	jose.RegisterJwa(NewPbse2HmacAesKWAlg(256, 600000, 600000))
+```
+
+In case you can't upgrade to the latest version but would like to have protection against PBES2 abuse, it is recommended to stick with a [Two-phase validation](#two-phase-validation) precheck before decoding:
+
+```Go
+test, headers, err := Decode(token, func(headers map[string]interface{}, payload string) interface{} {
+	alg := headers["alg"].(string)
+	p2c := headers["p2c"].(float64)
+
+	if strings.HasPrefix(alg, "PBES2-") && int64(p2c) > 100 {
+		return errors.New("Too many p2c iterations, aborting")
+	}
+
+	return "top secret"
+})
+```
+
 ## Changelog
+### 1.6
+- ability to deregister specific algorithms
+- configurable min/max restrictions for PBES2-HS256+A128KW, PBES2-HS384+A192KW, PBES2-HS512+A256KW
+
+### 1.5
+- security and bug fixes
+
+### 1.4
+- removed extra headers to be inserted by library
+
+### 1.3
+- security fixes: Invalid Curve Attack on NIST curves
+
 ### 1.2
 - interface to access token headers after decoding
 - interface to provide extra headers for token encoding
diff --git a/vendor/github.com/dvsekhvalnov/jose2go/jose.go b/vendor/github.com/dvsekhvalnov/jose2go/jose.go
index 1f1c19ed..3549a918 100644
--- a/vendor/github.com/dvsekhvalnov/jose2go/jose.go
+++ b/vendor/github.com/dvsekhvalnov/jose2go/jose.go
@@ -1,4 +1,4 @@
-//Package jose provides high level functions for producing (signing, encrypting and
+// Package jose provides high level functions for producing (signing, encrypting and
 // compressing) or consuming (decoding) Json Web Tokens using Java Object Signing and Encryption spec
 package jose
@@ -79,6 +79,42 @@ func RegisterJwc(alg JwcAlgorithm) {
 	jwcCompressors[alg.Name()] = alg
 }
 
+// DeregisterJwa deregisters an existing key management algorithm
+func DeregisterJwa(alg string) JwaAlgorithm {
+	jwa := jwaAlgorithms[alg]
+
+	delete(jwaAlgorithms, alg)
+
+	return jwa
+}
+
+// DeregisterJws deregisters an existing signing algorithm
+func DeregisterJws(alg string) JwsAlgorithm {
+	jws := jwsHashers[alg]
+
+	delete(jwsHashers, alg)
+
+	return jws
+}
+
+// DeregisterJwe deregisters an existing encryption algorithm
+func DeregisterJwe(alg string) JweEncryption {
+	jwe := jweEncryptors[alg]
+
+	delete(jweEncryptors, alg)
+
+	return jwe
+}
+
+// DeregisterJwc deregisters an existing compression algorithm
+func DeregisterJwc(alg string) JwcAlgorithm {
+	jwc := jwcCompressors[alg]
+
+	delete(jwcCompressors, alg)
+
+	return jwc
+}
+
 // JweEncryption is a contract for implementing encryption algorithm
 type JweEncryption interface {
 	Encrypt(aad, plainText, cek []byte) (iv, cipherText, authTag []byte, err error)
@@ -422,3 +458,28 @@ func retrieveActualKey(headers map[string]interface{}, payload string, key inter
 
 	return key, nil
 }
+
+// Alg returns a two-phase validation precheck that hands the given key to the
+// decoder only when the token's 'alg' header matches the expected value.
+func Alg(key interface{}, jws string) func(headers map[string]interface{}, payload string) interface{} {
+	return func(headers map[string]interface{}, payload string) interface{} {
+		alg := headers["alg"].(string)
+
+		if jws == alg {
+			return key
+		}
+
+		return errors.New("Expected alg to be '" + jws + "' but got '" + alg + "'")
+	}
+}
+
+// Enc returns a two-phase validation precheck that hands the given key to the
+// decoder only when both the 'alg' and 'enc' headers match the expected values.
+func Enc(key interface{}, jwa string, jwe string) func(headers map[string]interface{}, payload string) interface{} {
+	return func(headers map[string]interface{}, payload string) interface{} {
+		alg := headers["alg"].(string)
+
enc := headers["enc"].(string) + + if jwa == alg && jwe == enc { + return key + } + + return errors.New("Expected alg to be '" + jwa + "' and enc to be '" + jwe + "' but got '" + alg + "' and '" + enc + "'") + } +} diff --git a/vendor/github.com/dvsekhvalnov/jose2go/keys/ecc/ecc.go b/vendor/github.com/dvsekhvalnov/jose2go/keys/ecc/ecc.go index b1debe48..486d8165 100644 --- a/vendor/github.com/dvsekhvalnov/jose2go/keys/ecc/ecc.go +++ b/vendor/github.com/dvsekhvalnov/jose2go/keys/ecc/ecc.go @@ -2,44 +2,44 @@ package ecc import ( - "math/big" "crypto/ecdsa" - "crypto/elliptic" - "crypto/x509" - "encoding/pem" + "crypto/elliptic" + "crypto/x509" + "encoding/pem" "errors" + "math/big" ) // ReadPublic loads ecdsa.PublicKey from given PKCS1 X509 or PKIX blobs -func ReadPublic(raw []byte) (key *ecdsa.PublicKey,err error) { +func ReadPublic(raw []byte) (key *ecdsa.PublicKey, err error) { var encoded *pem.Block - + if encoded, _ = pem.Decode(raw); encoded == nil { return nil, errors.New("Ecc.ReadPublic(): Key must be PEM encoded PKCS1 X509 certificate or PKIX EC public key") } - + var parsedKey interface{} var cert *x509.Certificate - + if parsedKey, err = x509.ParsePKIXPublicKey(encoded.Bytes); err != nil { - if cert,err = x509.ParseCertificate(encoded.Bytes);err!=nil { + if cert, err = x509.ParseCertificate(encoded.Bytes); err != nil { return nil, err } - - parsedKey=cert.PublicKey + + parsedKey = cert.PublicKey } - + var ok bool - + if key, ok = parsedKey.(*ecdsa.PublicKey); !ok { return nil, errors.New("Ecc.ReadPublic(): Key is not a valid *ecdsa.PublicKey") } - + return key, nil } // ReadPrivate loads ecdsa.PrivateKey from given PKCS1 or PKCS8 blobs -func ReadPrivate(raw []byte) (key *ecdsa.PrivateKey,err error) { +func ReadPrivate(raw []byte) (key *ecdsa.PrivateKey, err error) { var encoded *pem.Block if encoded, _ = pem.Decode(raw); encoded == nil { @@ -48,41 +48,45 @@ func ReadPrivate(raw []byte) (key *ecdsa.PrivateKey,err error) { var parsedKey interface{} - if parsedKey,err=x509.ParseECPrivateKey(encoded.Bytes);err!=nil { - if parsedKey, err = x509.ParsePKCS8PrivateKey(encoded.Bytes);err!=nil { - return nil,err + if parsedKey, err = x509.ParseECPrivateKey(encoded.Bytes); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(encoded.Bytes); err != nil { + return nil, err } } var ok bool - - if key,ok=parsedKey.(*ecdsa.PrivateKey);!ok { + + if key, ok = parsedKey.(*ecdsa.PrivateKey); !ok { return nil, errors.New("Ecc.ReadPrivate(): Key is not valid *ecdsa.PrivateKey") } - - return key,nil + + return key, nil } // NewPublic constructs ecdsa.PublicKey from given (X,Y) -func NewPublic(x,y []byte) (*ecdsa.PublicKey) { - return &ecdsa.PublicKey{ Curve: curve(len(x)), - X:new(big.Int).SetBytes(x), - Y:new(big.Int).SetBytes(y) } +func NewPublic(x, y []byte) *ecdsa.PublicKey { + return &ecdsa.PublicKey{Curve: curve(len(x)), + X: new(big.Int).SetBytes(x), + Y: new(big.Int).SetBytes(y)} } // NewPrivate constructs ecdsa.PrivateKey from given (X,Y) and D -func NewPrivate(x,y,d []byte) (*ecdsa.PrivateKey) { - return &ecdsa.PrivateKey {D:new(big.Int).SetBytes(d), - PublicKey: ecdsa.PublicKey{ Curve:curve(len(x)), - X:new(big.Int).SetBytes(x), - Y:new(big.Int).SetBytes(y)}} +func NewPrivate(x, y, d []byte) *ecdsa.PrivateKey { + return &ecdsa.PrivateKey{D: new(big.Int).SetBytes(d), + PublicKey: ecdsa.PublicKey{Curve: curve(len(x)), + X: new(big.Int).SetBytes(x), + Y: new(big.Int).SetBytes(y)}} } -func curve(size int) (elliptic.Curve) { +func curve(size int) elliptic.Curve { switch size { - case 32: 
return elliptic.P256()
-	case 48: return elliptic.P384()
-	case 65,66: return elliptic.P521() //adjust for P-521 curve, which can be 65 or 66 bytes
-	default: return nil //unsupported curve
+	case 31, 32:
+		return elliptic.P256()
+	case 48:
+		return elliptic.P384()
+	case 65, 66:
+		return elliptic.P521() //adjust for P-521 curve, which can be 65 or 66 bytes
+	default:
+		return nil //unsupported curve
 	}
-}
\ No newline at end of file
+}
diff --git a/vendor/github.com/dvsekhvalnov/jose2go/pbse2_hmac_aeskw.go b/vendor/github.com/dvsekhvalnov/jose2go/pbse2_hmac_aeskw.go
index baeaf9c7..2915ae6b 100644
--- a/vendor/github.com/dvsekhvalnov/jose2go/pbse2_hmac_aeskw.go
+++ b/vendor/github.com/dvsekhvalnov/jose2go/pbse2_hmac_aeskw.go
@@ -4,6 +4,7 @@ import (
 	"crypto/sha256"
 	"crypto/sha512"
 	"errors"
+	"fmt"
 	"hash"
 
 	"github.com/dvsekhvalnov/jose2go/arrays"
@@ -12,15 +13,28 @@ )
 
 func init() {
-	RegisterJwa(&Pbse2HmacAesKW{keySizeBits: 128, aesKW: &AesKW{keySizeBits: 128}})
-	RegisterJwa(&Pbse2HmacAesKW{keySizeBits: 192, aesKW: &AesKW{keySizeBits: 192}})
-	RegisterJwa(&Pbse2HmacAesKW{keySizeBits: 256, aesKW: &AesKW{keySizeBits: 256}})
+	RegisterJwa(NewPbse2HmacAesKWAlg(128, 1300000, 0))
+	RegisterJwa(NewPbse2HmacAesKWAlg(192, 950000, 0))
+	RegisterJwa(NewPbse2HmacAesKWAlg(256, 600000, 0))
 }
 
 // PBSE2 with HMAC key management algorithm implementation
 type Pbse2HmacAesKW struct {
-	keySizeBits int
-	aesKW       JwaAlgorithm
+	keySizeBits   int
+	aesKW         JwaAlgorithm
+	maxIterations int64
+	minIterations int64
+}
+
+// NewPbse2HmacAesKWAlg constructs a PBES2 key management algorithm with the
+// given key size and min/max PBKDF2 iteration count limits.
+func NewPbse2HmacAesKWAlg(keySize int, maxIters int64, minIters int64) JwaAlgorithm {
+	switch keySize {
+	case 128:
+		return &Pbse2HmacAesKW{keySizeBits: 128, maxIterations: maxIters, minIterations: minIters, aesKW: &AesKW{keySizeBits: 128}}
+	case 192:
+		return &Pbse2HmacAesKW{keySizeBits: 192, maxIterations: maxIters, minIterations: minIters, aesKW: &AesKW{keySizeBits: 192}}
+	default:
+		return &Pbse2HmacAesKW{keySizeBits: 256, maxIterations: maxIters, minIterations: minIters, aesKW: &AesKW{keySizeBits: 256}}
+	}
 }
 
 func (alg *Pbse2HmacAesKW) Name() string {
@@ -46,6 +60,21 @@
 		return nil, nil, err
 	}
 
+	// use the user-provided iteration count if any
+	if p2c, ok := header["p2c"].(int); ok {
+		iterationCount = p2c
+	}
+
+	if int64(iterationCount) > alg.maxIterations {
+		return nil, nil, fmt.Errorf("Pbse2HmacAesKW.WrapNewKey(): expected 'p2c' to be less than %v but got %v", alg.maxIterations, iterationCount)
+	}
+
+	if int64(iterationCount) < alg.minIterations {
+		return nil, nil, fmt.Errorf("Pbse2HmacAesKW.WrapNewKey(): expected 'p2c' to be higher than %v but got %v", alg.minIterations, iterationCount)
+	}
+
 	header["p2c"] = iterationCount
 	header["p2s"] = base64url.Encode(saltInput)
 
@@ -69,8 +98,18 @@
 		return nil, errors.New("Pbse2HmacAesKW.Unwrap(): expected 'p2c' param in JWT header, but was not found.")
 	}
 
+	if int64(p2c) > alg.maxIterations {
+		return nil, fmt.Errorf("Pbse2HmacAesKW.Unwrap(): expected 'p2c' to be less than %v but got %v", alg.maxIterations, p2c)
+	}
+
+	if int64(p2c) < alg.minIterations {
+		return nil, fmt.Errorf("Pbse2HmacAesKW.Unwrap(): expected 'p2c' to be higher than %v but got %v", alg.minIterations, p2c)
+	}
+
 	if p2s, ok = header["p2s"].(string); !ok {
-		return nil, errors.New("Pbse2HmacAesKW.Unwrap(): expected 'p2s' param in JWT header, but was not found.")
+
return nil, errors.New("Pbse2HmacAesKW.Unwrap(): expected 'p2s' param in JWT header, but was not found") } var saltInput []byte diff --git a/vendor/github.com/elastic/go-sysinfo/.gitignore b/vendor/github.com/elastic/go-sysinfo/.gitignore index f3827eb9..52a75b73 100644 --- a/vendor/github.com/elastic/go-sysinfo/.gitignore +++ b/vendor/github.com/elastic/go-sysinfo/.gitignore @@ -6,8 +6,6 @@ _obj *TEST.out -main.retry -testing/ssh_config -testing/ve -build/ \ No newline at end of file +build/ +**/testdata/fuzz diff --git a/vendor/github.com/elastic/go-sysinfo/.golangci.yml b/vendor/github.com/elastic/go-sysinfo/.golangci.yml new file mode 100644 index 00000000..5c0e8616 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/.golangci.yml @@ -0,0 +1,16 @@ +--- + +run: + issues-exit-code: 1 + modules-download-mode: readonly + +linters: + disable-all: true + fast: false + enable: + - goimports + - revive + +linters-settings: + goimports: + local-prefixes: github.com/elastic/go-sysinfo diff --git a/vendor/github.com/elastic/go-sysinfo/CHANGELOG.md b/vendor/github.com/elastic/go-sysinfo/CHANGELOG.md deleted file mode 100644 index ab907519..00000000 --- a/vendor/github.com/elastic/go-sysinfo/CHANGELOG.md +++ /dev/null @@ -1,153 +0,0 @@ -# Changelog - -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), -and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). - -## [Unreleased] - -### Added - -### Changed - -### Deprecated - -### Removed - -### Fixed - -## [1.8.1] - -### Fixed - -- Report OS name as Windows 11 when version is >= 10.0.22000. [#118](https://github.com/elastic/go-sysinfo/issues/118) [#121](https://github.com/elastic/go-sysinfo/pull/121) - -## [1.8.0] - -### Added - -- Added the Oracle Linux ("ol") platform to the "redhat" OS family. [#54](https://github.com/elastic/go-sysinfo/issues/54) [#115](https://github.com/elastic/go-sysinfo/pull/115) -- Added the Linux Mint ("linuxmint") platform to the "debian" OS family. [#52](https://github.com/elastic/go-sysinfo/issues/52) - -### Changed - -- Updated module to require Go 1.17. [#111](https://github.com/elastic/go-sysinfo/pull/111) -- The boot time value for Windows is now rounded to the nearest second to provide a more stable value. [#53](https://github.com/elastic/go-sysinfo/issues/53) [#114](https://github.com/elastic/go-sysinfo/pull/114) - -### Fixed - -- Fix handling of environment variables without values on macOS. [#94](https://github.com/elastic/go-sysinfo/pull/94) -- Fix build tags on AIX provider such that CGO is required. [#106](https://github.com/elastic/go-sysinfo/issues/106) - -## [1.7.1] - 2021-10-11 - -### Fixed - -- Fixed getting OS info when an unsupported file or directory is found matching /etc/\*-release [#102](https://github.com/elastic/go-sysinfo/pull/102) - -## [1.7.0] - 2021-02-22 - -### Added - -- Add per-process network stats [#96](https://github.com/elastic/go-sysinfo/pull/96) - -## [1.6.0] - 2021-02-09 - -### Added - -- Add darwin/arm64 support (Apple M1). [#91](https://github.com/elastic/go-sysinfo/pull/91) - -## [1.5.0] - 2021-01-14 - -### Added - -- Added os.type field to host info. 
[#87](https://github.com/elastic/go-sysinfo/pull/87) - -## [1.4.0] - 2020-07-21 - -### Added - -- Add AIX support [#77](https://github.com/elastic/go-sysinfo/pull/77) -- Added detection of containerized cgroup in Kubernetes [#80](https://github.com/elastic/go-sysinfo/pull/80) - -## [1.3.0] - 2020-01-13 - -### Changed - -- Convert NetworkCountersInfo maps to uint64 [#75](https://github.com/elastic/go-sysinfo/pull/75) - -## [1.2.1] - 2020-01-03 - -### Fixed - -- Create a `sidToString` function to deal with API changes in various versions of golang.org/x/sys/windows. [#74](https://github.com/elastic/go-sysinfo/pull/74) - -## [1.2.0] - 2019-12-09 - -### Added - -- Added detection of systemd cgroups to the `IsContainerized` check. [#71](https://github.com/elastic/go-sysinfo/pull/71) -- Added networking counters for Linux hosts. [#72](https://github.com/elastic/go-sysinfo/pull/72) - -## [1.1.1] - 2019-10-29 - -### Fixed - -- Fixed an issue determining the Linux distribution for Fedora 30. [#69](https://github.com/elastic/go-sysinfo/pull/69) - -## [1.1.0] - 2019-08-22 - -### Added - -- Add `VMStat` interface for Linux. [#59](https://github.com/elastic/go-sysinfo/pull/59) - -## [1.0.2] - 2019-07-09 - -### Fixed - -- Fixed a leak when calling the CommandLineToArgv function. [#51](https://github.com/elastic/go-sysinfo/pull/51) -- Fixed a crash when calling the CommandLineToArgv function. [#58](https://github.com/elastic/go-sysinfo/pull/58) - -## [1.0.1] - 2019-05-08 - -### Fixed - -- Add support for new prometheus/procfs API. [#49](https://github.com/elastic/go-sysinfo/pull/49) - -## [1.0.0] - 2019-05-03 - -### Added - -- Add Windows provider implementation. [#22](https://github.com/elastic/go-sysinfo/pull/22) -- Add Windows process provider. [#26](https://github.com/elastic/go-sysinfo/pull/26) -- Add `OpenHandleEnumerator` and `OpenHandleCount` and implement these for Windows. [#27](https://github.com/elastic/go-sysinfo/pull/27) -- Add user info to Process. [#34](https://github.com/elastic/go-sysinfo/pull/34) -- Implement `Processes` for Darwin. [#35](https://github.com/elastic/go-sysinfo/pull/35) -- Add `Parent()` to `Process`. [#46](https://github.com/elastic/go-sysinfo/pull/46) - -### Fixed - -- Fix Windows registry handle leak. [#33](https://github.com/elastic/go-sysinfo/pull/33) -- Fix Linux host ID by search for older locations for the machine-id file. [#44](https://github.com/elastic/go-sysinfo/pull/44) - -### Changed - -- Changed the host containerized check to reduce false positives. 
[#42](https://github.com/elastic/go-sysinfo/pull/42) [#43](https://github.com/elastic/go-sysinfo/pull/43) - -[Unreleased]: https://github.com/elastic/go-sysinfo/compare/v1.8.1...HEAD -[1.8.1]: https://github.com/elastic/go-sysinfo/releases/tag/v1.8.1 -[1.8.0]: https://github.com/elastic/go-sysinfo/releases/tag/v1.8.0 -[1.7.1]: https://github.com/elastic/go-sysinfo/releases/tag/v1.7.1 -[1.7.0]: https://github.com/elastic/go-sysinfo/releases/tag/v1.7.0 -[1.6.0]: https://github.com/elastic/go-sysinfo/releases/tag/v1.6.0 -[1.5.0]: https://github.com/elastic/go-sysinfo/releases/tag/v1.5.0 -[1.4.0]: https://github.com/elastic/go-sysinfo/releases/tag/v1.4.0 -[1.3.0]: https://github.com/elastic/go-sysinfo/releases/tag/v1.3.0 -[1.2.1]: https://github.com/elastic/go-sysinfo/releases/tag/v1.2.1 -[1.2.0]: https://github.com/elastic/go-sysinfo/releases/tag/v1.2.0 -[1.1.1]: https://github.com/elastic/go-sysinfo/releases/tag/v1.1.0 -[1.1.0]: https://github.com/elastic/go-sysinfo/releases/tag/v1.1.0 -[1.0.2]: https://github.com/elastic/go-sysinfo/releases/tag/v1.0.2 -[1.0.1]: https://github.com/elastic/go-sysinfo/releases/tag/v1.0.1 -[1.0.0]: https://github.com/elastic/go-sysinfo/releases/tag/v1.0.0 diff --git a/vendor/github.com/elastic/go-sysinfo/CONTRIBUTING.md b/vendor/github.com/elastic/go-sysinfo/CONTRIBUTING.md new file mode 100644 index 00000000..c206aa31 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/CONTRIBUTING.md @@ -0,0 +1,16 @@ +# Contributing + +Pull requests are welcomed. You must + +- Sign the Elastic [Contributor License Agreement](https://www.elastic.co/contributor-agreement). +- Include a [changelog][changelog_docs] entry at `.changelog/{pr-number}.txt` with your pull request. +- Include tests that demonstrate the change is working. + +[changelog_docs]: https://github.com/GoogleCloudPlatform/magic-modules/blob/2834761fec3acbf35cacbffe100530f82eada650/.ci/RELEASE_NOTES_GUIDE.md#expected-format + +## Releasing + +To create a new release use the release workflow in GitHub actions. This will create a new draft +release in GitHub releases with a changelog. After the job completes, review the draft and if +everything is correct, publish the release. When the release is published GitHub will create the +git tag. 
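+
+For illustration (a hedged example: the accepted release-note types and exact format are
+defined by the linked guide, and the PR number here is hypothetical), an entry at
+`.changelog/1234.txt` would contain a fenced block whose info string names the change
+type:
+
+    ```release-note:bug
+    Fixed reporting of the OS version on an example platform.
+    ```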
diff --git a/vendor/github.com/elastic/go-sysinfo/Makefile b/vendor/github.com/elastic/go-sysinfo/Makefile index 9d3ea5c7..9d4e6b17 100644 --- a/vendor/github.com/elastic/go-sysinfo/Makefile +++ b/vendor/github.com/elastic/go-sysinfo/Makefile @@ -1,29 +1,14 @@ -GOPATH?=~/go - .phony: update update: fmt lic imports .PHONY: lic -lic: $(GOPATH)/bin/go-licenser - go-licenser +lic: + go run github.com/elastic/go-licenser@latest .PHONY: fmt -fmt: $(GOPATH)/bin/gofumpt - gofumpt -w -l ./ +fmt: + go run mvdan.cc/gofumpt@latest -w -l ./ .PHONY: imports -imports: $(GOPATH)/bin/goimports - goimports -l -local github.com/elastic/go-sysinfo ./ - -$(GOPATH)/bin/go-licenser: - @echo "go-licenser missing, installing" - go install github.com/elastic/go-licenser@latest - -$(GOPATH)/bin/gofumpt: - @echo "gofumpt missing, installing" - #Ugly boilerplate for go mod installs - cd $(mktemp -d); go mod init tmp; go get mvdan.cc/gofumpt - -$(GOPATH)/bin/goimports: - @echo "goimports missing, installing" - go install golang.org/x/tools/cmd/goimports@latest +imports: + go run golang.org/x/tools/cmd/goimports@latest -l -local github.com/elastic/go-sysinfo ./ diff --git a/vendor/github.com/elastic/go-sysinfo/README.md b/vendor/github.com/elastic/go-sysinfo/README.md index f7e7c0b1..c0f35aa9 100644 --- a/vendor/github.com/elastic/go-sysinfo/README.md +++ b/vendor/github.com/elastic/go-sysinfo/README.md @@ -1,6 +1,6 @@ # go-sysinfo -[![Build Status](https://beats-ci.elastic.co/job/Library/job/go-sysinfo-mbp/job/main/badge/icon)](https://beats-ci.elastic.co/job/Library/job/go-sysinfo-mbp/job/main/) +[![go](https://github.com/elastic/go-sysinfo/actions/workflows/go.yml/badge.svg)](https://github.com/elastic/go-sysinfo/actions/workflows/go.yml) [![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] [godocs]: http://godoc.org/github.com/elastic/go-sysinfo @@ -35,6 +35,7 @@ that are implemented. | `Info()` | x | x | x | x | | `Memory()` | x | x | x | x | | `CPUTimer` | x | x | x | x | +| `LoadAverage` | x | x | | | | `VMStat` | | x | | | | `NetworkCounters`| | x | | | @@ -59,8 +60,8 @@ This table lists the OS and architectures for which a "provider" is implemented. | GOOS / GOARCH | Requires CGO | Tested | |----------------|--------------|--------| | aix/ppc64 | x | | -| darwin/amd64 | x | x | -| darwin/arm64 | x | x | +| darwin/amd64 | optional * | x | +| darwin/arm64 | optional * | x | | linux/386 | | | | linux/amd64 | | x | | linux/arm | | | @@ -76,3 +77,5 @@ This table lists the OS and architectures for which a "provider" is implemented. | windows/amd64 | | x | | windows/arm64 | | | | windows/arm | | | + +* On darwin (macOS) host information like machineid and process information like memory, cpu, user and starttime require cgo. 
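+
+As a quick orientation, a minimal usage sketch (not taken from this README; it assumes
+only the `Host()` entry point plus the `Info()` and `Memory()` features listed in the
+tables above):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	sysinfo "github.com/elastic/go-sysinfo"
+)
+
+func main() {
+	host, err := sysinfo.Host()
+	if err != nil {
+		panic(err)
+	}
+
+	// Info() returns static host facts such as hostname, OS, and architecture.
+	fmt.Println("hostname:", host.Info().Hostname)
+
+	// Memory() is one of the per-OS features from the table above.
+	mem, err := host.Memory()
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println("total memory bytes:", mem.Total)
+}
+```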
diff --git a/vendor/github.com/elastic/go-sysinfo/internal/registry/registry.go b/vendor/github.com/elastic/go-sysinfo/internal/registry/registry.go index 21ba2ed3..071e2d63 100644 --- a/vendor/github.com/elastic/go-sysinfo/internal/registry/registry.go +++ b/vendor/github.com/elastic/go-sysinfo/internal/registry/registry.go @@ -18,7 +18,7 @@ package registry import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/go-sysinfo/types" ) @@ -41,14 +41,14 @@ type ProcessProvider interface { func Register(provider interface{}) { if h, ok := provider.(HostProvider); ok { if hostProvider != nil { - panic(errors.Errorf("HostProvider already registered: %v", hostProvider)) + panic(fmt.Sprintf("HostProvider already registered: %v", hostProvider)) } hostProvider = h } if p, ok := provider.(ProcessProvider); ok { if processProvider != nil { - panic(errors.Errorf("ProcessProvider already registered: %v", processProvider)) + panic(fmt.Sprintf("ProcessProvider already registered: %v", processProvider)) } processProvider = p } diff --git a/vendor/github.com/elastic/go-sysinfo/providers/aix/boottime_aix_ppc64.go b/vendor/github.com/elastic/go-sysinfo/providers/aix/boottime_aix_ppc64.go index beaaa553..e158f467 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/aix/boottime_aix_ppc64.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/aix/boottime_aix_ppc64.go @@ -16,16 +16,14 @@ // under the License. //go:build aix && ppc64 -// +build aix,ppc64 package aix import ( "encoding/binary" + "fmt" "os" "time" - - "github.com/pkg/errors" ) // utmp can't be used by "encoding/binary" if generated by cgo, @@ -60,7 +58,7 @@ func bootTime(filename string) (time.Time, error) { // Get boot time from /etc/utmp file, err := os.Open(filename) if err != nil { - return time.Time{}, errors.Wrap(err, "failed to get host uptime: cannot open /etc/utmp") + return time.Time{}, fmt.Errorf("failed to get host uptime: cannot open /etc/utmp: %w", err) } defer file.Close() @@ -76,5 +74,5 @@ func bootTime(filename string) (time.Time, error) { } } - return time.Time{}, errors.Wrap(err, "failed to get host uptime: no utmp record") + return time.Time{}, fmt.Errorf("failed to get host uptime: no utmp record: %w", err) } diff --git a/vendor/github.com/elastic/go-sysinfo/providers/aix/host_aix_ppc64.go b/vendor/github.com/elastic/go-sysinfo/providers/aix/host_aix_ppc64.go index 36257cb9..ea62d205 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/aix/host_aix_ppc64.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/aix/host_aix_ppc64.go @@ -16,7 +16,6 @@ // under the License. 
//go:build aix && ppc64 && cgo
-// +build aix,ppc64,cgo
 
 package aix
 
@@ -31,11 +30,13 @@ import "C"
 
 import (
+	"errors"
+	"fmt"
 	"os"
+	"strings"
 	"time"
 
 	"github.com/joeshaw/multierror"
-	"github.com/pkg/errors"
 
 	"github.com/elastic/go-sysinfo/internal/registry"
 	"github.com/elastic/go-sysinfo/providers/shared"
@@ -80,7 +81,7 @@
 	cpudata := C.perfstat_cpu_total_t{}
 
 	if _, err := C.perfstat_cpu_total(nil, &cpudata, C.sizeof_perfstat_cpu_total_t, 1); err != nil {
-		return types.CPUTimes{}, errors.Wrap(err, "error while callin perfstat_cpu_total")
+		return types.CPUTimes{}, fmt.Errorf("error while calling perfstat_cpu_total: %w", err)
 	}
 
 	return types.CPUTimes{
@@ -100,7 +101,7 @@
 	meminfo := C.perfstat_memory_total_t{}
 	_, err := C.perfstat_memory_total(nil, &meminfo, C.sizeof_perfstat_memory_total_t, 1)
 	if err != nil {
-		return nil, errors.Wrap(err, "perfstat_memory_total failed")
+		return nil, fmt.Errorf("perfstat_memory_total failed: %w", err)
 	}
 
 	mem.Total = uint64(meminfo.real_total) * pagesize
@@ -114,9 +115,23 @@
 	mem.VirtualFree = mem.Free + uint64(meminfo.pgsp_free)*pagesize
 	mem.VirtualUsed = mem.VirtualTotal - mem.VirtualFree
 
+	mem.Metrics = map[string]uint64{
+		"bytes_coalesced":         uint64(meminfo.bytes_coalesced),
+		"bytes_coalesced_mempool": uint64(meminfo.bytes_coalesced_mempool),
+		"real_pinned":             uint64(meminfo.real_pinned) * pagesize,
+		"pgins":                   uint64(meminfo.pgins),
+		"pgouts":                  uint64(meminfo.pgouts),
+		"pgsp_free":               uint64(meminfo.pgsp_free) * pagesize,
+		"pgsp_rsvd":               uint64(meminfo.pgsp_rsvd) * pagesize,
+	}
+
 	return &mem, nil
 }
 
+func (h *host) FQDN() (string, error) {
+	return shared.FQDN()
+}
+
 func newHost() (*host, error) {
 	h := &host{}
 	r := &reader{}
@@ -137,7 +152,7 @@ type reader struct {
 
 func (r *reader) addErr(err error) bool {
 	if err != nil {
-		if errors.Cause(err) != types.ErrNotImplemented {
+		if !errors.Is(err, types.ErrNotImplemented) {
 			r.errs = append(r.errs, err)
 		}
 		return true
@@ -173,7 +188,7 @@
 	if r.addErr(err) {
 		return
 	}
-	h.info.Hostname = v
+	h.info.Hostname = strings.ToLower(v)
 }
 
 func (r *reader) network(h *host) {
diff --git a/vendor/github.com/elastic/go-sysinfo/providers/aix/kernel_aix_ppc64.go b/vendor/github.com/elastic/go-sysinfo/providers/aix/kernel_aix_ppc64.go
index 23bc987e..dc3af830 100644
--- a/vendor/github.com/elastic/go-sysinfo/providers/aix/kernel_aix_ppc64.go
+++ b/vendor/github.com/elastic/go-sysinfo/providers/aix/kernel_aix_ppc64.go
@@ -16,7 +16,6 @@
 // under the License.
//go:build aix && ppc64 && cgo -// +build aix,ppc64,cgo package aix @@ -26,9 +25,8 @@ package aix import "C" import ( + "fmt" "strconv" - - "github.com/pkg/errors" ) var oslevel string @@ -36,17 +34,17 @@ var oslevel string func getKernelVersion() (int, int, error) { name := C.struct_utsname{} if _, err := C.uname(&name); err != nil { - return 0, 0, errors.Wrap(err, "kernel version: uname") + return 0, 0, fmt.Errorf("kernel version: uname: %w", err) } version, err := strconv.Atoi(C.GoString(&name.version[0])) if err != nil { - return 0, 0, errors.Wrap(err, "parsing kernel version") + return 0, 0, fmt.Errorf("parsing kernel version: %w", err) } release, err := strconv.Atoi(C.GoString(&name.release[0])) if err != nil { - return 0, 0, errors.Wrap(err, "parsing kernel release") + return 0, 0, fmt.Errorf("parsing kernel release: %w", err) } return version, release, nil } diff --git a/vendor/github.com/elastic/go-sysinfo/providers/aix/machineid_aix_ppc64.go b/vendor/github.com/elastic/go-sysinfo/providers/aix/machineid_aix_ppc64.go index 1aab374f..945ce348 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/aix/machineid_aix_ppc64.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/aix/machineid_aix_ppc64.go @@ -16,7 +16,6 @@ // under the License. //go:build aix && ppc64 && cgo -// +build aix,ppc64,cgo package aix @@ -25,15 +24,13 @@ package aix */ import "C" -import ( - "github.com/pkg/errors" -) +import "fmt" // MachineID returns the id of the machine func MachineID() (string, error) { name := C.struct_utsname{} if _, err := C.uname(&name); err != nil { - return "", errors.Wrap(err, "machine id") + return "", fmt.Errorf("machine id: %w", err) } return C.GoString(&name.machine[0]), nil } diff --git a/vendor/github.com/elastic/go-sysinfo/providers/aix/os_aix_ppc64.go b/vendor/github.com/elastic/go-sysinfo/providers/aix/os_aix_ppc64.go index d10476da..d1220db9 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/aix/os_aix_ppc64.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/aix/os_aix_ppc64.go @@ -16,17 +16,15 @@ // under the License. //go:build aix && ppc64 && cgo -// +build aix,ppc64,cgo package aix import ( + "fmt" "io/ioutil" "strconv" "strings" - "github.com/pkg/errors" - "github.com/elastic/go-sysinfo/types" ) @@ -44,7 +42,7 @@ func getOSInfo() (*types.OSInfo, error) { // Retrieve build version from "/proc/version". procVersion, err := ioutil.ReadFile("/proc/version") if err != nil { - return nil, errors.Wrap(err, "failed to get OS info: cannot open /proc/version") + return nil, fmt.Errorf("failed to get OS info: cannot open /proc/version: %w", err) } build := strings.SplitN(string(procVersion), "\n", 4)[2] diff --git a/vendor/github.com/elastic/go-sysinfo/providers/aix/process_aix_ppc64.go b/vendor/github.com/elastic/go-sysinfo/providers/aix/process_aix_ppc64.go index e586decf..cfa35f2a 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/aix/process_aix_ppc64.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/aix/process_aix_ppc64.go @@ -16,7 +16,6 @@ // under the License. //go:build aix && ppc64 && cgo -// +build aix,ppc64,cgo package aix @@ -33,6 +32,8 @@ import "C" import ( "bytes" "encoding/binary" + "errors" + "fmt" "io" "io/ioutil" "os" @@ -43,8 +44,6 @@ import ( "time" "unsafe" - "github.com/pkg/errors" - "github.com/elastic/go-sysinfo/types" ) @@ -54,7 +53,7 @@ func (aixSystem) Processes() ([]types.Process, error) { // getprocs which will also retrieve kernel threads. 
files, err := ioutil.ReadDir("/proc") if err != nil { - return nil, errors.Wrap(err, "error while reading /proc") + return nil, fmt.Errorf("error while reading /proc: %w", err) } processes := make([]types.Process, 0, len(files)) @@ -121,7 +120,7 @@ func (p *process) Info() (types.ProcessInfo, error) { err = syscall.ESRCH } if err != nil { - return types.ProcessInfo{}, errors.Wrap(err, "error while calling getprocs") + return types.ProcessInfo{}, fmt.Errorf("error while calling getprocs: %w", err) } p.info.PPID = int(info.pi_ppid) @@ -133,7 +132,7 @@ func (p *process) Info() (types.ProcessInfo, error) { buf := make([]byte, 8192) var args []string if _, err := C.getargs(unsafe.Pointer(&info), C.sizeof_struct_procsinfo64, (*C.char)(&buf[0]), 8192); err != nil { - return types.ProcessInfo{}, errors.Wrap(err, "error while calling getargs") + return types.ProcessInfo{}, fmt.Errorf("error while calling getargs: %w", err) } bbuf := bytes.NewBuffer(buf) @@ -143,7 +142,7 @@ func (p *process) Info() (types.ProcessInfo, error) { break } if err != nil { - return types.ProcessInfo{}, errors.Wrap(err, "error while reading arguments") + return types.ProcessInfo{}, fmt.Errorf("error while reading arguments: %w", err) } args = append(args, string(chop(arg))) @@ -183,7 +182,7 @@ func (p *process) Info() (types.ProcessInfo, error) { cwd, err := os.Readlink("/proc/" + strconv.Itoa(p.pid) + "/cwd") if err != nil { if !os.IsNotExist(err) { - return types.ProcessInfo{}, errors.Wrapf(err, "error while reading /proc/%s/cwd", strconv.Itoa(p.pid)) + return types.ProcessInfo{}, fmt.Errorf("error while reading /proc/%s/cwd: %w", strconv.Itoa(p.pid), err) } } @@ -205,7 +204,7 @@ func (p *process) Environment() (map[string]string, error) { info.pi_pid = C.pid_t(p.pid) if _, err := C.getevars(unsafe.Pointer(&info), C.sizeof_struct_procsinfo64, (*C.char)(&buf[0]), 8192); err != nil { - return nil, errors.Wrap(err, "error while calling getevars") + return nil, fmt.Errorf("error while calling getevars: %w", err) } bbuf := bytes.NewBuffer(buf) @@ -218,12 +217,12 @@ func (p *process) Environment() (map[string]string, error) { break } if err != nil { - return nil, errors.Wrap(err, "error while calling getevars") + return nil, fmt.Errorf("error while calling getevars: %w", err) } pair := bytes.SplitN(chop(line), delim, 2) if len(pair) != 2 { - return nil, errors.Wrap(err, "error reading process environment") + return nil, errors.New("error reading process environment") } p.env[string(pair[0])] = string(pair[1]) } @@ -260,7 +259,7 @@ func (p *process) Memory() (types.MemoryInfo, error) { err = syscall.ESRCH } if err != nil { - return types.MemoryInfo{}, errors.Wrap(err, "error while calling getprocs") + return types.MemoryInfo{}, fmt.Errorf("error while calling getprocs: %w", err) } mem.Resident = uint64(info.pi_drss+info.pi_trss) * pagesize @@ -286,12 +285,12 @@ func (p *process) decodeProcfsFile(name string, data interface{}) error { file, err := os.Open(fileName) if err != nil { - return errors.Wrapf(err, "error while opening %s", fileName) + return fmt.Errorf("error while opening %s: %w", fileName, err) } defer file.Close() if err := binary.Read(file, binary.BigEndian, data); err != nil { - return errors.Wrapf(err, "error while decoding %s", fileName) + return fmt.Errorf("error while decoding %s: %w", fileName, err) } return nil diff --git a/vendor/github.com/elastic/go-sysinfo/providers/aix/ztypes_aix_ppc64.go b/vendor/github.com/elastic/go-sysinfo/providers/aix/ztypes_aix_ppc64.go index fb60e7dd..0e369bb6 100644 --- 
a/vendor/github.com/elastic/go-sysinfo/providers/aix/ztypes_aix_ppc64.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/aix/ztypes_aix_ppc64.go @@ -19,7 +19,6 @@ // cgo -godefs defs_aix.go //go:build aix && ppc64 -// +build aix,ppc64 package aix diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/arch_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/arch_darwin.go index 7236ce64..8b3ed911 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/darwin/arch_darwin.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/arch_darwin.go @@ -15,23 +15,22 @@ // specific language governing permissions and limitations // under the License. -//go:build (amd64 && cgo) || (arm64 && cgo) -// +build amd64,cgo arm64,cgo +//go:build amd64 || arm64 package darwin import ( - "syscall" + "fmt" - "github.com/pkg/errors" + "golang.org/x/sys/unix" ) const hardwareMIB = "hw.machine" func Architecture() (string, error) { - arch, err := syscall.Sysctl(hardwareMIB) + arch, err := unix.Sysctl(hardwareMIB) if err != nil { - return "", errors.Wrap(err, "failed to get architecture") + return "", fmt.Errorf("failed to get architecture: %w", err) } return arch, nil diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/boottime_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/boottime_darwin.go index cf3a5b3c..1954e2a2 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/darwin/boottime_darwin.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/boottime_darwin.go @@ -15,24 +15,23 @@ // specific language governing permissions and limitations // under the License. -//go:build (amd64 && cgo) || (arm64 && cgo) -// +build amd64,cgo arm64,cgo +//go:build amd64 || arm64 package darwin import ( - "syscall" + "fmt" "time" - "github.com/pkg/errors" + "golang.org/x/sys/unix" ) const kernBoottimeMIB = "kern.boottime" func BootTime() (time.Time, error) { - var tv syscall.Timeval - if err := sysctlByName(kernBoottimeMIB, &tv); err != nil { - return time.Time{}, errors.Wrap(err, "failed to get host uptime") + tv, err := unix.SysctlTimeval(kernBoottimeMIB) + if err != nil { + return time.Time{}, fmt.Errorf("failed to get host uptime: %w", err) } bootTime := time.Unix(int64(tv.Sec), int64(tv.Usec)*int64(time.Microsecond)) diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/host_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/host_darwin.go index 73d2391c..9e369d36 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/darwin/host_darwin.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/host_darwin.go @@ -15,17 +15,18 @@ // specific language governing permissions and limitations // under the License. 
-//go:build (amd64 && cgo) || (arm64 && cgo) -// +build amd64,cgo arm64,cgo +//go:build amd64 || arm64 package darwin import ( + "errors" + "fmt" "os" + "strings" "time" "github.com/joeshaw/multierror" - "github.com/pkg/errors" "github.com/elastic/go-sysinfo/internal/registry" "github.com/elastic/go-sysinfo/providers/shared" @@ -53,7 +54,7 @@ func (h *host) Info() types.HostInfo { func (h *host) CPUTime() (types.CPUTimes, error) { cpu, err := getHostCPULoadInfo() if err != nil { - return types.CPUTimes{}, errors.Wrap(err, "failed to get host CPU usage") + return types.CPUTimes{}, fmt.Errorf("failed to get host CPU usage: %w", err) } ticksPerSecond := time.Duration(getClockTicks()) @@ -70,26 +71,37 @@ func (h *host) Memory() (*types.HostMemoryInfo, error) { var mem types.HostMemoryInfo // Total physical memory. - if err := sysctlByName("hw.memsize", &mem.Total); err != nil { - return nil, errors.Wrap(err, "failed to get total physical memory") + total, err := MemTotal() + if err != nil { + return nil, fmt.Errorf("failed to get total physical memory: %w", err) } + mem.Total = total + // Page size for computing byte totals. pageSizeBytes, err := getPageSize() if err != nil { - return nil, errors.Wrap(err, "failed to get page size") + return nil, fmt.Errorf("failed to get page size: %w", err) + } + + // Swap + swap, err := getSwapUsage() + if err != nil { + return nil, fmt.Errorf("failed to get swap usage: %w", err) } + mem.VirtualTotal = swap.Total + mem.VirtualUsed = swap.Used + mem.VirtualFree = swap.Available + // Virtual Memory Statistics vmStat, err := getHostVMInfo64() - if err != nil { - return nil, errors.Wrap(err, "failed to get virtual memory statistics") + if errors.Is(err, types.ErrNotImplemented) { + return &mem, nil } - // Swap - swap, err := getSwapUsage() if err != nil { - return nil, errors.Wrap(err, "failed to get swap usage") + return nil, fmt.Errorf("failed to get virtual memory statistics: %w", err) } inactiveBytes := uint64(vmStat.Inactive_count) * pageSizeBytes @@ -123,13 +135,29 @@ func (h *host) Memory() (*types.HostMemoryInfo, error) { mem.Used = uint64(vmStat.Internal_page_count+vmStat.Wire_count+vmStat.Compressor_page_count) * pageSizeBytes mem.Free = uint64(vmStat.Free_count) * pageSizeBytes mem.Available = mem.Free + inactiveBytes + purgeableBytes - mem.VirtualTotal = swap.Total - mem.VirtualUsed = swap.Used - mem.VirtualFree = swap.Available return &mem, nil } +func (h *host) FQDN() (string, error) { + return shared.FQDN() +} + +func (h *host) LoadAverage() (*types.LoadAverageInfo, error) { + load, err := getLoadAverage() + if err != nil { + return nil, fmt.Errorf("failed to get loadavg: %w", err) + } + + scale := float64(load.scale) + + return &types.LoadAverageInfo{ + One: float64(load.load[0]) / scale, + Five: float64(load.load[1]) / scale, + Fifteen: float64(load.load[2]) / scale, + }, nil +} + func newHost() (*host, error) { h := &host{} r := &reader{} @@ -150,7 +178,7 @@ type reader struct { func (r *reader) addErr(err error) bool { if err != nil { - if errors.Cause(err) != types.ErrNotImplemented { + if !errors.Is(err, types.ErrNotImplemented) { r.errs = append(r.errs, err) } return true @@ -186,7 +214,7 @@ func (r *reader) hostname(h *host) { if r.addErr(err) { return } - h.info.Hostname = v + h.info.Hostname = strings.ToLower(v) } func (r *reader) network(h *host) { diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/kernel_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/kernel_darwin.go index 7d574161..72462575 100644 
--- a/vendor/github.com/elastic/go-sysinfo/providers/darwin/kernel_darwin.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/kernel_darwin.go @@ -16,14 +16,12 @@ // under the License. //go:build !386 -// +build !386 package darwin import ( + "fmt" "syscall" - - "github.com/pkg/errors" ) const kernelReleaseMIB = "kern.osrelease" @@ -31,7 +29,7 @@ const kernelReleaseMIB = "kern.osrelease" func KernelVersion() (string, error) { version, err := syscall.Sysctl(kernelReleaseMIB) if err != nil { - return "", errors.Wrap(err, "failed to get kernel version") + return "", fmt.Errorf("failed to get kernel version: %w", err) } return version, nil diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/load_average_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/load_average_darwin.go new file mode 100644 index 00000000..34f3a347 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/load_average_darwin.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build amd64 || arm64 + +package darwin + +import ( + "unsafe" + + "golang.org/x/sys/unix" +) + +const loadAverage = "vm.loadavg" + +type loadAvg struct { + load [3]uint32 + scale int +} + +func getLoadAverage() (*loadAvg, error) { + data, err := unix.SysctlRaw(loadAverage) + if err != nil { + return nil, err + } + + load := *(*loadAvg)(unsafe.Pointer((&data[0]))) + + return &load, nil +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/machineid_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/machineid_darwin.go index 90c5becb..4339366a 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/darwin/machineid_darwin.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/machineid_darwin.go @@ -16,7 +16,6 @@ // under the License. 
//go:build (amd64 && cgo) || (arm64 && cgo) -// +build amd64,cgo arm64,cgo package darwin @@ -25,9 +24,8 @@ package darwin import "C" import ( + "fmt" "unsafe" - - "github.com/pkg/errors" ) // MachineID returns the Hardware UUID also accessible via @@ -45,17 +43,17 @@ func getHostUUID() (string, error) { ret, err := C.gethostuuid(&id[0], &wait) if ret != 0 { if err != nil { - return "", errors.Wrapf(err, "gethostuuid failed with %v", ret) + return "", fmt.Errorf("gethostuuid failed with %v: %w", ret, err) } - return "", errors.Errorf("gethostuuid failed with %v", ret) + return "", fmt.Errorf("gethostuuid failed with %v", ret) } var uuidStringC C.uuid_string_t var uuid [unsafe.Sizeof(uuidStringC)]C.char _, err = C.uuid_unparse_upper(&id[0], &uuid[0]) if err != nil { - return "", errors.Wrap(err, "uuid_unparse_upper failed") + return "", fmt.Errorf("uuid_unparse_upper failed: %w", err) } return C.GoString(&uuid[0]), nil diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/machineid_nocgo_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/machineid_nocgo_darwin.go new file mode 100644 index 00000000..a692fdee --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/machineid_nocgo_darwin.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build (amd64 && !cgo) || (arm64 && !cgo) + +package darwin + +import ( + "fmt" + + "github.com/elastic/go-sysinfo/types" +) + +func MachineID() (string, error) { + return "", fmt.Errorf("machineid requires cgo: %w", types.ErrNotImplemented) +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/memory_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/memory_darwin.go index c147b818..73dd7cf8 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/darwin/memory_darwin.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/memory_darwin.go @@ -15,21 +15,22 @@ // specific language governing permissions and limitations // under the License. 
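The machineid_nocgo_darwin.go stub keeps the package compiling without cgo while letting callers detect the missing feature. A hedged sketch of the caller side, again with a local stand-in for types.ErrNotImplemented:

```go
package main

import (
	"errors"
	"fmt"
)

// Stand-in for go-sysinfo's types.ErrNotImplemented.
var errNotImplemented = errors.New("unimplemented")

// machineID mimics the no-cgo stub added in this patch: same signature as
// the cgo version, but it fails with a wrapped sentinel.
func machineID() (string, error) {
	return "", fmt.Errorf("machineid requires cgo: %w", errNotImplemented)
}

func main() {
	id, err := machineID()
	switch {
	case errors.Is(err, errNotImplemented):
		fmt.Println("unique ID unavailable in this build; continuing without it")
	case err != nil:
		panic(err)
	default:
		fmt.Println("machine ID:", id)
	}
}
```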
-//go:build (amd64 && cgo) || (arm64 && cgo) -// +build amd64,cgo arm64,cgo +//go:build amd64 || arm64 package darwin import ( - "github.com/pkg/errors" + "fmt" + + "golang.org/x/sys/unix" ) const hwMemsizeMIB = "hw.memsize" func MemTotal() (uint64, error) { - var size uint64 - if err := sysctlByName(hwMemsizeMIB, &size); err != nil { - return 0, errors.Wrap(err, "failed to get mem total") + size, err := unix.SysctlUint64(hwMemsizeMIB) + if err != nil { + return 0, fmt.Errorf("failed to get mem total: %w", err) } return size, nil diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/os.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/os.go index 686a0c7a..0dbe8473 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/darwin/os.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/os.go @@ -18,11 +18,11 @@ package darwin import ( + "fmt" "io/ioutil" "strconv" "strings" - "github.com/pkg/errors" "howett.net/plist" "github.com/elastic/go-sysinfo/types" @@ -39,7 +39,7 @@ const ( func OperatingSystem() (*types.OSInfo, error) { data, err := ioutil.ReadFile(systemVersionPlist) if err != nil { - return nil, errors.Wrap(err, "failed to read plist file") + return nil, fmt.Errorf("failed to read plist file: %w", err) } return getOSInfo(data) @@ -48,22 +48,22 @@ func OperatingSystem() (*types.OSInfo, error) { func getOSInfo(data []byte) (*types.OSInfo, error) { attrs := map[string]string{} if _, err := plist.Unmarshal(data, &attrs); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal plist data") + return nil, fmt.Errorf("failed to unmarshal plist data: %w", err) } productName, found := attrs[plistProductName] if !found { - return nil, errors.Errorf("plist key %v not found", plistProductName) + return nil, fmt.Errorf("plist key %v not found", plistProductName) } version, found := attrs[plistProductVersion] if !found { - return nil, errors.Errorf("plist key %v not found", plistProductVersion) + return nil, fmt.Errorf("plist key %v not found", plistProductVersion) } build, found := attrs[plistProductBuildVersion] if !found { - return nil, errors.Errorf("plist key %v not found", plistProductBuildVersion) + return nil, fmt.Errorf("plist key %v not found", plistProductBuildVersion) } var major, minor, patch int diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/process_cgo_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/process_cgo_darwin.go new file mode 100644 index 00000000..6a72f9fb --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/process_cgo_darwin.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
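The darwin os.go path feeds SystemVersion.plist into plist.Unmarshal. A self-contained sketch with an inline, trimmed plist standing in for the real file at /System/Library/CoreServices/SystemVersion.plist:

```go
package main

import (
	"fmt"

	"howett.net/plist"
)

// Trimmed stand-in for SystemVersion.plist.
const sample = `<?xml version="1.0" encoding="UTF-8"?>
<plist version="1.0">
<dict>
	<key>ProductName</key><string>macOS</string>
	<key>ProductVersion</key><string>13.2.1</string>
	<key>ProductBuildVersion</key><string>22D68</string>
</dict>
</plist>`

func main() {
	attrs := map[string]string{}
	// plist.Unmarshal also reports the detected format; only err matters here.
	if _, err := plist.Unmarshal([]byte(sample), &attrs); err != nil {
		panic(fmt.Errorf("failed to unmarshal plist data: %w", err))
	}
	fmt.Println(attrs["ProductName"], attrs["ProductVersion"], attrs["ProductBuildVersion"])
}
```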
+ +//go:build (amd64 && cgo) || (arm64 && cgo) + +package darwin + +// #include +// #include +import "C" + +import ( + "errors" + "unsafe" +) + +//go:generate sh -c "go tool cgo -godefs defs_darwin.go > ztypes_darwin.go" + +func getProcTaskAllInfo(pid int, info *procTaskAllInfo) error { + size := C.int(unsafe.Sizeof(*info)) + ptr := unsafe.Pointer(info) + + n, err := C.proc_pidinfo(C.int(pid), C.PROC_PIDTASKALLINFO, 0, ptr, size) + if err != nil { + return err + } else if n != size { + return errors.New("failed to read process info with proc_pidinfo") + } + + return nil +} + +func getProcVnodePathInfo(pid int, info *procVnodePathInfo) error { + size := C.int(unsafe.Sizeof(*info)) + ptr := unsafe.Pointer(info) + + n := C.proc_pidinfo(C.int(pid), C.PROC_PIDVNODEPATHINFO, 0, ptr, size) + if n != size { + return errors.New("failed to read vnode info with proc_pidinfo") + } + + return nil +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/process_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/process_darwin.go index 025e36aa..7c73b69b 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/darwin/process_darwin.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/process_darwin.go @@ -15,63 +15,46 @@ // specific language governing permissions and limitations // under the License. -//go:build (amd64 && cgo) || (arm64 && cgo) -// +build amd64,cgo arm64,cgo +//go:build amd64 || arm64 package darwin -// #cgo LDFLAGS:-lproc -// #include -// #include -import "C" - import ( "bytes" "encoding/binary" + "errors" + "fmt" "os" "strconv" + "strings" + "syscall" "time" - "unsafe" - "github.com/pkg/errors" + "golang.org/x/sys/unix" "github.com/elastic/go-sysinfo/types" ) -//go:generate sh -c "go tool cgo -godefs defs_darwin.go > ztypes_darwin.go" +var errInvalidProcargs2Data = errors.New("invalid kern.procargs2 data") func (s darwinSystem) Processes() ([]types.Process, error) { - n, err := C.proc_listallpids(nil, 0) - if err != nil { - return nil, errors.Wrapf(err, "error getting process count from proc_listallpids (n = %v)", n) - } else if n <= 0 { - return nil, errors.Errorf("proc_listallpids returned %v", n) - } - - var pid C.int - bufsize := n * C.int(unsafe.Sizeof(pid)) - buf := make([]byte, bufsize) - n, err = C.proc_listallpids(unsafe.Pointer(&buf[0]), bufsize) + ps, err := unix.SysctlKinfoProcSlice("kern.proc.all") if err != nil { - return nil, errors.Wrapf(err, "error getting processes from proc_listallpids (n = %v)", n) - } else if n <= 0 { - return nil, errors.Errorf("proc_listallpids returned %v", n) + return nil, fmt.Errorf("failed to read process table: %w", err) } - bbuf := bytes.NewBuffer(buf) - processes := make([]types.Process, 0, n) - for i := 0; i < int(n); i++ { - err = binary.Read(bbuf, binary.LittleEndian, &pid) - if err != nil { - return nil, errors.Wrap(err, "error reading binary list of PIDs") - } - + processes := make([]types.Process, 0, len(ps)) + for _, kp := range ps { + pid := kp.Proc.P_pid if pid == 0 { continue } - processes = append(processes, &process{pid: int(pid)}) + processes = append(processes, &process{ + pid: int(pid), + }) } + return processes, nil } @@ -113,12 +96,12 @@ func (p *process) Info() (types.ProcessInfo, error) { } var task procTaskAllInfo - if err := getProcTaskAllInfo(p.pid, &task); err != nil { + if err := getProcTaskAllInfo(p.pid, &task); err != nil && err != types.ErrNotImplemented { return types.ProcessInfo{}, err } var vnode procVnodePathInfo - if err := getProcVnodePathInfo(p.pid, &vnode); err != 
nil { + if err := getProcVnodePathInfo(p.pid, &vnode); err != nil && err != types.ErrNotImplemented { return types.ProcessInfo{}, err } @@ -141,18 +124,23 @@ func (p *process) Info() (types.ProcessInfo, error) { } func (p *process) User() (types.UserInfo, error) { - var task procTaskAllInfo - if err := getProcTaskAllInfo(p.pid, &task); err != nil { + kproc, err := unix.SysctlKinfoProc("kern.proc.pid", p.pid) + if err != nil { return types.UserInfo{}, err } + egid := "" + if len(kproc.Eproc.Ucred.Groups) > 0 { + egid = strconv.Itoa(int(kproc.Eproc.Ucred.Groups[0])) + } + return types.UserInfo{ - UID: strconv.Itoa(int(task.Pbsd.Pbi_ruid)), - EUID: strconv.Itoa(int(task.Pbsd.Pbi_uid)), - SUID: strconv.Itoa(int(task.Pbsd.Pbi_svuid)), - GID: strconv.Itoa(int(task.Pbsd.Pbi_rgid)), - EGID: strconv.Itoa(int(task.Pbsd.Pbi_gid)), - SGID: strconv.Itoa(int(task.Pbsd.Pbi_svgid)), + UID: strconv.Itoa(int(kproc.Eproc.Pcred.P_ruid)), + EUID: strconv.Itoa(int(kproc.Eproc.Ucred.Uid)), + SUID: strconv.Itoa(int(kproc.Eproc.Pcred.P_svuid)), + GID: strconv.Itoa(int(kproc.Eproc.Pcred.P_rgid)), + SGID: strconv.Itoa(int(kproc.Eproc.Pcred.P_svgid)), + EGID: egid, }, nil } @@ -186,69 +174,48 @@ func (p *process) Memory() (types.MemoryInfo, error) { }, nil } -func getProcTaskAllInfo(pid int, info *procTaskAllInfo) error { - size := C.int(unsafe.Sizeof(*info)) - ptr := unsafe.Pointer(info) - - n, err := C.proc_pidinfo(C.int(pid), C.PROC_PIDTASKALLINFO, 0, ptr, size) - if err != nil { - return err - } else if n != size { - return errors.New("failed to read process info with proc_pidinfo") - } - - return nil -} - -func getProcVnodePathInfo(pid int, info *procVnodePathInfo) error { - size := C.int(unsafe.Sizeof(*info)) - ptr := unsafe.Pointer(info) - - n := C.proc_pidinfo(C.int(pid), C.PROC_PIDVNODEPATHINFO, 0, ptr, size) - if n != size { - return errors.New("failed to read vnode info with proc_pidinfo") - } - - return nil -} - -var nullTerminator = []byte{0} - // wrapper around sysctl KERN_PROCARGS2 // callbacks params are optional, // up to the caller as to which pieces of data they want func kern_procargs(pid int, p *process) error { - mib := []C.int{C.CTL_KERN, C.KERN_PROCARGS2, C.int(pid)} - var data []byte - if err := sysctl(mib, &data); err != nil { - return nil + data, err := unix.SysctlRaw("kern.procargs2", pid) + if err != nil { + if errors.Is(err, syscall.EINVAL) { + // sysctl returns "invalid argument" for both "no such process" + // and "operation not permitted" errors. + return fmt.Errorf("no such process or operation not permitted: %w", err) + } + return err } - buf := bytes.NewBuffer(data) + return parseKernProcargs2(data, p) +} + +func parseKernProcargs2(data []byte, p *process) error { // argc - var argc int32 - if err := binary.Read(buf, binary.LittleEndian, &argc); err != nil { - return err + if len(data) < 4 { + return errInvalidProcargs2Data } + argc := binary.LittleEndian.Uint32(data) + data = data[4:] // exe - lines := bytes.Split(buf.Bytes(), nullTerminator) - p.exe = string(lines[0]) + lines := strings.Split(string(data), "\x00") + p.exe = lines[0] lines = lines[1:] - // skip nulls + // Skip nulls that may be appended after the exe. 
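Processes and User now go through typed sysctl wrappers instead of cgo. A sketch of both calls; the field names (Eproc.Pcred for real IDs, Eproc.Ucred for effective ones) are the same ones the diff itself relies on:

```go
//go:build darwin

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// kern.proc.all returns the whole process table as []unix.KinfoProc,
	// replacing the proc_listallpids cgo round-trip.
	procs, err := unix.SysctlKinfoProcSlice("kern.proc.all")
	if err != nil {
		panic(fmt.Errorf("failed to read process table: %w", err))
	}
	fmt.Println("processes:", len(procs))

	// kern.proc.pid takes a PID as a sysctl argument and returns one entry.
	kp, err := unix.SysctlKinfoProc("kern.proc.pid", os.Getpid())
	if err != nil {
		panic(err)
	}
	fmt.Println("ruid:", kp.Eproc.Pcred.P_ruid, "euid:", kp.Eproc.Ucred.Uid)
	if kp.Eproc.Ucred.Ngroups > 0 {
		// The first Ucred group is the effective GID, as User() assumes.
		fmt.Println("egid:", kp.Eproc.Ucred.Groups[0])
	}
}
```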
for len(lines) > 0 { - if len(lines[0]) == 0 { - lines = lines[1:] - continue + if lines[0] != "" { + break } - break + lines = lines[1:] } - // args - for i := 0; i < int(argc); i++ { - p.args = append(p.args, string(lines[0])) - lines = lines[1:] + // argv + if c := min(argc, uint32(len(lines))); c > 0 { + p.args = lines[:c] + lines = lines[c:] } // env vars @@ -258,13 +225,8 @@ func kern_procargs(pid int, p *process) error { break } - parts := bytes.SplitN(l, []byte{'='}, 2) - key := string(parts[0]) - var value string - if len(parts) == 2 { - value = string(parts[1]) - } - env[key] = value + key, val, _ := strings.Cut(l, "=") + env[key] = val } p.env = env @@ -283,3 +245,10 @@ func int8SliceToString(s []int8) string { } return buf.String() } + +func min(a, b uint32) uint32 { + if a < b { + return a + } + return b +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/process_nocgo_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/process_nocgo_darwin.go new file mode 100644 index 00000000..0ca7a869 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/process_nocgo_darwin.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build (amd64 && !cgo) || (arm64 && !cgo) + +package darwin + +import "github.com/elastic/go-sysinfo/types" + +func getProcTaskAllInfo(pid int, info *procTaskAllInfo) error { + return types.ErrNotImplemented +} + +func getProcVnodePathInfo(pid int, info *procVnodePathInfo) error { + return types.ErrNotImplemented +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/syscall_cgo_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/syscall_cgo_darwin.go new file mode 100644 index 00000000..ce4ee108 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/syscall_cgo_darwin.go @@ -0,0 +1,71 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
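parseKernProcargs2 expects the kern.procargs2 layout: a little-endian argc, the executable path, NUL padding, argv entries, then KEY=VALUE environment strings. A toy round-trip against a hand-built buffer (synthetic data, not a live sysctl):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"strings"
)

func main() {
	// Hand-built kern.procargs2 image: argc, exe, padding, argv, env,
	// all NUL-separated.
	buf := binary.LittleEndian.AppendUint32(nil, 2) // argc = 2
	for _, s := range []string{"/bin/echo", "", "", "echo", "hi", "HOME=/root", "TERM=xterm"} {
		buf = append(buf, s...)
		buf = append(buf, 0)
	}

	argc := int(binary.LittleEndian.Uint32(buf))
	parts := strings.Split(string(buf[4:]), "\x00")

	exe, rest := parts[0], parts[1:]
	for len(rest) > 0 && rest[0] == "" { // skip padding after the exe
		rest = rest[1:]
	}

	args := rest[:argc]
	env := map[string]string{}
	for _, l := range rest[argc:] {
		if l == "" {
			break
		}
		k, v, _ := strings.Cut(l, "=")
		env[k] = v
	}

	fmt.Println(exe, args, env) // /bin/echo [echo hi] map[HOME:/root TERM:xterm]
}
```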
+ +//go:build (amd64 && cgo) || (arm64 && cgo) + +package darwin + +/* +#cgo LDFLAGS:-lproc +#include +#include +#include +#include +*/ +import "C" + +import ( + "fmt" + "unsafe" +) + +func getHostCPULoadInfo() (*cpuUsage, error) { + var count C.mach_msg_type_number_t = C.HOST_CPU_LOAD_INFO_COUNT + var cpu cpuUsage + status := C.host_statistics(C.host_t(C.mach_host_self()), + C.HOST_CPU_LOAD_INFO, + C.host_info_t(unsafe.Pointer(&cpu)), + &count) + + if status != C.KERN_SUCCESS { + return nil, fmt.Errorf("host_statistics returned status %d", status) + } + + return &cpu, nil +} + +// getClockTicks returns the number of click ticks in one jiffie. +func getClockTicks() int { + return int(C.sysconf(C._SC_CLK_TCK)) +} + +func getHostVMInfo64() (*vmStatistics64Data, error) { + var count C.mach_msg_type_number_t = C.HOST_VM_INFO64_COUNT + + var vmStat vmStatistics64Data + status := C.host_statistics64( + C.host_t(C.mach_host_self()), + C.HOST_VM_INFO64, + C.host_info_t(unsafe.Pointer(&vmStat)), + &count) + + if status != C.KERN_SUCCESS { + return nil, fmt.Errorf("host_statistics64 returned status %d", status) + } + + return &vmStat, nil +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/syscall_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/syscall_darwin.go index 52b1dab6..fe14050a 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/darwin/syscall_darwin.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/syscall_darwin.go @@ -15,157 +15,18 @@ // specific language governing permissions and limitations // under the License. -//go:build (amd64 && cgo) || (arm64 && cgo) -// +build amd64,cgo arm64,cgo +//go:build amd64 || arm64 package darwin -/* -#cgo LDFLAGS:-lproc -#include -#include -#include -#include -*/ -import "C" - import ( "bytes" "encoding/binary" "fmt" - "sync" - "syscall" - "unsafe" - "github.com/pkg/errors" + "golang.org/x/sys/unix" ) -// Single-word zero for use when we need a valid pointer to 0 bytes. -// See mksyscall.pl. -var _zero uintptr - -// Buffer Pool - -var bufferPool = sync.Pool{ - New: func() interface{} { - return &poolMem{ - buf: make([]byte, argMax), - } - }, -} - -type poolMem struct { - buf []byte - pool *sync.Pool -} - -func getPoolMem() *poolMem { - pm := bufferPool.Get().(*poolMem) - pm.buf = pm.buf[0:cap(pm.buf)] - pm.pool = &bufferPool - return pm -} - -func (m *poolMem) Release() { m.pool.Put(m) } - -// Common errors. - -// Do the interface allocations only once for common -// Errno values. -var ( - errEAGAIN error = syscall.EAGAIN - errEINVAL error = syscall.EINVAL - errENOENT error = syscall.ENOENT -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return nil - case syscall.EAGAIN: - return errEAGAIN - case syscall.EINVAL: - return errEINVAL - case syscall.ENOENT: - return errENOENT - } - return e -} - -func _sysctl(mib []C.int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// Translate "kern.hostname" to []_C_int{0,1,2,3}. 
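getClockTicks wraps sysconf(_SC_CLK_TCK) and feeds the tick-to-duration conversion in CPUTime. A sketch of that arithmetic, assuming the typical value of 100; note the no-cgo stub returns 0, which is safe only because CPUTime already failed on the ErrNotImplemented from getHostCPULoadInfo:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// sysconf(_SC_CLK_TCK); 100 is typical on darwin, assumed here.
	ticksPerSecond := time.Duration(100)

	// e.g. cpuUsage.User as returned by host_statistics.
	userTicks := uint32(12345)

	user := time.Duration(userTicks) * time.Second / ticksPerSecond
	fmt.Println(user) // 2m3.45s
}
```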
-func nametomib(name string) (mib []C.int, err error) { - const siz = unsafe.Sizeof(mib[0]) - - // NOTE(rsc): It seems strange to set the buffer to have - // size CTL_MAXNAME+2 but use only CTL_MAXNAME - // as the size. I don't know why the +2 is here, but the - // kernel uses +2 for its own implementation of this function. - // I am scared that if we don't include the +2 here, the kernel - // will silently write 2 words farther than we specify - // and we'll get memory corruption. - var buf [C.CTL_MAXNAME + 2]C.int - n := uintptr(C.CTL_MAXNAME) * siz - - p := (*byte)(unsafe.Pointer(&buf[0])) - bytes, err := syscall.ByteSliceFromString(name) - if err != nil { - return nil, err - } - - // Magic sysctl: "setting" 0.3 to a string name - // lets you read back the array of integers form. - if err = _sysctl([]C.int{0, 3}, p, &n, &bytes[0], uintptr(len(name))); err != nil { - return nil, err - } - return buf[0 : n/siz], nil -} - -func sysctl(mib []C.int, value interface{}) error { - mem := getPoolMem() - defer mem.Release() - - size := uintptr(len(mem.buf)) - if err := _sysctl(mib, &mem.buf[0], &size, nil, 0); err != nil { - return err - } - data := mem.buf[0:size] - - switch v := value.(type) { - case *[]byte: - out := make([]byte, len(data)) - copy(out, data) - *v = out - return nil - default: - return binary.Read(bytes.NewReader(data), binary.LittleEndian, v) - } -} - -func sysctlByName(name string, out interface{}) error { - mib, err := nametomib(name) - if err != nil { - return err - } - - return sysctl(mib, out) -} - type cpuUsage struct { User uint32 System uint32 @@ -173,53 +34,13 @@ type cpuUsage struct { Nice uint32 } -func getHostCPULoadInfo() (*cpuUsage, error) { - var count C.mach_msg_type_number_t = C.HOST_CPU_LOAD_INFO_COUNT - var cpu cpuUsage - status := C.host_statistics(C.host_t(C.mach_host_self()), - C.HOST_CPU_LOAD_INFO, - C.host_info_t(unsafe.Pointer(&cpu)), - &count) - - if status != C.KERN_SUCCESS { - return nil, errors.Errorf("host_statistics returned status %d", status) - } - - return &cpu, nil -} - -// getClockTicks returns the number of click ticks in one jiffie. -func getClockTicks() int { - return int(C.sysconf(C._SC_CLK_TCK)) -} - -func getHostVMInfo64() (*vmStatistics64Data, error) { - var count C.mach_msg_type_number_t = C.HOST_VM_INFO64_COUNT - - var vmStat vmStatistics64Data - status := C.host_statistics64( - C.host_t(C.mach_host_self()), - C.HOST_VM_INFO64, - C.host_info_t(unsafe.Pointer(&vmStat)), - &count) - - if status != C.KERN_SUCCESS { - return nil, fmt.Errorf("host_statistics64 returned status %d", status) - } - - return &vmStat, nil -} - func getPageSize() (uint64, error) { - var pageSize vmSize - status := C.host_page_size( - C.host_t(C.mach_host_self()), - (*C.vm_size_t)(unsafe.Pointer(&pageSize))) - if status != C.KERN_SUCCESS { - return 0, errors.Errorf("host_page_size returned status %d", status) + i, err := unix.SysctlUint32("vm.pagesize") + if err != nil { + return 0, fmt.Errorf("vm.pagesize returned %w", err) } - return uint64(pageSize), nil + return uint64(i), nil } // From sysctl.h - xsw_usage. 
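The deleted nametomib/_sysctl plumbing is covered by the typed helpers in golang.org/x/sys/unix, with SysctlRaw plus binary.Read for struct-shaped values. A sketch; the three-uint64 prefix models only the leading xsw_usage counters, not the full vendored struct:

```go
//go:build darwin

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Each typed helper replaces a case the hand-rolled _sysctl/nametomib
	// code handled manually:
	host, _ := unix.Sysctl("kern.hostname")     // string value
	page, _ := unix.SysctlUint32("vm.pagesize") // 32-bit integer
	mem, _ := unix.SysctlUint64("hw.memsize")   // 64-bit integer
	fmt.Println(host, page, mem)

	// Struct-shaped values come back as raw bytes; binary.Read decodes
	// the leading fields and ignores any trailing bytes.
	data, err := unix.SysctlRaw("vm.swapusage")
	if err != nil {
		panic(err)
	}
	var swap struct{ Total, Avail, Used uint64 }
	if err := binary.Read(bytes.NewReader(data), binary.LittleEndian, &swap); err != nil {
		panic(err)
	}
	fmt.Println(swap.Total, swap.Avail, swap.Used)
}
```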
@@ -234,8 +55,14 @@ const vmSwapUsageMIB = "vm.swapusage" func getSwapUsage() (*swapUsage, error) { var swap swapUsage - if err := sysctlByName(vmSwapUsageMIB, &swap); err != nil { + data, err := unix.SysctlRaw(vmSwapUsageMIB) + if err != nil { return nil, err } + + if err := binary.Read(bytes.NewReader(data), binary.LittleEndian, &swap); err != nil { + return nil, err + } + return &swap, nil } diff --git a/vendor/github.com/elastic/go-sysinfo/providers/darwin/syscall_nocgo_darwin.go b/vendor/github.com/elastic/go-sysinfo/providers/darwin/syscall_nocgo_darwin.go new file mode 100644 index 00000000..6a74d8d8 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/darwin/syscall_nocgo_darwin.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build (amd64 && !cgo) || (arm64 && !cgo) + +package darwin + +import ( + "fmt" + + "github.com/elastic/go-sysinfo/types" +) + +func getHostCPULoadInfo() (*cpuUsage, error) { + return nil, fmt.Errorf("host cpu load requires cgo: %w", types.ErrNotImplemented) +} + +// getClockTicks returns the number of click ticks in one jiffie. 
+func getClockTicks() int { + return 0 +} + +func getHostVMInfo64() (*vmStatistics64Data, error) { + return nil, fmt.Errorf("host vm info requires cgo: %w", types.ErrNotImplemented) +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/arch_linux.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/arch_linux.go index 13a9574c..e1d28936 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/linux/arch_linux.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/arch_linux.go @@ -18,15 +18,14 @@ package linux import ( + "fmt" "syscall" - - "github.com/pkg/errors" ) func Architecture() (string, error) { var uname syscall.Utsname if err := syscall.Uname(&uname); err != nil { - return "", errors.Wrap(err, "architecture") + return "", fmt.Errorf("architecture: %w", err) } data := make([]byte, 0, len(uname.Machine)) diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/boottime_linux.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/boottime_linux.go index e229d54e..58665a7c 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/linux/boottime_linux.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/boottime_linux.go @@ -37,7 +37,7 @@ func bootTime(fs procfs.FS) (time.Time, error) { return bootTimeValue, nil } - stat, err := fs.NewStat() + stat, err := fs.Stat() if err != nil { return time.Time{}, err } diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/capabilities_linux.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/capabilities_linux.go index c13e0402..40bf454d 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/linux/capabilities_linux.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/capabilities_linux.go @@ -26,8 +26,10 @@ import ( // capabilityNames is mapping of capability constant values to names. 
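On the Linux side, Architecture decodes a NUL-padded fixed-size array from syscall.Utsname. A runnable sketch of the same conversion (the element type is int8 or uint8 depending on architecture; byte(c) handles both):

```go
//go:build linux

package main

import (
	"fmt"
	"syscall"
)

func main() {
	var uname syscall.Utsname
	if err := syscall.Uname(&uname); err != nil {
		panic(fmt.Errorf("architecture: %w", err))
	}

	// Copy bytes up to the first NUL, as Architecture does.
	data := make([]byte, 0, len(uname.Machine))
	for _, c := range uname.Machine {
		if c == 0 {
			break
		}
		data = append(data, byte(c))
	}
	fmt.Println(string(data)) // e.g. x86_64
}
```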
// // Generated with: -// curl -s https://raw.githubusercontent.com/torvalds/linux/master/include/uapi/linux/capability.h | \ -// grep -P '^#define CAP_\w+\s+\d+' | perl -pe 's/#define (\w+)\s+(\d+)/\2: "\1",/g' +// +// curl -s https://raw.githubusercontent.com/torvalds/linux/master/include/uapi/linux/capability.h | \ +// grep -P '^#define CAP_\w+\s+\d+' | \ +// perl -pe 's/#define CAP_(\w+)\s+(\d+)/\2: "\L\1",/g' var capabilityNames = map[int]string{ 0: "chown", 1: "dac_override", @@ -67,6 +69,9 @@ var capabilityNames = map[int]string{ 35: "wake_alarm", 36: "block_suspend", 37: "audit_read", + 38: "perfmon", + 39: "bpf", + 40: "checkpoint_restore", } func capabilityName(num int) string { @@ -81,7 +86,7 @@ func capabilityName(num int) string { func readCapabilities(content []byte) (*types.CapabilityInfo, error) { var cap types.CapabilityInfo - err := parseKeyValue(content, ":", func(key, value []byte) error { + err := parseKeyValue(content, ':', func(key, value []byte) error { var err error switch string(key) { case "CapInh": diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/container.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/container.go index fc668659..7eee188e 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/linux/container.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/container.go @@ -20,10 +20,9 @@ package linux import ( "bufio" "bytes" + "fmt" "io/ioutil" "os" - - "github.com/pkg/errors" ) const procOneCgroup = "/proc/1/cgroup" @@ -36,7 +35,7 @@ func IsContainerized() (bool, error) { return false, nil } - return false, errors.Wrap(err, "failed to read process cgroups") + return false, fmt.Errorf("failed to read process cgroups: %w", err) } return isContainerizedCgroup(data) diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/host_linux.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/host_linux.go index 7727e722..cd6c0106 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/linux/host_linux.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/host_linux.go @@ -18,13 +18,15 @@ package linux import ( + "errors" + "fmt" "io/ioutil" "os" "path/filepath" + "strings" "time" "github.com/joeshaw/multierror" - "github.com/pkg/errors" "github.com/prometheus/procfs" "github.com/elastic/go-sysinfo/internal/registry" @@ -71,6 +73,10 @@ func (h *host) Memory() (*types.HostMemoryInfo, error) { return parseMemInfo(content) } +func (h *host) FQDN() (string, error) { + return shared.FQDN() +} + // VMStat reports data from /proc/vmstat on linux. func (h *host) VMStat() (*types.VMStatInfo, error) { content, err := ioutil.ReadFile(h.procFS.path("vmstat")) @@ -81,6 +87,20 @@ func (h *host) VMStat() (*types.VMStatInfo, error) { return parseVMStat(content) } +// LoadAverage reports data from /proc/loadavg on linux. 
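The capability names map translates bit positions from the CapInh/CapPrm/CapEff masks in /proc/<pid>/status. A small decoder sketch with an abbreviated table (the vendored map now runs through bit 40):

```go
package main

import (
	"fmt"
	"strconv"
)

// Abbreviated capabilityNames; the full vendored table covers 0..40,
// including the newly added perfmon, bpf, and checkpoint_restore.
var capabilityNames = map[int]string{
	0: "chown", 1: "dac_override", 38: "perfmon", 39: "bpf", 40: "checkpoint_restore",
}

func capabilityName(num int) string {
	if name, found := capabilityNames[num]; found {
		return name
	}
	return strconv.Itoa(num) // unknown bits keep their number
}

func main() {
	// CapEff is reported as hex in /proc/<pid>/status, e.g. "000001ffffffffff".
	mask, _ := strconv.ParseUint("000001ffffffffff", 16, 64)
	for i := 0; i < 64; i++ {
		if mask&(1<<uint(i)) != 0 {
			fmt.Print(capabilityName(i), " ")
		}
	}
	fmt.Println()
}
```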
+func (h *host) LoadAverage() (*types.LoadAverageInfo, error) { + loadAvg, err := h.procFS.LoadAvg() + if err != nil { + return nil, err + } + + return &types.LoadAverageInfo{ + One: loadAvg.Load1, + Five: loadAvg.Load5, + Fifteen: loadAvg.Load15, + }, nil +} + // NetworkCounters reports data from /proc/net on linux func (h *host) NetworkCounters() (*types.NetworkCountersInfo, error) { snmpRaw, err := ioutil.ReadFile(h.procFS.path("net/snmp")) @@ -105,7 +125,7 @@ func (h *host) NetworkCounters() (*types.NetworkCountersInfo, error) { } func (h *host) CPUTime() (types.CPUTimes, error) { - stat, err := h.procFS.NewStat() + stat, err := h.procFS.Stat() if err != nil { return types.CPUTimes{}, err } @@ -123,9 +143,9 @@ func (h *host) CPUTime() (types.CPUTimes, error) { } func newHost(fs procFS) (*host, error) { - stat, err := fs.NewStat() + stat, err := fs.Stat() if err != nil { - return nil, errors.Wrap(err, "failed to read proc stat") + return nil, fmt.Errorf("failed to read proc stat: %w", err) } h := &host{stat: stat, procFS: fs} @@ -139,6 +159,7 @@ func newHost(fs procFS) (*host, error) { r.os(h) r.time(h) r.uniqueID(h) + return h, r.Err() } @@ -148,7 +169,7 @@ type reader struct { func (r *reader) addErr(err error) bool { if err != nil { - if errors.Cause(err) != types.ErrNotImplemented { + if !errors.Is(err, types.ErrNotImplemented) { r.errs = append(r.errs, err) } return true @@ -192,7 +213,7 @@ func (r *reader) hostname(h *host) { if r.addErr(err) { return } - h.info.Hostname = v + h.info.Hostname = strings.ToLower(v) } func (r *reader) network(h *host) { diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/kernel_linux.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/kernel_linux.go index 1b88e479..1695fb81 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/linux/kernel_linux.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/kernel_linux.go @@ -18,15 +18,14 @@ package linux import ( + "fmt" "syscall" - - "github.com/pkg/errors" ) func KernelVersion() (string, error) { var uname syscall.Utsname if err := syscall.Uname(&uname); err != nil { - return "", errors.Wrap(err, "kernel version") + return "", fmt.Errorf("kernel version: %w", err) } data := make([]byte, 0, len(uname.Release)) diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/machineid.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/machineid.go index 7a6b8a70..adfcd109 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/linux/machineid.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/machineid.go @@ -19,11 +19,10 @@ package linux import ( "bytes" + "fmt" "io/ioutil" "os" - "github.com/pkg/errors" - "github.com/elastic/go-sysinfo/types" ) @@ -44,7 +43,7 @@ func MachineID() (string, error) { } // Return with error on any other error - return "", errors.Wrapf(err, "failed to read %v", file) + return "", fmt.Errorf("failed to read %v: %w", file, err) } // Found it diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/memory_linux.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/memory_linux.go index 758caaba..c0c5ab85 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/linux/memory_linux.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/memory_linux.go @@ -18,7 +18,7 @@ package linux import ( - "github.com/pkg/errors" + "fmt" "github.com/elastic/go-sysinfo/types" ) @@ -29,10 +29,10 @@ func parseMemInfo(content []byte) (*types.HostMemoryInfo, error) { } hasAvailable := false - err := parseKeyValue(content, 
":", func(key, value []byte) error { + err := parseKeyValue(content, ':', func(key, value []byte) error { num, err := parseBytesOrNumber(value) if err != nil { - return errors.Wrapf(err, "failed to parse %v value of %v", string(key), string(value)) + return fmt.Errorf("failed to parse %v value of %v: %w", string(key), string(value), err) } k := string(key) diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/os.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/os.go index b0402009..f5b02bef 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/linux/os.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/os.go @@ -20,7 +20,7 @@ package linux import ( "bufio" "bytes" - "io/ioutil" + "fmt" "os" "path/filepath" "regexp" @@ -28,7 +28,6 @@ import ( "strings" "github.com/joeshaw/multierror" - "github.com/pkg/errors" "github.com/elastic/go-sysinfo/types" ) @@ -37,7 +36,7 @@ const ( osRelease = "/etc/os-release" lsbRelease = "/etc/lsb-release" distribRelease = "/etc/*-release" - versionGrok = `(?P(?P[0-9]+)\.?(?P[0-9]+)?\.?(?P\w+)?)(?: \((?P\w+)\))?` + versionGrok = `(?P(?P[0-9]+)\.?(?P[0-9]+)?\.?(?P\w+)?)(?: \((?P[-\w ]+)\))?` ) var ( @@ -50,7 +49,11 @@ var ( // familyMap contains a mapping of family -> []platforms. var familyMap = map[string][]string{ - "redhat": {"redhat", "fedora", "centos", "scientific", "oraclelinux", "ol", "amzn", "rhel"}, + "arch": {"arch", "antergos", "manjaro"}, + "redhat": { + "redhat", "fedora", "centos", "scientific", "oraclelinux", "ol", + "amzn", "rhel", "almalinux", "openeuler", "rocky", + }, "debian": {"debian", "ubuntu", "raspbian", "linuxmint"}, "suse": {"suse", "sles", "opensuse"}, } @@ -96,14 +99,14 @@ func getOSInfo(baseDir string) (*types.OSInfo, error) { } func getOSRelease(baseDir string) (*types.OSInfo, error) { - lsbRel, _ := ioutil.ReadFile(filepath.Join(baseDir, lsbRelease)) + lsbRel, _ := os.ReadFile(filepath.Join(baseDir, lsbRelease)) - osRel, err := ioutil.ReadFile(filepath.Join(baseDir, osRelease)) + osRel, err := os.ReadFile(filepath.Join(baseDir, osRelease)) if err != nil { return nil, err } if len(osRel) == 0 { - return nil, errors.Errorf("%v is empty", osRelease) + return nil, fmt.Errorf("%v is empty: %w", osRelease, err) } return parseOSRelease(append(lsbRel, osRel...)) @@ -147,16 +150,15 @@ func parseOSRelease(content []byte) (*types.OSInfo, error) { func makeOSInfo(osRelease map[string]string) (*types.OSInfo, error) { os := &types.OSInfo{ Type: "linux", - Platform: osRelease["ID"], - Name: osRelease["NAME"], - Version: osRelease["VERSION"], + Platform: firstOf(osRelease, "ID", "DISTRIB_ID"), + Name: firstOf(osRelease, "NAME", "PRETTY_NAME"), + Version: firstOf(osRelease, "VERSION", "VERSION_ID", "DISTRIB_RELEASE"), Build: osRelease["BUILD_ID"], - Codename: osRelease["VERSION_CODENAME"], + Codename: firstOf(osRelease, "VERSION_CODENAME", "DISTRIB_CODENAME"), } if os.Codename == "" { - // Some OSes uses their own CODENAME keys (e.g UBUNTU_CODENAME) or we - // can get the DISTRIB_CODENAME value from the lsb-release data. + // Some OSes use their own CODENAME keys (e.g UBUNTU_CODENAME). for k, v := range osRelease { if strings.Contains(k, "CODENAME") { os.Codename = v @@ -166,10 +168,19 @@ func makeOSInfo(osRelease map[string]string) (*types.OSInfo, error) { } if os.Platform == "" { - // Fallback to the first word of the NAME field. - parts := strings.SplitN(os.Name, " ", 2) - if len(parts) > 0 { - os.Platform = strings.ToLower(parts[0]) + // Fallback to the first word of the Name field. 
+	os.Platform, _, _ = strings.Cut(os.Name, " ") + } + + os.Family = linuxFamily(os.Platform) + if os.Family == "" { + // ID_LIKE is a space-separated list of OS identifiers that this + // OS is similar to. Use this to figure out the Linux family. + for _, id := range strings.Fields(osRelease["ID_LIKE"]) { + os.Family = linuxFamily(id) + if os.Family != "" { + break + } } } @@ -192,7 +203,6 @@ func makeOSInfo(osRelease map[string]string) (*types.OSInfo, error) { } - os.Family = platformToFamilyMap[strings.ToLower(os.Platform)] return os, nil } @@ -214,22 +224,22 @@ func findDistribRelease(baseDir string) (*types.OSInfo, error) { osInfo, err := getDistribRelease(path) if err != nil { - errs = append(errs, errors.Wrapf(err, "in %s", path)) + errs = append(errs, fmt.Errorf("in %s: %w", path, err)) continue } return osInfo, err } - return nil, errors.Wrap(&multierror.MultiError{Errors: errs}, "no valid /etc/-release file found") + return nil, fmt.Errorf("no valid /etc/-release file found: %w", &multierror.MultiError{Errors: errs}) } func getDistribRelease(file string) (*types.OSInfo, error) { - data, err := ioutil.ReadFile(file) + data, err := os.ReadFile(file) if err != nil { return nil, err } parts := bytes.SplitN(data, []byte("\n"), 2) if len(parts) != 2 { - return nil, errors.Errorf("failed to parse %v", file) + return nil, fmt.Errorf("failed to parse %v", file) } // Use distrib as platform name. @@ -269,6 +279,40 @@ func parseDistribRelease(platform string, content []byte) (*types.OSInfo, error) } } - os.Family = platformToFamilyMap[strings.ToLower(os.Platform)] + os.Family = linuxFamily(os.Platform) return os, nil } + +// firstOf returns the first non-empty value found in the map while +// iterating over keys. +func firstOf(kv map[string]string, keys ...string) string { + for _, key := range keys { + if v := kv[key]; v != "" { + return v + } + } + return "" +} + +// linuxFamily returns the linux distribution family associated to the OS platform. +// If there is no family associated then it returns an empty string. +func linuxFamily(platform string) string { + if platform == "" { + return "" + } + + platform = strings.ToLower(platform) + + // First try a direct lookup. + if family, found := platformToFamilyMap[platform]; found { + return family + } + + // Try prefix matching (e.g. opensuse matches opensuse-tumbleweed). 
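The reworked makeOSInfo leans on versionGrok plus the new linuxFamily lookup (the prefix-matching loop follows below). A sketch of both; the named capture groups (version, major, minor, patch, codename) and the abbreviated platform table are assumptions based on the surrounding parsing code:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// The patch widens the codename class to [-\w ]+ so multi-word codenames
// like "Jammy Jellyfish" match.
var versionRe = regexp.MustCompile(
	`(?P<version>(?P<major>[0-9]+)\.?(?P<minor>[0-9]+)?\.?(?P<patch>\w+)?)(?: \((?P<codename>[-\w ]+)\))?`)

// Inverted familyMap entries, abbreviated for the example.
var platformToFamilyMap = map[string]string{
	"opensuse": "suse",
	"ubuntu":   "debian",
	"rocky":    "redhat",
}

func linuxFamily(platform string) string {
	platform = strings.ToLower(platform)
	if family, found := platformToFamilyMap[platform]; found {
		return family // direct lookup first
	}
	for prefix, family := range platformToFamilyMap {
		if strings.HasPrefix(platform, prefix) {
			return family // then prefix matching
		}
	}
	return ""
}

func main() {
	m := versionRe.FindStringSubmatch("22.04.3 (Jammy Jellyfish)")
	for i, name := range versionRe.SubexpNames() {
		if name != "" {
			fmt.Printf("%s=%q ", name, m[i])
		}
	}
	fmt.Println()
	fmt.Println(linuxFamily("opensuse-tumbleweed")) // suse, via the prefix match
}
```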
+ for platformPrefix, family := range platformToFamilyMap { + if strings.HasPrefix(platform, platformPrefix) { + return family + } + } + return "" +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/process_linux.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/process_linux.go index 10cb947e..52bae255 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/linux/process_linux.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/process_linux.go @@ -229,7 +229,7 @@ func (p *process) User() (types.UserInfo, error) { } var user types.UserInfo - err = parseKeyValue(content, ":", func(key, value []byte) error { + err = parseKeyValue(content, ':', func(key, value []byte) error { // See proc(5) for the format of /proc/[pid]/status switch string(key) { case "Uid": diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/procnet.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/procnet.go index a2cc2867..1356c2a8 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/linux/procnet.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/procnet.go @@ -18,13 +18,12 @@ package linux import ( + "errors" "fmt" "reflect" "strconv" "strings" - "github.com/pkg/errors" - "github.com/elastic/go-sysinfo/types" ) @@ -64,13 +63,13 @@ func parseEntry(line1, line2 string) (map[string]uint64, error) { if strings.Contains(value, "-") { signedParsed, err := strconv.ParseInt(value, 10, 64) if err != nil { - return nil, errors.Wrapf(err, "error parsing string to int in line: %#v", valueArr) + return nil, fmt.Errorf("error parsing string to int in line: %#v: %w", valueArr, err) } parsed = uint64(signedParsed) } else { parsed, err = strconv.ParseUint(value, 10, 64) if err != nil { - return nil, errors.Wrapf(err, "error parsing string to int in line: %#v", valueArr) + return nil, fmt.Errorf("error parsing string to int in line: %#v: %w", valueArr, err) } } @@ -97,7 +96,7 @@ func parseNetFile(body string) (map[string]map[string]uint64, error) { } valMap, err := parseEntry(keysSplit[1], valuesSplit[1]) if err != nil { - return nil, errors.Wrap(err, "error parsing lines") + return nil, fmt.Errorf("error parsing lines: %w", err) } fileMetrics[valuesSplit[0]] = valMap } @@ -108,7 +107,7 @@ func parseNetFile(body string) (map[string]map[string]uint64, error) { func getNetSnmpStats(raw []byte) (types.SNMP, error) { snmpData, err := parseNetFile(string(raw)) if err != nil { - return types.SNMP{}, errors.Wrap(err, "error parsing SNMP") + return types.SNMP{}, fmt.Errorf("error parsing SNMP: %w", err) } output := types.SNMP{} fillStruct(&output, snmpData) @@ -120,7 +119,7 @@ func getNetSnmpStats(raw []byte) (types.SNMP, error) { func getNetstatStats(raw []byte) (types.Netstat, error) { netstatData, err := parseNetFile(string(raw)) if err != nil { - return types.Netstat{}, errors.Wrap(err, "error parsing netstat") + return types.Netstat{}, fmt.Errorf("error parsing netstat: %w", err) } output := types.Netstat{} fillStruct(&output, netstatData) diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/seccomp_linux.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/seccomp_linux.go index d04bb3c7..fd38ea45 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/linux/seccomp_linux.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/seccomp_linux.go @@ -47,7 +47,7 @@ func (m SeccompMode) String() string { func readSeccompFields(content []byte) (*types.SeccompInfo, error) { var seccomp types.SeccompInfo - err := parseKeyValue(content, ":", 
func(key, value []byte) error { + err := parseKeyValue(content, ':', func(key, value []byte) error { switch string(key) { case "Seccomp": mode, err := strconv.ParseUint(string(value), 10, 8) diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/util.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/util.go index 0be3f6b0..8d9c27df 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/linux/util.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/util.go @@ -20,26 +20,34 @@ package linux import ( "bufio" "bytes" + "errors" + "fmt" "io/ioutil" "strconv" - - "github.com/pkg/errors" ) -func parseKeyValue(content []byte, separator string, callback func(key, value []byte) error) error { - sc := bufio.NewScanner(bytes.NewReader(content)) - for sc.Scan() { - parts := bytes.SplitN(sc.Bytes(), []byte(separator), 2) - if len(parts) != 2 { +// parseKeyValue parses key/val pairs separated by the provided separator from +// each line in content and invokes the callback. White-space is trimmed from +// val. Empty lines are ignored. All non-empty lines must contain the separator +// otherwise an error is returned. +func parseKeyValue(content []byte, separator byte, callback func(key, value []byte) error) error { + var line []byte + + for len(content) > 0 { + line, content, _ = bytes.Cut(content, []byte{'\n'}) + if len(line) == 0 { continue } - if err := callback(parts[0], bytes.TrimSpace(parts[1])); err != nil { - return err + key, value, ok := bytes.Cut(line, []byte{separator}) + if !ok { + return fmt.Errorf("separator %q not found", separator) } + + callback(key, bytes.TrimSpace(value)) } - return sc.Err() + return nil } func findValue(filename, separator, key string) (string, error) { @@ -57,12 +65,12 @@ func findValue(filename, separator, key string) (string, error) { } } if len(line) == 0 { - return "", errors.Errorf("%v not found", key) + return "", fmt.Errorf("%v not found", key) } parts := bytes.SplitN(line, []byte(separator), 2) if len(parts) != 2 { - return "", errors.Errorf("unexpected line format for '%v'", string(line)) + return "", fmt.Errorf("unexpected line format for '%v'", string(line)) } return string(bytes.TrimSpace(parts[1])), nil @@ -94,7 +102,7 @@ func parseBytesOrNumber(data []byte) (uint64, error) { num, err := strconv.ParseUint(string(parts[0]), 10, 64) if err != nil { - return 0, errors.Wrap(err, "failed to parse value") + return 0, fmt.Errorf("failed to parse value: %w", err) } var multiplier uint64 = 1 @@ -103,7 +111,7 @@ func parseBytesOrNumber(data []byte) (uint64, error) { case "kB": multiplier = 1024 default: - return 0, errors.Errorf("unhandled unit %v", string(parts[1])) + return 0, fmt.Errorf("unhandled unit %v", string(parts[1])) } } diff --git a/vendor/github.com/elastic/go-sysinfo/providers/linux/vmstat.go b/vendor/github.com/elastic/go-sysinfo/providers/linux/vmstat.go index 0a228678..ea918c84 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/linux/vmstat.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/linux/vmstat.go @@ -18,10 +18,9 @@ package linux import ( + "fmt" "reflect" - "github.com/pkg/errors" - "github.com/elastic/go-sysinfo/types" ) @@ -46,11 +45,11 @@ func parseVMStat(content []byte) (*types.VMStatInfo, error) { var vmStat types.VMStatInfo refValues := reflect.ValueOf(&vmStat).Elem() - err := parseKeyValue(content, " ", func(key, value []byte) error { + err := parseKeyValue(content, ' ', func(key, value []byte) error { // turn our []byte value into an int val, err := parseBytesOrNumber(value) if err 
!= nil { - return errors.Wrapf(err, "failed to parse %v value of %v", string(key), string(value)) + return fmt.Errorf("failed to parse %v value of %v: %w", string(key), string(value), err) } idx, ok := vmstatTagToFieldIndex[string(key)] diff --git a/vendor/github.com/elastic/go-sysinfo/providers/shared/fqdn.go b/vendor/github.com/elastic/go-sysinfo/providers/shared/fqdn.go new file mode 100644 index 00000000..8cba7bc2 --- /dev/null +++ b/vendor/github.com/elastic/go-sysinfo/providers/shared/fqdn.go @@ -0,0 +1,77 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//go:build linux || darwin || aix + +package shared + +import ( + "fmt" + "net" + "os" + "strings" +) + +// FQDN attempts to lookup the host's fully-qualified domain name and returns it. +// It does so using the following algorithm: +// +// 1. It gets the hostname from the OS. If this step fails, it returns an error. +// +// 2. It tries to perform a CNAME DNS lookup for the hostname. If this succeeds, it +// returns the CNAME (after trimming any trailing period) as the FQDN. +// +// 3. It tries to perform an IP lookup for the hostname. If this succeeds, it tries +// to perform a reverse DNS lookup on the returned IPs and returns the first +// successful result (after trimming any trailing period) as the FQDN. +// +// 4. If steps 2 and 3 both fail, an empty string is returned as the FQDN along with +// errors from those steps. 
+func FQDN() (string, error) { + hostname, err := os.Hostname() + if err != nil { + return "", fmt.Errorf("could not get hostname to look for FQDN: %w", err) + } + + return fqdn(hostname) +} + +func fqdn(hostname string) (string, error) { + var errs error + cname, err := net.LookupCNAME(hostname) + if err != nil { + errs = fmt.Errorf("could not get FQDN, all methods failed: failed looking up CNAME: %w", + err) + } + if cname != "" { + return strings.ToLower(strings.TrimSuffix(cname, ".")), nil + } + + ips, err := net.LookupIP(hostname) + if err != nil { + errs = fmt.Errorf("%s: failed looking up IP: %w", errs, err) + } + + for _, ip := range ips { + names, err := net.LookupAddr(ip.String()) + if err != nil || len(names) == 0 { + continue + } + return strings.ToLower(strings.TrimSuffix(names[0], ".")), nil + } + + return "", errs +} diff --git a/vendor/github.com/elastic/go-sysinfo/providers/windows/boottime_windows.go b/vendor/github.com/elastic/go-sysinfo/providers/windows/boottime_windows.go index 4887cb1a..e04d9a40 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/windows/boottime_windows.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/windows/boottime_windows.go @@ -20,18 +20,11 @@ package windows import ( "time" - "github.com/pkg/errors" - - windows "github.com/elastic/go-windows" + "golang.org/x/sys/windows" ) func BootTime() (time.Time, error) { - msSinceBoot, err := windows.GetTickCount64() - if err != nil { - return time.Time{}, errors.Wrap(err, "failed to get boot time") - } - - bootTime := time.Now().Add(-1 * time.Duration(msSinceBoot) * time.Millisecond) + bootTime := time.Now().Add(-1 * windows.DurationSinceBoot()) // According to GetTickCount64, the resolution of the value is limited to // the resolution of the system timer, which is typically in the range of diff --git a/vendor/github.com/elastic/go-sysinfo/providers/windows/device_windows.go b/vendor/github.com/elastic/go-sysinfo/providers/windows/device_windows.go index 998295de..372f125f 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/windows/device_windows.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/windows/device_windows.go @@ -18,10 +18,11 @@ package windows import ( + "errors" + "fmt" "strings" "unsafe" - "github.com/pkg/errors" "golang.org/x/sys/windows" ) @@ -111,7 +112,7 @@ func (mapper *deviceMapper) DevicePathToDrivePath(path string) (string, error) { isMUP := strings.Index(pathLower, DeviceMup) == 0 mask, err := mapper.GetLogicalDrives() if err != nil { - return "", errors.Wrap(err, "GetLogicalDrives") + return "", fmt.Errorf("GetLogicalDrives: %w", err) } for bit := uint32(0); mask != 0 && bit < uint32('Z'-'A'+1); bit++ { @@ -177,7 +178,7 @@ func (m testingDeviceProvider) QueryDosDevice(nameW *uint16, buf *uint16, length } path, ok := m[drive] if !ok { - return 0, errors.Errorf("drive %c not found", drive) + return 0, fmt.Errorf("drive %c not found", drive) } n := uint32(len(path)) if n+2 > length { diff --git a/vendor/github.com/elastic/go-sysinfo/providers/windows/host_windows.go b/vendor/github.com/elastic/go-sysinfo/providers/windows/host_windows.go index 0bc99e46..b429ff2e 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/windows/host_windows.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/windows/host_windows.go @@ -18,12 +18,18 @@ package windows import ( + "errors" + "fmt" "os" + "strings" + "syscall" "time" - windows "github.com/elastic/go-windows" "github.com/joeshaw/multierror" - "github.com/pkg/errors" + + stdwindows 
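The shared FQDN helper tries a CNAME lookup first, then reverse-resolves the host's IPs, lowercasing the result and trimming the trailing dot. A condensed, error-swallowing sketch of that algorithm:

```go
package main

import (
	"fmt"
	"net"
	"strings"
)

// fqdn condenses the shared helper: CNAME first, then the first successful
// reverse lookup of the host's IPs; "" means both methods failed.
func fqdn(hostname string) string {
	if cname, err := net.LookupCNAME(hostname); err == nil && cname != "" {
		return strings.ToLower(strings.TrimSuffix(cname, "."))
	}
	ips, _ := net.LookupIP(hostname)
	for _, ip := range ips {
		if names, err := net.LookupAddr(ip.String()); err == nil && len(names) > 0 {
			return strings.ToLower(strings.TrimSuffix(names[0], "."))
		}
	}
	return ""
}

func main() {
	fmt.Println(fqdn("localhost"))
}
```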
"golang.org/x/sys/windows" + + windows "github.com/elastic/go-windows" "github.com/elastic/go-sysinfo/internal/registry" "github.com/elastic/go-sysinfo/providers/shared" @@ -78,6 +84,15 @@ func (h *host) Memory() (*types.HostMemoryInfo, error) { }, nil } +func (h *host) FQDN() (string, error) { + fqdn, err := getComputerNameEx(stdwindows.ComputerNamePhysicalDnsFullyQualified) + if err != nil { + return "", fmt.Errorf("could not get windows FQDN: %s", err) + } + + return strings.ToLower(strings.TrimSuffix(fqdn, ".")), nil +} + func newHost() (*host, error) { h := &host{} r := &reader{} @@ -98,7 +113,7 @@ type reader struct { func (r *reader) addErr(err error) bool { if err != nil { - if errors.Cause(err) != types.ErrNotImplemented { + if !errors.Is(err, types.ErrNotImplemented) { r.errs = append(r.errs, err) } return true @@ -134,7 +149,38 @@ func (r *reader) hostname(h *host) { if r.addErr(err) { return } - h.info.Hostname = v + h.info.Hostname = strings.ToLower(v) +} + +func getComputerNameEx(name uint32) (string, error) { + size := uint32(64) + + for { + buff := make([]uint16, size) + err := stdwindows.GetComputerNameEx( + name, &buff[0], &size) + if err == nil { + return syscall.UTF16ToString(buff[:size]), nil + } + + // ERROR_MORE_DATA means buff is too small and size is set to the + // number of bytes needed to store the FQDN. For details, see + // https://learn.microsoft.com/en-us/windows/win32/api/sysinfoapi/nf-sysinfoapi-getcomputernameexw#return-value + if errors.Is(err, syscall.ERROR_MORE_DATA) { + // Safeguard to avoid an infinite loop. + if size <= uint32(len(buff)) { + return "", fmt.Errorf( + "windows.GetComputerNameEx returned ERROR_MORE_DATA, " + + "but data size should fit into buffer") + } else { + // Grow the buffer and try again. 
+ buff = make([]uint16, size) + continue + } + } + + return "", fmt.Errorf("could not get windows FQDN: could not get windows.ComputerNamePhysicalDnsFullyQualified: %w", err) + } } func (r *reader) network(h *host) { diff --git a/vendor/github.com/elastic/go-sysinfo/providers/windows/machineid_windows.go b/vendor/github.com/elastic/go-sysinfo/providers/windows/machineid_windows.go index ea814f4c..0c69c89d 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/windows/machineid_windows.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/windows/machineid_windows.go @@ -18,7 +18,8 @@ package windows import ( - "github.com/pkg/errors" + "fmt" + "golang.org/x/sys/windows/registry" ) @@ -33,13 +34,13 @@ func getMachineGUID() (string, error) { k, err := registry.OpenKey(key, path, registry.READ|registry.WOW64_64KEY) if err != nil { - return "", errors.Wrapf(err, `failed to open HKLM\%v`, path) + return "", fmt.Errorf(`failed to open HKLM\%v: %w`, path, err) } defer k.Close() guid, _, err := k.GetStringValue(name) if err != nil { - return "", errors.Wrapf(err, `failed to get value of HKLM\%v\%v`, path, name) + return "", fmt.Errorf(`failed to get value of HKLM\%v\%v: %w`, path, name, err) } return guid, nil diff --git a/vendor/github.com/elastic/go-sysinfo/providers/windows/os_windows.go b/vendor/github.com/elastic/go-sysinfo/providers/windows/os_windows.go index 10ed595c..5fa696a0 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/windows/os_windows.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/windows/os_windows.go @@ -22,7 +22,6 @@ import ( "strconv" "strings" - "github.com/pkg/errors" "golang.org/x/sys/windows/registry" "github.com/elastic/go-sysinfo/types" @@ -35,7 +34,7 @@ func OperatingSystem() (*types.OSInfo, error) { k, err := registry.OpenKey(key, path, flags) if err != nil { - return nil, errors.Wrapf(err, `failed to open HKLM\%v`, path) + return nil, fmt.Errorf(`failed to open HKLM\%v: %w`, path, err) } defer k.Close() @@ -47,7 +46,7 @@ func OperatingSystem() (*types.OSInfo, error) { name := "ProductName" osInfo.Name, _, err = k.GetStringValue(name) if err != nil { - return nil, errors.Wrapf(err, `failed to get value of HKLM\%v\%v`, path, name) + return nil, fmt.Errorf(`failed to get value of HKLM\%v\%v: %w`, path, name, err) } // Newer versions (Win 10 and 2016) have CurrentMajor/CurrentMinor. 
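getComputerNameEx is the standard grow-and-retry loop for Win32 APIs that signal ERROR_MORE_DATA and report the required length through their size parameter. A sketch of the same pattern, including the safeguard against a size that never grows:

```go
//go:build windows

package main

import (
	"errors"
	"fmt"
	"syscall"

	"golang.org/x/sys/windows"
)

func computerNameEx(format uint32) (string, error) {
	size := uint32(64)
	for {
		buf := make([]uint16, size)
		err := windows.GetComputerNameEx(format, &buf[0], &size)
		if err == nil {
			return syscall.UTF16ToString(buf[:size]), nil
		}
		if !errors.Is(err, syscall.ERROR_MORE_DATA) {
			return "", fmt.Errorf("GetComputerNameEx: %w", err)
		}
		// ERROR_MORE_DATA: size now holds the required length. Guard
		// against a size that did not actually grow to avoid spinning.
		if size <= uint32(len(buf)) {
			return "", errors.New("ERROR_MORE_DATA with non-growing size")
		}
	}
}

func main() {
	name, err := computerNameEx(windows.ComputerNamePhysicalDnsFullyQualified)
	fmt.Println(name, err)
}
```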
@@ -61,7 +60,7 @@ func OperatingSystem() (*types.OSInfo, error) { name = "CurrentVersion" osInfo.Version, _, err = k.GetStringValue(name) if err != nil { - return nil, errors.Wrapf(err, `failed to get value of HKLM\%v\%v`, path, name) + return nil, fmt.Errorf(`failed to get value of HKLM\%v\%v: %w`, path, name, err) } parts := strings.SplitN(osInfo.Version, ".", 3) for i, p := range parts { @@ -69,7 +68,7 @@ func OperatingSystem() (*types.OSInfo, error) { case 0: osInfo.Major, _ = strconv.Atoi(p) case 1: - osInfo.Major, _ = strconv.Atoi(p) + osInfo.Minor, _ = strconv.Atoi(p) } } } @@ -77,7 +76,7 @@ func OperatingSystem() (*types.OSInfo, error) { name = "CurrentBuild" currentBuild, _, err := k.GetStringValue(name) if err != nil { - return nil, errors.Wrapf(err, `failed to get value of HKLM\%v\%v`, path, name) + return nil, fmt.Errorf(`failed to get value of HKLM\%v\%v: %w`, path, name, err) } osInfo.Build = currentBuild @@ -85,7 +84,7 @@ func OperatingSystem() (*types.OSInfo, error) { name = "UBR" updateBuildRevision, _, err := k.GetIntegerValue(name) if err != nil && err != registry.ErrNotExist { - return nil, errors.Wrapf(err, `failed to get value of HKLM\%v\%v`, path, name) + return nil, fmt.Errorf(`failed to get value of HKLM\%v\%v: %w`, path, name, err) } else { osInfo.Build = fmt.Sprintf("%v.%d", osInfo.Build, updateBuildRevision) } diff --git a/vendor/github.com/elastic/go-sysinfo/providers/windows/process_windows.go b/vendor/github.com/elastic/go-sysinfo/providers/windows/process_windows.go index 3bb2a896..7086bce0 100644 --- a/vendor/github.com/elastic/go-sysinfo/providers/windows/process_windows.go +++ b/vendor/github.com/elastic/go-sysinfo/providers/windows/process_windows.go @@ -18,6 +18,8 @@ package windows import ( + "errors" + "fmt" "os" "path/filepath" "strings" @@ -25,7 +27,6 @@ import ( "time" "unsafe" - "github.com/pkg/errors" syswin "golang.org/x/sys/windows" windows "github.com/elastic/go-windows" @@ -41,7 +42,7 @@ var ( func (s windowsSystem) Processes() (procs []types.Process, err error) { pids, err := windows.EnumProcesses() if err != nil { - return nil, errors.Wrap(err, "EnumProcesses") + return nil, fmt.Errorf("EnumProcesses: %w", err) } procs = make([]types.Process, 0, len(pids)) var proc types.Process @@ -124,10 +125,10 @@ func (p *process) init() error { var args []string var cwd string var ppid int - pbi, err := getProcessBasicInformation(handle) + pbi, err := getProcessBasicInformation(syswin.Handle(handle)) if err == nil { ppid = int(pbi.InheritedFromUniqueProcessID) - userProcParams, err := getUserProcessParams(handle, pbi) + userProcParams, err := getUserProcessParams(syswin.Handle(handle), pbi) if err == nil { if argsW, err := readProcessUnicodeString(handle, &userProcParams.CommandLine); err == nil { args, err = splitCommandline(argsW) @@ -158,15 +159,16 @@ func (p *process) init() error { return nil } -func getProcessBasicInformation(handle syscall.Handle) (pbi windows.ProcessBasicInformationStruct, err error) { - actualSize, err := windows.NtQueryInformationProcess(handle, windows.ProcessBasicInformation, unsafe.Pointer(&pbi), uint32(windows.SizeOfProcessBasicInformationStruct)) +func getProcessBasicInformation(handle syswin.Handle) (pbi windows.ProcessBasicInformationStruct, err error) { + var actualSize uint32 + err = syswin.NtQueryInformationProcess(handle, syswin.ProcessBasicInformation, unsafe.Pointer(&pbi), uint32(windows.SizeOfProcessBasicInformationStruct), &actualSize) if actualSize < uint32(windows.SizeOfProcessBasicInformationStruct) { return 
pbi, errors.New("bad size for PROCESS_BASIC_INFORMATION")
 	}
 	return pbi, err
 }
 
-func getUserProcessParams(handle syscall.Handle, pbi windows.ProcessBasicInformationStruct) (params windows.RtlUserProcessParameters, err error) {
+func getUserProcessParams(handle syswin.Handle, pbi windows.ProcessBasicInformationStruct) (params windows.RtlUserProcessParameters, err error) {
 	const is32bitProc = unsafe.Sizeof(uintptr(0)) == 4
 
 	// Offset of params field within PEB structure.
@@ -179,12 +181,13 @@ func getUserProcessParams(handle syscall.Handle, pbi windows.ProcessBasicInforma
 	// Read the PEB from the target process memory
 	pebSize := paramsOffset + 8
 	peb := make([]byte, pebSize)
-	nRead, err := windows.ReadProcessMemory(handle, pbi.PebBaseAddress, peb)
+	var nRead uintptr
+	err = syswin.ReadProcessMemory(handle, pbi.PebBaseAddress, &peb[0], uintptr(pebSize), &nRead)
 	if err != nil {
 		return params, err
 	}
 	if nRead != uintptr(pebSize) {
-		return params, errors.Errorf("PEB: short read (%d/%d)", nRead, pebSize)
+		return params, fmt.Errorf("PEB: short read (%d/%d)", nRead, pebSize)
 	}
 
 	// Get the RTL_USER_PROCESS_PARAMETERS struct pointer from the PEB
@@ -192,12 +195,12 @@ func getUserProcessParams(handle syscall.Handle, pbi windows.ProcessBasicInforma
 	// Read the RTL_USER_PROCESS_PARAMETERS from the target process memory
 	paramsBuf := make([]byte, windows.SizeOfRtlUserProcessParameters)
-	nRead, err = windows.ReadProcessMemory(handle, paramsAddr, paramsBuf)
+	err = syswin.ReadProcessMemory(handle, paramsAddr, &paramsBuf[0], uintptr(windows.SizeOfRtlUserProcessParameters), &nRead)
 	if err != nil {
 		return params, err
 	}
 	if nRead != uintptr(windows.SizeOfRtlUserProcessParameters) {
-		return params, errors.Errorf("RTL_USER_PROCESS_PARAMETERS: short read (%d/%d)", nRead, windows.SizeOfRtlUserProcessParameters)
+		return params, fmt.Errorf("RTL_USER_PROCESS_PARAMETERS: short read (%d/%d)", nRead, windows.SizeOfRtlUserProcessParameters)
 	}
 
 	params = *(*windows.RtlUserProcessParameters)(unsafe.Pointer(&paramsBuf[0]))
@@ -219,7 +222,7 @@ func readProcessUnicodeString(handle syscall.Handle, s *windows.UnicodeString) (
 		return nil, err
 	}
 	if nRead != uintptr(s.Size) {
-		return nil, errors.Errorf("unicode string: short read: (%d/%d)", nRead, s.Size)
+		return nil, fmt.Errorf("unicode string: short read: (%d/%d)", nRead, s.Size)
 	}
 	return buf, nil
 }
@@ -288,43 +291,41 @@ func (p *process) Info() (types.ProcessInfo, error) {
 func (p *process) User() (types.UserInfo, error) {
 	handle, err := p.open()
 	if err != nil {
-		return types.UserInfo{}, errors.Wrap(err, "OpenProcess failed")
+		return types.UserInfo{}, fmt.Errorf("OpenProcess failed: %w", err)
 	}
 	defer syscall.CloseHandle(handle)
 
 	var accessToken syswin.Token
 	err = syswin.OpenProcessToken(syswin.Handle(handle), syscall.TOKEN_QUERY, &accessToken)
 	if err != nil {
-		return types.UserInfo{}, errors.Wrap(err, "OpenProcessToken failed")
+		return types.UserInfo{}, fmt.Errorf("OpenProcessToken failed: %w", err)
 	}
 	defer accessToken.Close()
 
 	tokenUser, err := accessToken.GetTokenUser()
 	if err != nil {
-		return types.UserInfo{}, errors.Wrap(err, "GetTokenUser failed")
+		return types.UserInfo{}, fmt.Errorf("GetTokenUser failed: %w", err)
 	}
 
 	sid, err := sidToString(tokenUser.User.Sid)
 	if sid == "" || err != nil {
-		const errStr = "failed to look up user SID"
 		if err != nil {
-			return types.UserInfo{}, errors.Wrap(err, errStr)
+			return types.UserInfo{}, fmt.Errorf("failed to look up user SID: %w", err)
 		}
-		return types.UserInfo{}, errors.New(errStr)
+		return types.UserInfo{}, errors.New("failed to look 
up user SID") } tokenGroup, err := accessToken.GetTokenPrimaryGroup() if err != nil { - return types.UserInfo{}, errors.Wrap(err, "GetTokenPrimaryGroup failed") + return types.UserInfo{}, fmt.Errorf("GetTokenPrimaryGroup failed: %w", err) } gsid, err := sidToString(tokenGroup.PrimaryGroup) if gsid == "" || err != nil { - const errStr = "failed to look up primary group SID" if err != nil { - return types.UserInfo{}, errors.Wrap(err, errStr) + return types.UserInfo{}, fmt.Errorf("failed to look up primary group SID: %w", err) } - return types.UserInfo{}, errors.New(errStr) + return types.UserInfo{}, errors.New("failed to look up primary group SID") } return types.UserInfo{ diff --git a/vendor/github.com/elastic/go-sysinfo/system.go b/vendor/github.com/elastic/go-sysinfo/system.go index baf540bb..b9a33607 100644 --- a/vendor/github.com/elastic/go-sysinfo/system.go +++ b/vendor/github.com/elastic/go-sysinfo/system.go @@ -43,6 +43,7 @@ func Go() types.GoInfo { // Host returns information about host on which this process is running. If // host information collection is not implemented for this platform then // types.ErrNotImplemented is returned. +// On Darwin (macOS) a types.ErrNotImplemented is returned with cgo disabled. func Host() (types.Host, error) { provider := registry.GetHostProvider() if provider == nil { diff --git a/vendor/github.com/elastic/go-sysinfo/types/errors.go b/vendor/github.com/elastic/go-sysinfo/types/errors.go index c7ec1693..7e509bc4 100644 --- a/vendor/github.com/elastic/go-sysinfo/types/errors.go +++ b/vendor/github.com/elastic/go-sysinfo/types/errors.go @@ -17,7 +17,7 @@ package types -import "github.com/pkg/errors" +import "errors" // ErrNotImplemented represents an error for a function that is not implemented on a particular platform. var ErrNotImplemented = errors.New("unimplemented") diff --git a/vendor/github.com/elastic/go-sysinfo/types/host.go b/vendor/github.com/elastic/go-sysinfo/types/host.go index b23a08d7..5685e984 100644 --- a/vendor/github.com/elastic/go-sysinfo/types/host.go +++ b/vendor/github.com/elastic/go-sysinfo/types/host.go @@ -20,10 +20,15 @@ package types import "time" // Host is the interface that wraps methods for returning Host stats +// It may return partial information if the provider +// implementation is unable to collect all of the necessary data. type Host interface { CPUTimer Info() HostInfo Memory() (*HostMemoryInfo, error) + + // FQDN returns the fully-qualified domain name of the host, lowercased. + FQDN() (string, error) } // NetworkCounters represents network stats from /proc/net @@ -64,7 +69,7 @@ type HostInfo struct { Architecture string `json:"architecture"` // Hardware architecture (e.g. x86_64, arm, ppc, mips). BootTime time.Time `json:"boot_time"` // Host boot time. Containerized *bool `json:"containerized,omitempty"` // Is the process containerized. - Hostname string `json:"name"` // Hostname + Hostname string `json:"name"` // Hostname, lowercased. IPs []string `json:"ip,omitempty"` // List of all IPs. KernelVersion string `json:"kernel_version"` // Kernel version. MACs []string `json:"mac"` // List of MAC addresses. @@ -96,7 +101,7 @@ type OSInfo struct { // LoadAverage is the interface that wraps the LoadAverage method. 
// LoadAverage returns load info on the host type LoadAverage interface { - LoadAverage() LoadAverageInfo + LoadAverage() (*LoadAverageInfo, error) } // LoadAverageInfo contains load statistics diff --git a/vendor/github.com/elastic/go-sysinfo/types/process.go b/vendor/github.com/elastic/go-sysinfo/types/process.go index 20787b29..c02ac9dc 100644 --- a/vendor/github.com/elastic/go-sysinfo/types/process.go +++ b/vendor/github.com/elastic/go-sysinfo/types/process.go @@ -22,6 +22,9 @@ import "time" // Process is the main wrapper for gathering information on a process type Process interface { CPUTimer + // Info returns process info. + // It may return partial information if the provider + // implementation is unable to collect all the necessary data. Info() (ProcessInfo, error) Memory() (MemoryInfo, error) User() (UserInfo, error) @@ -99,6 +102,8 @@ type CPUTimer interface { // The User and System fields are guaranteed // to be populated for all platforms, and // for both hosts and processes. + // This may return types.ErrNotImplemented + // if the provider cannot implement collection of this data. CPUTime() (CPUTimes, error) } diff --git a/vendor/github.com/elastic/go-windows/CHANGELOG.md b/vendor/github.com/elastic/go-windows/CHANGELOG.md index f87a51dd..68698e29 100644 --- a/vendor/github.com/elastic/go-windows/CHANGELOG.md +++ b/vendor/github.com/elastic/go-windows/CHANGELOG.md @@ -18,6 +18,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Security +## [1.0.1] - 2019-08-28 + +### Security + +- Load DLLs only from Windows system directory. + ## [1.0.0] - 2019-04-26 ### Added @@ -30,5 +36,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - EnumProcesses - Add GetProcessHandleCount to kernel32. #7 -[Unreleased]: https://github.com/elastic/go-windows/compare/v1.0.0...HEAD +[Unreleased]: https://github.com/elastic/go-windows/compare/v1.0.1...HEAD +[1.0.1]: https://github.com/elastic/go-windows/v1.0.1 [1.0.0]: https://github.com/elastic/go-windows/v1.0.0 diff --git a/vendor/github.com/elastic/go-windows/doc.go b/vendor/github.com/elastic/go-windows/doc.go index 62bf0ed8..dc8a379c 100644 --- a/vendor/github.com/elastic/go-windows/doc.go +++ b/vendor/github.com/elastic/go-windows/doc.go @@ -20,4 +20,6 @@ package windows // Use "GOOS=windows go generate -v -x" to generate the sources. // Add -trace to enable debug prints around syscalls. 
-//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -systemdll=false -output zsyscall_windows.go kernel32.go version.go psapi.go ntdll.go +//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -systemdll=true -output=zsyscall_windows.go kernel32.go version.go psapi.go ntdll.go +//go:generate go run .ci/scripts/fix_generated.go -input zsyscall_windows.go +//go:generate go-licenser diff --git a/vendor/github.com/elastic/go-windows/zsyscall_windows.go b/vendor/github.com/elastic/go-windows/zsyscall_windows.go index 0767d2dc..1a6bcd9a 100644 --- a/vendor/github.com/elastic/go-windows/zsyscall_windows.go +++ b/vendor/github.com/elastic/go-windows/zsyscall_windows.go @@ -22,6 +22,8 @@ package windows import ( "syscall" "unsafe" + + "golang.org/x/sys/windows" ) var _ unsafe.Pointer @@ -52,10 +54,10 @@ func errnoErr(e syscall.Errno) error { } var ( - modkernel32 = syscall.NewLazyDLL("kernel32.dll") - modversion = syscall.NewLazyDLL("version.dll") - modpsapi = syscall.NewLazyDLL("psapi.dll") - modntdll = syscall.NewLazyDLL("ntdll.dll") + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + modversion = windows.NewLazySystemDLL("version.dll") + modpsapi = windows.NewLazySystemDLL("psapi.dll") + modntdll = windows.NewLazySystemDLL("ntdll.dll") procGetNativeSystemInfo = modkernel32.NewProc("GetNativeSystemInfo") procGetTickCount64 = modkernel32.NewProc("GetTickCount64") diff --git a/vendor/github.com/felixge/httpsnoop/.gitignore b/vendor/github.com/felixge/httpsnoop/.gitignore new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/felixge/httpsnoop/LICENSE.txt b/vendor/github.com/felixge/httpsnoop/LICENSE.txt new file mode 100644 index 00000000..e028b46a --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2016 Felix Geisendörfer (felix@debuggable.com) + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. diff --git a/vendor/github.com/felixge/httpsnoop/Makefile b/vendor/github.com/felixge/httpsnoop/Makefile new file mode 100644 index 00000000..4e12afdd --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/Makefile @@ -0,0 +1,10 @@ +.PHONY: ci generate clean + +ci: clean generate + go test -race -v ./... + +generate: + go generate . 
+
+clean:
+	rm -rf *_generated*.go
diff --git a/vendor/github.com/felixge/httpsnoop/README.md b/vendor/github.com/felixge/httpsnoop/README.md
new file mode 100644
index 00000000..cf6b42f3
--- /dev/null
+++ b/vendor/github.com/felixge/httpsnoop/README.md
@@ -0,0 +1,95 @@
+# httpsnoop
+
+Package httpsnoop provides an easy way to capture http related metrics (i.e.
+response time, bytes written, and http status code) from your application's
+http.Handlers.
+
+Doing this requires non-trivial wrapping of the http.ResponseWriter interface,
+which is also exposed for users interested in a more low-level API.
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/felixge/httpsnoop.svg)](https://pkg.go.dev/github.com/felixge/httpsnoop)
+[![Build Status](https://github.com/felixge/httpsnoop/actions/workflows/main.yaml/badge.svg)](https://github.com/felixge/httpsnoop/actions/workflows/main.yaml)
+
+## Usage Example
+
+```go
+// myH is your app's http handler, perhaps a http.ServeMux or similar.
+var myH http.Handler
+// wrappedH wraps myH in order to log every request.
+wrappedH := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+	m := httpsnoop.CaptureMetrics(myH, w, r)
+	log.Printf(
+		"%s %s (code=%d dt=%s written=%d)",
+		r.Method,
+		r.URL,
+		m.Code,
+		m.Duration,
+		m.Written,
+	)
+})
+http.ListenAndServe(":8080", wrappedH)
+```
+
+## Why this package exists
+
+Instrumenting an application's http.Handler is surprisingly difficult.
+
+However if you google for e.g. "capture ResponseWriter status code" you'll find
+lots of advice and code examples suggesting it is a fairly trivial
+undertaking. Unfortunately everything I've seen so far has a high chance of
+breaking your application.
+
+The main problem is that a `http.ResponseWriter` often implements additional
+interfaces such as `http.Flusher`, `http.CloseNotifier`, `http.Hijacker`, `http.Pusher`, and
+`io.ReaderFrom`. So the naive approach of just wrapping `http.ResponseWriter`
+in your own struct that also implements the `http.ResponseWriter` interface
+will hide the additional interfaces mentioned above. This has a high chance of
+introducing subtle bugs into any non-trivial application.
+
+Another approach I've seen people take is to return a struct that implements
+all of the interfaces above. However, that's also problematic, because it's
+difficult to fake some of these interfaces' behaviors when the underlying
+`http.ResponseWriter` doesn't have an implementation. It's also dangerous,
+because an application may choose to operate differently, merely because it
+detects the presence of these additional interfaces.
+
+This package solves this problem by checking which additional interfaces a
+`http.ResponseWriter` implements, returning a wrapped version implementing the
+exact same set of interfaces.
+
+Additionally this package properly handles edge cases such as `WriteHeader` not
+being called, or called more than once, as well as concurrent calls to
+`http.ResponseWriter` methods, and even calls happening after the wrapped
+`ServeHTTP` has already returned.
+
+Unfortunately this package is not perfect either. It's possible that it is
+still missing some interfaces provided by the go core (let me know if you find
+one), and it won't work for applications adding their own interfaces into the
+mix. You can however use `httpsnoop.Unwrap(w)` to access the underlying
+`http.ResponseWriter` and type-assert the result to its other interfaces.
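+
+As a minimal sketch of that escape hatch (the `Status() int` method below is
+hypothetical, standing in for an interface your own middleware adds):
+
+```go
+if sc, ok := httpsnoop.Unwrap(w).(interface{ Status() int }); ok {
+	log.Printf("status recorded so far: %d", sc.Status())
+}
+```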
+
+However, hopefully the explanation above has sufficiently scared you away from
+rolling your own solution to this problem. httpsnoop may still break your
+application, but at least it tries to avoid it as much as possible.
+
+Anyway, the real problem here is that smuggling additional interfaces inside
+`http.ResponseWriter` is a problematic design choice, but it probably goes as
+deep as the Go language specification itself. But that's okay, I still prefer
+Go over the alternatives ;).
+
+## Performance
+
+```
+BenchmarkBaseline-8            20000    94912 ns/op
+BenchmarkCaptureMetrics-8      20000    95461 ns/op
+```
+
+As you can see, using `CaptureMetrics` on a vanilla http.Handler introduces an
+overhead of ~500 ns per http request on my machine. However, the margin of
+error appears to be larger than that, therefore it should be reasonable to
+assume that the overhead introduced by `CaptureMetrics` is absolutely
+negligible.
+
+## License
+
+MIT
diff --git a/vendor/github.com/felixge/httpsnoop/capture_metrics.go b/vendor/github.com/felixge/httpsnoop/capture_metrics.go
new file mode 100644
index 00000000..bec7b71b
--- /dev/null
+++ b/vendor/github.com/felixge/httpsnoop/capture_metrics.go
@@ -0,0 +1,86 @@
+package httpsnoop
+
+import (
+	"io"
+	"net/http"
+	"time"
+)
+
+// Metrics holds metrics captured from CaptureMetrics.
+type Metrics struct {
+	// Code is the first http response code passed to the WriteHeader func of
+	// the ResponseWriter. If no such call is made, a default code of 200 is
+	// assumed instead.
+	Code int
+	// Duration is the time it took to execute the handler.
+	Duration time.Duration
+	// Written is the number of bytes successfully written by the Write or
+	// ReadFrom function of the ResponseWriter. ResponseWriters may also write
+	// data to their underlying connection directly (e.g. headers), but those
+	// are not tracked. Therefore the number of Written bytes will usually match
+	// the size of the response body.
+	Written int64
+}
+
+// CaptureMetrics wraps the given hnd, executes it with the given w and r, and
+// returns the metrics it captured from it.
+func CaptureMetrics(hnd http.Handler, w http.ResponseWriter, r *http.Request) Metrics {
+	return CaptureMetricsFn(w, func(ww http.ResponseWriter) {
+		hnd.ServeHTTP(ww, r)
+	})
+}
+
+// CaptureMetricsFn wraps w and calls fn with the wrapped w and returns the
+// resulting metrics. This is very similar to CaptureMetrics (which is just
+// sugar on top of this func), but is a more usable interface if your
+// application doesn't use the Go http.Handler interface.
+func CaptureMetricsFn(w http.ResponseWriter, fn func(http.ResponseWriter)) Metrics {
+	m := Metrics{Code: http.StatusOK}
+	m.CaptureMetrics(w, fn)
+	return m
+}
+
+// CaptureMetrics wraps w and calls fn with the wrapped w and updates
+// Metrics m with the resulting metrics. This is similar to CaptureMetricsFn,
+// but allows one to customize the starting Metrics object.
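+//
+// A minimal usage sketch (the handler h, the writer w, and the request r are
+// assumed to come from the surrounding application):
+//
+//	m := Metrics{Code: http.StatusOK}
+//	m.CaptureMetrics(w, func(ww http.ResponseWriter) { h.ServeHTTP(ww, r) })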
+func (m *Metrics) CaptureMetrics(w http.ResponseWriter, fn func(http.ResponseWriter)) { + var ( + start = time.Now() + headerWritten bool + hooks = Hooks{ + WriteHeader: func(next WriteHeaderFunc) WriteHeaderFunc { + return func(code int) { + next(code) + + if !(code >= 100 && code <= 199) && !headerWritten { + m.Code = code + headerWritten = true + } + } + }, + + Write: func(next WriteFunc) WriteFunc { + return func(p []byte) (int, error) { + n, err := next(p) + + m.Written += int64(n) + headerWritten = true + return n, err + } + }, + + ReadFrom: func(next ReadFromFunc) ReadFromFunc { + return func(src io.Reader) (int64, error) { + n, err := next(src) + + headerWritten = true + m.Written += n + return n, err + } + }, + } + ) + + fn(Wrap(w, hooks)) + m.Duration += time.Since(start) +} diff --git a/vendor/github.com/felixge/httpsnoop/docs.go b/vendor/github.com/felixge/httpsnoop/docs.go new file mode 100644 index 00000000..203c35b3 --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/docs.go @@ -0,0 +1,10 @@ +// Package httpsnoop provides an easy way to capture http related metrics (i.e. +// response time, bytes written, and http status code) from your application's +// http.Handlers. +// +// Doing this requires non-trivial wrapping of the http.ResponseWriter +// interface, which is also exposed for users interested in a more low-level +// API. +package httpsnoop + +//go:generate go run codegen/main.go diff --git a/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go b/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go new file mode 100644 index 00000000..101cedde --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go @@ -0,0 +1,436 @@ +// +build go1.8 +// Code generated by "httpsnoop/codegen"; DO NOT EDIT. + +package httpsnoop + +import ( + "bufio" + "io" + "net" + "net/http" +) + +// HeaderFunc is part of the http.ResponseWriter interface. +type HeaderFunc func() http.Header + +// WriteHeaderFunc is part of the http.ResponseWriter interface. +type WriteHeaderFunc func(code int) + +// WriteFunc is part of the http.ResponseWriter interface. +type WriteFunc func(b []byte) (int, error) + +// FlushFunc is part of the http.Flusher interface. +type FlushFunc func() + +// CloseNotifyFunc is part of the http.CloseNotifier interface. +type CloseNotifyFunc func() <-chan bool + +// HijackFunc is part of the http.Hijacker interface. +type HijackFunc func() (net.Conn, *bufio.ReadWriter, error) + +// ReadFromFunc is part of the io.ReaderFrom interface. +type ReadFromFunc func(src io.Reader) (int64, error) + +// PushFunc is part of the http.Pusher interface. +type PushFunc func(target string, opts *http.PushOptions) error + +// Hooks defines a set of method interceptors for methods included in +// http.ResponseWriter as well as some others. You can think of them as +// middleware for the function calls they target. See Wrap for more details. +type Hooks struct { + Header func(HeaderFunc) HeaderFunc + WriteHeader func(WriteHeaderFunc) WriteHeaderFunc + Write func(WriteFunc) WriteFunc + Flush func(FlushFunc) FlushFunc + CloseNotify func(CloseNotifyFunc) CloseNotifyFunc + Hijack func(HijackFunc) HijackFunc + ReadFrom func(ReadFromFunc) ReadFromFunc + Push func(PushFunc) PushFunc +} + +// Wrap returns a wrapped version of w that provides the exact same interface +// as w. 
Specifically if w implements any combination of: +// +// - http.Flusher +// - http.CloseNotifier +// - http.Hijacker +// - io.ReaderFrom +// - http.Pusher +// +// The wrapped version will implement the exact same combination. If no hooks +// are set, the wrapped version also behaves exactly as w. Hooks targeting +// methods not supported by w are ignored. Any other hooks will intercept the +// method they target and may modify the call's arguments and/or return values. +// The CaptureMetrics implementation serves as a working example for how the +// hooks can be used. +func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter { + rw := &rw{w: w, h: hooks} + _, i0 := w.(http.Flusher) + _, i1 := w.(http.CloseNotifier) + _, i2 := w.(http.Hijacker) + _, i3 := w.(io.ReaderFrom) + _, i4 := w.(http.Pusher) + switch { + // combination 1/32 + case !i0 && !i1 && !i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + }{rw, rw} + // combination 2/32 + case !i0 && !i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Pusher + }{rw, rw, rw} + // combination 3/32 + case !i0 && !i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + io.ReaderFrom + }{rw, rw, rw} + // combination 4/32 + case !i0 && !i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw} + // combination 5/32 + case !i0 && !i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + }{rw, rw, rw} + // combination 6/32 + case !i0 && !i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + http.Pusher + }{rw, rw, rw, rw} + // combination 7/32 + case !i0 && !i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 8/32 + case !i0 && !i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 9/32 + case !i0 && i1 && !i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + }{rw, rw, rw} + // combination 10/32 + case !i0 && i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Pusher + }{rw, rw, rw, rw} + // combination 11/32 + case !i0 && i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 12/32 + case !i0 && i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 13/32 + case !i0 && i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw} + // combination 14/32 + case !i0 && i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 15/32 + case !i0 && i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 16/32 + case !i0 && i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 17/32 + case i0 && !i1 && !i2 && !i3 && !i4: + 
return struct { + Unwrapper + http.ResponseWriter + http.Flusher + }{rw, rw, rw} + // combination 18/32 + case i0 && !i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Pusher + }{rw, rw, rw, rw} + // combination 19/32 + case i0 && !i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 20/32 + case i0 && !i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 21/32 + case i0 && !i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + }{rw, rw, rw, rw} + // combination 22/32 + case i0 && !i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 23/32 + case i0 && !i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 24/32 + case i0 && !i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 25/32 + case i0 && i1 && !i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + }{rw, rw, rw, rw} + // combination 26/32 + case i0 && i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 27/32 + case i0 && i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 28/32 + case i0 && i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 29/32 + case i0 && i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw, rw} + // combination 30/32 + case i0 && i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 31/32 + case i0 && i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw, rw} + // combination 32/32 + case i0 && i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw, rw} + } + panic("unreachable") +} + +type rw struct { + w http.ResponseWriter + h Hooks +} + +func (w *rw) Unwrap() http.ResponseWriter { + return w.w +} + +func (w *rw) Header() http.Header { + f := w.w.(http.ResponseWriter).Header + if w.h.Header != nil { + f = w.h.Header(f) + } + return f() +} + +func (w *rw) WriteHeader(code int) { + f := w.w.(http.ResponseWriter).WriteHeader + if w.h.WriteHeader != nil { + f = w.h.WriteHeader(f) + } + f(code) +} + +func (w *rw) Write(b []byte) (int, error) { + f := w.w.(http.ResponseWriter).Write + if w.h.Write != nil { + f = w.h.Write(f) + } + return f(b) +} + +func (w *rw) Flush() { + f := w.w.(http.Flusher).Flush + if 
w.h.Flush != nil { + f = w.h.Flush(f) + } + f() +} + +func (w *rw) CloseNotify() <-chan bool { + f := w.w.(http.CloseNotifier).CloseNotify + if w.h.CloseNotify != nil { + f = w.h.CloseNotify(f) + } + return f() +} + +func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) { + f := w.w.(http.Hijacker).Hijack + if w.h.Hijack != nil { + f = w.h.Hijack(f) + } + return f() +} + +func (w *rw) ReadFrom(src io.Reader) (int64, error) { + f := w.w.(io.ReaderFrom).ReadFrom + if w.h.ReadFrom != nil { + f = w.h.ReadFrom(f) + } + return f(src) +} + +func (w *rw) Push(target string, opts *http.PushOptions) error { + f := w.w.(http.Pusher).Push + if w.h.Push != nil { + f = w.h.Push(f) + } + return f(target, opts) +} + +type Unwrapper interface { + Unwrap() http.ResponseWriter +} + +// Unwrap returns the underlying http.ResponseWriter from within zero or more +// layers of httpsnoop wrappers. +func Unwrap(w http.ResponseWriter) http.ResponseWriter { + if rw, ok := w.(Unwrapper); ok { + // recurse until rw.Unwrap() returns a non-Unwrapper + return Unwrap(rw.Unwrap()) + } else { + return w + } +} diff --git a/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go b/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go new file mode 100644 index 00000000..e0951df1 --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go @@ -0,0 +1,278 @@ +// +build !go1.8 +// Code generated by "httpsnoop/codegen"; DO NOT EDIT. + +package httpsnoop + +import ( + "bufio" + "io" + "net" + "net/http" +) + +// HeaderFunc is part of the http.ResponseWriter interface. +type HeaderFunc func() http.Header + +// WriteHeaderFunc is part of the http.ResponseWriter interface. +type WriteHeaderFunc func(code int) + +// WriteFunc is part of the http.ResponseWriter interface. +type WriteFunc func(b []byte) (int, error) + +// FlushFunc is part of the http.Flusher interface. +type FlushFunc func() + +// CloseNotifyFunc is part of the http.CloseNotifier interface. +type CloseNotifyFunc func() <-chan bool + +// HijackFunc is part of the http.Hijacker interface. +type HijackFunc func() (net.Conn, *bufio.ReadWriter, error) + +// ReadFromFunc is part of the io.ReaderFrom interface. +type ReadFromFunc func(src io.Reader) (int64, error) + +// Hooks defines a set of method interceptors for methods included in +// http.ResponseWriter as well as some others. You can think of them as +// middleware for the function calls they target. See Wrap for more details. +type Hooks struct { + Header func(HeaderFunc) HeaderFunc + WriteHeader func(WriteHeaderFunc) WriteHeaderFunc + Write func(WriteFunc) WriteFunc + Flush func(FlushFunc) FlushFunc + CloseNotify func(CloseNotifyFunc) CloseNotifyFunc + Hijack func(HijackFunc) HijackFunc + ReadFrom func(ReadFromFunc) ReadFromFunc +} + +// Wrap returns a wrapped version of w that provides the exact same interface +// as w. Specifically if w implements any combination of: +// +// - http.Flusher +// - http.CloseNotifier +// - http.Hijacker +// - io.ReaderFrom +// +// The wrapped version will implement the exact same combination. If no hooks +// are set, the wrapped version also behaves exactly as w. Hooks targeting +// methods not supported by w are ignored. Any other hooks will intercept the +// method they target and may modify the call's arguments and/or return values. +// The CaptureMetrics implementation serves as a working example for how the +// hooks can be used. 
+func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter { + rw := &rw{w: w, h: hooks} + _, i0 := w.(http.Flusher) + _, i1 := w.(http.CloseNotifier) + _, i2 := w.(http.Hijacker) + _, i3 := w.(io.ReaderFrom) + switch { + // combination 1/16 + case !i0 && !i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + }{rw, rw} + // combination 2/16 + case !i0 && !i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + io.ReaderFrom + }{rw, rw, rw} + // combination 3/16 + case !i0 && !i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + }{rw, rw, rw} + // combination 4/16 + case !i0 && !i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 5/16 + case !i0 && i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + }{rw, rw, rw} + // combination 6/16 + case !i0 && i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 7/16 + case !i0 && i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw} + // combination 8/16 + case !i0 && i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 9/16 + case i0 && !i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + }{rw, rw, rw} + // combination 10/16 + case i0 && !i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 11/16 + case i0 && !i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + }{rw, rw, rw, rw} + // combination 12/16 + case i0 && !i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 13/16 + case i0 && i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + }{rw, rw, rw, rw} + // combination 14/16 + case i0 && i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 15/16 + case i0 && i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw, rw} + // combination 16/16 + case i0 && i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw, rw} + } + panic("unreachable") +} + +type rw struct { + w http.ResponseWriter + h Hooks +} + +func (w *rw) Unwrap() http.ResponseWriter { + return w.w +} + +func (w *rw) Header() http.Header { + f := w.w.(http.ResponseWriter).Header + if w.h.Header != nil { + f = w.h.Header(f) + } + return f() +} + +func (w *rw) WriteHeader(code int) { + f := w.w.(http.ResponseWriter).WriteHeader + if w.h.WriteHeader != nil { + f = w.h.WriteHeader(f) + } + f(code) +} + +func (w *rw) Write(b []byte) (int, error) { + f := w.w.(http.ResponseWriter).Write + if w.h.Write != nil { + f = w.h.Write(f) + } + return f(b) +} + +func (w *rw) Flush() { + f := w.w.(http.Flusher).Flush + if w.h.Flush != nil { + f = w.h.Flush(f) + } + f() +} + +func (w *rw) CloseNotify() <-chan bool { + f := 
w.w.(http.CloseNotifier).CloseNotify + if w.h.CloseNotify != nil { + f = w.h.CloseNotify(f) + } + return f() +} + +func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) { + f := w.w.(http.Hijacker).Hijack + if w.h.Hijack != nil { + f = w.h.Hijack(f) + } + return f() +} + +func (w *rw) ReadFrom(src io.Reader) (int64, error) { + f := w.w.(io.ReaderFrom).ReadFrom + if w.h.ReadFrom != nil { + f = w.h.ReadFrom(f) + } + return f(src) +} + +type Unwrapper interface { + Unwrap() http.ResponseWriter +} + +// Unwrap returns the underlying http.ResponseWriter from within zero or more +// layers of httpsnoop wrappers. +func Unwrap(w http.ResponseWriter) http.ResponseWriter { + if rw, ok := w.(Unwrapper); ok { + // recurse until rw.Unwrap() returns a non-Unwrapper + return Unwrap(rw.Unwrap()) + } else { + return w + } +} diff --git a/vendor/github.com/gabriel-vasile/mimetype/README.md b/vendor/github.com/gabriel-vasile/mimetype/README.md index d310928d..231b2919 100644 --- a/vendor/github.com/gabriel-vasile/mimetype/README.md +++ b/vendor/github.com/gabriel-vasile/mimetype/README.md @@ -10,9 +10,6 @@

- - Build Status - Go Reference diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/binary.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/binary.go index 29bdded3..f1e94498 100644 --- a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/binary.go +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/binary.go @@ -150,32 +150,34 @@ func Marc(raw []byte, limit uint32) bool { } // Glb matches a glTF model format file. -// GLB is the binary file format representation of 3D models save in +// GLB is the binary file format representation of 3D models saved in // the GL transmission Format (glTF). -// see more: https://docs.fileformat.com/3d/glb/ -// https://www.iana.org/assignments/media-types/model/gltf-binary -// GLB file format is based on little endian and its header structure -// show below: +// GLB uses little endian and its header structure is as follows: // -// <-- 12-byte header --> -// | magic | version | length | -// | (uint32) | (uint32) | (uint32) | -// | \x67\x6C\x54\x46 | \x01\x00\x00\x00 | ... | -// | g l T F | 1 | ... | +// <-- 12-byte header --> +// | magic | version | length | +// | (uint32) | (uint32) | (uint32) | +// | \x67\x6C\x54\x46 | \x01\x00\x00\x00 | ... | +// | g l T F | 1 | ... | +// +// Visit [glTF specification] and [IANA glTF entry] for more details. +// +// [glTF specification]: https://registry.khronos.org/glTF/specs/2.0/glTF-2.0.html +// [IANA glTF entry]: https://www.iana.org/assignments/media-types/model/gltf-binary var Glb = prefix([]byte("\x67\x6C\x54\x46\x02\x00\x00\x00"), []byte("\x67\x6C\x54\x46\x01\x00\x00\x00")) // TzIf matches a Time Zone Information Format (TZif) file. // See more: https://tools.ietf.org/id/draft-murchison-tzdist-tzif-00.html#rfc.section.3 // Its header structure is shown below: -// +---------------+---+ -// | magic (4) | <-+-- version (1) -// +---------------+---+---------------------------------------+ -// | [unused - reserved for future use] (15) | -// +---------------+---------------+---------------+-----------+ -// | isutccnt (4) | isstdcnt (4) | leapcnt (4) | -// +---------------+---------------+---------------+ -// | timecnt (4) | typecnt (4) | charcnt (4) | +// +---------------+---+ +// | magic (4) | <-+-- version (1) +// +---------------+---+---------------------------------------+ +// | [unused - reserved for future use] (15) | +// +---------------+---------------+---------------+-----------+ +// | isutccnt (4) | isstdcnt (4) | leapcnt (4) | +// +---------------+---------------+---------------+ +// | timecnt (4) | typecnt (4) | charcnt (4) | func TzIf(raw []byte, limit uint32) bool { // File is at least 44 bytes (header size). if len(raw) < 44 { diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/magic.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/magic.go index 466058fb..34b84f40 100644 --- a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/magic.go +++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/magic.go @@ -177,7 +177,9 @@ func newXMLSig(localName, xmlns string) xmlSig { // and, optionally, followed by the arguments for the interpreter. // // Ex: -// #! /usr/bin/env php +// +// #! /usr/bin/env php +// // /usr/bin/env is the interpreter, php is the first and only argument. 
 func shebang(sigs ...[]byte) Detector {
 	return func(raw []byte, limit uint32) bool {
diff --git a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/text_csv.go b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/text_csv.go
index 6a156192..84ed6492 100644
--- a/vendor/github.com/gabriel-vasile/mimetype/internal/magic/text_csv.go
+++ b/vendor/github.com/gabriel-vasile/mimetype/internal/magic/text_csv.go
@@ -3,6 +3,7 @@ package magic
 import (
 	"bytes"
 	"encoding/csv"
+	"errors"
 	"io"
 )
@@ -19,12 +20,23 @@ func Tsv(raw []byte, limit uint32) bool {
 }
 
 func sv(in []byte, comma rune, limit uint32) bool {
 	r := csv.NewReader(dropLastLine(in, limit))
 	r.Comma = comma
-	r.TrimLeadingSpace = true
+	r.ReuseRecord = true
 	r.LazyQuotes = true
 	r.Comment = '#'
 
-	lines, err := r.ReadAll()
-	return err == nil && r.FieldsPerRecord > 1 && len(lines) > 1
+	lines := 0
+	for {
+		_, err := r.Read()
+		if errors.Is(err, io.EOF) {
+			break
+		}
+		if err != nil {
+			return false
+		}
+		lines++
+	}
+
+	return r.FieldsPerRecord > 1 && lines > 1
 }
 
 // dropLastLine drops the last incomplete line from b.
diff --git a/vendor/github.com/gabriel-vasile/mimetype/mimetype.go b/vendor/github.com/gabriel-vasile/mimetype/mimetype.go
index 08e68e33..1b5909b7 100644
--- a/vendor/github.com/gabriel-vasile/mimetype/mimetype.go
+++ b/vendor/github.com/gabriel-vasile/mimetype/mimetype.go
@@ -39,7 +39,8 @@ func Detect(in []byte) *MIME {
 //
 // DetectReader assumes the reader offset is at the start. If the input is an
 // io.ReadSeeker you previously read from, it should be rewound before detection:
-//	reader.Seek(0, io.SeekStart)
+//
+//	reader.Seek(0, io.SeekStart)
 func DetectReader(r io.Reader) (*MIME, error) {
 	var in []byte
 	var err error
diff --git a/vendor/github.com/gabriel-vasile/mimetype/supported_mimes.md b/vendor/github.com/gabriel-vasile/mimetype/supported_mimes.md
index cdec4e67..5ec6f6b6 100644
--- a/vendor/github.com/gabriel-vasile/mimetype/supported_mimes.md
+++ b/vendor/github.com/gabriel-vasile/mimetype/supported_mimes.md
@@ -1,4 +1,4 @@
-## 172 Supported MIME types
+## 173 Supported MIME types
 This file is automatically generated when running tests. Do not edit manually.
 Extension | MIME type | Aliases
diff --git a/vendor/github.com/go-faster/errors/.golangci.yml b/vendor/github.com/go-faster/errors/.golangci.yml
index 17ac1061..430e979e 100644
--- a/vendor/github.com/go-faster/errors/.golangci.yml
+++ b/vendor/github.com/go-faster/errors/.golangci.yml
@@ -37,8 +37,6 @@ linters-settings:
 linters:
   disable-all: true
   enable:
-    - deadcode
-    - depguard
     - dogsled
     - errcheck
     - goconst
@@ -53,13 +51,11 @@ linters:
     - misspell
    - nakedret
     - staticcheck
-    - structcheck
     - stylecheck
     - typecheck
     - unconvert
     - unparam
     - unused
-    - varcheck
     - whitespace
 
 # Do not enable:
diff --git a/vendor/github.com/go-faster/errors/README.md b/vendor/github.com/go-faster/errors/README.md
index 40953c6c..9f21e134 100644
--- a/vendor/github.com/go-faster/errors/README.md
+++ b/vendor/github.com/go-faster/errors/README.md
@@ -16,17 +16,39 @@ errors.Wrap(err, "message")
 * Using `Wrap` is the most explicit way to wrap errors
 * Wrapping with `fmt.Errorf("foo: %w", err)` is implicit, redundant and error-prone
 * Parsing `"foo: %w"` is implicit, redundant and slow
-* The [pkg/errors](https://github.com/pkg/errors) and [xerrrors](https://pkg.go.dev/golang.org/x/xerrors) are not maintainted
+* The [pkg/errors](https://github.com/pkg/errors) and [xerrors](https://pkg.go.dev/golang.org/x/xerrors) are not maintained
 * The [cockroachdb/errors](https://github.com/cockroachdb/errors) is too big
 * The `errors` has no caller stack trace
 
 ## Don't need traces?
 
 Call `errors.DisableTrace` or use build tag `noerrtrace`.
 
-## Migration
+## Additional features
+
+### Into
+
+Generic type assertion for errors.
+
+```go
+// Into finds the first error in err's chain that matches target type T, and if so, returns it.
+//
+// Into is a type-safe alternative to As.
+func Into[T error](err error) (val T, ok bool)
 ```
-go get github.com/go-faster/errors/cmd/gowrapper@latest
-gowrapper ./...
+
+```go
+if pathError, ok := errors.Into[*os.PathError](err); ok {
+	fmt.Println("Failed at path:", pathError.Path)
+}
+```
+
+### Must
+
+Must is a generic helper, like template.Must, that wraps a call to a function returning (T, error)
+and panics if the error is non-nil.
+
+```go
+func Must[T any](val T, err error) T
 ```
 
 ## License
diff --git a/vendor/github.com/go-faster/errors/join_go120.go b/vendor/github.com/go-faster/errors/join_go120.go
new file mode 100644
index 00000000..575d6202
--- /dev/null
+++ b/vendor/github.com/go-faster/errors/join_go120.go
@@ -0,0 +1,20 @@
+//go:build go1.20
+// +build go1.20
+
+package errors
+
+import "errors"
+
+// Join returns an error that wraps the given errors.
+// Any nil error values are discarded.
+// Join returns nil if every value in errs is nil.
+// The error formats as the concatenation of the strings obtained
+// by calling the Error method of each element of errs, with a newline
+// between each string.
+//
+// A non-nil error returned by Join implements the Unwrap() []error method.
+//
+// Available only for Go 1.20 or later.
+func Join(errs ...error) error {
+	return errors.Join(errs...)
+}
diff --git a/vendor/github.com/go-logfmt/logfmt/CHANGELOG.md b/vendor/github.com/go-logfmt/logfmt/CHANGELOG.md
index 1a9a27bc..8f349c4b 100644
--- a/vendor/github.com/go-logfmt/logfmt/CHANGELOG.md
+++ b/vendor/github.com/go-logfmt/logfmt/CHANGELOG.md
@@ -1,48 +1,82 @@
 # Changelog
+
 All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [0.6.0] - 2023-01-30 + +[0.6.0]: https://github.com/go-logfmt/logfmt/compare/v0.5.1...v0.6.0 + +### Added + +- NewDecoderSize by [@alexanderjophus] + +## [0.5.1] - 2021-08-18 + +[0.5.1]: https://github.com/go-logfmt/logfmt/compare/v0.5.0...v0.5.1 + +### Changed + +- Update the `go.mod` file for Go 1.17 as described in the [Go 1.17 release + notes](https://golang.org/doc/go1.17#go-command) + ## [0.5.0] - 2020-01-03 +[0.5.0]: https://github.com/go-logfmt/logfmt/compare/v0.4.0...v0.5.0 + ### Changed + - Remove the dependency on github.com/kr/logfmt by [@ChrisHines] - Move fuzz code to github.com/go-logfmt/fuzzlogfmt by [@ChrisHines] ## [0.4.0] - 2018-11-21 +[0.4.0]: https://github.com/go-logfmt/logfmt/compare/v0.3.0...v0.4.0 + ### Added + - Go module support by [@ChrisHines] - CHANGELOG by [@ChrisHines] ### Changed + - Drop invalid runes from keys instead of returning ErrInvalidKey by [@ChrisHines] - On panic while printing, attempt to print panic value by [@bboreham] ## [0.3.0] - 2016-11-15 + +[0.3.0]: https://github.com/go-logfmt/logfmt/compare/v0.2.0...v0.3.0 + ### Added + - Pool buffers for quoted strings and byte slices by [@nussjustin] + ### Fixed + - Fuzz fix, quote invalid UTF-8 values by [@judwhite] ## [0.2.0] - 2016-05-08 + +[0.2.0]: https://github.com/go-logfmt/logfmt/compare/v0.1.0...v0.2.0 + ### Added + - Encoder.EncodeKeyvals by [@ChrisHines] ## [0.1.0] - 2016-03-28 + +[0.1.0]: https://github.com/go-logfmt/logfmt/commits/v0.1.0 + ### Added + - Encoder by [@ChrisHines] - Decoder by [@ChrisHines] - MarshalKeyvals by [@ChrisHines] -[0.5.0]: https://github.com/go-logfmt/logfmt/compare/v0.4.0...v0.5.0 -[0.4.0]: https://github.com/go-logfmt/logfmt/compare/v0.3.0...v0.4.0 -[0.3.0]: https://github.com/go-logfmt/logfmt/compare/v0.2.0...v0.3.0 -[0.2.0]: https://github.com/go-logfmt/logfmt/compare/v0.1.0...v0.2.0 -[0.1.0]: https://github.com/go-logfmt/logfmt/commits/v0.1.0 - [@ChrisHines]: https://github.com/ChrisHines [@bboreham]: https://github.com/bboreham [@judwhite]: https://github.com/judwhite [@nussjustin]: https://github.com/nussjustin +[@alexanderjophus]: https://github.com/alexanderjophus diff --git a/vendor/github.com/go-logfmt/logfmt/README.md b/vendor/github.com/go-logfmt/logfmt/README.md index 8e48fcd3..71c57944 100644 --- a/vendor/github.com/go-logfmt/logfmt/README.md +++ b/vendor/github.com/go-logfmt/logfmt/README.md @@ -1,20 +1,25 @@ +# logfmt + [![Go Reference](https://pkg.go.dev/badge/github.com/go-logfmt/logfmt.svg)](https://pkg.go.dev/github.com/go-logfmt/logfmt) [![Go Report Card](https://goreportcard.com/badge/go-logfmt/logfmt)](https://goreportcard.com/report/go-logfmt/logfmt) [![Github Actions](https://github.com/go-logfmt/logfmt/actions/workflows/test.yml/badge.svg)](https://github.com/go-logfmt/logfmt/actions/workflows/test.yml) -[![Coverage Status](https://coveralls.io/repos/github/go-logfmt/logfmt/badge.svg?branch=master)](https://coveralls.io/github/go-logfmt/logfmt?branch=master) - -# logfmt +[![Coverage Status](https://coveralls.io/repos/github/go-logfmt/logfmt/badge.svg?branch=master)](https://coveralls.io/github/go-logfmt/logfmt?branch=main) Package logfmt implements utilities to marshal and unmarshal data in the [logfmt -format](https://brandur.org/logfmt). 
It provides an API similar to -[encoding/json](http://golang.org/pkg/encoding/json/) and -[encoding/xml](http://golang.org/pkg/encoding/xml/). +format][fmt]. It provides an API similar to [encoding/json][json] and +[encoding/xml][xml]. + +[fmt]: https://brandur.org/logfmt +[json]: https://pkg.go.dev/encoding/json +[xml]: https://pkg.go.dev/encoding/xml The logfmt format was first documented by Brandur Leach in [this -article](https://brandur.org/logfmt). The format has not been formally -standardized. The most authoritative public specification to date has been the -documentation of a Go Language [package](http://godoc.org/github.com/kr/logfmt) -written by Blake Mizerany and Keith Rarick. +article][origin]. The format has not been formally standardized. The most +authoritative public specification to date has been the documentation of a Go +Language [package][parser] written by Blake Mizerany and Keith Rarick. + +[origin]: https://brandur.org/logfmt +[parser]: https://pkg.go.dev/github.com/kr/logfmt ## Goals @@ -30,4 +35,7 @@ standard as a goal. ## Versioning -Package logfmt publishes releases via [semver](http://semver.org/) compatible Git tags prefixed with a single 'v'. +This project publishes releases according to the Go language guidelines for +[developing and publishing modules][pub]. + +[pub]: https://go.dev/doc/modules/developing diff --git a/vendor/github.com/go-logfmt/logfmt/decode.go b/vendor/github.com/go-logfmt/logfmt/decode.go index 2013708e..a1c22dcb 100644 --- a/vendor/github.com/go-logfmt/logfmt/decode.go +++ b/vendor/github.com/go-logfmt/logfmt/decode.go @@ -29,6 +29,23 @@ func NewDecoder(r io.Reader) *Decoder { return dec } +// NewDecoderSize returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may read data from r beyond +// the logfmt records requested. +// The size argument specifies the size of the initial buffer that the +// Decoder will use to read records from r. +// If a log line is longer than the size argument, the Decoder will return +// a bufio.ErrTooLong error. +func NewDecoderSize(r io.Reader, size int) *Decoder { + scanner := bufio.NewScanner(r) + scanner.Buffer(make([]byte, 0, size), size) + dec := &Decoder{ + s: scanner, + } + return dec +} + // ScanRecord advances the Decoder to the next record, which can then be // parsed with the ScanKeyval method. It returns false when decoding stops, // either by reaching the end of the input or an error. After ScanRecord diff --git a/vendor/github.com/go-logr/logr/.golangci.yaml b/vendor/github.com/go-logr/logr/.golangci.yaml new file mode 100644 index 00000000..0cffafa7 --- /dev/null +++ b/vendor/github.com/go-logr/logr/.golangci.yaml @@ -0,0 +1,26 @@ +run: + timeout: 1m + tests: true + +linters: + disable-all: true + enable: + - asciicheck + - errcheck + - forcetypeassert + - gocritic + - gofmt + - goimports + - gosimple + - govet + - ineffassign + - misspell + - revive + - staticcheck + - typecheck + - unused + +issues: + exclude-use-default: false + max-issues-per-linter: 0 + max-same-issues: 10 diff --git a/vendor/github.com/go-logr/logr/CHANGELOG.md b/vendor/github.com/go-logr/logr/CHANGELOG.md new file mode 100644 index 00000000..c3569600 --- /dev/null +++ b/vendor/github.com/go-logr/logr/CHANGELOG.md @@ -0,0 +1,6 @@ +# CHANGELOG + +## v1.0.0-rc1 + +This is the first logged release. Major changes (including breaking changes) +have occurred since earlier tags. 
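A minimal usage sketch for the `NewDecoderSize` decoder added to logfmt's
`decode.go` above (the input line and the one-megabyte buffer cap are
illustrative assumptions, not part of the patch):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/go-logfmt/logfmt"
)

func main() {
	in := strings.NewReader(`level=info msg="hello world"`)
	// Cap individual log lines at 1 MiB; longer lines yield bufio.ErrTooLong.
	dec := logfmt.NewDecoderSize(in, 1<<20)
	for dec.ScanRecord() {
		for dec.ScanKeyval() {
			fmt.Printf("%s=%s ", dec.Key(), dec.Value())
		}
		fmt.Println()
	}
	if err := dec.Err(); err != nil {
		fmt.Println("decode error:", err)
	}
}
```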
diff --git a/vendor/github.com/go-logr/logr/CONTRIBUTING.md b/vendor/github.com/go-logr/logr/CONTRIBUTING.md new file mode 100644 index 00000000..5d37e294 --- /dev/null +++ b/vendor/github.com/go-logr/logr/CONTRIBUTING.md @@ -0,0 +1,17 @@ +# Contributing + +Logr is open to pull-requests, provided they fit within the intended scope of +the project. Specifically, this library aims to be VERY small and minimalist, +with no external dependencies. + +## Compatibility + +This project intends to follow [semantic versioning](http://semver.org) and +is very strict about compatibility. Any proposed changes MUST follow those +rules. + +## Performance + +As a logging library, logr must be as light-weight as possible. Any proposed +code change must include results of running the [benchmark](./benchmark) +before and after the change. diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE b/vendor/github.com/go-logr/logr/LICENSE similarity index 100% rename from vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE rename to vendor/github.com/go-logr/logr/LICENSE diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md new file mode 100644 index 00000000..8969526a --- /dev/null +++ b/vendor/github.com/go-logr/logr/README.md @@ -0,0 +1,406 @@ +# A minimal logging API for Go + +[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr) +[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/go-logr/logr/badge)](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr) + +logr offers an(other) opinion on how Go programs and libraries can do logging +without becoming coupled to a particular logging implementation. This is not +an implementation of logging - it is an API. In fact it is two APIs with two +different sets of users. + +The `Logger` type is intended for application and library authors. It provides +a relatively small API which can be used everywhere you want to emit logs. It +defers the actual act of writing logs (to files, to stdout, or whatever) to the +`LogSink` interface. + +The `LogSink` interface is intended for logging library implementers. It is a +pure interface which can be implemented by logging frameworks to provide the actual logging +functionality. + +This decoupling allows application and library developers to write code in +terms of `logr.Logger` (which has very low dependency fan-out) while the +implementation of logging is managed "up stack" (e.g. in or near `main()`.) +Application developers can then switch out implementations as necessary. + +Many people assert that libraries should not be logging, and as such efforts +like this are pointless. Those people are welcome to convince the authors of +the tens-of-thousands of libraries that *DO* write logs that they are all +wrong. In the meantime, logr takes a more practical approach. + +## Typical usage + +Somewhere, early in an application's life, it will make a decision about which +logging library (implementation) it actually wants to use. Something like: + +``` + func main() { + // ... other setup code ... + + // Create the "root" logger. We have chosen the "logimpl" implementation, + // which takes some initial parameters and returns a logr.Logger. + logger := logimpl.New(param1, param2) + + // ... other setup code ... +``` + +Most apps will call into other libraries, create structures to govern the flow, +etc. 
The `logr.Logger` object can be passed to these other libraries, stored +in structs, or even used as a package-global variable, if needed. For example: + +``` + app := createTheAppObject(logger) + app.Run() +``` + +Outside of this early setup, no other packages need to know about the choice of +implementation. They write logs in terms of the `logr.Logger` that they +received: + +``` + type appObject struct { + // ... other fields ... + logger logr.Logger + // ... other fields ... + } + + func (app *appObject) Run() { + app.logger.Info("starting up", "timestamp", time.Now()) + + // ... app code ... +``` + +## Background + +If the Go standard library had defined an interface for logging, this project +probably would not be needed. Alas, here we are. + +When the Go developers started developing such an interface with +[slog](https://github.com/golang/go/issues/56345), they adopted some of the +logr design but also left out some parts and changed others: + +| Feature | logr | slog | +|---------|------|------| +| High-level API | `Logger` (passed by value) | `Logger` (passed by [pointer](https://github.com/golang/go/issues/59126)) | +| Low-level API | `LogSink` | `Handler` | +| Stack unwinding | done by `LogSink` | done by `Logger` | +| Skipping helper functions | `WithCallDepth`, `WithCallStackHelper` | [not supported by Logger](https://github.com/golang/go/issues/59145) | +| Generating a value for logging on demand | `Marshaler` | `LogValuer` | +| Log levels | >= 0, higher meaning "less important" | positive and negative, with 0 for "info" and higher meaning "more important" | +| Error log entries | always logged, don't have a verbosity level | normal log entries with level >= `LevelError` | +| Passing logger via context | `NewContext`, `FromContext` | no API | +| Adding a name to a logger | `WithName` | no API | +| Modify verbosity of log entries in a call chain | `V` | no API | +| Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` | +| Pass context for extracting additional values | no API | API variants like `InfoCtx` | + +The high-level slog API is explicitly meant to be one of many different APIs +that can be layered on top of a shared `slog.Handler`. logr is one such +alternative API, with [interoperability](#slog-interoperability) provided by +some conversion functions. + +### Inspiration + +Before you consider this package, please read [this blog post by the +inimitable Dave Cheney][warning-makes-no-sense]. We really appreciate what +he has to say, and it largely aligns with our own experiences. + +### Differences from Dave's ideas + +The main differences are: + +1. Dave basically proposes doing away with the notion of a logging API in favor +of `fmt.Printf()`. We disagree, especially when you consider things like output +locations, timestamps, file and line decorations, and structured logging. This +package restricts the logging API to just 2 types of logs: info and error. + +Info logs are things you want to tell the user which are not errors. Error +logs are, well, errors. If your code receives an `error` from a subordinate +function call and is logging that `error` *and not returning it*, use error +logs. + +2. Verbosity-levels on info logs. This gives developers a chance to indicate +arbitrary grades of importance for info logs, without assigning names with +semantic meaning such as "warning", "trace", and "debug." Superficially this +may feel very similar, but the primary difference is the lack of semantics. 
+Because verbosity is a numerical value, it's safe to assume that an app running
+with higher verbosity means more (and less important) logs will be generated.
+
+## Implementations (non-exhaustive)
+
+There are implementations for the following logging libraries:
+
+- **a function** (can bridge to non-structured libraries): [funcr](https://github.com/go-logr/logr/tree/master/funcr)
+- **a testing.T** (for use in Go tests, with JSON-like output): [testr](https://github.com/go-logr/logr/tree/master/testr)
+- **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr)
+- **k8s.io/klog** (for Kubernetes): [klogr](https://git.k8s.io/klog/klogr)
+- **a testing.T** (with klog-like text output): [ktesting](https://git.k8s.io/klog/ktesting)
+- **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr)
+- **log** (the Go standard library logger): [stdr](https://github.com/go-logr/stdr)
+- **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr)
+- **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend)
+- **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr)
+- **github.com/rs/zerolog**: [zerologr](https://github.com/go-logr/zerologr)
+- **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0)
+- **bytes.Buffer** (writing to a buffer): [buflogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing)
+
+## slog interoperability
+
+Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler`
+and using the `slog.Logger` API with a `logr.LogSink`. `FromSlogHandler` and
+`ToSlogHandler` convert between a `logr.Logger` and a `slog.Handler`.
+As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level
+slog API.
+
+### Using a `logr.LogSink` as backend for slog
+
+Ideally, a logr sink implementation should support both logr and slog by
+implementing both the normal logr interface(s) and `SlogSink`. Because
+of a conflict in the parameters of the common `Enabled` method, it is [not
+possible to implement both slog.Handler and logr.LogSink in the same
+type](https://github.com/golang/go/issues/59110).
+
+If both are supported, log calls can go from the high-level APIs to the backend
+without the need to convert parameters. `FromSlogHandler` and `ToSlogHandler` can
+convert back and forth without adding additional wrappers, with one exception:
+when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then
+`ToSlogHandler` has to use a wrapper which adjusts the verbosity for future
+log calls.
+
+Such an implementation should also support values that implement specific
+interfaces from both packages for logging (`logr.Marshaler`, `slog.LogValuer`,
+`slog.GroupValue`). logr does not convert those.
+
+Not supporting slog has several drawbacks:
+- Recording source code locations works correctly if the handler gets called
+  through `slog.Logger`, but may be wrong in other cases. That's because a
+  `logr.LogSink` does its own stack unwinding instead of using the program
+  counter provided by the high-level API.
+- slog levels <= 0 can be mapped to logr levels by negating the level without a
+  loss of information. But all slog levels > 0 (e.g. `slog.LevelWarn` as used
+  by `slog.Logger.Warn`) must be mapped to 0 before calling the sink because
+  logr does not support "more important than info" levels.
+- The slog group concept is supported by prefixing each key in a key/value
+  pair with the group names, separated by a dot. For structured output like
+  JSON it would be better to group the key/value pairs inside an object.
+- Special slog values and interfaces don't work as expected.
+- The overhead is likely to be higher.
+
+These drawbacks are severe enough that applications using a mixture of slog and
+logr should switch to a different backend.
+
+### Using a `slog.Handler` as backend for logr
+
+Using a plain `slog.Handler` without support for logr works better than the
+other direction:
+- All logr verbosity levels can be mapped 1:1 to their corresponding slog level
+  by negating them.
+- Stack unwinding is done by the `SlogSink` and the resulting program
+  counter is passed to the `slog.Handler`.
+- Names added via `Logger.WithName` are gathered and recorded in an additional
+  attribute with `logger` as key and the names separated by slash as value.
+- `Logger.Error` is turned into a log record with `slog.LevelError` as level
+  and an additional attribute with `err` as key, if an error was provided.
+
+The main drawback is that `logr.Marshaler` will not be supported. Types should
+ideally support both `logr.Marshaler` and `slog.LogValuer`. If compatibility
+with logr implementations without slog support is not important, then
+`slog.LogValuer` is sufficient.
+
+### Context support for slog
+
+Storing a logger in a `context.Context` is not supported by
+slog. `NewContextWithSlogLogger` and `FromContextAsSlogLogger` can be
+used to fill this gap. They store and retrieve a `slog.Logger` pointer
+under the same context key that is also used by `NewContext` and
+`FromContext` for a `logr.Logger` value.
+
+When `NewContextWithSlogLogger` is followed by `FromContext`, the latter will
+automatically convert the `slog.Logger` to a
+`logr.Logger`. `FromContextAsSlogLogger` does the same for the other direction.
+
+With this approach, binaries which use either slog or logr are as efficient as
+possible with no unnecessary allocations. This is also why the API stores a
+`slog.Logger` pointer: when storing a `slog.Handler`, creating a `slog.Logger`
+on retrieval would need to allocate one.
+
+The downside is that switching back and forth needs more allocations. Because
+logr is the API that is already in use by different packages, in particular
+Kubernetes, the recommendation is to use the `logr.Logger` API in code which
+uses contextual logging.
+
+An alternative to adding values to a logger and storing that logger in the
+context is to store the values in the context and to configure a logging
+backend to extract those values when emitting log entries. This only works when
+log calls are passed the context, which is not supported by the logr API.
+
+With the slog API, it is possible, but not
+required. https://github.com/veqryn/slog-context is a package for slog which
+provides additional support code for this approach. It also contains wrappers
+for the context functions in logr, so developers who prefer to not use the logr
+APIs directly can use those instead and the resulting code will still be
+interoperable with logr.
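+
+To make the conversions concrete, here is a minimal sketch. It assumes Go >=
+1.21; the JSON handler and the `demo` function name are illustrative choices,
+not recommendations:
+
+```
+  import (
+      "log/slog"
+      "os"
+
+      "github.com/go-logr/logr"
+  )
+
+  func demo() {
+      // Wrap an existing slog.Handler for use through the logr API.
+      logger := logr.FromSlogHandler(slog.NewJSONHandler(os.Stdout, nil))
+      logger.V(4).Info("debug message") // mapped to slog.LevelDebug (-4) and
+                                        // filtered by the handler's default level
+
+      // And the other direction: drive a logr.Logger through the slog API.
+      slogger := slog.New(logr.ToSlogHandler(logger))
+      slogger.Info("info message")
+  }
+```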
+
+## FAQ
+
+### Conceptual
+
+#### Why structured logging?
+
+- **Structured logs are more easily queryable**: Since you've got
+  key-value pairs, it's much easier to query your structured logs for
+  particular values by filtering on the contents of a particular key --
+  think searching request logs for error codes, Kubernetes reconcilers for
+  the name and namespace of the reconciled object, etc.
+
+- **Structured logging makes it easier to have cross-referenceable logs**:
+  Similarly to searchability, if you maintain conventions around your
+  keys, it becomes easy to gather all log lines related to a particular
+  concept.
+
+- **Structured logs allow better dimensions of filtering**: if you have
+  structure to your logs, you've got more precise control over how much
+  information is logged -- you might choose in a particular configuration
+  to log certain keys but not others, only log lines where a certain key
+  matches a certain value, etc., instead of just having v-levels and names
+  to key off of.
+
+- **Structured logs better represent structured data**: sometimes, the
+  data that you want to log is inherently structured (think tuple-like
+  objects.) Structured logs allow you to preserve that structure when
+  outputting.
+
+#### Why V-levels?
+
+**V-levels give operators an easy way to control the chattiness of log
+operations**. V-levels provide a way for a given package to distinguish
+the relative importance or verbosity of a given log message. Then, if
+a particular logger or package is logging too many messages, the user
+of the package can simply change the v-levels for that library.
+
+#### Why not named levels, like Info/Warning/Error?
+
+Read [Dave Cheney's post][warning-makes-no-sense]. Then read [Differences
+from Dave's ideas](#differences-from-daves-ideas).
+
+#### Why not allow format strings, too?
+
+**Format strings negate many of the benefits of structured logs**:
+
+- They're not easily searchable without resorting to fuzzy searching,
+  regular expressions, etc.
+
+- They don't store structured data well, since contents are flattened into
+  a string.
+
+- They're not cross-referenceable.
+
+- They don't compress easily, since the message is not constant.
+
+(Unless you turn positional parameters into key-value pairs with numerical
+keys, at which point you've gotten key-value logging with meaningless
+keys.)
+
+### Practical
+
+#### Why key-value pairs, and not a map?
+
+Key-value pairs are *much* easier to optimize, especially around
+allocations. Zap (a structured logger that inspired logr's interface) has
+[performance measurements](https://github.com/uber-go/zap#performance)
+that show this quite nicely.
+
+While the interface ends up being a little less obvious, you get
+potentially better performance, plus avoid making users type
+`map[string]string{}` every time they want to log.
+
+#### What if my V-levels differ between libraries?
+
+That's fine. Control your V-levels on a per-logger basis, and use the
+`WithName` method to pass different loggers to different libraries.
+
+Generally, you should take care to ensure that you have relatively
+consistent V-levels within a given logger, however, as this makes deciding
+on what verbosity of logs to request easier.
+
+#### But I really want to use a format string!
+
+That's not actually a question. Assuming your question is "how do
+I convert my mental model of logging with format strings to logging with
+constant messages":
+
+1. Figure out what the error actually is, as you'd write in a TL;DR style,
+   and use that as a message.
+2. For every place you'd write a format specifier, look to the word before
+   it, and add that as a key-value pair.
+
+For instance, consider the following examples (all taken from spots in the
+Kubernetes codebase):
+
+- `klog.V(4).Infof("Client is returning errors: code %v, error %v",
+  responseCode, err)` becomes `logger.Error(err, "client returned an
+  error", "code", responseCode)`
+
+- `klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v",
+  seconds, retries, url)` becomes `logger.V(4).Info("got a retry-after
+  response when requesting url", "attempt", retries, "after
+  seconds", seconds, "url", url)`
+
+If you *really* must use a format string, use it in a key's value, and
+call `fmt.Sprintf` yourself. For instance: `log.Printf("unable to
+reflect over type %T", thing)` becomes `logger.Info("unable to reflect over
+type", "type", fmt.Sprintf("%T", thing))`. In general though, the cases where
+this is necessary should be few and far between.
+
+#### How do I choose my V-levels?
+
+This is basically the only hard constraint: increase V-levels to denote
+more verbose or more debug-y logs.
+
+Otherwise, you can start out with `0` as "you always want to see this",
+`1` as "common logging that you might *possibly* want to turn off", and
+`10` as "I would like to performance-test your log collection stack."
+
+Then gradually choose levels in between as you need them, working your way
+down from 10 (for debug and trace style logs) and up from 1 (for chattier
+info-type logs). For reference, slog pre-defines -4 for debug logs
+(corresponds to 4 in logr), which matches what is
+[recommended for Kubernetes](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md#what-method-to-use).
+
+#### How do I choose my keys?
+
+Keys are fairly flexible, and can hold more or less any string
+value. For best compatibility with implementations and consistency
+with existing code in other projects, there are a few conventions you
+should consider.
+
+- Make your keys human-readable.
+- Constant keys are generally a good idea.
+- Be consistent across your codebase.
+- Keys should naturally match parts of the message string.
+- Use lower case for simple keys and
+  [lowerCamelCase](https://en.wiktionary.org/wiki/lowerCamelCase) for
+  more complex ones. Kubernetes is one example of a project that has
+  [adopted that
+  convention](https://github.com/kubernetes/community/blob/HEAD/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments).
+
+While key names are mostly unrestricted (and spaces are acceptable),
+it's generally a good idea to stick to printable ASCII characters, or at
+least match the general character set of your log lines.
+
+#### Why should keys be constant values?
+
+The point of structured logging is to make later log processing easier. Your
+keys are, effectively, the schema of each log message. If you use different
+keys across instances of the same log line, you will make your structured logs
+much harder to use. `Sprintf()` is for values, not for keys!
+
+#### Why is this not a pure interface?
+
+The Logger type is implemented as a struct in order to allow the Go compiler to
+optimize things like high-V `Info` logs that are not triggered. Not all of
+these optimizations are implemented yet, but this structure was suggested as
+a way to ensure they *can* be implemented. All of the real work is behind the
+`LogSink` interface.
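+
+To see these pieces working together, here is a minimal, runnable sketch that
+uses the in-repo funcr implementation; the writer function, names, and option
+values are all illustrative, not recommendations:
+
+```
+  package main
+
+  import (
+      "fmt"
+
+      "github.com/go-logr/logr/funcr"
+  )
+
+  func main() {
+      // funcr turns an arbitrary "write" function into a LogSink.
+      logger := funcr.New(func(prefix, args string) {
+          fmt.Println(prefix, args)
+      }, funcr.Options{Verbosity: 1})
+
+      logger = logger.WithName("demo").WithValues("instance", 42)
+      logger.Info("starting", "user", "alice") // V(0) <= Verbosity: written
+      logger.V(1).Info("details")              // V(1) <= Verbosity: written
+      logger.V(2).Info("noisy details")        // V(2) >  Verbosity: discarded
+  }
+```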
+ +[warning-makes-no-sense]: http://dave.cheney.net/2015/11/05/lets-talk-about-logging diff --git a/vendor/github.com/go-logr/logr/SECURITY.md b/vendor/github.com/go-logr/logr/SECURITY.md new file mode 100644 index 00000000..1ca756fc --- /dev/null +++ b/vendor/github.com/go-logr/logr/SECURITY.md @@ -0,0 +1,18 @@ +# Security Policy + +If you have discovered a security vulnerability in this project, please report it +privately. **Do not disclose it as a public issue.** This gives us time to work with you +to fix the issue before public exposure, reducing the chance that the exploit will be +used before a patch is released. + +You may submit the report in the following ways: + +- send an email to go-logr-security@googlegroups.com +- send us a [private vulnerability report](https://github.com/go-logr/logr/security/advisories/new) + +Please provide the following information in your report: + +- A description of the vulnerability and its impact +- How to reproduce the issue + +We ask that you give us 90 days to work on a fix before public exposure. diff --git a/vendor/github.com/go-logr/logr/context.go b/vendor/github.com/go-logr/logr/context.go new file mode 100644 index 00000000..de8bcc3a --- /dev/null +++ b/vendor/github.com/go-logr/logr/context.go @@ -0,0 +1,33 @@ +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +// contextKey is how we find Loggers in a context.Context. With Go < 1.21, +// the value is always a Logger value. With Go >= 1.21, the value can be a +// Logger value or a slog.Logger pointer. +type contextKey struct{} + +// notFoundError exists to carry an IsNotFound method. +type notFoundError struct{} + +func (notFoundError) Error() string { + return "no logr.Logger was present" +} + +func (notFoundError) IsNotFound() bool { + return true +} diff --git a/vendor/github.com/go-logr/logr/context_noslog.go b/vendor/github.com/go-logr/logr/context_noslog.go new file mode 100644 index 00000000..f012f9a1 --- /dev/null +++ b/vendor/github.com/go-logr/logr/context_noslog.go @@ -0,0 +1,49 @@ +//go:build !go1.21 +// +build !go1.21 + +/* +Copyright 2019 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +import ( + "context" +) + +// FromContext returns a Logger from ctx or an error if no Logger is found. +func FromContext(ctx context.Context) (Logger, error) { + if v, ok := ctx.Value(contextKey{}).(Logger); ok { + return v, nil + } + + return Logger{}, notFoundError{} +} + +// FromContextOrDiscard returns a Logger from ctx. 
If no Logger is found, this +// returns a Logger that discards all log messages. +func FromContextOrDiscard(ctx context.Context) Logger { + if v, ok := ctx.Value(contextKey{}).(Logger); ok { + return v + } + + return Discard() +} + +// NewContext returns a new Context, derived from ctx, which carries the +// provided Logger. +func NewContext(ctx context.Context, logger Logger) context.Context { + return context.WithValue(ctx, contextKey{}, logger) +} diff --git a/vendor/github.com/go-logr/logr/context_slog.go b/vendor/github.com/go-logr/logr/context_slog.go new file mode 100644 index 00000000..065ef0b8 --- /dev/null +++ b/vendor/github.com/go-logr/logr/context_slog.go @@ -0,0 +1,83 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2019 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +import ( + "context" + "fmt" + "log/slog" +) + +// FromContext returns a Logger from ctx or an error if no Logger is found. +func FromContext(ctx context.Context) (Logger, error) { + v := ctx.Value(contextKey{}) + if v == nil { + return Logger{}, notFoundError{} + } + + switch v := v.(type) { + case Logger: + return v, nil + case *slog.Logger: + return FromSlogHandler(v.Handler()), nil + default: + // Not reached. + panic(fmt.Sprintf("unexpected value type for logr context key: %T", v)) + } +} + +// FromContextAsSlogLogger returns a slog.Logger from ctx or nil if no such Logger is found. +func FromContextAsSlogLogger(ctx context.Context) *slog.Logger { + v := ctx.Value(contextKey{}) + if v == nil { + return nil + } + + switch v := v.(type) { + case Logger: + return slog.New(ToSlogHandler(v)) + case *slog.Logger: + return v + default: + // Not reached. + panic(fmt.Sprintf("unexpected value type for logr context key: %T", v)) + } +} + +// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this +// returns a Logger that discards all log messages. +func FromContextOrDiscard(ctx context.Context) Logger { + if logger, err := FromContext(ctx); err == nil { + return logger + } + return Discard() +} + +// NewContext returns a new Context, derived from ctx, which carries the +// provided Logger. +func NewContext(ctx context.Context, logger Logger) context.Context { + return context.WithValue(ctx, contextKey{}, logger) +} + +// NewContextWithSlogLogger returns a new Context, derived from ctx, which carries the +// provided slog.Logger. +func NewContextWithSlogLogger(ctx context.Context, logger *slog.Logger) context.Context { + return context.WithValue(ctx, contextKey{}, logger) +} diff --git a/vendor/github.com/go-logr/logr/discard.go b/vendor/github.com/go-logr/logr/discard.go new file mode 100644 index 00000000..99fe8be9 --- /dev/null +++ b/vendor/github.com/go-logr/logr/discard.go @@ -0,0 +1,24 @@ +/* +Copyright 2020 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +// Discard returns a Logger that discards all messages logged to it. It can be +// used whenever the caller is not interested in the logs. Logger instances +// produced by this function always compare as equal. +func Discard() Logger { + return New(nil) +} diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go new file mode 100644 index 00000000..fb2f866f --- /dev/null +++ b/vendor/github.com/go-logr/logr/funcr/funcr.go @@ -0,0 +1,911 @@ +/* +Copyright 2021 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package funcr implements formatting of structured log messages and +// optionally captures the call site and timestamp. +// +// The simplest way to use it is via its implementation of a +// github.com/go-logr/logr.LogSink with output through an arbitrary +// "write" function. See New and NewJSON for details. +// +// # Custom LogSinks +// +// For users who need more control, a funcr.Formatter can be embedded inside +// your own custom LogSink implementation. This is useful when the LogSink +// needs to implement additional methods, for example. +// +// # Formatting +// +// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for +// values which are being logged. When rendering a struct, funcr will use Go's +// standard JSON tags (all except "string"). +package funcr + +import ( + "bytes" + "encoding" + "encoding/json" + "fmt" + "path/filepath" + "reflect" + "runtime" + "strconv" + "strings" + "time" + + "github.com/go-logr/logr" +) + +// New returns a logr.Logger which is implemented by an arbitrary function. +func New(fn func(prefix, args string), opts Options) logr.Logger { + return logr.New(newSink(fn, NewFormatter(opts))) +} + +// NewJSON returns a logr.Logger which is implemented by an arbitrary function +// and produces JSON output. +func NewJSON(fn func(obj string), opts Options) logr.Logger { + fnWrapper := func(_, obj string) { + fn(obj) + } + return logr.New(newSink(fnWrapper, NewFormatterJSON(opts))) +} + +// Underlier exposes access to the underlying logging function. Since +// callers only have a logr.Logger, they have to know which +// implementation is in use, so this interface is less of an +// abstraction and more of a way to test type conversion. +type Underlier interface { + GetUnderlying() func(prefix, args string) +} + +func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink { + l := &fnlogger{ + Formatter: formatter, + write: fn, + } + // For skipping fnlogger.Info and fnlogger.Error. 
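+	// The extra frame is consumed later by Formatter.caller, which calls
+	// runtime.Caller(f.depth + 2).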
+ l.Formatter.AddCallDepth(1) + return l +} + +// Options carries parameters which influence the way logs are generated. +type Options struct { + // LogCaller tells funcr to add a "caller" key to some or all log lines. + // This has some overhead, so some users might not want it. + LogCaller MessageClass + + // LogCallerFunc tells funcr to also log the calling function name. This + // has no effect if caller logging is not enabled (see Options.LogCaller). + LogCallerFunc bool + + // LogTimestamp tells funcr to add a "ts" key to log lines. This has some + // overhead, so some users might not want it. + LogTimestamp bool + + // TimestampFormat tells funcr how to render timestamps when LogTimestamp + // is enabled. If not specified, a default format will be used. For more + // details, see docs for Go's time.Layout. + TimestampFormat string + + // LogInfoLevel tells funcr what key to use to log the info level. + // If not specified, the info level will be logged as "level". + // If this is set to "", the info level will not be logged at all. + LogInfoLevel *string + + // Verbosity tells funcr which V logs to produce. Higher values enable + // more logs. Info logs at or below this level will be written, while logs + // above this level will be discarded. + Verbosity int + + // RenderBuiltinsHook allows users to mutate the list of key-value pairs + // while a log line is being rendered. The kvList argument follows logr + // conventions - each pair of slice elements is comprised of a string key + // and an arbitrary value (verified and sanitized before calling this + // hook). The value returned must follow the same conventions. This hook + // can be used to audit or modify logged data. For example, you might want + // to prefix all of funcr's built-in keys with some string. This hook is + // only called for built-in (provided by funcr itself) key-value pairs. + // Equivalent hooks are offered for key-value pairs saved via + // logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and + // for user-provided pairs (see RenderArgsHook). + RenderBuiltinsHook func(kvList []any) []any + + // RenderValuesHook is the same as RenderBuiltinsHook, except that it is + // only called for key-value pairs saved via logr.Logger.WithValues. See + // RenderBuiltinsHook for more details. + RenderValuesHook func(kvList []any) []any + + // RenderArgsHook is the same as RenderBuiltinsHook, except that it is only + // called for key-value pairs passed directly to Info and Error. See + // RenderBuiltinsHook for more details. + RenderArgsHook func(kvList []any) []any + + // MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct + // that contains a struct, etc.) it may log. Every time it finds a struct, + // slice, array, or map the depth is increased by one. When the maximum is + // reached, the value will be converted to a string indicating that the max + // depth has been exceeded. If this field is not specified, a default + // value will be used. + MaxLogDepth int +} + +// MessageClass indicates which category or categories of messages to consider. +type MessageClass int + +const ( + // None ignores all message classes. + None MessageClass = iota + // All considers all message classes. + All + // Info only considers info messages. + Info + // Error only considers error messages. + Error +) + +// fnlogger inherits some of its LogSink implementation from Formatter +// and just needs to add some glue code. 
+type fnlogger struct { + Formatter + write func(prefix, args string) +} + +func (l fnlogger) WithName(name string) logr.LogSink { + l.Formatter.AddName(name) + return &l +} + +func (l fnlogger) WithValues(kvList ...any) logr.LogSink { + l.Formatter.AddValues(kvList) + return &l +} + +func (l fnlogger) WithCallDepth(depth int) logr.LogSink { + l.Formatter.AddCallDepth(depth) + return &l +} + +func (l fnlogger) Info(level int, msg string, kvList ...any) { + prefix, args := l.FormatInfo(level, msg, kvList) + l.write(prefix, args) +} + +func (l fnlogger) Error(err error, msg string, kvList ...any) { + prefix, args := l.FormatError(err, msg, kvList) + l.write(prefix, args) +} + +func (l fnlogger) GetUnderlying() func(prefix, args string) { + return l.write +} + +// Assert conformance to the interfaces. +var _ logr.LogSink = &fnlogger{} +var _ logr.CallDepthLogSink = &fnlogger{} +var _ Underlier = &fnlogger{} + +// NewFormatter constructs a Formatter which emits a JSON-like key=value format. +func NewFormatter(opts Options) Formatter { + return newFormatter(opts, outputKeyValue) +} + +// NewFormatterJSON constructs a Formatter which emits strict JSON. +func NewFormatterJSON(opts Options) Formatter { + return newFormatter(opts, outputJSON) +} + +// Defaults for Options. +const defaultTimestampFormat = "2006-01-02 15:04:05.000000" +const defaultMaxLogDepth = 16 + +func newFormatter(opts Options, outfmt outputFormat) Formatter { + if opts.TimestampFormat == "" { + opts.TimestampFormat = defaultTimestampFormat + } + if opts.MaxLogDepth == 0 { + opts.MaxLogDepth = defaultMaxLogDepth + } + if opts.LogInfoLevel == nil { + opts.LogInfoLevel = new(string) + *opts.LogInfoLevel = "level" + } + f := Formatter{ + outputFormat: outfmt, + prefix: "", + values: nil, + depth: 0, + opts: &opts, + } + return f +} + +// Formatter is an opaque struct which can be embedded in a LogSink +// implementation. It should be constructed with NewFormatter. Some of +// its methods directly implement logr.LogSink. +type Formatter struct { + outputFormat outputFormat + prefix string + values []any + valuesStr string + parentValuesStr string + depth int + opts *Options + group string // for slog groups + groupDepth int +} + +// outputFormat indicates which outputFormat to use. +type outputFormat int + +const ( + // outputKeyValue emits a JSON-like key=value format, but not strict JSON. + outputKeyValue outputFormat = iota + // outputJSON emits strict JSON. + outputJSON +) + +// PseudoStruct is a list of key-value pairs that gets logged as a struct. +type PseudoStruct []any + +// render produces a log line, ready to use. +func (f Formatter) render(builtins, args []any) string { + // Empirically bytes.Buffer is faster than strings.Builder for this. 
+ buf := bytes.NewBuffer(make([]byte, 0, 1024)) + if f.outputFormat == outputJSON { + buf.WriteByte('{') // for the whole line + } + + vals := builtins + if hook := f.opts.RenderBuiltinsHook; hook != nil { + vals = hook(f.sanitize(vals)) + } + f.flatten(buf, vals, false, false) // keys are ours, no need to escape + continuing := len(builtins) > 0 + + if f.parentValuesStr != "" { + if continuing { + buf.WriteByte(f.comma()) + } + buf.WriteString(f.parentValuesStr) + continuing = true + } + + groupDepth := f.groupDepth + if f.group != "" { + if f.valuesStr != "" || len(args) != 0 { + if continuing { + buf.WriteByte(f.comma()) + } + buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys + buf.WriteByte(f.colon()) + buf.WriteByte('{') // for the group + continuing = false + } else { + // The group was empty + groupDepth-- + } + } + + if f.valuesStr != "" { + if continuing { + buf.WriteByte(f.comma()) + } + buf.WriteString(f.valuesStr) + continuing = true + } + + vals = args + if hook := f.opts.RenderArgsHook; hook != nil { + vals = hook(f.sanitize(vals)) + } + f.flatten(buf, vals, continuing, true) // escape user-provided keys + + for i := 0; i < groupDepth; i++ { + buf.WriteByte('}') // for the groups + } + + if f.outputFormat == outputJSON { + buf.WriteByte('}') // for the whole line + } + + return buf.String() +} + +// flatten renders a list of key-value pairs into a buffer. If continuing is +// true, it assumes that the buffer has previous values and will emit a +// separator (which depends on the output format) before the first pair it +// writes. If escapeKeys is true, the keys are assumed to have +// non-JSON-compatible characters in them and must be evaluated for escapes. +// +// This function returns a potentially modified version of kvList, which +// ensures that there is a value for every key (adding a value if needed) and +// that each key is a string (substituting a key if needed). +func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, escapeKeys bool) []any { + // This logic overlaps with sanitize() but saves one type-cast per key, + // which can be measurable. + if len(kvList)%2 != 0 { + kvList = append(kvList, noValue) + } + copied := false + for i := 0; i < len(kvList); i += 2 { + k, ok := kvList[i].(string) + if !ok { + if !copied { + newList := make([]any, len(kvList)) + copy(newList, kvList) + kvList = newList + copied = true + } + k = f.nonStringKey(kvList[i]) + kvList[i] = k + } + v := kvList[i+1] + + if i > 0 || continuing { + if f.outputFormat == outputJSON { + buf.WriteByte(f.comma()) + } else { + // In theory the format could be something we don't understand. In + // practice, we control it, so it won't be. + buf.WriteByte(' ') + } + } + + buf.WriteString(f.quoted(k, escapeKeys)) + buf.WriteByte(f.colon()) + buf.WriteString(f.pretty(v)) + } + return kvList +} + +func (f Formatter) quoted(str string, escape bool) string { + if escape { + return prettyString(str) + } + // this is faster + return `"` + str + `"` +} + +func (f Formatter) comma() byte { + if f.outputFormat == outputJSON { + return ',' + } + return ' ' +} + +func (f Formatter) colon() byte { + if f.outputFormat == outputJSON { + return ':' + } + return '=' +} + +func (f Formatter) pretty(value any) string { + return f.prettyWithFlags(value, 0, 0) +} + +const ( + flagRawStruct = 0x1 // do not print braces on structs +) + +// TODO: This is not fast. Most of the overhead goes here. 
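+// It checks logr.Marshaler, fmt.Stringer, and error first, handles the most
+// common concrete types directly, and only then falls back to reflection.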
+func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
+	if depth > f.opts.MaxLogDepth {
+		return `"<max-log-depth-exceeded>"`
+	}
+
+	// Handle types that take full control of logging.
+	if v, ok := value.(logr.Marshaler); ok {
+		// Replace the value with what the type wants to get logged.
+		// That then gets handled below via reflection.
+		value = invokeMarshaler(v)
+	}
+
+	// Handle types that want to format themselves.
+	switch v := value.(type) {
+	case fmt.Stringer:
+		value = invokeStringer(v)
+	case error:
+		value = invokeError(v)
+	}
+
+	// Handling the most common types without reflect is a small perf win.
+	switch v := value.(type) {
+	case bool:
+		return strconv.FormatBool(v)
+	case string:
+		return prettyString(v)
+	case int:
+		return strconv.FormatInt(int64(v), 10)
+	case int8:
+		return strconv.FormatInt(int64(v), 10)
+	case int16:
+		return strconv.FormatInt(int64(v), 10)
+	case int32:
+		return strconv.FormatInt(int64(v), 10)
+	case int64:
+		return strconv.FormatInt(int64(v), 10)
+	case uint:
+		return strconv.FormatUint(uint64(v), 10)
+	case uint8:
+		return strconv.FormatUint(uint64(v), 10)
+	case uint16:
+		return strconv.FormatUint(uint64(v), 10)
+	case uint32:
+		return strconv.FormatUint(uint64(v), 10)
+	case uint64:
+		return strconv.FormatUint(v, 10)
+	case uintptr:
+		return strconv.FormatUint(uint64(v), 10)
+	case float32:
+		return strconv.FormatFloat(float64(v), 'f', -1, 32)
+	case float64:
+		return strconv.FormatFloat(v, 'f', -1, 64)
+	case complex64:
+		return `"` + strconv.FormatComplex(complex128(v), 'f', -1, 64) + `"`
+	case complex128:
+		return `"` + strconv.FormatComplex(v, 'f', -1, 128) + `"`
+	case PseudoStruct:
+		buf := bytes.NewBuffer(make([]byte, 0, 1024))
+		v = f.sanitize(v)
+		if flags&flagRawStruct == 0 {
+			buf.WriteByte('{')
+		}
+		for i := 0; i < len(v); i += 2 {
+			if i > 0 {
+				buf.WriteByte(f.comma())
+			}
+			k, _ := v[i].(string) // sanitize() above means no need to check success
+			// arbitrary keys might need escaping
+			buf.WriteString(prettyString(k))
+			buf.WriteByte(f.colon())
+			buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1))
+		}
+		if flags&flagRawStruct == 0 {
+			buf.WriteByte('}')
+		}
+		return buf.String()
+	}
+
+	buf := bytes.NewBuffer(make([]byte, 0, 256))
+	t := reflect.TypeOf(value)
+	if t == nil {
+		return "null"
+	}
+	v := reflect.ValueOf(value)
+	switch t.Kind() {
+	case reflect.Bool:
+		return strconv.FormatBool(v.Bool())
+	case reflect.String:
+		return prettyString(v.String())
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return strconv.FormatInt(int64(v.Int()), 10)
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return strconv.FormatUint(uint64(v.Uint()), 10)
+	case reflect.Float32:
+		return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32)
+	case reflect.Float64:
+		return strconv.FormatFloat(v.Float(), 'f', -1, 64)
+	case reflect.Complex64:
+		return `"` + strconv.FormatComplex(complex128(v.Complex()), 'f', -1, 64) + `"`
+	case reflect.Complex128:
+		return `"` + strconv.FormatComplex(v.Complex(), 'f', -1, 128) + `"`
+	case reflect.Struct:
+		if flags&flagRawStruct == 0 {
+			buf.WriteByte('{')
+		}
+		printComma := false // testing i>0 is not enough because of JSON omitted fields
+		for i := 0; i < t.NumField(); i++ {
+			fld := t.Field(i)
+			if fld.PkgPath != "" {
+				// reflect says this field is only defined for non-exported fields.
+				continue
+			}
+			if !v.Field(i).CanInterface() {
+				// reflect isn't clear exactly what this means, but we can't use it.
+				continue
+			}
+			name := ""
+			omitempty := false
+			if tag, found := fld.Tag.Lookup("json"); found {
+				if tag == "-" {
+					continue
+				}
+				if comma := strings.Index(tag, ","); comma != -1 {
+					if n := tag[:comma]; n != "" {
+						name = n
+					}
+					rest := tag[comma:]
+					if strings.Contains(rest, ",omitempty,") || strings.HasSuffix(rest, ",omitempty") {
+						omitempty = true
+					}
+				} else {
+					name = tag
+				}
+			}
+			if omitempty && isEmpty(v.Field(i)) {
+				continue
+			}
+			if printComma {
+				buf.WriteByte(f.comma())
+			}
+			printComma = true // if we got here, we are rendering a field
+			if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
+				buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1))
+				continue
+			}
+			if name == "" {
+				name = fld.Name
+			}
+			// field names can't contain characters which need escaping
+			buf.WriteString(f.quoted(name, false))
+			buf.WriteByte(f.colon())
+			buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1))
+		}
+		if flags&flagRawStruct == 0 {
+			buf.WriteByte('}')
+		}
+		return buf.String()
+	case reflect.Slice, reflect.Array:
+		// If this is outputting as JSON make sure this isn't really a json.RawMessage.
+		// If so just emit "as-is" and don't pretty it as that will just print
+		// it as [X,Y,Z,...] which isn't terribly useful vs the string form you really want.
+		if f.outputFormat == outputJSON {
+			if rm, ok := value.(json.RawMessage); ok {
+				// If it's empty make sure we emit an empty value as the array style would below.
+				if len(rm) > 0 {
+					buf.Write(rm)
+				} else {
+					buf.WriteString("null")
+				}
+				return buf.String()
+			}
+		}
+		buf.WriteByte('[')
+		for i := 0; i < v.Len(); i++ {
+			if i > 0 {
+				buf.WriteByte(f.comma())
+			}
+			e := v.Index(i)
+			buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1))
+		}
+		buf.WriteByte(']')
+		return buf.String()
+	case reflect.Map:
+		buf.WriteByte('{')
+		// This does not sort the map keys, for best perf.
+		it := v.MapRange()
+		i := 0
+		for it.Next() {
+			if i > 0 {
+				buf.WriteByte(f.comma())
+			}
+			// If a map key supports TextMarshaler, use it.
+			keystr := ""
+			if m, ok := it.Key().Interface().(encoding.TextMarshaler); ok {
+				txt, err := m.MarshalText()
+				if err != nil {
+					keystr = fmt.Sprintf("<error-MarshalText: %s>", err.Error())
+				} else {
+					keystr = string(txt)
+				}
+				keystr = prettyString(keystr)
+			} else {
+				// prettyWithFlags will produce already-escaped values
+				keystr = f.prettyWithFlags(it.Key().Interface(), 0, depth+1)
+				if t.Key().Kind() != reflect.String {
+					// JSON only does string keys. Unlike Go's standard JSON, we'll
+					// convert just about anything to a string.
+					keystr = prettyString(keystr)
+				}
+			}
+			buf.WriteString(keystr)
+			buf.WriteByte(f.colon())
+			buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1))
+			i++
+		}
+		buf.WriteByte('}')
+		return buf.String()
+	case reflect.Ptr, reflect.Interface:
+		if v.IsNil() {
+			return "null"
+		}
+		return f.prettyWithFlags(v.Elem().Interface(), 0, depth)
+	}
+	return fmt.Sprintf(`"<unhandled-%s>"`, t.Kind().String())
+}
+
+func prettyString(s string) string {
+	// Avoid escaping (which does allocations) if we can.
+	if needsEscape(s) {
+		return strconv.Quote(s)
+	}
+	b := bytes.NewBuffer(make([]byte, 0, 1024))
+	b.WriteByte('"')
+	b.WriteString(s)
+	b.WriteByte('"')
+	return b.String()
+}
+
+// needsEscape determines whether the input string needs to be escaped or not,
+// without doing any allocations.
+func needsEscape(s string) bool {
+	for _, r := range s {
+		if !strconv.IsPrint(r) || r == '\\' || r == '"' {
+			return true
+		}
+	}
+	return false
+}
+
+func isEmpty(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Complex64, reflect.Complex128:
+		return v.Complex() == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	}
+	return false
+}
+
+func invokeMarshaler(m logr.Marshaler) (ret any) {
+	defer func() {
+		if r := recover(); r != nil {
+			ret = fmt.Sprintf("<panic: %s>", r)
+		}
+	}()
+	return m.MarshalLog()
+}
+
+func invokeStringer(s fmt.Stringer) (ret string) {
+	defer func() {
+		if r := recover(); r != nil {
+			ret = fmt.Sprintf("<panic: %s>", r)
+		}
+	}()
+	return s.String()
+}
+
+func invokeError(e error) (ret string) {
+	defer func() {
+		if r := recover(); r != nil {
+			ret = fmt.Sprintf("<panic: %s>", r)
+		}
+	}()
+	return e.Error()
+}
+
+// Caller represents the original call site for a log line, after considering
+// logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and
+// Line fields will always be provided, while the Func field is optional.
+// Users can set the render hook fields in Options to examine logged key-value
+// pairs, one of which will be {"caller", Caller} if the Options.LogCaller
+// field is enabled for the given MessageClass.
+type Caller struct {
+	// File is the basename of the file for this call site.
+	File string `json:"file"`
+	// Line is the line number in the file for this call site.
+	Line int `json:"line"`
+	// Func is the function name for this call site, or empty if
+	// Options.LogCallerFunc is not enabled.
+	Func string `json:"function,omitempty"`
+}
+
+func (f Formatter) caller() Caller {
+	// +1 for this frame, +1 for Info/Error.
+	pc, file, line, ok := runtime.Caller(f.depth + 2)
+	if !ok {
+		return Caller{"", 0, ""}
+	}
+	fn := ""
+	if f.opts.LogCallerFunc {
+		if fp := runtime.FuncForPC(pc); fp != nil {
+			fn = fp.Name()
+		}
+	}
+
+	return Caller{filepath.Base(file), line, fn}
+}
+
+const noValue = "<no-value>"
+
+func (f Formatter) nonStringKey(v any) string {
+	return fmt.Sprintf("<non-string-key: %s>", f.snippet(v))
+}
+
+// snippet produces a short snippet string of an arbitrary value.
+func (f Formatter) snippet(v any) string {
+	const snipLen = 16
+
+	snip := f.pretty(v)
+	if len(snip) > snipLen {
+		snip = snip[:snipLen]
+	}
+	return snip
+}
+
+// sanitize ensures that a list of key-value pairs has a value for every key
+// (adding a value if needed) and that each key is a string (substituting a key
+// if needed).
+func (f Formatter) sanitize(kvList []any) []any {
+	if len(kvList)%2 != 0 {
+		kvList = append(kvList, noValue)
+	}
+	for i := 0; i < len(kvList); i += 2 {
+		_, ok := kvList[i].(string)
+		if !ok {
+			kvList[i] = f.nonStringKey(kvList[i])
+		}
+	}
+	return kvList
+}
+
+// startGroup opens a new group scope (basically a sub-struct), which locks all
+// the current saved values and starts them anew. This is needed to satisfy
+// slog.
+func (f *Formatter) startGroup(group string) {
+	// Unnamed groups are just inlined.
+	if group == "" {
+		return
+	}
+
+	// Any saved values can no longer be changed.
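+	// They are rendered into parentValuesStr once and reused for every
+	// subsequent log line.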
+	buf := bytes.NewBuffer(make([]byte, 0, 1024))
+	continuing := false
+
+	if f.parentValuesStr != "" {
+		buf.WriteString(f.parentValuesStr)
+		continuing = true
+	}
+
+	if f.group != "" && f.valuesStr != "" {
+		if continuing {
+			buf.WriteByte(f.comma())
+		}
+		buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys
+		buf.WriteByte(f.colon())
+		buf.WriteByte('{') // for the group
+		continuing = false
+	}
+
+	if f.valuesStr != "" {
+		if continuing {
+			buf.WriteByte(f.comma())
+		}
+		buf.WriteString(f.valuesStr)
+	}
+
+	// NOTE: We don't close the scope here - that's done later, when a log line
+	// is actually rendered (because we have N scopes to close).
+
+	f.parentValuesStr = buf.String()
+
+	// Start collecting new values.
+	f.group = group
+	f.groupDepth++
+	f.valuesStr = ""
+	f.values = nil
+}
+
+// Init configures this Formatter from runtime info, such as the call depth
+// imposed by logr itself.
+// Note that this receiver is a pointer, so depth can be saved.
+func (f *Formatter) Init(info logr.RuntimeInfo) {
+	f.depth += info.CallDepth
+}
+
+// Enabled checks whether an info message at the given level should be logged.
+func (f Formatter) Enabled(level int) bool {
+	return level <= f.opts.Verbosity
+}
+
+// GetDepth returns the current depth of this Formatter. This is useful for
+// implementations which do their own caller attribution.
+func (f Formatter) GetDepth() int {
+	return f.depth
+}
+
+// FormatInfo renders an Info log message into strings. The prefix will be
+// empty when no names were set (via AddName), or when the output is
+// configured for JSON.
+func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, argsStr string) {
+	args := make([]any, 0, 64) // using a constant here impacts perf
+	prefix = f.prefix
+	if f.outputFormat == outputJSON {
+		args = append(args, "logger", prefix)
+		prefix = ""
+	}
+	if f.opts.LogTimestamp {
+		args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
+	}
+	if policy := f.opts.LogCaller; policy == All || policy == Info {
+		args = append(args, "caller", f.caller())
+	}
+	if key := *f.opts.LogInfoLevel; key != "" {
+		args = append(args, key, level)
+	}
+	args = append(args, "msg", msg)
+	return prefix, f.render(args, kvList)
+}
+
+// FormatError renders an Error log message into strings. The prefix will be
+// empty when no names were set (via AddName), or when the output is
+// configured for JSON.
+func (f Formatter) FormatError(err error, msg string, kvList []any) (prefix, argsStr string) {
+	args := make([]any, 0, 64) // using a constant here impacts perf
+	prefix = f.prefix
+	if f.outputFormat == outputJSON {
+		args = append(args, "logger", prefix)
+		prefix = ""
+	}
+	if f.opts.LogTimestamp {
+		args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
+	}
+	if policy := f.opts.LogCaller; policy == All || policy == Error {
+		args = append(args, "caller", f.caller())
+	}
+	args = append(args, "msg", msg)
+	var loggableErr any
+	if err != nil {
+		loggableErr = err.Error()
+	}
+	args = append(args, "error", loggableErr)
+	return prefix, f.render(args, kvList)
+}
+
+// AddName appends the specified name. funcr uses '/' characters to separate
+// name elements. Callers should not pass '/' in the provided name string, but
+// this library does not actually enforce that.
+func (f *Formatter) AddName(name string) {
+	if len(f.prefix) > 0 {
+		f.prefix += "/"
+	}
+	f.prefix += name
+}
+
+// AddValues adds key-value pairs to the set of saved values to be logged with
+// each log line.
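+// Values are pre-rendered once here, so subsequent Info and Error calls do
+// not pay for rendering them again.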
+func (f *Formatter) AddValues(kvList []any) {
+	// Three slice args forces a copy.
+	n := len(f.values)
+	f.values = append(f.values[:n:n], kvList...)
+
+	vals := f.values
+	if hook := f.opts.RenderValuesHook; hook != nil {
+		vals = hook(f.sanitize(vals))
+	}
+
+	// Pre-render values, so we don't have to do it on each Info/Error call.
+	buf := bytes.NewBuffer(make([]byte, 0, 1024))
+	f.flatten(buf, vals, false, true) // escape user-provided keys
+	f.valuesStr = buf.String()
+}
+
+// AddCallDepth increases the number of stack-frames to skip when attributing
+// the log line to a file and line.
+func (f *Formatter) AddCallDepth(depth int) {
+	f.depth += depth
+}
diff --git a/vendor/github.com/go-logr/logr/funcr/slogsink.go b/vendor/github.com/go-logr/logr/funcr/slogsink.go
new file mode 100644
index 00000000..7bd84761
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/funcr/slogsink.go
@@ -0,0 +1,105 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
*/
+
+package funcr
+
+import (
+	"context"
+	"log/slog"
+
+	"github.com/go-logr/logr"
+)
+
+var _ logr.SlogSink = &fnlogger{}
+
+const extraSlogSinkDepth = 3 // 2 for slog, 1 for SlogSink
+
+func (l fnlogger) Handle(_ context.Context, record slog.Record) error {
+	kvList := make([]any, 0, 2*record.NumAttrs())
+	record.Attrs(func(attr slog.Attr) bool {
+		kvList = attrToKVs(attr, kvList)
+		return true
+	})
+
+	if record.Level >= slog.LevelError {
+		l.WithCallDepth(extraSlogSinkDepth).Error(nil, record.Message, kvList...)
+	} else {
+		level := l.levelFromSlog(record.Level)
+		l.WithCallDepth(extraSlogSinkDepth).Info(level, record.Message, kvList...)
+	}
+	return nil
+}
+
+func (l fnlogger) WithAttrs(attrs []slog.Attr) logr.SlogSink {
+	kvList := make([]any, 0, 2*len(attrs))
+	for _, attr := range attrs {
+		kvList = attrToKVs(attr, kvList)
+	}
+	l.AddValues(kvList)
+	return &l
+}
+
+func (l fnlogger) WithGroup(name string) logr.SlogSink {
+	l.startGroup(name)
+	return &l
+}
+
+// attrToKVs appends a slog.Attr to a logr-style kvList. It handles slog Groups
+// and other details of slog.
+func attrToKVs(attr slog.Attr, kvList []any) []any {
+	attrVal := attr.Value.Resolve()
+	if attrVal.Kind() == slog.KindGroup {
+		groupVal := attrVal.Group()
+		grpKVs := make([]any, 0, 2*len(groupVal))
+		for _, attr := range groupVal {
+			grpKVs = attrToKVs(attr, grpKVs)
+		}
+		if attr.Key == "" {
+			// slog says we have to inline these
+			kvList = append(kvList, grpKVs...)
+		} else {
+			kvList = append(kvList, attr.Key, PseudoStruct(grpKVs))
+		}
+	} else if attr.Key != "" {
+		kvList = append(kvList, attr.Key, attrVal.Any())
+	}
+
+	return kvList
+}
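+
+// For example, attrToKVs(slog.Group("req", "method", "GET"), nil) returns
+// []any{"req", PseudoStruct{"method", "GET"}}, and attrs of a group with an
+// empty key are inlined into the surrounding list.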
+
+// levelFromSlog adjusts the level by the logger's verbosity and negates it.
+// It ensures that the result is >= 0. This is necessary because the result is
+// passed to a LogSink and that API did not historically document whether
+// levels could be negative or what that meant.
+//
+// Some example usage:
+//
+//	logrV0 := getMyLogger()
+//	logrV2 := logrV0.V(2)
+//	slogV2 := slog.New(logr.ToSlogHandler(logrV2))
+//	slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
+//	slogV2.Info("msg")  // =~ logrV2.V(0) =~ logrV0.V(2)
+//	slogV2.Warn("msg")  // =~ logrV2.V(-4) =~ logrV0.V(0)
+func (l fnlogger) levelFromSlog(level slog.Level) int {
+	result := -level
+	if result < 0 {
+		result = 0 // because LogSink doesn't expect negative V levels
+	}
+	return int(result)
+}
diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go
new file mode 100644
index 00000000..b4428e10
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/logr.go
@@ -0,0 +1,520 @@
+/*
+Copyright 2019 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This design derives from Dave Cheney's blog:
+//     http://dave.cheney.net/2015/11/05/lets-talk-about-logging
+
+// Package logr defines a general-purpose logging API and abstract interfaces
+// to back that API. Packages in the Go ecosystem can depend on this package,
+// while callers can implement logging with whatever backend is appropriate.
+//
+// # Usage
+//
+// Logging is done using a Logger instance. Logger is a concrete type with
+// methods, which defers the actual logging to a LogSink interface. The main
+// methods of Logger are Info() and Error(). Arguments to Info() and Error()
+// are key/value pairs rather than printf-style formatted strings, emphasizing
+// "structured logging".
+//
+// With Go's standard log package, we might write:
+//
+//	log.Printf("setting target value %s", targetValue)
+//
+// With logr's structured logging, we'd write:
+//
+//	logger.Info("setting target", "value", targetValue)
+//
+// Errors are much the same. Instead of:
+//
+//	log.Printf("failed to open the pod bay door for user %s: %v", user, err)
+//
+// We'd write:
+//
+//	logger.Error(err, "failed to open the pod bay door", "user", user)
+//
+// Info() and Error() are very similar, but they are separate methods so that
+// LogSink implementations can choose to do things like attach additional
+// information (such as stack traces) on calls to Error(). Error() messages are
+// always logged, regardless of the current verbosity. If there is no error
+// instance available, passing nil is valid.
+//
+// # Verbosity
+//
+// Often we want to log information only when the application is in "verbose
+// mode". To write log lines that are more verbose, Logger has a V() method.
+// The higher the V-level of a log line, the less critical it is considered.
+// Log-lines with V-levels that are not enabled (as per the LogSink) will not
+// be written. Level V(0) is the default, and logger.V(0).Info() has the same
+// meaning as logger.Info(). Negative V-levels have the same meaning as V(0).
+// Error messages do not have a verbosity level and are always logged.
+//
+// Where we might have written:
+//
+//	if flVerbose >= 2 {
+//		log.Printf("an unusual thing happened")
+//	}
+//
+// We can write:
+//
+//	logger.V(2).Info("an unusual thing happened")
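+//
+// V-levels are additive: logger.V(1).V(2) is equivalent to logger.V(3). One
+// way to avoid computing expensive arguments for log lines that would be
+// discarded is to check Enabled first (computeStats is a placeholder for
+// some expensive call):
+//
+//	if loggerV := logger.V(2); loggerV.Enabled() {
+//		loggerV.Info("report", "stats", computeStats())
+//	}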
+//
+// # Logger Names
+//
+// Logger instances can have name strings so that all messages logged through
+// that instance have additional context. For example, you might want to add
+// a subsystem name:
+//
+//	logger.WithName("compactor").Info("started", "time", time.Now())
+//
+// The WithName() method returns a new Logger, which can be passed to
+// constructors or other functions for further use. Repeated use of WithName()
+// will accumulate name "segments". These name segments will be joined in some
+// way by the LogSink implementation. It is strongly recommended that name
+// segments contain simple identifiers (letters, digits, and hyphen), and do
+// not contain characters that could muddle the log output or confuse the
+// joining operation (e.g. whitespace, commas, periods, slashes, brackets,
+// quotes, etc).
+//
+// # Saved Values
+//
+// Logger instances can store any number of key/value pairs, which will be
+// logged alongside all messages logged through that instance. For example,
+// you might want to create a Logger instance per managed object:
+//
+// With the standard log package, we might write:
+//
+//	log.Printf("decided to set field foo to value %q for object %s/%s",
+//		targetValue, object.Namespace, object.Name)
+//
+// With logr we'd write:
+//
+//	// Elsewhere: set up the logger to log the object name.
+//	obj.logger = mainLogger.WithValues(
+//		"name", obj.name, "namespace", obj.namespace)
+//
+//	// later on...
+//	obj.logger.Info("setting foo", "value", targetValue)
+//
+// # Best Practices
+//
+// Logger has very few hard rules, with the goal that LogSink implementations
+// might have a lot of freedom to differentiate. There are, however, some
+// things to consider.
+//
+// The log message consists of a constant message attached to the log line.
+// This should generally be a simple description of what's occurring, and should
+// never be a format string. Variable information can then be attached using
+// named values.
+//
+// Keys are arbitrary strings, but should generally be constant values. Values
+// may be any Go value, but how the value is formatted is determined by the
+// LogSink implementation.
+//
+// Logger instances are meant to be passed around by value. Code that receives
+// such a value can call its methods without having to check whether the
+// instance is ready for use.
+//
+// The zero logger (= Logger{}) is identical to Discard() and discards all log
+// entries. Code that receives a Logger by value can simply call it; the
+// methods will never crash. For cases where passing a logger is optional, a
+// pointer to Logger should be used.
+//
+// # Key Naming Conventions
+//
+// Keys are not strictly required to conform to any specification or regex, but
+// it is recommended that they:
+//   - be human-readable and meaningful (not auto-generated or simple ordinals)
+//   - be constant (not dependent on input data)
+//   - contain only printable characters
+//   - not contain whitespace or punctuation
+//   - use lower case for simple keys and lowerCamelCase for more complex ones
+//
+// These guidelines help ensure that log data is processed properly regardless
+// of the log implementation. For example, log implementations will try to
+// output JSON data or will store data for later database (e.g. SQL) queries.
+//
+// While users are generally free to use key names of their choice, it's
+// generally best to avoid using the following keys, as they're frequently used
+// by implementations:
+//   - "caller": the calling information (file/line) of a particular log line
+//   - "error": the underlying error value in the `Error` method
+//   - "level": the log level
+//   - "logger": the name of the associated logger
+//   - "msg": the log message
+//   - "stacktrace": the stack trace associated with a particular log line or
+//     error (often from the `Error` message)
+//   - "ts": the timestamp for a log line
+//
+// Implementations are encouraged to make use of these keys to represent the
+// above concepts, when necessary (for example, in a pure-JSON output form, it
+// would be necessary to represent at least message and timestamp as ordinary
+// named values).
+//
+// # Break Glass
+//
+// Implementations may choose to give callers access to the underlying
+// logging implementation. The recommended pattern for this is:
+//
+//	// Underlier exposes access to the underlying logging implementation.
+//	// Since callers only have a logr.Logger, they have to know which
+//	// implementation is in use, so this interface is less of an abstraction
+//	// and more of a way to test type conversion.
+//	type Underlier interface {
+//		GetUnderlying() <underlying-type>
+//	}
+//
+// Logger grants access to the sink to enable type assertions like this:
+//
+//	func DoSomethingWithImpl(log logr.Logger) {
+//		if underlier, ok := log.GetSink().(impl.Underlier); ok {
+//			implLogger := underlier.GetUnderlying()
+//			...
+//		}
+//	}
+//
+// Custom `With*` functions can be implemented by copying the complete
+// Logger struct and replacing the sink in the copy:
+//
+//	// WithFooBar changes the foobar parameter in the log sink and returns a
+//	// new logger with that modified sink. It does nothing for loggers where
+//	// the sink doesn't support that parameter.
+//	func WithFoobar(log logr.Logger, foobar int) logr.Logger {
+//		if foobarLogSink, ok := log.GetSink().(FoobarSink); ok {
+//			log = log.WithSink(foobarLogSink.WithFooBar(foobar))
+//		}
+//		return log
+//	}
+//
+// Don't use New to construct a new Logger with a LogSink retrieved from an
+// existing Logger. Source code attribution might not work correctly and
+// unexported fields in Logger get lost.
+//
+// Beware that the same LogSink instance may be shared by different logger
+// instances. Calling functions that modify the LogSink will affect all of
+// those.
+package logr
+
+// New returns a new Logger instance. This is primarily used by libraries
+// implementing LogSink, rather than end users. Passing a nil sink will create
+// a Logger which discards all log lines.
+func New(sink LogSink) Logger {
+	logger := Logger{}
+	logger.setSink(sink)
+	if sink != nil {
+		sink.Init(runtimeInfo)
+	}
+	return logger
+}
+
+// setSink stores the sink and updates any related fields. It mutates the
+// logger and thus is only safe to use for loggers that are not currently being
+// used concurrently.
+func (l *Logger) setSink(sink LogSink) {
+	l.sink = sink
+}
+
+// GetSink returns the stored sink.
+func (l Logger) GetSink() LogSink {
+	return l.sink
+}
+
+// WithSink returns a copy of the logger with the new sink.
+func (l Logger) WithSink(sink LogSink) Logger {
+	l.setSink(sink)
+	return l
+}
+
+// Logger is an interface to an abstract logging implementation. This is a
+// concrete type for performance reasons, but all the real work is passed on to
+// a LogSink.
Implementations of LogSink should provide their own constructors +// that return Logger, not LogSink. +// +// The underlying sink can be accessed through GetSink and be modified through +// WithSink. This enables the implementation of custom extensions (see "Break +// Glass" in the package documentation). Normally the sink should be used only +// indirectly. +type Logger struct { + sink LogSink + level int +} + +// Enabled tests whether this Logger is enabled. For example, commandline +// flags might be used to set the logging verbosity and disable some info logs. +func (l Logger) Enabled() bool { + // Some implementations of LogSink look at the caller in Enabled (e.g. + // different verbosity levels per package or file), but we only pass one + // CallDepth in (via Init). This means that all calls from Logger to the + // LogSink's Enabled, Info, and Error methods must have the same number of + // frames. In other words, Logger methods can't call other Logger methods + // which call these LogSink methods unless we do it the same in all paths. + return l.sink != nil && l.sink.Enabled(l.level) +} + +// Info logs a non-error message with the given key/value pairs as context. +// +// The msg argument should be used to add some constant description to the log +// line. The key/value pairs can then be used to add additional variable +// information. The key/value pairs must alternate string keys and arbitrary +// values. +func (l Logger) Info(msg string, keysAndValues ...any) { + if l.sink == nil { + return + } + if l.sink.Enabled(l.level) { // see comment in Enabled + if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { + withHelper.GetCallStackHelper()() + } + l.sink.Info(l.level, msg, keysAndValues...) + } +} + +// Error logs an error, with the given message and key/value pairs as context. +// It functions similarly to Info, but may have unique behavior, and should be +// preferred for logging errors (see the package documentations for more +// information). The log message will always be emitted, regardless of +// verbosity level. +// +// The msg argument should be used to add context to any underlying error, +// while the err argument should be used to attach the actual error that +// triggered this log line, if present. The err parameter is optional +// and nil may be passed instead of an error instance. +func (l Logger) Error(err error, msg string, keysAndValues ...any) { + if l.sink == nil { + return + } + if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { + withHelper.GetCallStackHelper()() + } + l.sink.Error(err, msg, keysAndValues...) +} + +// V returns a new Logger instance for a specific verbosity level, relative to +// this Logger. In other words, V-levels are additive. A higher verbosity +// level means a log message is less important. Negative V-levels are treated +// as 0. +func (l Logger) V(level int) Logger { + if l.sink == nil { + return l + } + if level < 0 { + level = 0 + } + l.level += level + return l +} + +// GetV returns the verbosity level of the logger. If the logger's LogSink is +// nil as in the Discard logger, this will always return 0. +func (l Logger) GetV() int { + // 0 if l.sink nil because of the if check in V above. + return l.level +} + +// WithValues returns a new Logger instance with additional key/value pairs. +// See Info for documentation on how key/value pairs work. 
+func (l Logger) WithValues(keysAndValues ...any) Logger {
+	if l.sink == nil {
+		return l
+	}
+	l.setSink(l.sink.WithValues(keysAndValues...))
+	return l
+}
+
+// WithName returns a new Logger instance with the specified name element added
+// to the Logger's name. Successive calls to WithName append additional
+// suffixes to the Logger's name. It's strongly recommended that name segments
+// contain only letters, digits, and hyphens (see the package documentation for
+// more information).
+func (l Logger) WithName(name string) Logger {
+	if l.sink == nil {
+		return l
+	}
+	l.setSink(l.sink.WithName(name))
+	return l
+}
+
+// WithCallDepth returns a Logger instance that offsets the call stack by the
+// specified number of frames when logging call site information, if possible.
+// This is useful for users who have helper functions between the "real" call
+// site and the actual calls to Logger methods. If depth is 0 the attribution
+// should be to the direct caller of this function. If depth is 1 the
+// attribution should skip 1 call frame, and so on. Successive calls to this
+// are additive.
+//
+// If the underlying log implementation supports a WithCallDepth(int) method,
+// it will be called and the result returned. If the implementation does not
+// support CallDepthLogSink, the original Logger will be returned.
+//
+// To skip one level, WithCallStackHelper() should be used instead of
+// WithCallDepth(1) because it works with implementations that support the
+// CallDepthLogSink and/or CallStackHelperLogSink interfaces.
+func (l Logger) WithCallDepth(depth int) Logger {
+	if l.sink == nil {
+		return l
+	}
+	if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
+		l.setSink(withCallDepth.WithCallDepth(depth))
+	}
+	return l
+}
+
+// WithCallStackHelper returns a new Logger instance that skips the direct
+// caller when logging call site information, if possible. This is useful for
+// users who have helper functions between the "real" call site and the actual
+// calls to Logger methods and want to support loggers which depend on marking
+// each individual helper function, like loggers based on testing.T.
+//
+// In addition to using that new logger instance, callers also must call the
+// returned function.
+//
+// If the underlying log implementation supports a WithCallDepth(int) method,
+// WithCallDepth(1) will be called to produce a new logger. If it supports a
+// WithCallStackHelper() method, that will be also called. If the
+// implementation does not support either of these, the original Logger will be
+// returned.
+func (l Logger) WithCallStackHelper() (func(), Logger) {
+	if l.sink == nil {
+		return func() {}, l
+	}
+	var helper func()
+	if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
+		l.setSink(withCallDepth.WithCallDepth(1))
+	}
+	if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
+		helper = withHelper.GetCallStackHelper()
+	} else {
+		helper = func() {}
+	}
+	return helper, l
+}
+
+// IsZero returns true if this logger is an uninitialized zero value.
+func (l Logger) IsZero() bool {
+	return l.sink == nil
+}
+
+// RuntimeInfo holds information that the logr "core" library knows and which
+// LogSinks might want to know.
+type RuntimeInfo struct {
+	// CallDepth is the number of call frames the logr library adds between the
+	// end-user and the LogSink. LogSink implementations which choose to print
+	// the original logging site (e.g. file & line) should climb this many
+	// additional frames to find it.
+ CallDepth int +} + +// runtimeInfo is a static global. It must not be changed at run time. +var runtimeInfo = RuntimeInfo{ + CallDepth: 1, +} + +// LogSink represents a logging implementation. End-users will generally not +// interact with this type. +type LogSink interface { + // Init receives optional information about the logr library for LogSink + // implementations that need it. + Init(info RuntimeInfo) + + // Enabled tests whether this LogSink is enabled at the specified V-level. + // For example, commandline flags might be used to set the logging + // verbosity and disable some info logs. + Enabled(level int) bool + + // Info logs a non-error message with the given key/value pairs as context. + // The level argument is provided for optional logging. This method will + // only be called when Enabled(level) is true. See Logger.Info for more + // details. + Info(level int, msg string, keysAndValues ...any) + + // Error logs an error, with the given message and key/value pairs as + // context. See Logger.Error for more details. + Error(err error, msg string, keysAndValues ...any) + + // WithValues returns a new LogSink with additional key/value pairs. See + // Logger.WithValues for more details. + WithValues(keysAndValues ...any) LogSink + + // WithName returns a new LogSink with the specified name appended. See + // Logger.WithName for more details. + WithName(name string) LogSink +} + +// CallDepthLogSink represents a LogSink that knows how to climb the call stack +// to identify the original call site and can offset the depth by a specified +// number of frames. This is useful for users who have helper functions +// between the "real" call site and the actual calls to Logger methods. +// Implementations that log information about the call site (such as file, +// function, or line) would otherwise log information about the intermediate +// helper functions. +// +// This is an optional interface and implementations are not required to +// support it. +type CallDepthLogSink interface { + // WithCallDepth returns a LogSink that will offset the call + // stack by the specified number of frames when logging call + // site information. + // + // If depth is 0, the LogSink should skip exactly the number + // of call frames defined in RuntimeInfo.CallDepth when Info + // or Error are called, i.e. the attribution should be to the + // direct caller of Logger.Info or Logger.Error. + // + // If depth is 1 the attribution should skip 1 call frame, and so on. + // Successive calls to this are additive. + WithCallDepth(depth int) LogSink +} + +// CallStackHelperLogSink represents a LogSink that knows how to climb +// the call stack to identify the original call site and can skip +// intermediate helper functions if they mark themselves as +// helper. Go's testing package uses that approach. +// +// This is useful for users who have helper functions between the +// "real" call site and the actual calls to Logger methods. +// Implementations that log information about the call site (such as +// file, function, or line) would otherwise log information about the +// intermediate helper functions. +// +// This is an optional interface and implementations are not required +// to support it. Implementations that choose to support this must not +// simply implement it as WithCallDepth(1), because +// Logger.WithCallStackHelper will call both methods if they are +// present. This should only be implemented for LogSinks that actually +// need it, as with testing.T. 
+type CallStackHelperLogSink interface {
+	// GetCallStackHelper returns a function that must be called
+	// to mark the direct caller as a helper function when logging
+	// call site information.
+	GetCallStackHelper() func()
+}
+
+// Marshaler is an optional interface that logged values may choose to
+// implement. Loggers with structured output, such as JSON, should
+// log the object returned by the MarshalLog method instead of the
+// original value.
+type Marshaler interface {
+	// MarshalLog can be used to:
+	//   - ensure that structs are not logged as strings when the original
+	//     value has a String method: return a different type without a
+	//     String method
+	//   - select which fields of a complex type should get logged:
+	//     return a simpler struct with fewer fields
+	//   - log unexported fields: return a different struct
+	//     with exported fields
+	//
+	// It may return any value of any type.
+	MarshalLog() any
+}
diff --git a/vendor/github.com/go-logr/logr/sloghandler.go b/vendor/github.com/go-logr/logr/sloghandler.go
new file mode 100644
index 00000000..82d1ba49
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/sloghandler.go
@@ -0,0 +1,192 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+	"context"
+	"log/slog"
+)
+
+type slogHandler struct {
+	// May be nil, in which case all logs get discarded.
+	sink LogSink
+	// Non-nil if sink is non-nil and implements SlogSink.
+	slogSink SlogSink
+
+	// groupPrefix collects values from WithGroup calls. It gets added as
+	// prefix to value keys when handling a log record.
+	groupPrefix string
+
+	// levelBias can be set when constructing the handler to influence the
+	// slog.Level of log records. A positive levelBias reduces the
+	// slog.Level value. slog has no API to influence this value after the
+	// handler got created, so it can only be set indirectly through
+	// Logger.V.
+	levelBias slog.Level
+}
+
+var _ slog.Handler = &slogHandler{}
+
+// groupSeparator is used to concatenate WithGroup names and attribute keys.
+const groupSeparator = "."
+
+// GetLevel is used for black box unit testing.
+func (l *slogHandler) GetLevel() slog.Level {
+	return l.levelBias
+}
+
+func (l *slogHandler) Enabled(_ context.Context, level slog.Level) bool {
+	return l.sink != nil && (level >= slog.LevelError || l.sink.Enabled(l.levelFromSlog(level)))
+}
+
+func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error {
+	if l.slogSink != nil {
+		// Only adjust verbosity level of log entries < slog.LevelError.
+		if record.Level < slog.LevelError {
+			record.Level -= l.levelBias
+		}
+		return l.slogSink.Handle(ctx, record)
+	}
+
+	// No need to check for nil sink here because Handle will only be called
+	// when Enabled returned true.
+
+	kvList := make([]any, 0, 2*record.NumAttrs())
+	record.Attrs(func(attr slog.Attr) bool {
+		kvList = attrToKVs(attr, l.groupPrefix, kvList)
+		return true
+	})
+	if record.Level >= slog.LevelError {
+		l.sinkWithCallDepth().Error(nil, record.Message, kvList...)
+	} else {
+		level := l.levelFromSlog(record.Level)
+		l.sinkWithCallDepth().Info(level, record.Message, kvList...)
+	}
+	return nil
+}
+
+// sinkWithCallDepth adjusts the stack unwinding so that when Error or Info
+// are called by Handle, code in slog gets skipped.
+//
+// This offset currently (Go 1.21.0) works for calls through
+// slog.New(ToSlogHandler(...)). There's no guarantee that the call
+// chain won't change. Wrapping the handler will also break unwinding. It's
+// still better than not adjusting at all.
+//
+// This cannot be done when constructing the handler because FromSlogHandler needs
+// access to the original sink without this adjustment. A second copy would
+// work, but then WithAttrs would have to be called for both of them.
+func (l *slogHandler) sinkWithCallDepth() LogSink {
+	if sink, ok := l.sink.(CallDepthLogSink); ok {
+		return sink.WithCallDepth(2)
+	}
+	return l.sink
+}
+
+func (l *slogHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
+	if l.sink == nil || len(attrs) == 0 {
+		return l
+	}
+
+	clone := *l
+	if l.slogSink != nil {
+		clone.slogSink = l.slogSink.WithAttrs(attrs)
+		clone.sink = clone.slogSink
+	} else {
+		kvList := make([]any, 0, 2*len(attrs))
+		for _, attr := range attrs {
+			kvList = attrToKVs(attr, l.groupPrefix, kvList)
+		}
+		clone.sink = l.sink.WithValues(kvList...)
+	}
+	return &clone
+}
+
+func (l *slogHandler) WithGroup(name string) slog.Handler {
+	if l.sink == nil {
+		return l
+	}
+	if name == "" {
+		// slog says to inline empty groups
+		return l
+	}
+	clone := *l
+	if l.slogSink != nil {
+		clone.slogSink = l.slogSink.WithGroup(name)
+		clone.sink = clone.slogSink
+	} else {
+		clone.groupPrefix = addPrefix(clone.groupPrefix, name)
+	}
+	return &clone
+}
+
+// attrToKVs appends a slog.Attr to a logr-style kvList. It handles slog groups
+// and other details of slog.
+func attrToKVs(attr slog.Attr, groupPrefix string, kvList []any) []any {
+	attrVal := attr.Value.Resolve()
+	if attrVal.Kind() == slog.KindGroup {
+		groupVal := attrVal.Group()
+		grpKVs := make([]any, 0, 2*len(groupVal))
+		prefix := groupPrefix
+		if attr.Key != "" {
+			prefix = addPrefix(groupPrefix, attr.Key)
+		}
+		for _, attr := range groupVal {
+			grpKVs = attrToKVs(attr, prefix, grpKVs)
+		}
+		kvList = append(kvList, grpKVs...)
+	} else if attr.Key != "" {
+		kvList = append(kvList, addPrefix(groupPrefix, attr.Key), attrVal.Any())
+	}
+
+	return kvList
+}
+
+func addPrefix(prefix, name string) string {
+	if prefix == "" {
+		return name
+	}
+	if name == "" {
+		return prefix
+	}
+	return prefix + groupSeparator + name
+}
+
+// levelFromSlog adjusts the level by the logger's verbosity and negates it.
+// It ensures that the result is >= 0. This is necessary because the result is
+// passed to a LogSink and that API did not historically document whether
+// levels could be negative or what that meant.
+//
+// Some example usage:
+//
+//	logrV0 := getMyLogger()
+//	logrV2 := logrV0.V(2)
+//	slogV2 := slog.New(logr.ToSlogHandler(logrV2))
+//	slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
+//	slogV2.Info("msg")  // =~ logrV2.V(0) =~ logrV0.V(2)
+//	slogV2.Warn("msg")  // =~ logrV2.V(-4) =~ logrV0.V(0)
+func (l *slogHandler) levelFromSlog(level slog.Level) int {
+	result := -level
+	result += l.levelBias // in case the original Logger had a V level
+	if result < 0 {
+		result = 0 // because LogSink doesn't expect negative V levels
+	}
+	return int(result)
+}
diff --git a/vendor/github.com/go-logr/logr/slogr.go b/vendor/github.com/go-logr/logr/slogr.go
new file mode 100644
index 00000000..28a83d02
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/slogr.go
@@ -0,0 +1,100 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+	"context"
+	"log/slog"
+)
+
+// FromSlogHandler returns a Logger which writes to the slog.Handler.
+//
+// The logr verbosity level is mapped to slog levels such that V(0) becomes
+// slog.LevelInfo and V(4) becomes slog.LevelDebug.
+func FromSlogHandler(handler slog.Handler) Logger {
+	if handler, ok := handler.(*slogHandler); ok {
+		if handler.sink == nil {
+			return Discard()
+		}
+		return New(handler.sink).V(int(handler.levelBias))
+	}
+	return New(&slogSink{handler: handler})
+}
+
+// ToSlogHandler returns a slog.Handler which writes to the same sink as the Logger.
+//
+// The returned handler writes all records with level >= slog.LevelError as
+// error log entries with LogSink.Error, regardless of the verbosity level of
+// the Logger:
+//
+//	logger := <some Logger with 0 as verbosity level>
+//	slog.New(ToSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...)
+//
+// The level of all other records gets reduced by the verbosity
+// level of the Logger and the result is negated. If it happens
+// to be negative, then it gets replaced by zero because a LogSink
+// is not expected to handle negative levels:
+//
+//	slog.New(ToSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...)
+//	slog.New(ToSlogHandler(logger)).Warn(...) -> logger.GetSink().Info(level=0, ...)
+//	slog.New(ToSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...)
+//	slog.New(ToSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...)
+func ToSlogHandler(logger Logger) slog.Handler {
+	if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 {
+		return sink.handler
+	}
+
+	handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())}
+	if slogSink, ok := handler.sink.(SlogSink); ok {
+		handler.slogSink = slogSink
+	}
+	return handler
+}
+
+// SlogSink is an optional interface that a LogSink can implement to support
+// logging through the slog.Logger or slog.Handler APIs better. It then should
+// also support special slog values like slog.Group. When used as a
+// slog.Handler, the advantages are:
+//
+//   - stack unwinding gets avoided in favor of logging the pre-recorded PC,
+//     as intended by slog
+//   - proper grouping of key/value pairs via WithGroup
+//   - verbosity levels > slog.LevelInfo can be recorded
+//   - less overhead
+//
+// Both APIs (Logger and slog.Logger/Handler) are then supported equally
+// well. Developers can pick whatever API suits them better and/or mix
+// packages which use either API in the same binary with a common logging
+// implementation.
+//
+// This interface is necessary because the type implementing the LogSink
+// interface cannot also implement the slog.Handler interface due to the
+// different prototype of the common Enabled method.
+//
+// An implementation could support both interfaces in two different types, but then
+// additional interfaces would be needed to convert between those types in FromSlogHandler
+// and ToSlogHandler.
+type SlogSink interface {
+	LogSink
+
+	Handle(ctx context.Context, record slog.Record) error
+	WithAttrs(attrs []slog.Attr) SlogSink
+	WithGroup(name string) SlogSink
+}
diff --git a/vendor/github.com/go-logr/logr/slogsink.go b/vendor/github.com/go-logr/logr/slogsink.go
new file mode 100644
index 00000000..4060fcbc
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/slogsink.go
@@ -0,0 +1,120 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+	"context"
+	"log/slog"
+	"runtime"
+	"time"
+)
+
+var (
+	_ LogSink          = &slogSink{}
+	_ CallDepthLogSink = &slogSink{}
+	_ Underlier        = &slogSink{}
+)
+
+// Underlier is implemented by the LogSink returned by FromSlogHandler.
+type Underlier interface {
+	// GetUnderlying returns the Handler used by the LogSink.
+	GetUnderlying() slog.Handler
+}
+
+const (
+	// nameKey is used to log the `WithName` values as an additional attribute.
+	nameKey = "logger"
+
+	// errKey is used to log the error parameter of Error as an additional attribute.
+	errKey = "err"
+)
+
+type slogSink struct {
+	callDepth int
+	name      string
+	handler   slog.Handler
+}
+
+func (l *slogSink) Init(info RuntimeInfo) {
+	l.callDepth = info.CallDepth
+}
+
+func (l *slogSink) GetUnderlying() slog.Handler {
+	return l.handler
+}
+
+func (l *slogSink) WithCallDepth(depth int) LogSink {
+	newLogger := *l
+	newLogger.callDepth += depth
+	return &newLogger
+}
+
+func (l *slogSink) Enabled(level int) bool {
+	return l.handler.Enabled(context.Background(), slog.Level(-level))
+}
+
+func (l *slogSink) Info(level int, msg string, kvList ...interface{}) {
+	l.log(nil, msg, slog.Level(-level), kvList...)
+}
+
+func (l *slogSink) Error(err error, msg string, kvList ...interface{}) {
+	l.log(err, msg, slog.LevelError, kvList...)
+}
+
+func (l *slogSink) log(err error, msg string, level slog.Level, kvList ...interface{}) {
+	var pcs [1]uintptr
+	// skip runtime.Callers, this function, Info/Error, and all helper functions above that.
+	runtime.Callers(3+l.callDepth, pcs[:])
+
+	record := slog.NewRecord(time.Now(), level, msg, pcs[0])
+	if l.name != "" {
+		record.AddAttrs(slog.String(nameKey, l.name))
+	}
+	if err != nil {
+		record.AddAttrs(slog.Any(errKey, err))
+	}
+	record.Add(kvList...)
+	_ = l.handler.Handle(context.Background(), record)
+}
+
+func (l slogSink) WithName(name string) LogSink {
+	if l.name != "" {
+		l.name += "/"
+	}
+	l.name += name
+	return &l
+}
+
+func (l slogSink) WithValues(kvList ...interface{}) LogSink {
+	l.handler = l.handler.WithAttrs(kvListToAttrs(kvList...))
+	return &l
+}
+
+func kvListToAttrs(kvList ...interface{}) []slog.Attr {
+	// We don't need the record itself, only its Add method.
+	record := slog.NewRecord(time.Time{}, 0, "", 0)
+	record.Add(kvList...)
+	attrs := make([]slog.Attr, 0, record.NumAttrs())
+	record.Attrs(func(attr slog.Attr) bool {
+		attrs = append(attrs, attr)
+		return true
+	})
+	return attrs
+}
diff --git a/vendor/github.com/minio/c2goasm/LICENSE b/vendor/github.com/go-logr/stdr/LICENSE
similarity index 99%
rename from vendor/github.com/minio/c2goasm/LICENSE
rename to vendor/github.com/go-logr/stdr/LICENSE
index d6456956..261eeb9e 100644
--- a/vendor/github.com/minio/c2goasm/LICENSE
+++ b/vendor/github.com/go-logr/stdr/LICENSE
@@ -1,4 +1,3 @@
-
                                  Apache License
                            Version 2.0, January 2004
                         http://www.apache.org/licenses/
diff --git a/vendor/github.com/go-logr/stdr/README.md b/vendor/github.com/go-logr/stdr/README.md
new file mode 100644
index 00000000..51586678
--- /dev/null
+++ b/vendor/github.com/go-logr/stdr/README.md
@@ -0,0 +1,6 @@
+# Minimal Go logging using logr and Go's standard library
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/stdr.svg)](https://pkg.go.dev/github.com/go-logr/stdr)
+
+This package implements the [logr interface](https://github.com/go-logr/logr)
+in terms of Go's standard [log package](https://pkg.go.dev/log).
diff --git a/vendor/github.com/go-logr/stdr/stdr.go b/vendor/github.com/go-logr/stdr/stdr.go
new file mode 100644
index 00000000..93a8aab5
--- /dev/null
+++ b/vendor/github.com/go-logr/stdr/stdr.go
@@ -0,0 +1,170 @@
+/*
+Copyright 2019 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package stdr implements github.com/go-logr/logr.Logger in terms of
+// Go's standard log package.
+package stdr
+
+import (
+	"log"
+	"os"
+
+	"github.com/go-logr/logr"
+	"github.com/go-logr/logr/funcr"
+)
+
+// The global verbosity level. See SetVerbosity().
+var globalVerbosity int
+
+// SetVerbosity sets the global level against which all info logs will be
+// compared. If this is greater than or equal to the "V" of the logger, the
+// message will be logged. A higher value here means more logs will be written.
+// The previous verbosity value is returned. This is not concurrent-safe -
+// callers must be sure to call it from only one goroutine.
+func SetVerbosity(v int) int {
+	old := globalVerbosity
+	globalVerbosity = v
+	return old
+}
+
+// New returns a logr.Logger which is implemented by Go's standard log package,
+// or something like it. If std is nil, this will use a default logger
+// instead.
+//
+// Example: stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))
+func New(std StdLogger) logr.Logger {
+	return NewWithOptions(std, Options{})
+}
+
+// NewWithOptions returns a logr.Logger which is implemented by Go's standard
+// log package, or something like it. See New for details.
+func NewWithOptions(std StdLogger, opts Options) logr.Logger {
+	if std == nil {
+		// Go's log.Default() is only available in 1.16 and higher.
+		std = log.New(os.Stderr, "", log.LstdFlags)
+	}
+
+	if opts.Depth < 0 {
+		opts.Depth = 0
+	}
+
+	fopts := funcr.Options{
+		LogCaller: funcr.MessageClass(opts.LogCaller),
+	}
+
+	sl := &logger{
+		Formatter: funcr.NewFormatter(fopts),
+		std:       std,
+	}
+
+	// For skipping our own logger.Info/Error.
+	sl.Formatter.AddCallDepth(1 + opts.Depth)
+
+	return logr.New(sl)
+}
+
+// Options carries parameters which influence the way logs are generated.
+type Options struct {
+	// Depth biases the assumed number of call frames to the "true" caller.
+	// This is useful when the calling code calls a function which then calls
+	// stdr (e.g. a logging shim to another API). Values less than zero will
+	// be treated as zero.
+	Depth int
+
+	// LogCaller tells stdr to add a "caller" key to some or all log lines.
+	// Go's log package has options to log this natively, too.
+	LogCaller MessageClass
+
+	// TODO: add an option to log the date/time
+}
+
+// MessageClass indicates which category or categories of messages to consider.
+type MessageClass int
+
+const (
+	// None ignores all message classes.
+	None MessageClass = iota
+	// All considers all message classes.
+	All
+	// Info only considers info messages.
+	Info
+	// Error only considers error messages.
+	Error
+)
+
+// StdLogger is the subset of the Go stdlib log.Logger API that is needed for
+// this adapter.
+type StdLogger interface {
+	// Output is the same as log.Output and log.Logger.Output.
+	Output(calldepth int, logline string) error
+}
+
+type logger struct {
+	funcr.Formatter
+	std StdLogger
+}
+
+var _ logr.LogSink = &logger{}
+var _ logr.CallDepthLogSink = &logger{}
+
+func (l logger) Enabled(level int) bool {
+	return globalVerbosity >= level
+}
+
+func (l logger) Info(level int, msg string, kvList ...interface{}) {
+	prefix, args := l.FormatInfo(level, msg, kvList)
+	if prefix != "" {
+		args = prefix + ": " + args
+	}
+	_ = l.std.Output(l.Formatter.GetDepth()+1, args)
+}
+
+func (l logger) Error(err error, msg string, kvList ...interface{}) {
+	prefix, args := l.FormatError(err, msg, kvList)
+	if prefix != "" {
+		args = prefix + ": " + args
+	}
+	_ = l.std.Output(l.Formatter.GetDepth()+1, args)
+}
+
+func (l logger) WithName(name string) logr.LogSink {
+	l.Formatter.AddName(name)
+	return &l
+}
+
+func (l logger) WithValues(kvList ...interface{}) logr.LogSink {
+	l.Formatter.AddValues(kvList)
+	return &l
+}
+
+func (l logger) WithCallDepth(depth int) logr.LogSink {
+	l.Formatter.AddCallDepth(depth)
+	return &l
+}
+
+// Underlier exposes access to the underlying logging implementation. Since
+// callers only have a logr.Logger, they have to know which implementation is
+// in use, so this interface is less of an abstraction and more of a way to test
+// type conversion.
+type Underlier interface { + GetUnderlying() StdLogger +} + +// GetUnderlying returns the StdLogger underneath this logger. Since StdLogger +// is itself an interface, the result may or may not be a Go log.Logger. +func (l logger) GetUnderlying() StdLogger { + return l.std +} diff --git a/vendor/github.com/goccy/go-json/CHANGELOG.md b/vendor/github.com/goccy/go-json/CHANGELOG.md index d63009fd..d09bb89c 100644 --- a/vendor/github.com/goccy/go-json/CHANGELOG.md +++ b/vendor/github.com/goccy/go-json/CHANGELOG.md @@ -1,3 +1,35 @@ +# v0.10.2 - 2023/03/20 + +### New features + +* Support DebugDOT option for debugging encoder ( #440 ) + +### Fix bugs + +* Fix combination of embedding structure and omitempty option ( #442 ) + +# v0.10.1 - 2023/03/13 + +### Fix bugs + +* Fix checkptr error for array decoder ( #415 ) +* Fix added buffer size check when decoding key ( #430 ) +* Fix handling of anonymous fields other than struct ( #431 ) +* Fix to not optimize when lower conversion can't handle byte-by-byte ( #432 ) +* Fix a problem that MarshalIndent does not work when UnorderedMap is specified ( #435 ) +* Fix mapDecoder.DecodeStream() for empty objects containing whitespace ( #425 ) +* Fix an issue that could not set the correct NextField for fields in the embedded structure ( #438 ) + +# v0.10.0 - 2022/11/29 + +### New features + +* Support JSON Path ( #250 ) + +### Fix bugs + +* Fix marshaler for map's key ( #409 ) + # v0.9.11 - 2022/08/18 ### Fix bugs diff --git a/vendor/github.com/goccy/go-json/internal/decoder/array.go b/vendor/github.com/goccy/go-json/internal/decoder/array.go index 8ef91cfa..4b23ed43 100644 --- a/vendor/github.com/goccy/go-json/internal/decoder/array.go +++ b/vendor/github.com/goccy/go-json/internal/decoder/array.go @@ -19,7 +19,9 @@ type arrayDecoder struct { } func newArrayDecoder(dec Decoder, elemType *runtime.Type, alen int, structName, fieldName string) *arrayDecoder { - zeroValue := *(*unsafe.Pointer)(unsafe_New(elemType)) + // workaround to avoid checkptr errors. cannot use `*(*unsafe.Pointer)(unsafe_New(elemType))` directly. 
+ zeroValuePtr := unsafe_New(elemType) + zeroValue := **(**unsafe.Pointer)(unsafe.Pointer(&zeroValuePtr)) return &arrayDecoder{ valueDecoder: dec, elemType: elemType, diff --git a/vendor/github.com/goccy/go-json/internal/decoder/map.go b/vendor/github.com/goccy/go-json/internal/decoder/map.go index 7a6eea34..07a9caea 100644 --- a/vendor/github.com/goccy/go-json/internal/decoder/map.go +++ b/vendor/github.com/goccy/go-json/internal/decoder/map.go @@ -88,7 +88,7 @@ func (d *mapDecoder) DecodeStream(s *Stream, depth int64, p unsafe.Pointer) erro mapValue = makemap(d.mapType, 0) } s.cursor++ - if s.equalChar('}') { + if s.skipWhiteSpace() == '}' { *(*unsafe.Pointer)(p) = mapValue s.cursor++ return nil diff --git a/vendor/github.com/goccy/go-json/internal/decoder/struct.go b/vendor/github.com/goccy/go-json/internal/decoder/struct.go index 6d326548..313da153 100644 --- a/vendor/github.com/goccy/go-json/internal/decoder/struct.go +++ b/vendor/github.com/goccy/go-json/internal/decoder/struct.go @@ -51,6 +51,14 @@ func init() { } } +func toASCIILower(s string) string { + b := []byte(s) + for i := range b { + b[i] = largeToSmallTable[b[i]] + } + return string(b) +} + func newStructDecoder(structName, fieldName string, fieldMap map[string]*structFieldSet) *structDecoder { return &structDecoder{ fieldMap: fieldMap, @@ -91,6 +99,10 @@ func (d *structDecoder) tryOptimize() { for k, v := range d.fieldMap { key := strings.ToLower(k) if key != k { + if key != toASCIILower(k) { + d.isTriedOptimize = true + return + } // already exists same key (e.g. Hello and HELLO has same lower case key if _, exists := conflicted[key]; exists { d.isTriedOptimize = true @@ -158,49 +170,53 @@ func (d *structDecoder) tryOptimize() { } // decode from '\uXXXX' -func decodeKeyCharByUnicodeRune(buf []byte, cursor int64) ([]byte, int64) { +func decodeKeyCharByUnicodeRune(buf []byte, cursor int64) ([]byte, int64, error) { const defaultOffset = 4 const surrogateOffset = 6 + if cursor+defaultOffset >= int64(len(buf)) { + return nil, 0, errors.ErrUnexpectedEndOfJSON("escaped string", cursor) + } + r := unicodeToRune(buf[cursor : cursor+defaultOffset]) if utf16.IsSurrogate(r) { cursor += defaultOffset if cursor+surrogateOffset >= int64(len(buf)) || buf[cursor] != '\\' || buf[cursor+1] != 'u' { - return []byte(string(unicode.ReplacementChar)), cursor + defaultOffset - 1 + return []byte(string(unicode.ReplacementChar)), cursor + defaultOffset - 1, nil } cursor += 2 r2 := unicodeToRune(buf[cursor : cursor+defaultOffset]) if r := utf16.DecodeRune(r, r2); r != unicode.ReplacementChar { - return []byte(string(r)), cursor + defaultOffset - 1 + return []byte(string(r)), cursor + defaultOffset - 1, nil } } - return []byte(string(r)), cursor + defaultOffset - 1 + return []byte(string(r)), cursor + defaultOffset - 1, nil } -func decodeKeyCharByEscapedChar(buf []byte, cursor int64) ([]byte, int64) { +func decodeKeyCharByEscapedChar(buf []byte, cursor int64) ([]byte, int64, error) { c := buf[cursor] cursor++ switch c { case '"': - return []byte{'"'}, cursor + return []byte{'"'}, cursor, nil case '\\': - return []byte{'\\'}, cursor + return []byte{'\\'}, cursor, nil case '/': - return []byte{'/'}, cursor + return []byte{'/'}, cursor, nil case 'b': - return []byte{'\b'}, cursor + return []byte{'\b'}, cursor, nil case 'f': - return []byte{'\f'}, cursor + return []byte{'\f'}, cursor, nil case 'n': - return []byte{'\n'}, cursor + return []byte{'\n'}, cursor, nil case 'r': - return []byte{'\r'}, cursor + return []byte{'\r'}, cursor, nil case 't': - 
return []byte{'\t'}, cursor + return []byte{'\t'}, cursor, nil case 'u': return decodeKeyCharByUnicodeRune(buf, cursor) } - return nil, cursor + return nil, cursor, nil } func decodeKeyByBitmapUint8(d *structDecoder, buf []byte, cursor int64) (int64, *structFieldSet, error) { @@ -242,7 +258,10 @@ func decodeKeyByBitmapUint8(d *structDecoder, buf []byte, cursor int64) (int64, return 0, nil, errors.ErrUnexpectedEndOfJSON("string", cursor) case '\\': cursor++ - chars, nextCursor := decodeKeyCharByEscapedChar(buf, cursor) + chars, nextCursor, err := decodeKeyCharByEscapedChar(buf, cursor) + if err != nil { + return 0, nil, err + } for _, c := range chars { curBit &= bitmap[keyIdx][largeToSmallTable[c]] if curBit == 0 { @@ -305,7 +324,10 @@ func decodeKeyByBitmapUint16(d *structDecoder, buf []byte, cursor int64) (int64, return 0, nil, errors.ErrUnexpectedEndOfJSON("string", cursor) case '\\': cursor++ - chars, nextCursor := decodeKeyCharByEscapedChar(buf, cursor) + chars, nextCursor, err := decodeKeyCharByEscapedChar(buf, cursor) + if err != nil { + return 0, nil, err + } for _, c := range chars { curBit &= bitmap[keyIdx][largeToSmallTable[c]] if curBit == 0 { diff --git a/vendor/github.com/goccy/go-json/internal/encoder/code.go b/vendor/github.com/goccy/go-json/internal/encoder/code.go index 8d62a9cd..5b08faef 100644 --- a/vendor/github.com/goccy/go-json/internal/encoder/code.go +++ b/vendor/github.com/goccy/go-json/internal/encoder/code.go @@ -397,7 +397,10 @@ func (c *StructCode) lastFieldCode(field *StructFieldCode, firstField *Opcode) * func (c *StructCode) lastAnonymousFieldCode(firstField *Opcode) *Opcode { // firstField is special StructHead operation for anonymous structure. // So, StructHead's next operation is truly struct head operation. - lastField := firstField.Next + for firstField.Op == OpStructHead || firstField.Op == OpStructField { + firstField = firstField.Next + } + lastField := firstField for lastField.NextField != nil { lastField = lastField.NextField } @@ -437,11 +440,6 @@ func (c *StructCode) ToOpcode(ctx *compileContext) Opcodes { } if isEndField { endField := fieldCodes.Last() - if isEmbeddedStruct(field) { - firstField.End = endField - lastField := c.lastAnonymousFieldCode(firstField) - lastField.NextField = endField - } if len(codes) > 0 { codes.First().End = endField } else { @@ -698,7 +696,15 @@ func (c *StructFieldCode) addStructEndCode(ctx *compileContext, codes Opcodes) O Indent: ctx.indent, } codes.Last().Next = end - codes.First().NextField = end + code := codes.First() + for code.Op == OpStructField || code.Op == OpStructHead { + code = code.Next + } + for code.NextField != nil { + code = code.NextField + } + code.NextField = end + codes = codes.Add(end) ctx.incOpcodeIndex() return codes diff --git a/vendor/github.com/goccy/go-json/internal/encoder/compiler.go b/vendor/github.com/goccy/go-json/internal/encoder/compiler.go index 3b3ff3fd..3ae39ba8 100644 --- a/vendor/github.com/goccy/go-json/internal/encoder/compiler.go +++ b/vendor/github.com/goccy/go-json/internal/encoder/compiler.go @@ -617,6 +617,13 @@ func (c *Compiler) structCode(typ *runtime.Type, isPtr bool) (*StructCode, error return code, nil } +func toElemType(t *runtime.Type) *runtime.Type { + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + return t +} + func (c *Compiler) structFieldCode(structCode *StructCode, tag *runtime.StructTag, isPtr, isOnlyOneFirstField bool) (*StructFieldCode, error) { field := tag.Field fieldType := runtime.Type2RType(field.Type) @@ -626,7 +633,7 @@ func (c 
*Compiler) structFieldCode(structCode *StructCode, tag *runtime.StructTa key: tag.Key, tag: tag, offset: field.Offset, - isAnonymous: field.Anonymous && !tag.IsTaggedKey, + isAnonymous: field.Anonymous && !tag.IsTaggedKey && toElemType(fieldType).Kind() == reflect.Struct, isTaggedKey: tag.IsTaggedKey, isNilableType: c.isNilableType(fieldType), isNilCheck: true, diff --git a/vendor/github.com/goccy/go-json/internal/encoder/opcode.go b/vendor/github.com/goccy/go-json/internal/encoder/opcode.go index 05fc3ce0..df22f554 100644 --- a/vendor/github.com/goccy/go-json/internal/encoder/opcode.go +++ b/vendor/github.com/goccy/go-json/internal/encoder/opcode.go @@ -1,7 +1,9 @@ package encoder import ( + "bytes" "fmt" + "sort" "strings" "unsafe" @@ -555,6 +557,87 @@ func (c *Opcode) Dump() string { return strings.Join(codes, "\n") } +func (c *Opcode) DumpDOT() string { + type edge struct { + from, to *Opcode + label string + weight int + } + var edges []edge + + b := &bytes.Buffer{} + fmt.Fprintf(b, "digraph \"%p\" {\n", c.Type) + fmt.Fprintln(b, "mclimit=1.5;\nrankdir=TD;\nordering=out;\nnode[shape=box];") + for code := c; !code.IsEnd(); { + label := code.Op.String() + fmt.Fprintf(b, "\"%p\" [label=%q];\n", code, label) + if p := code.Next; p != nil { + edges = append(edges, edge{ + from: code, + to: p, + label: "Next", + weight: 10, + }) + } + if p := code.NextField; p != nil { + edges = append(edges, edge{ + from: code, + to: p, + label: "NextField", + weight: 2, + }) + } + if p := code.End; p != nil { + edges = append(edges, edge{ + from: code, + to: p, + label: "End", + weight: 1, + }) + } + if p := code.Jmp; p != nil { + edges = append(edges, edge{ + from: code, + to: p.Code, + label: "Jmp", + weight: 1, + }) + } + + switch code.Op.CodeType() { + case CodeSliceHead: + code = code.Next + case CodeMapHead: + code = code.Next + case CodeArrayElem, CodeSliceElem: + code = code.End + case CodeMapKey: + code = code.End + case CodeMapValue: + code = code.Next + case CodeMapEnd: + code = code.Next + case CodeStructField: + code = code.Next + case CodeStructEnd: + code = code.Next + default: + code = code.Next + } + if code.IsEnd() { + fmt.Fprintf(b, "\"%p\" [label=%q];\n", code, code.Op.String()) + } + } + sort.Slice(edges, func(i, j int) bool { + return edges[i].to.DisplayIdx < edges[j].to.DisplayIdx + }) + for _, e := range edges { + fmt.Fprintf(b, "\"%p\" -> \"%p\" [label=%q][weight=%d];\n", e.from, e.to, e.label, e.weight) + } + fmt.Fprint(b, "}") + return b.String() +} + func newSliceHeaderCode(ctx *compileContext, typ *runtime.Type) *Opcode { idx := opcodeOffset(ctx.ptrIndex) ctx.incPtrIndex() diff --git a/vendor/github.com/goccy/go-json/internal/encoder/option.go b/vendor/github.com/goccy/go-json/internal/encoder/option.go index 82d5ce3e..12c58e46 100644 --- a/vendor/github.com/goccy/go-json/internal/encoder/option.go +++ b/vendor/github.com/goccy/go-json/internal/encoder/option.go @@ -23,6 +23,7 @@ type Option struct { ColorScheme *ColorScheme Context context.Context DebugOut io.Writer + DebugDOTOut io.WriteCloser } type EncodeFormat struct { diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm/debug_vm.go b/vendor/github.com/goccy/go-json/internal/encoder/vm/debug_vm.go index fbbc0de4..82b6dd47 100644 --- a/vendor/github.com/goccy/go-json/internal/encoder/vm/debug_vm.go +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm/debug_vm.go @@ -2,6 +2,7 @@ package vm import ( "fmt" + "io" "github.com/goccy/go-json/internal/encoder" ) @@ -14,6 +15,11 @@ func DebugRun(ctx 
*encoder.RuntimeContext, b []byte, codeSet *encoder.OpcodeSet) } else { code = codeSet.NoescapeKeyCode } + if wc := ctx.Option.DebugDOTOut; wc != nil { + _, _ = io.WriteString(wc, code.DumpDOT()) + wc.Close() + ctx.Option.DebugDOTOut = nil + } if err := recover(); err != nil { w := ctx.Option.DebugOut diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/util.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/util.go index 60e4a8ed..2395abec 100644 --- a/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/util.go +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_color_indent/util.go @@ -189,7 +189,7 @@ func appendNullComma(ctx *encoder.RuntimeContext, b []byte) []byte { } func appendColon(_ *encoder.RuntimeContext, b []byte) []byte { - return append(b, ':', ' ') + return append(b[:len(b)-2], ':', ' ') } func appendMapKeyValue(ctx *encoder.RuntimeContext, code *encoder.Opcode, b, key, value []byte) []byte { @@ -229,8 +229,9 @@ func appendEmptyObject(_ *encoder.RuntimeContext, b []byte) []byte { func appendObjectEnd(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { last := len(b) - 1 - b[last] = '\n' - b = appendIndent(ctx, b, code.Indent-1) + // replace comma to newline + b[last-1] = '\n' + b = appendIndent(ctx, b[:last], code.Indent) return append(b, '}', ',', '\n') } diff --git a/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/util.go b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/util.go index fca8f185..6cb745e3 100644 --- a/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/util.go +++ b/vendor/github.com/goccy/go-json/internal/encoder/vm_indent/util.go @@ -133,7 +133,7 @@ func appendNullComma(_ *encoder.RuntimeContext, b []byte) []byte { } func appendColon(_ *encoder.RuntimeContext, b []byte) []byte { - return append(b, ':', ' ') + return append(b[:len(b)-2], ':', ' ') } func appendMapKeyValue(ctx *encoder.RuntimeContext, code *encoder.Opcode, b, key, value []byte) []byte { @@ -173,8 +173,9 @@ func appendEmptyObject(_ *encoder.RuntimeContext, b []byte) []byte { func appendObjectEnd(ctx *encoder.RuntimeContext, code *encoder.Opcode, b []byte) []byte { last := len(b) - 1 - b[last] = '\n' - b = appendIndent(ctx, b, code.Indent-1) + // replace comma to newline + b[last-1] = '\n' + b = appendIndent(ctx, b[:last], code.Indent) return append(b, '}', ',', '\n') } diff --git a/vendor/github.com/goccy/go-json/option.go b/vendor/github.com/goccy/go-json/option.go index af400a45..378031a0 100644 --- a/vendor/github.com/goccy/go-json/option.go +++ b/vendor/github.com/goccy/go-json/option.go @@ -48,6 +48,13 @@ func DebugWith(w io.Writer) EncodeOptionFunc { } } +// DebugDOT sets the destination to write opcodes graph. +func DebugDOT(w io.WriteCloser) EncodeOptionFunc { + return func(opt *EncodeOption) { + opt.DebugDOTOut = w + } +} + // Colorize add an identifier for coloring to the string of the encoded result. func Colorize(scheme *ColorScheme) EncodeOptionFunc { return func(opt *EncodeOption) { diff --git a/vendor/github.com/golang/snappy/.gitignore b/vendor/github.com/golang/snappy/.gitignore deleted file mode 100644 index 042091d9..00000000 --- a/vendor/github.com/golang/snappy/.gitignore +++ /dev/null @@ -1,16 +0,0 @@ -cmd/snappytool/snappytool -testdata/bench - -# These explicitly listed benchmark data files are for an obsolete version of -# snappy_test.go. 
-testdata/alice29.txt -testdata/asyoulik.txt -testdata/fireworks.jpeg -testdata/geo.protodata -testdata/html -testdata/html_x_4 -testdata/kppkn.gtb -testdata/lcet10.txt -testdata/paper-100k.pdf -testdata/plrabn12.txt -testdata/urls.10K diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS deleted file mode 100644 index 52ccb5a9..00000000 --- a/vendor/github.com/golang/snappy/AUTHORS +++ /dev/null @@ -1,18 +0,0 @@ -# This is the official list of Snappy-Go authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS files. -# See the latter for an explanation. - -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# Please keep the list sorted. - -Amazon.com, Inc -Damian Gryski -Eric Buth -Google Inc. -Jan Mercl <0xjnml@gmail.com> -Klaus Post -Rodolfo Carvalho -Sebastien Binet diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS deleted file mode 100644 index ea6524dd..00000000 --- a/vendor/github.com/golang/snappy/CONTRIBUTORS +++ /dev/null @@ -1,41 +0,0 @@ -# This is the official list of people who can contribute -# (and typically have contributed) code to the Snappy-Go repository. -# The AUTHORS file lists the copyright holders; this file -# lists people. For example, Google employees are listed here -# but not in AUTHORS, because Google holds the copyright. -# -# The submission process automatically checks to make sure -# that people submitting code are listed in this file (by email address). -# -# Names should be added to this file only after verifying that -# the individual or the individual's organization has agreed to -# the appropriate Contributor License Agreement, found here: -# -# http://code.google.com/legal/individual-cla-v1.0.html -# http://code.google.com/legal/corporate-cla-v1.0.html -# -# The agreement for individuals can be filled out on the web. -# -# When adding J Random Contributor's name to this file, -# either J's name or J's organization's name should be -# added to the AUTHORS file, depending on whether the -# individual or corporate CLA was used. - -# Names should be added to this file like so: -# Name - -# Please keep the list sorted. - -Alex Legg -Damian Gryski -Eric Buth -Jan Mercl <0xjnml@gmail.com> -Jonathan Swinney -Kai Backman -Klaus Post -Marc-Antoine Ruel -Nigel Tao -Rob Pike -Rodolfo Carvalho -Russ Cox -Sebastien Binet diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/golang/snappy/LICENSE deleted file mode 100644 index 6050c10f..00000000 --- a/vendor/github.com/golang/snappy/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README deleted file mode 100644 index cea12879..00000000 --- a/vendor/github.com/golang/snappy/README +++ /dev/null @@ -1,107 +0,0 @@ -The Snappy compression format in the Go programming language. - -To download and install from source: -$ go get github.com/golang/snappy - -Unless otherwise noted, the Snappy-Go source files are distributed -under the BSD-style license found in the LICENSE file. - - - -Benchmarks. - -The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten -or so files, the same set used by the C++ Snappy code (github.com/google/snappy -and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @ -3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29: - -"go test -test.bench=." - -_UFlat0-8 2.19GB/s ± 0% html -_UFlat1-8 1.41GB/s ± 0% urls -_UFlat2-8 23.5GB/s ± 2% jpg -_UFlat3-8 1.91GB/s ± 0% jpg_200 -_UFlat4-8 14.0GB/s ± 1% pdf -_UFlat5-8 1.97GB/s ± 0% html4 -_UFlat6-8 814MB/s ± 0% txt1 -_UFlat7-8 785MB/s ± 0% txt2 -_UFlat8-8 857MB/s ± 0% txt3 -_UFlat9-8 719MB/s ± 1% txt4 -_UFlat10-8 2.84GB/s ± 0% pb -_UFlat11-8 1.05GB/s ± 0% gaviota - -_ZFlat0-8 1.04GB/s ± 0% html -_ZFlat1-8 534MB/s ± 0% urls -_ZFlat2-8 15.7GB/s ± 1% jpg -_ZFlat3-8 740MB/s ± 3% jpg_200 -_ZFlat4-8 9.20GB/s ± 1% pdf -_ZFlat5-8 991MB/s ± 0% html4 -_ZFlat6-8 379MB/s ± 0% txt1 -_ZFlat7-8 352MB/s ± 0% txt2 -_ZFlat8-8 396MB/s ± 1% txt3 -_ZFlat9-8 327MB/s ± 1% txt4 -_ZFlat10-8 1.33GB/s ± 1% pb -_ZFlat11-8 605MB/s ± 1% gaviota - - - -"go test -test.bench=. 
-tags=noasm" - -_UFlat0-8 621MB/s ± 2% html -_UFlat1-8 494MB/s ± 1% urls -_UFlat2-8 23.2GB/s ± 1% jpg -_UFlat3-8 1.12GB/s ± 1% jpg_200 -_UFlat4-8 4.35GB/s ± 1% pdf -_UFlat5-8 609MB/s ± 0% html4 -_UFlat6-8 296MB/s ± 0% txt1 -_UFlat7-8 288MB/s ± 0% txt2 -_UFlat8-8 309MB/s ± 1% txt3 -_UFlat9-8 280MB/s ± 1% txt4 -_UFlat10-8 753MB/s ± 0% pb -_UFlat11-8 400MB/s ± 0% gaviota - -_ZFlat0-8 409MB/s ± 1% html -_ZFlat1-8 250MB/s ± 1% urls -_ZFlat2-8 12.3GB/s ± 1% jpg -_ZFlat3-8 132MB/s ± 0% jpg_200 -_ZFlat4-8 2.92GB/s ± 0% pdf -_ZFlat5-8 405MB/s ± 1% html4 -_ZFlat6-8 179MB/s ± 1% txt1 -_ZFlat7-8 170MB/s ± 1% txt2 -_ZFlat8-8 189MB/s ± 1% txt3 -_ZFlat9-8 164MB/s ± 1% txt4 -_ZFlat10-8 479MB/s ± 1% pb -_ZFlat11-8 270MB/s ± 1% gaviota - - - -For comparison (Go's encoded output is byte-for-byte identical to C++'s), here -are the numbers from C++ Snappy's - -make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log - -BM_UFlat/0 2.4GB/s html -BM_UFlat/1 1.4GB/s urls -BM_UFlat/2 21.8GB/s jpg -BM_UFlat/3 1.5GB/s jpg_200 -BM_UFlat/4 13.3GB/s pdf -BM_UFlat/5 2.1GB/s html4 -BM_UFlat/6 1.0GB/s txt1 -BM_UFlat/7 959.4MB/s txt2 -BM_UFlat/8 1.0GB/s txt3 -BM_UFlat/9 864.5MB/s txt4 -BM_UFlat/10 2.9GB/s pb -BM_UFlat/11 1.2GB/s gaviota - -BM_ZFlat/0 944.3MB/s html (22.31 %) -BM_ZFlat/1 501.6MB/s urls (47.78 %) -BM_ZFlat/2 14.3GB/s jpg (99.95 %) -BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %) -BM_ZFlat/4 8.3GB/s pdf (83.30 %) -BM_ZFlat/5 903.5MB/s html4 (22.52 %) -BM_ZFlat/6 336.0MB/s txt1 (57.88 %) -BM_ZFlat/7 312.3MB/s txt2 (61.91 %) -BM_ZFlat/8 353.1MB/s txt3 (54.99 %) -BM_ZFlat/9 289.9MB/s txt4 (66.26 %) -BM_ZFlat/10 1.2GB/s pb (19.68 %) -BM_ZFlat/11 527.4MB/s gaviota (37.72 %) diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go deleted file mode 100644 index 23c6e26c..00000000 --- a/vendor/github.com/golang/snappy/decode.go +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "encoding/binary" - "errors" - "io" -) - -var ( - // ErrCorrupt reports that the input is invalid. - ErrCorrupt = errors.New("snappy: corrupt input") - // ErrTooLarge reports that the uncompressed length is too large. - ErrTooLarge = errors.New("snappy: decoded block is too large") - // ErrUnsupported reports that the input isn't supported. - ErrUnsupported = errors.New("snappy: unsupported input") - - errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") -) - -// DecodedLen returns the length of the decoded block. -func DecodedLen(src []byte) (int, error) { - v, _, err := decodedLen(src) - return v, err -} - -// decodedLen returns the length of the decoded block and the number of bytes -// that the length header occupied. -func decodedLen(src []byte) (blockLen, headerLen int, err error) { - v, n := binary.Uvarint(src) - if n <= 0 || v > 0xffffffff { - return 0, 0, ErrCorrupt - } - - const wordSize = 32 << (^uint(0) >> 32 & 1) - if wordSize == 32 && v > 0x7fffffff { - return 0, 0, ErrTooLarge - } - return int(v), n, nil -} - -const ( - decodeErrCodeCorrupt = 1 - decodeErrCodeUnsupportedLiteralLength = 2 -) - -// Decode returns the decoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire decoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. 
It is valid to pass a nil dst. -// -// Decode handles the Snappy block format, not the Snappy stream format. -func Decode(dst, src []byte) ([]byte, error) { - dLen, s, err := decodedLen(src) - if err != nil { - return nil, err - } - if dLen <= len(dst) { - dst = dst[:dLen] - } else { - dst = make([]byte, dLen) - } - switch decode(dst, src[s:]) { - case 0: - return dst, nil - case decodeErrCodeUnsupportedLiteralLength: - return nil, errUnsupportedLiteralLength - } - return nil, ErrCorrupt -} - -// NewReader returns a new Reader that decompresses from r, using the framing -// format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -func NewReader(r io.Reader) *Reader { - return &Reader{ - r: r, - decoded: make([]byte, maxBlockSize), - buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), - } -} - -// Reader is an io.Reader that can read Snappy-compressed bytes. -// -// Reader handles the Snappy stream format, not the Snappy block format. -type Reader struct { - r io.Reader - err error - decoded []byte - buf []byte - // decoded[i:j] contains decoded bytes that have not yet been passed on. - i, j int - readHeader bool -} - -// Reset discards any buffered data, resets all state, and switches the Snappy -// reader to read from r. This permits reusing a Reader rather than allocating -// a new one. -func (r *Reader) Reset(reader io.Reader) { - r.r = reader - r.err = nil - r.i = 0 - r.j = 0 - r.readHeader = false -} - -func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { - if _, r.err = io.ReadFull(r.r, p); r.err != nil { - if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { - r.err = ErrCorrupt - } - return false - } - return true -} - -func (r *Reader) fill() error { - for r.i >= r.j { - if !r.readFull(r.buf[:4], true) { - return r.err - } - chunkType := r.buf[0] - if !r.readHeader { - if chunkType != chunkTypeStreamIdentifier { - r.err = ErrCorrupt - return r.err - } - r.readHeader = true - } - chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 - if chunkLen > len(r.buf) { - r.err = ErrUnsupported - return r.err - } - - // The chunk types are specified at - // https://github.com/google/snappy/blob/master/framing_format.txt - switch chunkType { - case chunkTypeCompressedData: - // Section 4.2. Compressed data (chunk type 0x00). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return r.err - } - buf := r.buf[:chunkLen] - if !r.readFull(buf, false) { - return r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - buf = buf[checksumSize:] - - n, err := DecodedLen(buf) - if err != nil { - r.err = err - return r.err - } - if n > len(r.decoded) { - r.err = ErrCorrupt - return r.err - } - if _, err := Decode(r.decoded, buf); err != nil { - r.err = err - return r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeUncompressedData: - // Section 4.3. Uncompressed data (chunk type 0x01). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return r.err - } - buf := r.buf[:checksumSize] - if !r.readFull(buf, false) { - return r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - // Read directly into r.decoded instead of via r.buf. 
- n := chunkLen - checksumSize - if n > len(r.decoded) { - r.err = ErrCorrupt - return r.err - } - if !r.readFull(r.decoded[:n], false) { - return r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeStreamIdentifier: - // Section 4.1. Stream identifier (chunk type 0xff). - if chunkLen != len(magicBody) { - r.err = ErrCorrupt - return r.err - } - if !r.readFull(r.buf[:len(magicBody)], false) { - return r.err - } - for i := 0; i < len(magicBody); i++ { - if r.buf[i] != magicBody[i] { - r.err = ErrCorrupt - return r.err - } - } - continue - } - - if chunkType <= 0x7f { - // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). - r.err = ErrUnsupported - return r.err - } - // Section 4.4 Padding (chunk type 0xfe). - // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). - if !r.readFull(r.buf[:chunkLen], false) { - return r.err - } - } - - return nil -} - -// Read satisfies the io.Reader interface. -func (r *Reader) Read(p []byte) (int, error) { - if r.err != nil { - return 0, r.err - } - - if err := r.fill(); err != nil { - return 0, err - } - - n := copy(p, r.decoded[r.i:r.j]) - r.i += n - return n, nil -} - -// ReadByte satisfies the io.ByteReader interface. -func (r *Reader) ReadByte() (byte, error) { - if r.err != nil { - return 0, r.err - } - - if err := r.fill(); err != nil { - return 0, err - } - - c := r.decoded[r.i] - r.i++ - return c, nil -} diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s deleted file mode 100644 index e6179f65..00000000 --- a/vendor/github.com/golang/snappy/decode_amd64.s +++ /dev/null @@ -1,490 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -// The asm code generally follows the pure Go code in decode_other.go, except -// where marked with a "!!!". - -// func decode(dst, src []byte) int -// -// All local variables fit into registers. The non-zero stack size is only to -// spill registers and push args when issuing a CALL. The register allocation: -// - AX scratch -// - BX scratch -// - CX length or x -// - DX offset -// - SI &src[s] -// - DI &dst[d] -// + R8 dst_base -// + R9 dst_len -// + R10 dst_base + dst_len -// + R11 src_base -// + R12 src_len -// + R13 src_base + src_len -// - R14 used by doCopy -// - R15 used by doCopy -// -// The registers R8-R13 (marked with a "+") are set at the start of the -// function, and after a CALL returns, and are not otherwise modified. -// -// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. -// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. -TEXT ·decode(SB), NOSPLIT, $48-56 - // Initialize SI, DI and R8-R13. - MOVQ dst_base+0(FP), R8 - MOVQ dst_len+8(FP), R9 - MOVQ R8, DI - MOVQ R8, R10 - ADDQ R9, R10 - MOVQ src_base+24(FP), R11 - MOVQ src_len+32(FP), R12 - MOVQ R11, SI - MOVQ R11, R13 - ADDQ R12, R13 - -loop: - // for s < len(src) - CMPQ SI, R13 - JEQ end - - // CX = uint32(src[s]) - // - // switch src[s] & 0x03 - MOVBLZX (SI), CX - MOVL CX, BX - ANDL $3, BX - CMPL BX, $1 - JAE tagCopy - - // ---------------------------------------- - // The code below handles literal tags. 
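For context, Reader.fill above parses each stream chunk from a fixed 4-byte header: byte 0 is the chunk type and bytes 1-3 are a little-endian 24-bit body length. A minimal Go sketch of that parse, with parseChunkHeader as a hypothetical helper name (the deleted code inlines this logic):

    // parseChunkHeader splits a Snappy framing-format chunk header.
    // h[0] is the chunk type; h[1:4] encode the body length, little-endian.
    func parseChunkHeader(h []byte) (chunkType byte, chunkLen int) {
        return h[0], int(h[1]) | int(h[2])<<8 | int(h[3])<<16
    }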
- - // case tagLiteral: - // x := uint32(src[s] >> 2) - // switch - SHRL $2, CX - CMPL CX, $60 - JAE tagLit60Plus - - // case x < 60: - // s++ - INCQ SI - -doLit: - // This is the end of the inner "switch", when we have a literal tag. - // - // We assume that CX == x and x fits in a uint32, where x is the variable - // used in the pure Go decode_other.go code. - - // length = int(x) + 1 - // - // Unlike the pure Go code, we don't need to check if length <= 0 because - // CX can hold 64 bits, so the increment cannot overflow. - INCQ CX - - // Prepare to check if copying length bytes will run past the end of dst or - // src. - // - // AX = len(dst) - d - // BX = len(src) - s - MOVQ R10, AX - SUBQ DI, AX - MOVQ R13, BX - SUBQ SI, BX - - // !!! Try a faster technique for short (16 or fewer bytes) copies. - // - // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { - // goto callMemmove // Fall back on calling runtime·memmove. - // } - // - // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s - // against 21 instead of 16, because it cannot assume that all of its input - // is contiguous in memory and so it needs to leave enough source bytes to - // read the next tag without refilling buffers, but Go's Decode assumes - // contiguousness (the src argument is a []byte). - CMPQ CX, $16 - JGT callMemmove - CMPQ AX, $16 - JLT callMemmove - CMPQ BX, $16 - JLT callMemmove - - // !!! Implement the copy from src to dst as a 16-byte load and store. - // (Decode's documentation says that dst and src must not overlap.) - // - // This always copies 16 bytes, instead of only length bytes, but that's - // OK. If the input is a valid Snappy encoding then subsequent iterations - // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a - // non-nil error), so the overrun will be ignored. - // - // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or - // 16-byte loads and stores. This technique probably wouldn't be as - // effective on architectures that are fussier about alignment. - MOVOU 0(SI), X0 - MOVOU X0, 0(DI) - - // d += length - // s += length - ADDQ CX, DI - ADDQ CX, SI - JMP loop - -callMemmove: - // if length > len(dst)-d || length > len(src)-s { etc } - CMPQ CX, AX - JGT errCorrupt - CMPQ CX, BX - JGT errCorrupt - - // copy(dst[d:], src[s:s+length]) - // - // This means calling runtime·memmove(&dst[d], &src[s], length), so we push - // DI, SI and CX as arguments. Coincidentally, we also need to spill those - // three registers to the stack, to save local variables across the CALL. - MOVQ DI, 0(SP) - MOVQ SI, 8(SP) - MOVQ CX, 16(SP) - MOVQ DI, 24(SP) - MOVQ SI, 32(SP) - MOVQ CX, 40(SP) - CALL runtime·memmove(SB) - - // Restore local variables: unspill registers from the stack and - // re-calculate R8-R13. - MOVQ 24(SP), DI - MOVQ 32(SP), SI - MOVQ 40(SP), CX - MOVQ dst_base+0(FP), R8 - MOVQ dst_len+8(FP), R9 - MOVQ R8, R10 - ADDQ R9, R10 - MOVQ src_base+24(FP), R11 - MOVQ src_len+32(FP), R12 - MOVQ R11, R13 - ADDQ R12, R13 - - // d += length - // s += length - ADDQ CX, DI - ADDQ CX, SI - JMP loop - -tagLit60Plus: - // !!! This fragment does the - // - // s += x - 58; if uint(s) > uint(len(src)) { etc } - // - // checks. In the asm version, we code it once instead of once per switch case. 
- ADDQ CX, SI - SUBQ $58, SI - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // case x == 60: - CMPL CX, $61 - JEQ tagLit61 - JA tagLit62Plus - - // x = uint32(src[s-1]) - MOVBLZX -1(SI), CX - JMP doLit - -tagLit61: - // case x == 61: - // x = uint32(src[s-2]) | uint32(src[s-1])<<8 - MOVWLZX -2(SI), CX - JMP doLit - -tagLit62Plus: - CMPL CX, $62 - JA tagLit63 - - // case x == 62: - // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - MOVWLZX -3(SI), CX - MOVBLZX -1(SI), BX - SHLL $16, BX - ORL BX, CX - JMP doLit - -tagLit63: - // case x == 63: - // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - MOVL -4(SI), CX - JMP doLit - -// The code above handles literal tags. -// ---------------------------------------- -// The code below handles copy tags. - -tagCopy4: - // case tagCopy4: - // s += 5 - ADDQ $5, SI - - // if uint(s) > uint(len(src)) { etc } - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // length = 1 + int(src[s-5])>>2 - SHRQ $2, CX - INCQ CX - - // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - MOVLQZX -4(SI), DX - JMP doCopy - -tagCopy2: - // case tagCopy2: - // s += 3 - ADDQ $3, SI - - // if uint(s) > uint(len(src)) { etc } - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // length = 1 + int(src[s-3])>>2 - SHRQ $2, CX - INCQ CX - - // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - MOVWQZX -2(SI), DX - JMP doCopy - -tagCopy: - // We have a copy tag. We assume that: - // - BX == src[s] & 0x03 - // - CX == src[s] - CMPQ BX, $2 - JEQ tagCopy2 - JA tagCopy4 - - // case tagCopy1: - // s += 2 - ADDQ $2, SI - - // if uint(s) > uint(len(src)) { etc } - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - MOVQ CX, DX - ANDQ $0xe0, DX - SHLQ $3, DX - MOVBQZX -1(SI), BX - ORQ BX, DX - - // length = 4 + int(src[s-2])>>2&0x7 - SHRQ $2, CX - ANDQ $7, CX - ADDQ $4, CX - -doCopy: - // This is the end of the outer "switch", when we have a copy tag. - // - // We assume that: - // - CX == length && CX > 0 - // - DX == offset - - // if offset <= 0 { etc } - CMPQ DX, $0 - JLE errCorrupt - - // if d < offset { etc } - MOVQ DI, BX - SUBQ R8, BX - CMPQ BX, DX - JLT errCorrupt - - // if length > len(dst)-d { etc } - MOVQ R10, BX - SUBQ DI, BX - CMPQ CX, BX - JGT errCorrupt - - // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length - // - // Set: - // - R14 = len(dst)-d - // - R15 = &dst[d-offset] - MOVQ R10, R14 - SUBQ DI, R14 - MOVQ DI, R15 - SUBQ DX, R15 - - // !!! Try a faster technique for short (16 or fewer bytes) forward copies. - // - // First, try using two 8-byte load/stores, similar to the doLit technique - // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is - // still OK if offset >= 8. Note that this has to be two 8-byte load/stores - // and not one 16-byte load/store, and the first store has to be before the - // second load, due to the overlap if offset is in the range [8, 16). - // - // if length > 16 || offset < 8 || len(dst)-d < 16 { - // goto slowForwardCopy - // } - // copy 16 bytes - // d += length - CMPQ CX, $16 - JGT slowForwardCopy - CMPQ DX, $8 - JLT slowForwardCopy - CMPQ R14, $16 - JLT slowForwardCopy - MOVQ 0(R15), AX - MOVQ AX, 0(DI) - MOVQ 8(R15), BX - MOVQ BX, 8(DI) - ADDQ CX, DI - JMP loop - -slowForwardCopy: - // !!! 
If the forward copy is longer than 16 bytes, or if offset < 8, we - // can still try 8-byte load stores, provided we can overrun up to 10 extra - // bytes. As above, the overrun will be fixed up by subsequent iterations - // of the outermost loop. - // - // The C++ snappy code calls this technique IncrementalCopyFastPath. Its - // commentary says: - // - // ---- - // - // The main part of this loop is a simple copy of eight bytes at a time - // until we've copied (at least) the requested amount of bytes. However, - // if d and d-offset are less than eight bytes apart (indicating a - // repeating pattern of length < 8), we first need to expand the pattern in - // order to get the correct results. For instance, if the buffer looks like - // this, with the eight-byte <d-offset:d-offset+8> and <d:d+8> patterns marked as - // intervals: - // - // abxxxxxxxxxxxx - // [------] d-offset - // [------] d - // - // a single eight-byte copy from <d-offset> to <d> will repeat the pattern - // once, after which we can move <d> two bytes without moving <d-offset>: - // - // ababxxxxxxxxxx - // [------] d-offset - // [------] d - // - // and repeat the exercise until the two no longer overlap. - // - // This allows us to do very well in the special case of one single byte - // repeated many times, without taking a big hit for more general cases. - // - // The worst case of extra writing past the end of the match occurs when - // offset == 1 and length == 1; the last copy will read from byte positions - // [0..7] and write to [4..11], whereas it was only supposed to write to - // position 1. Thus, ten excess bytes. - // - // ---- - // - // That "10 byte overrun" worst case is confirmed by Go's - // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy - // and finishSlowForwardCopy algorithm. - // - // if length > len(dst)-d-10 { - // goto verySlowForwardCopy - // } - SUBQ $10, R14 - CMPQ CX, R14 - JGT verySlowForwardCopy - -makeOffsetAtLeast8: - // !!! As above, expand the pattern so that offset >= 8 and we can use - // 8-byte load/stores. - // - // for offset < 8 { - // copy 8 bytes from dst[d-offset:] to dst[d:] - // length -= offset - // d += offset - // offset += offset - // // The two previous lines together means that d-offset, and therefore - // // R15, is unchanged. - // } - CMPQ DX, $8 - JGE fixUpSlowForwardCopy - MOVQ (R15), BX - MOVQ BX, (DI) - SUBQ DX, CX - ADDQ DX, DI - ADDQ DX, DX - JMP makeOffsetAtLeast8 - -fixUpSlowForwardCopy: - // !!! Add length (which might be negative now) to d (implied by DI being - // &dst[d]) so that d ends up at the right place when we jump back to the - // top of the loop. Before we do that, though, we save DI to AX so that, if - // length is positive, copying the remaining length bytes will write to the - // right place. - MOVQ DI, AX - ADDQ CX, DI - -finishSlowForwardCopy: - // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative - // length means that we overrun, but as above, that will be fixed up by - // subsequent iterations of the outermost loop. - CMPQ CX, $0 - JLE loop - MOVQ (R15), BX - MOVQ BX, (AX) - ADDQ $8, R15 - ADDQ $8, AX - SUBQ $8, CX - JMP finishSlowForwardCopy - -verySlowForwardCopy: - // verySlowForwardCopy is a simple implementation of forward copy. In C - // parlance, this is a do/while loop instead of a while loop, since we know - // that length > 0.
In Go syntax: - // - // for { - // dst[d] = dst[d - offset] - // d++ - // length-- - // if length == 0 { - // break - // } - // } - MOVB (R15), BX - MOVB BX, (DI) - INCQ R15 - INCQ DI - DECQ CX - JNZ verySlowForwardCopy - JMP loop - -// The code above handles copy tags. -// ---------------------------------------- - -end: - // This is the end of the "for s < len(src)". - // - // if d != len(dst) { etc } - CMPQ DI, R10 - JNE errCorrupt - - // return 0 - MOVQ $0, ret+48(FP) - RET - -errCorrupt: - // return decodeErrCodeCorrupt - MOVQ $1, ret+48(FP) - RET diff --git a/vendor/github.com/golang/snappy/decode_arm64.s b/vendor/github.com/golang/snappy/decode_arm64.s deleted file mode 100644 index 7a3ead17..00000000 --- a/vendor/github.com/golang/snappy/decode_arm64.s +++ /dev/null @@ -1,494 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -// The asm code generally follows the pure Go code in decode_other.go, except -// where marked with a "!!!". - -// func decode(dst, src []byte) int -// -// All local variables fit into registers. The non-zero stack size is only to -// spill registers and push args when issuing a CALL. The register allocation: -// - R2 scratch -// - R3 scratch -// - R4 length or x -// - R5 offset -// - R6 &src[s] -// - R7 &dst[d] -// + R8 dst_base -// + R9 dst_len -// + R10 dst_base + dst_len -// + R11 src_base -// + R12 src_len -// + R13 src_base + src_len -// - R14 used by doCopy -// - R15 used by doCopy -// -// The registers R8-R13 (marked with a "+") are set at the start of the -// function, and after a CALL returns, and are not otherwise modified. -// -// The d variable is implicitly R7 - R8, and len(dst)-d is R10 - R7. -// The s variable is implicitly R6 - R11, and len(src)-s is R13 - R6. -TEXT ·decode(SB), NOSPLIT, $56-56 - // Initialize R6, R7 and R8-R13. - MOVD dst_base+0(FP), R8 - MOVD dst_len+8(FP), R9 - MOVD R8, R7 - MOVD R8, R10 - ADD R9, R10, R10 - MOVD src_base+24(FP), R11 - MOVD src_len+32(FP), R12 - MOVD R11, R6 - MOVD R11, R13 - ADD R12, R13, R13 - -loop: - // for s < len(src) - CMP R13, R6 - BEQ end - - // R4 = uint32(src[s]) - // - // switch src[s] & 0x03 - MOVBU (R6), R4 - MOVW R4, R3 - ANDW $3, R3 - MOVW $1, R1 - CMPW R1, R3 - BGE tagCopy - - // ---------------------------------------- - // The code below handles literal tags. - - // case tagLiteral: - // x := uint32(src[s] >> 2) - // switch - MOVW $60, R1 - LSRW $2, R4, R4 - CMPW R4, R1 - BLS tagLit60Plus - - // case x < 60: - // s++ - ADD $1, R6, R6 - -doLit: - // This is the end of the inner "switch", when we have a literal tag. - // - // We assume that R4 == x and x fits in a uint32, where x is the variable - // used in the pure Go decode_other.go code. - - // length = int(x) + 1 - // - // Unlike the pure Go code, we don't need to check if length <= 0 because - // R4 can hold 64 bits, so the increment cannot overflow. - ADD $1, R4, R4 - - // Prepare to check if copying length bytes will run past the end of dst or - // src. - // - // R2 = len(dst) - d - // R3 = len(src) - s - MOVD R10, R2 - SUB R7, R2, R2 - MOVD R13, R3 - SUB R6, R3, R3 - - // !!! Try a faster technique for short (16 or fewer bytes) copies. - // - // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { - // goto callMemmove // Fall back on calling runtime·memmove. - // } - // - // The C++ snappy code calls this TryFastAppend. 
It also checks len(src)-s - // against 21 instead of 16, because it cannot assume that all of its input - // is contiguous in memory and so it needs to leave enough source bytes to - // read the next tag without refilling buffers, but Go's Decode assumes - // contiguousness (the src argument is a []byte). - CMP $16, R4 - BGT callMemmove - CMP $16, R2 - BLT callMemmove - CMP $16, R3 - BLT callMemmove - - // !!! Implement the copy from src to dst as a 16-byte load and store. - // (Decode's documentation says that dst and src must not overlap.) - // - // This always copies 16 bytes, instead of only length bytes, but that's - // OK. If the input is a valid Snappy encoding then subsequent iterations - // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a - // non-nil error), so the overrun will be ignored. - // - // Note that on arm64, it is legal and cheap to issue unaligned 8-byte or - // 16-byte loads and stores. This technique probably wouldn't be as - // effective on architectures that are fussier about alignment. - LDP 0(R6), (R14, R15) - STP (R14, R15), 0(R7) - - // d += length - // s += length - ADD R4, R7, R7 - ADD R4, R6, R6 - B loop - -callMemmove: - // if length > len(dst)-d || length > len(src)-s { etc } - CMP R2, R4 - BGT errCorrupt - CMP R3, R4 - BGT errCorrupt - - // copy(dst[d:], src[s:s+length]) - // - // This means calling runtime·memmove(&dst[d], &src[s], length), so we push - // R7, R6 and R4 as arguments. Coincidentally, we also need to spill those - // three registers to the stack, to save local variables across the CALL. - MOVD R7, 8(RSP) - MOVD R6, 16(RSP) - MOVD R4, 24(RSP) - MOVD R7, 32(RSP) - MOVD R6, 40(RSP) - MOVD R4, 48(RSP) - CALL runtime·memmove(SB) - - // Restore local variables: unspill registers from the stack and - // re-calculate R8-R13. - MOVD 32(RSP), R7 - MOVD 40(RSP), R6 - MOVD 48(RSP), R4 - MOVD dst_base+0(FP), R8 - MOVD dst_len+8(FP), R9 - MOVD R8, R10 - ADD R9, R10, R10 - MOVD src_base+24(FP), R11 - MOVD src_len+32(FP), R12 - MOVD R11, R13 - ADD R12, R13, R13 - - // d += length - // s += length - ADD R4, R7, R7 - ADD R4, R6, R6 - B loop - -tagLit60Plus: - // !!! This fragment does the - // - // s += x - 58; if uint(s) > uint(len(src)) { etc } - // - // checks. In the asm version, we code it once instead of once per switch case. - ADD R4, R6, R6 - SUB $58, R6, R6 - MOVD R6, R3 - SUB R11, R3, R3 - CMP R12, R3 - BGT errCorrupt - - // case x == 60: - MOVW $61, R1 - CMPW R1, R4 - BEQ tagLit61 - BGT tagLit62Plus - - // x = uint32(src[s-1]) - MOVBU -1(R6), R4 - B doLit - -tagLit61: - // case x == 61: - // x = uint32(src[s-2]) | uint32(src[s-1])<<8 - MOVHU -2(R6), R4 - B doLit - -tagLit62Plus: - CMPW $62, R4 - BHI tagLit63 - - // case x == 62: - // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - MOVHU -3(R6), R4 - MOVBU -1(R6), R3 - ORR R3<<16, R4 - B doLit - -tagLit63: - // case x == 63: - // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - MOVWU -4(R6), R4 - B doLit - - // The code above handles literal tags. - // ---------------------------------------- - // The code below handles copy tags. 
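The copy-tag handlers that follow mirror decode_other.go's switch on the low two bits of the tag byte. A short Go sketch of that dispatch, using the constant values this package defines (tagKind is a hypothetical helper name):

    // The low two bits of a tag byte select how the rest of it is decoded.
    const (
        tagLiteral = 0x00 // literal run follows
        tagCopy1   = 0x01 // 2-byte tag: 3-bit length, 11-bit offset
        tagCopy2   = 0x02 // 3-byte tag: 6-bit length, 16-bit offset
        tagCopy4   = 0x03 // 5-byte tag: 6-bit length, 32-bit offset
    )

    func tagKind(b byte) byte { return b & 0x03 }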
- -tagCopy4: - // case tagCopy4: - // s += 5 - ADD $5, R6, R6 - - // if uint(s) > uint(len(src)) { etc } - MOVD R6, R3 - SUB R11, R3, R3 - CMP R12, R3 - BGT errCorrupt - - // length = 1 + int(src[s-5])>>2 - MOVD $1, R1 - ADD R4>>2, R1, R4 - - // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - MOVWU -4(R6), R5 - B doCopy - -tagCopy2: - // case tagCopy2: - // s += 3 - ADD $3, R6, R6 - - // if uint(s) > uint(len(src)) { etc } - MOVD R6, R3 - SUB R11, R3, R3 - CMP R12, R3 - BGT errCorrupt - - // length = 1 + int(src[s-3])>>2 - MOVD $1, R1 - ADD R4>>2, R1, R4 - - // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - MOVHU -2(R6), R5 - B doCopy - -tagCopy: - // We have a copy tag. We assume that: - // - R3 == src[s] & 0x03 - // - R4 == src[s] - CMP $2, R3 - BEQ tagCopy2 - BGT tagCopy4 - - // case tagCopy1: - // s += 2 - ADD $2, R6, R6 - - // if uint(s) > uint(len(src)) { etc } - MOVD R6, R3 - SUB R11, R3, R3 - CMP R12, R3 - BGT errCorrupt - - // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - MOVD R4, R5 - AND $0xe0, R5 - MOVBU -1(R6), R3 - ORR R5<<3, R3, R5 - - // length = 4 + int(src[s-2])>>2&0x7 - MOVD $7, R1 - AND R4>>2, R1, R4 - ADD $4, R4, R4 - -doCopy: - // This is the end of the outer "switch", when we have a copy tag. - // - // We assume that: - // - R4 == length && R4 > 0 - // - R5 == offset - - // if offset <= 0 { etc } - MOVD $0, R1 - CMP R1, R5 - BLE errCorrupt - - // if d < offset { etc } - MOVD R7, R3 - SUB R8, R3, R3 - CMP R5, R3 - BLT errCorrupt - - // if length > len(dst)-d { etc } - MOVD R10, R3 - SUB R7, R3, R3 - CMP R3, R4 - BGT errCorrupt - - // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length - // - // Set: - // - R14 = len(dst)-d - // - R15 = &dst[d-offset] - MOVD R10, R14 - SUB R7, R14, R14 - MOVD R7, R15 - SUB R5, R15, R15 - - // !!! Try a faster technique for short (16 or fewer bytes) forward copies. - // - // First, try using two 8-byte load/stores, similar to the doLit technique - // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is - // still OK if offset >= 8. Note that this has to be two 8-byte load/stores - // and not one 16-byte load/store, and the first store has to be before the - // second load, due to the overlap if offset is in the range [8, 16). - // - // if length > 16 || offset < 8 || len(dst)-d < 16 { - // goto slowForwardCopy - // } - // copy 16 bytes - // d += length - CMP $16, R4 - BGT slowForwardCopy - CMP $8, R5 - BLT slowForwardCopy - CMP $16, R14 - BLT slowForwardCopy - MOVD 0(R15), R2 - MOVD R2, 0(R7) - MOVD 8(R15), R3 - MOVD R3, 8(R7) - ADD R4, R7, R7 - B loop - -slowForwardCopy: - // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we - // can still try 8-byte load stores, provided we can overrun up to 10 extra - // bytes. As above, the overrun will be fixed up by subsequent iterations - // of the outermost loop. - // - // The C++ snappy code calls this technique IncrementalCopyFastPath. Its - // commentary says: - // - // ---- - // - // The main part of this loop is a simple copy of eight bytes at a time - // until we've copied (at least) the requested amount of bytes. However, - // if d and d-offset are less than eight bytes apart (indicating a - // repeating pattern of length < 8), we first need to expand the pattern in - // order to get the correct results. 
For instance, if the buffer looks like - // this, with the eight-byte <d-offset:d-offset+8> and <d:d+8> patterns marked as - // intervals: - // - // abxxxxxxxxxxxx - // [------] d-offset - // [------] d - // - // a single eight-byte copy from <d-offset> to <d> will repeat the pattern - // once, after which we can move <d> two bytes without moving <d-offset>: - // - // ababxxxxxxxxxx - // [------] d-offset - // [------] d - // - // and repeat the exercise until the two no longer overlap. - // - // This allows us to do very well in the special case of one single byte - // repeated many times, without taking a big hit for more general cases. - // - // The worst case of extra writing past the end of the match occurs when - // offset == 1 and length == 1; the last copy will read from byte positions - // [0..7] and write to [4..11], whereas it was only supposed to write to - // position 1. Thus, ten excess bytes. - // - // ---- - // - // That "10 byte overrun" worst case is confirmed by Go's - // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy - // and finishSlowForwardCopy algorithm. - // - // if length > len(dst)-d-10 { - // goto verySlowForwardCopy - // } - SUB $10, R14, R14 - CMP R14, R4 - BGT verySlowForwardCopy - -makeOffsetAtLeast8: - // !!! As above, expand the pattern so that offset >= 8 and we can use - // 8-byte load/stores. - // - // for offset < 8 { - // copy 8 bytes from dst[d-offset:] to dst[d:] - // length -= offset - // d += offset - // offset += offset - // // The two previous lines together means that d-offset, and therefore - // // R15, is unchanged. - // } - CMP $8, R5 - BGE fixUpSlowForwardCopy - MOVD (R15), R3 - MOVD R3, (R7) - SUB R5, R4, R4 - ADD R5, R7, R7 - ADD R5, R5, R5 - B makeOffsetAtLeast8 - -fixUpSlowForwardCopy: - // !!! Add length (which might be negative now) to d (implied by R7 being - // &dst[d]) so that d ends up at the right place when we jump back to the - // top of the loop. Before we do that, though, we save R7 to R2 so that, if - // length is positive, copying the remaining length bytes will write to the - // right place. - MOVD R7, R2 - ADD R4, R7, R7 - -finishSlowForwardCopy: - // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative - // length means that we overrun, but as above, that will be fixed up by - // subsequent iterations of the outermost loop. - MOVD $0, R1 - CMP R1, R4 - BLE loop - MOVD (R15), R3 - MOVD R3, (R2) - ADD $8, R15, R15 - ADD $8, R2, R2 - SUB $8, R4, R4 - B finishSlowForwardCopy - -verySlowForwardCopy: - // verySlowForwardCopy is a simple implementation of forward copy. In C - // parlance, this is a do/while loop instead of a while loop, since we know - // that length > 0. In Go syntax: - // - // for { - // dst[d] = dst[d - offset] - // d++ - // length-- - // if length == 0 { - // break - // } - // } - MOVB (R15), R3 - MOVB R3, (R7) - ADD $1, R15, R15 - ADD $1, R7, R7 - SUB $1, R4, R4 - CBNZ R4, verySlowForwardCopy - B loop - - // The code above handles copy tags. - // ---------------------------------------- - -end: - // This is the end of the "for s < len(src)". - // - // if d != len(dst) { etc } - CMP R10, R7 - BNE errCorrupt - - // return 0 - MOVD $0, ret+48(FP) - RET - -errCorrupt: - // return decodeErrCodeCorrupt - MOVD $1, R2 - MOVD R2, ret+48(FP) - RET diff --git a/vendor/github.com/golang/snappy/decode_asm.go b/vendor/github.com/golang/snappy/decode_asm.go deleted file mode 100644 index 7082b349..00000000 --- a/vendor/github.com/golang/snappy/decode_asm.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors.
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm -// +build amd64 arm64 - -package snappy - -// decode has the same semantics as in decode_other.go. -// -//go:noescape -func decode(dst, src []byte) int diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go deleted file mode 100644 index 2f672be5..00000000 --- a/vendor/github.com/golang/snappy/decode_other.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !amd64,!arm64 appengine !gc noasm - -package snappy - -// decode writes the decoding of src to dst. It assumes that the varint-encoded -// length of the decompressed bytes has already been read, and that len(dst) -// equals that length. -// -// It returns 0 on success or a decodeErrCodeXxx error code on failure. -func decode(dst, src []byte) int { - var d, s, offset, length int - for s < len(src) { - switch src[s] & 0x03 { - case tagLiteral: - x := uint32(src[s] >> 2) - switch { - case x < 60: - s++ - case x == 60: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-1]) - case x == 61: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-2]) | uint32(src[s-1])<<8 - case x == 62: - s += 4 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - case x == 63: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - length = int(x) + 1 - if length <= 0 { - return decodeErrCodeUnsupportedLiteralLength - } - if length > len(dst)-d || length > len(src)-s { - return decodeErrCodeCorrupt - } - copy(dst[d:], src[s:s+length]) - d += length - s += length - continue - - case tagCopy1: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 4 + int(src[s-2])>>2&0x7 - offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - - case tagCopy2: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-3])>>2 - offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - - case tagCopy4: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-5])>>2 - offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - } - - if offset <= 0 || d < offset || length > len(dst)-d { - return decodeErrCodeCorrupt - } - // Copy from an earlier sub-slice of dst to a later sub-slice. 
- // If no overlap, use the built-in copy: - if offset >= length { - copy(dst[d:d+length], dst[d-offset:]) - d += length - continue - } - - // Unlike the built-in copy function, this byte-by-byte copy always runs - // forwards, even if the slices overlap. Conceptually, this is: - // - // d += forwardCopy(dst[d:d+length], dst[d-offset:]) - // - // We align the slices into a and b and show the compiler they are the same size. - // This allows the loop to run without bounds checks. - a := dst[d : d+length] - b := dst[d-offset:] - b = b[:len(a)] - for i := range a { - a[i] = b[i] - } - d += length - } - if d != len(dst) { - return decodeErrCodeCorrupt - } - return 0 -} diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go deleted file mode 100644 index 7f236570..00000000 --- a/vendor/github.com/golang/snappy/encode.go +++ /dev/null @@ -1,289 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "encoding/binary" - "errors" - "io" -) - -// Encode returns the encoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire encoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -// -// Encode handles the Snappy block format, not the Snappy stream format. -func Encode(dst, src []byte) []byte { - if n := MaxEncodedLen(len(src)); n < 0 { - panic(ErrTooLarge) - } else if len(dst) < n { - dst = make([]byte, n) - } - - // The block starts with the varint-encoded length of the decompressed bytes. - d := binary.PutUvarint(dst, uint64(len(src))) - - for len(src) > 0 { - p := src - src = nil - if len(p) > maxBlockSize { - p, src = p[:maxBlockSize], p[maxBlockSize:] - } - if len(p) < minNonLiteralBlockSize { - d += emitLiteral(dst[d:], p) - } else { - d += encodeBlock(dst[d:], p) - } - } - return dst[:d] -} - -// inputMargin is the minimum number of extra input bytes to keep, inside -// encodeBlock's inner loop. On some architectures, this margin lets us -// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) -// literals can be implemented as a single load to and store from a 16-byte -// register. That literal's actual length can be as short as 1 byte, so this -// can copy up to 15 bytes too much, but that's OK as subsequent iterations of -// the encoding loop will fix up the copy overrun, and this inputMargin ensures -// that we don't overrun the dst and src buffers. -const inputMargin = 16 - 1 - -// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that -// could be encoded with a copy tag. This is the minimum with respect to the -// algorithm used by encodeBlock, not a minimum enforced by the file format. -// -// The encoded output must start with at least a 1 byte literal, as there are -// no previous bytes to copy. A minimal (1 byte) copy after that, generated -// from an emitCopy call in encodeBlock's main loop, would require at least -// another inputMargin bytes, for the reason above: we want any emitLiteral -// calls inside encodeBlock's main loop to use the fast path if possible, which -// requires being able to overrun by inputMargin bytes. Thus, -// minNonLiteralBlockSize equals 1 + 1 + inputMargin. 
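The overlap case in the decode loop above is the subtle one: when offset < length, the source window dst[d-offset:] overlaps the destination dst[d:d+length], and a byte-by-byte copy that always runs forwards deliberately re-reads bytes it has just written, replicating the pattern. A standalone sketch of that behavior (forwardCopy is the hypothetical name the comments use; the deleted code inlines the loop):

    // forwardCopy copies len(dst) bytes and always walks forwards, so
    // overlapping slices of one buffer replicate a repeating pattern.
    func forwardCopy(dst, src []byte) {
        for i := range dst {
            dst[i] = src[i]
        }
    }

For example, with buf = []byte("abxxxx"), forwardCopy(buf[2:], buf[:4]) turns buf into "ababab" (offset 2, length 4).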
-// -// The C++ code doesn't use this exact threshold, but it could, as discussed at -// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion -// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an -// optimization. It should not affect the encoded form. This is tested by -// TestSameEncodingAsCppShortCopies. -const minNonLiteralBlockSize = 1 + 1 + inputMargin - -// MaxEncodedLen returns the maximum length of a snappy block, given its -// uncompressed length. -// -// It will return a negative value if srcLen is too large to encode. -func MaxEncodedLen(srcLen int) int { - n := uint64(srcLen) - if n > 0xffffffff { - return -1 - } - // Compressed data can be defined as: - // compressed := item* literal* - // item := literal* copy - // - // The trailing literal sequence has a space blowup of at most 62/60 - // since a literal of length 60 needs one tag byte + one extra byte - // for length information. - // - // Item blowup is trickier to measure. Suppose the "copy" op copies - // 4 bytes of data. Because of a special check in the encoding code, - // we produce a 4-byte copy only if the offset is < 65536. Therefore - // the copy op takes 3 bytes to encode, and this type of item leads - // to at most the 62/60 blowup for representing literals. - // - // Suppose the "copy" op copies 5 bytes of data. If the offset is big - // enough, it will take 5 bytes to encode the copy op. Therefore the - // worst case here is a one-byte literal followed by a five-byte copy. - // That is, 6 bytes of input turn into 7 bytes of "compressed" data. - // - // This last factor dominates the blowup, so the final estimate is: - n = 32 + n + n/6 - if n > 0xffffffff { - return -1 - } - return int(n) -} - -var errClosed = errors.New("snappy: Writer is closed") - -// NewWriter returns a new Writer that compresses to w. -// -// The Writer returned does not buffer writes. There is no need to Flush or -// Close such a Writer. -// -// Deprecated: the Writer returned is not suitable for many small writes, only -// for few large writes. Use NewBufferedWriter instead, which is efficient -// regardless of the frequency and shape of the writes, and remember to Close -// that Writer when done. -func NewWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - obuf: make([]byte, obufLen), - } -} - -// NewBufferedWriter returns a new Writer that compresses to w, using the -// framing format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -// -// The Writer returned buffers writes. Users must call Close to guarantee all -// data has been forwarded to the underlying io.Writer. They may also call -// Flush zero or more times before calling Close. -func NewBufferedWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - ibuf: make([]byte, 0, maxBlockSize), - obuf: make([]byte, obufLen), - } -} - -// Writer is an io.Writer that can write Snappy-compressed bytes. -// -// Writer handles the Snappy stream format, not the Snappy block format. -type Writer struct { - w io.Writer - err error - - // ibuf is a buffer for the incoming (uncompressed) bytes. - // - // Its use is optional. For backwards compatibility, Writers created by the - // NewWriter function have ibuf == nil, do not buffer incoming bytes, and - // therefore do not need to be Flush'ed or Close'd. - ibuf []byte - - // obuf is a buffer for the outgoing (compressed) bytes. - obuf []byte - - // wroteStreamHeader is whether we have written the stream header. 
- wroteStreamHeader bool -} - -// Reset discards the writer's state and switches the Snappy writer to write to -// w. This permits reusing a Writer rather than allocating a new one. -func (w *Writer) Reset(writer io.Writer) { - w.w = writer - w.err = nil - if w.ibuf != nil { - w.ibuf = w.ibuf[:0] - } - w.wroteStreamHeader = false -} - -// Write satisfies the io.Writer interface. -func (w *Writer) Write(p []byte) (nRet int, errRet error) { - if w.ibuf == nil { - // Do not buffer incoming bytes. This does not perform or compress well - // if the caller of Writer.Write writes many small slices. This - // behavior is therefore deprecated, but still supported for backwards - // compatibility with code that doesn't explicitly Flush or Close. - return w.write(p) - } - - // The remainder of this method is based on bufio.Writer.Write from the - // standard library. - - for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { - var n int - if len(w.ibuf) == 0 { - // Large write, empty buffer. - // Write directly from p to avoid copy. - n, _ = w.write(p) - } else { - n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - w.Flush() - } - nRet += n - p = p[n:] - } - if w.err != nil { - return nRet, w.err - } - n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - nRet += n - return nRet, nil -} - -func (w *Writer) write(p []byte) (nRet int, errRet error) { - if w.err != nil { - return 0, w.err - } - for len(p) > 0 { - obufStart := len(magicChunk) - if !w.wroteStreamHeader { - w.wroteStreamHeader = true - copy(w.obuf, magicChunk) - obufStart = 0 - } - - var uncompressed []byte - if len(p) > maxBlockSize { - uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] - } else { - uncompressed, p = p, nil - } - checksum := crc(uncompressed) - - // Compress the buffer, discarding the result if the improvement - // isn't at least 12.5%. - compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) - chunkType := uint8(chunkTypeCompressedData) - chunkLen := 4 + len(compressed) - obufEnd := obufHeaderLen + len(compressed) - if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { - chunkType = chunkTypeUncompressedData - chunkLen = 4 + len(uncompressed) - obufEnd = obufHeaderLen - } - - // Fill in the per-chunk header that comes before the body. - w.obuf[len(magicChunk)+0] = chunkType - w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) - w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) - w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) - w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) - w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) - w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) - w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) - - if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { - w.err = err - return nRet, err - } - if chunkType == chunkTypeUncompressedData { - if _, err := w.w.Write(uncompressed); err != nil { - w.err = err - return nRet, err - } - } - nRet += len(uncompressed) - } - return nRet, nil -} - -// Flush flushes the Writer to its underlying io.Writer. -func (w *Writer) Flush() error { - if w.err != nil { - return w.err - } - if len(w.ibuf) == 0 { - return nil - } - w.write(w.ibuf) - w.ibuf = w.ibuf[:0] - return w.err -} - -// Close calls Flush and then closes the Writer. 
-func (w *Writer) Close() error { - w.Flush() - ret := w.err - if w.err == nil { - w.err = errClosed - } - return ret -} diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s deleted file mode 100644 index adfd979f..00000000 --- a/vendor/github.com/golang/snappy/encode_amd64.s +++ /dev/null @@ -1,730 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a -// Go toolchain regression. See https://github.com/golang/go/issues/15426 and -// https://github.com/golang/snappy/issues/29 -// -// As a workaround, the package was built with a known good assembler, and -// those instructions were disassembled by "objdump -d" to yield the -// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 -// style comments, in AT&T asm syntax. Note that rsp here is a physical -// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). -// The instructions were then encoded as "BYTE $0x.." sequences, which assemble -// fine on Go 1.6. - -// The asm code generally follows the pure Go code in encode_other.go, except -// where marked with a "!!!". - -// ---------------------------------------------------------------------------- - -// func emitLiteral(dst, lit []byte) int -// -// All local variables fit into registers. The register allocation: -// - AX len(lit) -// - BX n -// - DX return value -// - DI &dst[i] -// - R10 &lit[0] -// -// The 24 bytes of stack space is to call runtime·memmove. -// -// The unusual register allocation of local variables, such as R10 for the -// source pointer, matches the allocation used at the call site in encodeBlock, -// which makes it easier to manually inline this function. -TEXT ·emitLiteral(SB), NOSPLIT, $24-56 - MOVQ dst_base+0(FP), DI - MOVQ lit_base+24(FP), R10 - MOVQ lit_len+32(FP), AX - MOVQ AX, DX - MOVL AX, BX - SUBL $1, BX - - CMPL BX, $60 - JLT oneByte - CMPL BX, $256 - JLT twoBytes - -threeBytes: - MOVB $0xf4, 0(DI) - MOVW BX, 1(DI) - ADDQ $3, DI - ADDQ $3, DX - JMP memmove - -twoBytes: - MOVB $0xf0, 0(DI) - MOVB BX, 1(DI) - ADDQ $2, DI - ADDQ $2, DX - JMP memmove - -oneByte: - SHLB $2, BX - MOVB BX, 0(DI) - ADDQ $1, DI - ADDQ $1, DX - -memmove: - MOVQ DX, ret+48(FP) - - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // DI, R10 and AX as arguments. - MOVQ DI, 0(SP) - MOVQ R10, 8(SP) - MOVQ AX, 16(SP) - CALL runtime·memmove(SB) - RET - -// ---------------------------------------------------------------------------- - -// func emitCopy(dst []byte, offset, length int) int -// -// All local variables fit into registers. The register allocation: -// - AX length -// - SI &dst[0] -// - DI &dst[i] -// - R11 offset -// -// The unusual register allocation of local variables, such as R11 for the -// offset, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. -TEXT ·emitCopy(SB), NOSPLIT, $0-48 - MOVQ dst_base+0(FP), DI - MOVQ DI, SI - MOVQ offset+24(FP), R11 - MOVQ length+32(FP), AX - -loop0: - // for length >= 68 { etc } - CMPL AX, $68 - JLT step1 - - // Emit a length 64 copy, encoded as 3 bytes. 
- MOVB $0xfe, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $64, AX - JMP loop0 - -step1: - // if length > 64 { etc } - CMPL AX, $64 - JLE step2 - - // Emit a length 60 copy, encoded as 3 bytes. - MOVB $0xee, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $60, AX - -step2: - // if length >= 12 || offset >= 2048 { goto step3 } - CMPL AX, $12 - JGE step3 - CMPL R11, $2048 - JGE step3 - - // Emit the remaining copy, encoded as 2 bytes. - MOVB R11, 1(DI) - SHRL $8, R11 - SHLB $5, R11 - SUBB $4, AX - SHLB $2, AX - ORB AX, R11 - ORB $1, R11 - MOVB R11, 0(DI) - ADDQ $2, DI - - // Return the number of bytes written. - SUBQ SI, DI - MOVQ DI, ret+40(FP) - RET - -step3: - // Emit the remaining copy, encoded as 3 bytes. - SUBL $1, AX - SHLB $2, AX - ORB $2, AX - MOVB AX, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - - // Return the number of bytes written. - SUBQ SI, DI - MOVQ DI, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func extendMatch(src []byte, i, j int) int -// -// All local variables fit into registers. The register allocation: -// - DX &src[0] -// - SI &src[j] -// - R13 &src[len(src) - 8] -// - R14 &src[len(src)] -// - R15 &src[i] -// -// The unusual register allocation of local variables, such as R15 for a source -// pointer, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. -TEXT ·extendMatch(SB), NOSPLIT, $0-48 - MOVQ src_base+0(FP), DX - MOVQ src_len+8(FP), R14 - MOVQ i+24(FP), R15 - MOVQ j+32(FP), SI - ADDQ DX, R14 - ADDQ DX, R15 - ADDQ DX, SI - MOVQ R14, R13 - SUBQ $8, R13 - -cmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. - CMPQ SI, R13 - JA cmp1 - MOVQ (R15), AX - MOVQ (SI), BX - CMPQ AX, BX - JNE bsf - ADDQ $8, R15 - ADDQ $8, SI - JMP cmp8 - -bsf: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. The BSF instruction finds the - // least significant 1 bit, the amd64 architecture is little-endian, and - // the shift by 3 converts a bit index to a byte index. - XORQ AX, BX - BSFQ BX, BX - SHRQ $3, BX - ADDQ BX, SI - - // Convert from &src[ret] to ret. - SUBQ DX, SI - MOVQ SI, ret+40(FP) - RET - -cmp1: - // In src's tail, compare 1 byte at a time. - CMPQ SI, R14 - JAE extendMatchEnd - MOVB (R15), AX - MOVB (SI), BX - CMPB AX, BX - JNE extendMatchEnd - ADDQ $1, R15 - ADDQ $1, SI - JMP cmp1 - -extendMatchEnd: - // Convert from &src[ret] to ret. - SUBQ DX, SI - MOVQ SI, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func encodeBlock(dst, src []byte) (d int) -// -// All local variables fit into registers, other than "var table". The register -// allocation: -// - AX . . -// - BX . . -// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). -// - DX 64 &src[0], tableSize -// - SI 72 &src[s] -// - DI 80 &dst[d] -// - R9 88 sLimit -// - R10 . &src[nextEmit] -// - R11 96 prevHash, currHash, nextHash, offset -// - R12 104 &src[base], skip -// - R13 . &src[nextS], &src[len(src) - 8] -// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x -// - R15 112 candidate -// -// The second column (56, 64, etc) is the stack offset to spill the registers -// when calling other functions. We could pack this slightly tighter, but it's -// simpler to have a dedicated spill map independent of the function called. 
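The hashing that encodeBlock performs below as IMULL $0x1e35a7bd followed by a variable right shift corresponds to the helper in encode_other.go, the pure-Go code this asm follows:

    // hash maps the 4 bytes loaded at src[s] to a table index; shift shrinks
    // as len(src) grows, so the table has between 256 and maxTableSize entries.
    func hash(u, shift uint32) uint32 {
        return (u * 0x1e35a7bd) >> shift
    }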
-// -// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An -// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill -// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. -TEXT ·encodeBlock(SB), 0, $32888-56 - MOVQ dst_base+0(FP), DI - MOVQ src_base+24(FP), SI - MOVQ src_len+32(FP), R14 - - // shift, tableSize := uint32(32-8), 1<<8 - MOVQ $24, CX - MOVQ $256, DX - -calcShift: - // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { - // shift-- - // } - CMPQ DX, $16384 - JGE varTable - CMPQ DX, R14 - JGE varTable - SUBQ $1, CX - SHLQ $1, DX - JMP calcShift - -varTable: - // var table [maxTableSize]uint16 - // - // In the asm code, unlike the Go code, we can zero-initialize only the - // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU - // writes 16 bytes, so we can do only tableSize/8 writes instead of the - // 2048 writes that would zero-initialize all of table's 32768 bytes. - SHRQ $3, DX - LEAQ table-32768(SP), BX - PXOR X0, X0 - -memclr: - MOVOU X0, 0(BX) - ADDQ $16, BX - SUBQ $1, DX - JNZ memclr - - // !!! DX = &src[0] - MOVQ SI, DX - - // sLimit := len(src) - inputMargin - MOVQ R14, R9 - SUBQ $15, R9 - - // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't - // change for the rest of the function. - MOVQ CX, 56(SP) - MOVQ DX, 64(SP) - MOVQ R9, 88(SP) - - // nextEmit := 0 - MOVQ DX, R10 - - // s := 1 - ADDQ $1, SI - - // nextHash := hash(load32(src, s), shift) - MOVL 0(SI), R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - -outer: - // for { etc } - - // skip := 32 - MOVQ $32, R12 - - // nextS := s - MOVQ SI, R13 - - // candidate := 0 - MOVQ $0, R15 - -inner0: - // for { etc } - - // s := nextS - MOVQ R13, SI - - // bytesBetweenHashLookups := skip >> 5 - MOVQ R12, R14 - SHRQ $5, R14 - - // nextS = s + bytesBetweenHashLookups - ADDQ R14, R13 - - // skip += bytesBetweenHashLookups - ADDQ R14, R12 - - // if nextS > sLimit { goto emitRemainder } - MOVQ R13, AX - SUBQ DX, AX - CMPQ AX, R9 - JA emitRemainder - - // candidate = int(table[nextHash]) - // XXX: MOVWQZX table-32768(SP)(R11*2), R15 - // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 - BYTE $0x4e - BYTE $0x0f - BYTE $0xb7 - BYTE $0x7c - BYTE $0x5c - BYTE $0x78 - - // table[nextHash] = uint16(s) - MOVQ SI, AX - SUBQ DX, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // nextHash = hash(load32(src, nextS), shift) - MOVL 0(R13), R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // if load32(src, s) != load32(src, candidate) { continue } break - MOVL 0(SI), AX - MOVL (DX)(R15*1), BX - CMPL AX, BX - JNE inner0 - -fourByteMatch: - // As per the encode_other.go code: - // - // A 4-byte match has been found. We'll later see etc. - - // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment - // on inputMargin in encode.go. - MOVQ SI, AX - SUBQ R10, AX - CMPQ AX, $16 - JLE emitLiteralFastPath - - // ---------------------------------------- - // Begin inline of the emitLiteral call. 
- // - // d += emitLiteral(dst[d:], src[nextEmit:s]) - - MOVL AX, BX - SUBL $1, BX - - CMPL BX, $60 - JLT inlineEmitLiteralOneByte - CMPL BX, $256 - JLT inlineEmitLiteralTwoBytes - -inlineEmitLiteralThreeBytes: - MOVB $0xf4, 0(DI) - MOVW BX, 1(DI) - ADDQ $3, DI - JMP inlineEmitLiteralMemmove - -inlineEmitLiteralTwoBytes: - MOVB $0xf0, 0(DI) - MOVB BX, 1(DI) - ADDQ $2, DI - JMP inlineEmitLiteralMemmove - -inlineEmitLiteralOneByte: - SHLB $2, BX - MOVB BX, 0(DI) - ADDQ $1, DI - -inlineEmitLiteralMemmove: - // Spill local variables (registers) onto the stack; call; unspill. - // - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // DI, R10 and AX as arguments. - MOVQ DI, 0(SP) - MOVQ R10, 8(SP) - MOVQ AX, 16(SP) - ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". - MOVQ SI, 72(SP) - MOVQ DI, 80(SP) - MOVQ R15, 112(SP) - CALL runtime·memmove(SB) - MOVQ 56(SP), CX - MOVQ 64(SP), DX - MOVQ 72(SP), SI - MOVQ 80(SP), DI - MOVQ 88(SP), R9 - MOVQ 112(SP), R15 - JMP inner1 - -inlineEmitLiteralEnd: - // End inline of the emitLiteral call. - // ---------------------------------------- - -emitLiteralFastPath: - // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". - MOVB AX, BX - SUBB $1, BX - SHLB $2, BX - MOVB BX, (DI) - ADDQ $1, DI - - // !!! Implement the copy from lit to dst as a 16-byte load and store. - // (Encode's documentation says that dst and src must not overlap.) - // - // This always copies 16 bytes, instead of only len(lit) bytes, but that's - // OK. Subsequent iterations will fix up the overrun. - // - // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or - // 16-byte loads and stores. This technique probably wouldn't be as - // effective on architectures that are fussier about alignment. - MOVOU 0(R10), X0 - MOVOU X0, 0(DI) - ADDQ AX, DI - -inner1: - // for { etc } - - // base := s - MOVQ SI, R12 - - // !!! offset := base - candidate - MOVQ R12, R11 - SUBQ R15, R11 - SUBQ DX, R11 - - // ---------------------------------------- - // Begin inline of the extendMatch call. - // - // s = extendMatch(src, candidate+4, s+4) - - // !!! R14 = &src[len(src)] - MOVQ src_len+32(FP), R14 - ADDQ DX, R14 - - // !!! R13 = &src[len(src) - 8] - MOVQ R14, R13 - SUBQ $8, R13 - - // !!! R15 = &src[candidate + 4] - ADDQ $4, R15 - ADDQ DX, R15 - - // !!! s += 4 - ADDQ $4, SI - -inlineExtendMatchCmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. - CMPQ SI, R13 - JA inlineExtendMatchCmp1 - MOVQ (R15), AX - MOVQ (SI), BX - CMPQ AX, BX - JNE inlineExtendMatchBSF - ADDQ $8, R15 - ADDQ $8, SI - JMP inlineExtendMatchCmp8 - -inlineExtendMatchBSF: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. The BSF instruction finds the - // least significant 1 bit, the amd64 architecture is little-endian, and - // the shift by 3 converts a bit index to a byte index. - XORQ AX, BX - BSFQ BX, BX - SHRQ $3, BX - ADDQ BX, SI - JMP inlineExtendMatchEnd - -inlineExtendMatchCmp1: - // In src's tail, compare 1 byte at a time. - CMPQ SI, R14 - JAE inlineExtendMatchEnd - MOVB (R15), AX - MOVB (SI), BX - CMPB AX, BX - JNE inlineExtendMatchEnd - ADDQ $1, R15 - ADDQ $1, SI - JMP inlineExtendMatchCmp1 - -inlineExtendMatchEnd: - // End inline of the extendMatch call. 
- // ---------------------------------------- - - // ---------------------------------------- - // Begin inline of the emitCopy call. - // - // d += emitCopy(dst[d:], base-candidate, s-base) - - // !!! length := s - base - MOVQ SI, AX - SUBQ R12, AX - -inlineEmitCopyLoop0: - // for length >= 68 { etc } - CMPL AX, $68 - JLT inlineEmitCopyStep1 - - // Emit a length 64 copy, encoded as 3 bytes. - MOVB $0xfe, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $64, AX - JMP inlineEmitCopyLoop0 - -inlineEmitCopyStep1: - // if length > 64 { etc } - CMPL AX, $64 - JLE inlineEmitCopyStep2 - - // Emit a length 60 copy, encoded as 3 bytes. - MOVB $0xee, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $60, AX - -inlineEmitCopyStep2: - // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } - CMPL AX, $12 - JGE inlineEmitCopyStep3 - CMPL R11, $2048 - JGE inlineEmitCopyStep3 - - // Emit the remaining copy, encoded as 2 bytes. - MOVB R11, 1(DI) - SHRL $8, R11 - SHLB $5, R11 - SUBB $4, AX - SHLB $2, AX - ORB AX, R11 - ORB $1, R11 - MOVB R11, 0(DI) - ADDQ $2, DI - JMP inlineEmitCopyEnd - -inlineEmitCopyStep3: - // Emit the remaining copy, encoded as 3 bytes. - SUBL $1, AX - SHLB $2, AX - ORB $2, AX - MOVB AX, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - -inlineEmitCopyEnd: - // End inline of the emitCopy call. - // ---------------------------------------- - - // nextEmit = s - MOVQ SI, R10 - - // if s >= sLimit { goto emitRemainder } - MOVQ SI, AX - SUBQ DX, AX - CMPQ AX, R9 - JAE emitRemainder - - // As per the encode_other.go code: - // - // We could immediately etc. - - // x := load64(src, s-1) - MOVQ -1(SI), R14 - - // prevHash := hash(uint32(x>>0), shift) - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // table[prevHash] = uint16(s-1) - MOVQ SI, AX - SUBQ DX, AX - SUBQ $1, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // currHash := hash(uint32(x>>8), shift) - SHRQ $8, R14 - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // candidate = int(table[currHash]) - // XXX: MOVWQZX table-32768(SP)(R11*2), R15 - // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 - BYTE $0x4e - BYTE $0x0f - BYTE $0xb7 - BYTE $0x7c - BYTE $0x5c - BYTE $0x78 - - // table[currHash] = uint16(s) - ADDQ $1, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // if uint32(x>>8) == load32(src, candidate) { continue } - MOVL (DX)(R15*1), BX - CMPL R14, BX - JEQ inner1 - - // nextHash = hash(uint32(x>>16), shift) - SHRQ $8, R14 - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // s++ - ADDQ $1, SI - - // break out of the inner1 for loop, i.e. continue the outer loop. - JMP outer - -emitRemainder: - // if nextEmit < len(src) { etc } - MOVQ src_len+32(FP), AX - ADDQ DX, AX - CMPQ R10, AX - JEQ encodeBlockEnd - - // d += emitLiteral(dst[d:], src[nextEmit:]) - // - // Push args. - MOVQ DI, 0(SP) - MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. - MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. - MOVQ R10, 24(SP) - SUBQ R10, AX - MOVQ AX, 32(SP) - MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. - - // Spill local variables (registers) onto the stack; call; unspill. 
- MOVQ DI, 80(SP) - CALL ·emitLiteral(SB) - MOVQ 80(SP), DI - - // Finish the "d +=" part of "d += emitLiteral(etc)". - ADDQ 48(SP), DI - -encodeBlockEnd: - MOVQ dst_base+0(FP), AX - SUBQ AX, DI - MOVQ DI, d+48(FP) - RET diff --git a/vendor/github.com/golang/snappy/encode_arm64.s b/vendor/github.com/golang/snappy/encode_arm64.s deleted file mode 100644 index f8d54adf..00000000 --- a/vendor/github.com/golang/snappy/encode_arm64.s +++ /dev/null @@ -1,722 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -// The asm code generally follows the pure Go code in encode_other.go, except -// where marked with a "!!!". - -// ---------------------------------------------------------------------------- - -// func emitLiteral(dst, lit []byte) int -// -// All local variables fit into registers. The register allocation: -// - R3 len(lit) -// - R4 n -// - R6 return value -// - R8 &dst[i] -// - R10 &lit[0] -// -// The 32 bytes of stack space is to call runtime·memmove. -// -// The unusual register allocation of local variables, such as R10 for the -// source pointer, matches the allocation used at the call site in encodeBlock, -// which makes it easier to manually inline this function. -TEXT ·emitLiteral(SB), NOSPLIT, $32-56 - MOVD dst_base+0(FP), R8 - MOVD lit_base+24(FP), R10 - MOVD lit_len+32(FP), R3 - MOVD R3, R6 - MOVW R3, R4 - SUBW $1, R4, R4 - - CMPW $60, R4 - BLT oneByte - CMPW $256, R4 - BLT twoBytes - -threeBytes: - MOVD $0xf4, R2 - MOVB R2, 0(R8) - MOVW R4, 1(R8) - ADD $3, R8, R8 - ADD $3, R6, R6 - B memmove - -twoBytes: - MOVD $0xf0, R2 - MOVB R2, 0(R8) - MOVB R4, 1(R8) - ADD $2, R8, R8 - ADD $2, R6, R6 - B memmove - -oneByte: - LSLW $2, R4, R4 - MOVB R4, 0(R8) - ADD $1, R8, R8 - ADD $1, R6, R6 - -memmove: - MOVD R6, ret+48(FP) - - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // R8, R10 and R3 as arguments. - MOVD R8, 8(RSP) - MOVD R10, 16(RSP) - MOVD R3, 24(RSP) - CALL runtime·memmove(SB) - RET - -// ---------------------------------------------------------------------------- - -// func emitCopy(dst []byte, offset, length int) int -// -// All local variables fit into registers. The register allocation: -// - R3 length -// - R7 &dst[0] -// - R8 &dst[i] -// - R11 offset -// -// The unusual register allocation of local variables, such as R11 for the -// offset, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. -TEXT ·emitCopy(SB), NOSPLIT, $0-48 - MOVD dst_base+0(FP), R8 - MOVD R8, R7 - MOVD offset+24(FP), R11 - MOVD length+32(FP), R3 - -loop0: - // for length >= 68 { etc } - CMPW $68, R3 - BLT step1 - - // Emit a length 64 copy, encoded as 3 bytes. - MOVD $0xfe, R2 - MOVB R2, 0(R8) - MOVW R11, 1(R8) - ADD $3, R8, R8 - SUB $64, R3, R3 - B loop0 - -step1: - // if length > 64 { etc } - CMP $64, R3 - BLE step2 - - // Emit a length 60 copy, encoded as 3 bytes. - MOVD $0xee, R2 - MOVB R2, 0(R8) - MOVW R11, 1(R8) - ADD $3, R8, R8 - SUB $60, R3, R3 - -step2: - // if length >= 12 || offset >= 2048 { goto step3 } - CMP $12, R3 - BGE step3 - CMPW $2048, R11 - BGE step3 - - // Emit the remaining copy, encoded as 2 bytes. 
- MOVB R11, 1(R8) - LSRW $3, R11, R11 - AND $0xe0, R11, R11 - SUB $4, R3, R3 - LSLW $2, R3 - AND $0xff, R3, R3 - ORRW R3, R11, R11 - ORRW $1, R11, R11 - MOVB R11, 0(R8) - ADD $2, R8, R8 - - // Return the number of bytes written. - SUB R7, R8, R8 - MOVD R8, ret+40(FP) - RET - -step3: - // Emit the remaining copy, encoded as 3 bytes. - SUB $1, R3, R3 - AND $0xff, R3, R3 - LSLW $2, R3, R3 - ORRW $2, R3, R3 - MOVB R3, 0(R8) - MOVW R11, 1(R8) - ADD $3, R8, R8 - - // Return the number of bytes written. - SUB R7, R8, R8 - MOVD R8, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func extendMatch(src []byte, i, j int) int -// -// All local variables fit into registers. The register allocation: -// - R6 &src[0] -// - R7 &src[j] -// - R13 &src[len(src) - 8] -// - R14 &src[len(src)] -// - R15 &src[i] -// -// The unusual register allocation of local variables, such as R15 for a source -// pointer, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. -TEXT ·extendMatch(SB), NOSPLIT, $0-48 - MOVD src_base+0(FP), R6 - MOVD src_len+8(FP), R14 - MOVD i+24(FP), R15 - MOVD j+32(FP), R7 - ADD R6, R14, R14 - ADD R6, R15, R15 - ADD R6, R7, R7 - MOVD R14, R13 - SUB $8, R13, R13 - -cmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. - CMP R13, R7 - BHI cmp1 - MOVD (R15), R3 - MOVD (R7), R4 - CMP R4, R3 - BNE bsf - ADD $8, R15, R15 - ADD $8, R7, R7 - B cmp8 - -bsf: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. - // RBIT reverses the bit order, then CLZ counts the leading zeros, the - // combination of which finds the least significant bit which is set. - // The arm64 architecture is little-endian, and the shift by 3 converts - // a bit index to a byte index. - EOR R3, R4, R4 - RBIT R4, R4 - CLZ R4, R4 - ADD R4>>3, R7, R7 - - // Convert from &src[ret] to ret. - SUB R6, R7, R7 - MOVD R7, ret+40(FP) - RET - -cmp1: - // In src's tail, compare 1 byte at a time. - CMP R7, R14 - BLS extendMatchEnd - MOVB (R15), R3 - MOVB (R7), R4 - CMP R4, R3 - BNE extendMatchEnd - ADD $1, R15, R15 - ADD $1, R7, R7 - B cmp1 - -extendMatchEnd: - // Convert from &src[ret] to ret. - SUB R6, R7, R7 - MOVD R7, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func encodeBlock(dst, src []byte) (d int) -// -// All local variables fit into registers, other than "var table". The register -// allocation: -// - R3 . . -// - R4 . . -// - R5 64 shift -// - R6 72 &src[0], tableSize -// - R7 80 &src[s] -// - R8 88 &dst[d] -// - R9 96 sLimit -// - R10 . &src[nextEmit] -// - R11 104 prevHash, currHash, nextHash, offset -// - R12 112 &src[base], skip -// - R13 . &src[nextS], &src[len(src) - 8] -// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x -// - R15 120 candidate -// - R16 . hash constant, 0x1e35a7bd -// - R17 . &table -// - . 128 table -// -// The second column (64, 72, etc) is the stack offset to spill the registers -// when calling other functions. We could pack this slightly tighter, but it's -// simpler to have a dedicated spill map independent of the function called. -// -// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. 
An -// extra 64 bytes, to call other functions, and an extra 64 bytes, to spill -// local variables (registers) during calls gives 32768 + 64 + 64 = 32896. -TEXT ·encodeBlock(SB), 0, $32896-56 - MOVD dst_base+0(FP), R8 - MOVD src_base+24(FP), R7 - MOVD src_len+32(FP), R14 - - // shift, tableSize := uint32(32-8), 1<<8 - MOVD $24, R5 - MOVD $256, R6 - MOVW $0xa7bd, R16 - MOVKW $(0x1e35<<16), R16 - -calcShift: - // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { - // shift-- - // } - MOVD $16384, R2 - CMP R2, R6 - BGE varTable - CMP R14, R6 - BGE varTable - SUB $1, R5, R5 - LSL $1, R6, R6 - B calcShift - -varTable: - // var table [maxTableSize]uint16 - // - // In the asm code, unlike the Go code, we can zero-initialize only the - // first tableSize elements. Each uint16 element is 2 bytes and each - // iterations writes 64 bytes, so we can do only tableSize/32 writes - // instead of the 2048 writes that would zero-initialize all of table's - // 32768 bytes. This clear could overrun the first tableSize elements, but - // it won't overrun the allocated stack size. - ADD $128, RSP, R17 - MOVD R17, R4 - - // !!! R6 = &src[tableSize] - ADD R6<<1, R17, R6 - -memclr: - STP.P (ZR, ZR), 64(R4) - STP (ZR, ZR), -48(R4) - STP (ZR, ZR), -32(R4) - STP (ZR, ZR), -16(R4) - CMP R4, R6 - BHI memclr - - // !!! R6 = &src[0] - MOVD R7, R6 - - // sLimit := len(src) - inputMargin - MOVD R14, R9 - SUB $15, R9, R9 - - // !!! Pre-emptively spill R5, R6 and R9 to the stack. Their values don't - // change for the rest of the function. - MOVD R5, 64(RSP) - MOVD R6, 72(RSP) - MOVD R9, 96(RSP) - - // nextEmit := 0 - MOVD R6, R10 - - // s := 1 - ADD $1, R7, R7 - - // nextHash := hash(load32(src, s), shift) - MOVW 0(R7), R11 - MULW R16, R11, R11 - LSRW R5, R11, R11 - -outer: - // for { etc } - - // skip := 32 - MOVD $32, R12 - - // nextS := s - MOVD R7, R13 - - // candidate := 0 - MOVD $0, R15 - -inner0: - // for { etc } - - // s := nextS - MOVD R13, R7 - - // bytesBetweenHashLookups := skip >> 5 - MOVD R12, R14 - LSR $5, R14, R14 - - // nextS = s + bytesBetweenHashLookups - ADD R14, R13, R13 - - // skip += bytesBetweenHashLookups - ADD R14, R12, R12 - - // if nextS > sLimit { goto emitRemainder } - MOVD R13, R3 - SUB R6, R3, R3 - CMP R9, R3 - BHI emitRemainder - - // candidate = int(table[nextHash]) - MOVHU 0(R17)(R11<<1), R15 - - // table[nextHash] = uint16(s) - MOVD R7, R3 - SUB R6, R3, R3 - - MOVH R3, 0(R17)(R11<<1) - - // nextHash = hash(load32(src, nextS), shift) - MOVW 0(R13), R11 - MULW R16, R11 - LSRW R5, R11, R11 - - // if load32(src, s) != load32(src, candidate) { continue } break - MOVW 0(R7), R3 - MOVW (R6)(R15), R4 - CMPW R4, R3 - BNE inner0 - -fourByteMatch: - // As per the encode_other.go code: - // - // A 4-byte match has been found. We'll later see etc. - - // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment - // on inputMargin in encode.go. - MOVD R7, R3 - SUB R10, R3, R3 - CMP $16, R3 - BLE emitLiteralFastPath - - // ---------------------------------------- - // Begin inline of the emitLiteral call. 
- // - // d += emitLiteral(dst[d:], src[nextEmit:s]) - - MOVW R3, R4 - SUBW $1, R4, R4 - - MOVW $60, R2 - CMPW R2, R4 - BLT inlineEmitLiteralOneByte - MOVW $256, R2 - CMPW R2, R4 - BLT inlineEmitLiteralTwoBytes - -inlineEmitLiteralThreeBytes: - MOVD $0xf4, R1 - MOVB R1, 0(R8) - MOVW R4, 1(R8) - ADD $3, R8, R8 - B inlineEmitLiteralMemmove - -inlineEmitLiteralTwoBytes: - MOVD $0xf0, R1 - MOVB R1, 0(R8) - MOVB R4, 1(R8) - ADD $2, R8, R8 - B inlineEmitLiteralMemmove - -inlineEmitLiteralOneByte: - LSLW $2, R4, R4 - MOVB R4, 0(R8) - ADD $1, R8, R8 - -inlineEmitLiteralMemmove: - // Spill local variables (registers) onto the stack; call; unspill. - // - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // R8, R10 and R3 as arguments. - MOVD R8, 8(RSP) - MOVD R10, 16(RSP) - MOVD R3, 24(RSP) - - // Finish the "d +=" part of "d += emitLiteral(etc)". - ADD R3, R8, R8 - MOVD R7, 80(RSP) - MOVD R8, 88(RSP) - MOVD R15, 120(RSP) - CALL runtime·memmove(SB) - MOVD 64(RSP), R5 - MOVD 72(RSP), R6 - MOVD 80(RSP), R7 - MOVD 88(RSP), R8 - MOVD 96(RSP), R9 - MOVD 120(RSP), R15 - ADD $128, RSP, R17 - MOVW $0xa7bd, R16 - MOVKW $(0x1e35<<16), R16 - B inner1 - -inlineEmitLiteralEnd: - // End inline of the emitLiteral call. - // ---------------------------------------- - -emitLiteralFastPath: - // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". - MOVB R3, R4 - SUBW $1, R4, R4 - AND $0xff, R4, R4 - LSLW $2, R4, R4 - MOVB R4, (R8) - ADD $1, R8, R8 - - // !!! Implement the copy from lit to dst as a 16-byte load and store. - // (Encode's documentation says that dst and src must not overlap.) - // - // This always copies 16 bytes, instead of only len(lit) bytes, but that's - // OK. Subsequent iterations will fix up the overrun. - // - // Note that on arm64, it is legal and cheap to issue unaligned 8-byte or - // 16-byte loads and stores. This technique probably wouldn't be as - // effective on architectures that are fussier about alignment. - LDP 0(R10), (R0, R1) - STP (R0, R1), 0(R8) - ADD R3, R8, R8 - -inner1: - // for { etc } - - // base := s - MOVD R7, R12 - - // !!! offset := base - candidate - MOVD R12, R11 - SUB R15, R11, R11 - SUB R6, R11, R11 - - // ---------------------------------------- - // Begin inline of the extendMatch call. - // - // s = extendMatch(src, candidate+4, s+4) - - // !!! R14 = &src[len(src)] - MOVD src_len+32(FP), R14 - ADD R6, R14, R14 - - // !!! R13 = &src[len(src) - 8] - MOVD R14, R13 - SUB $8, R13, R13 - - // !!! R15 = &src[candidate + 4] - ADD $4, R15, R15 - ADD R6, R15, R15 - - // !!! s += 4 - ADD $4, R7, R7 - -inlineExtendMatchCmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. - CMP R13, R7 - BHI inlineExtendMatchCmp1 - MOVD (R15), R3 - MOVD (R7), R4 - CMP R4, R3 - BNE inlineExtendMatchBSF - ADD $8, R15, R15 - ADD $8, R7, R7 - B inlineExtendMatchCmp8 - -inlineExtendMatchBSF: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. - // RBIT reverses the bit order, then CLZ counts the leading zeros, the - // combination of which finds the least significant bit which is set. - // The arm64 architecture is little-endian, and the shift by 3 converts - // a bit index to a byte index. - EOR R3, R4, R4 - RBIT R4, R4 - CLZ R4, R4 - ADD R4>>3, R7, R7 - B inlineExtendMatchEnd - -inlineExtendMatchCmp1: - // In src's tail, compare 1 byte at a time. 
- CMP R7, R14 - BLS inlineExtendMatchEnd - MOVB (R15), R3 - MOVB (R7), R4 - CMP R4, R3 - BNE inlineExtendMatchEnd - ADD $1, R15, R15 - ADD $1, R7, R7 - B inlineExtendMatchCmp1 - -inlineExtendMatchEnd: - // End inline of the extendMatch call. - // ---------------------------------------- - - // ---------------------------------------- - // Begin inline of the emitCopy call. - // - // d += emitCopy(dst[d:], base-candidate, s-base) - - // !!! length := s - base - MOVD R7, R3 - SUB R12, R3, R3 - -inlineEmitCopyLoop0: - // for length >= 68 { etc } - MOVW $68, R2 - CMPW R2, R3 - BLT inlineEmitCopyStep1 - - // Emit a length 64 copy, encoded as 3 bytes. - MOVD $0xfe, R1 - MOVB R1, 0(R8) - MOVW R11, 1(R8) - ADD $3, R8, R8 - SUBW $64, R3, R3 - B inlineEmitCopyLoop0 - -inlineEmitCopyStep1: - // if length > 64 { etc } - MOVW $64, R2 - CMPW R2, R3 - BLE inlineEmitCopyStep2 - - // Emit a length 60 copy, encoded as 3 bytes. - MOVD $0xee, R1 - MOVB R1, 0(R8) - MOVW R11, 1(R8) - ADD $3, R8, R8 - SUBW $60, R3, R3 - -inlineEmitCopyStep2: - // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } - MOVW $12, R2 - CMPW R2, R3 - BGE inlineEmitCopyStep3 - MOVW $2048, R2 - CMPW R2, R11 - BGE inlineEmitCopyStep3 - - // Emit the remaining copy, encoded as 2 bytes. - MOVB R11, 1(R8) - LSRW $8, R11, R11 - LSLW $5, R11, R11 - SUBW $4, R3, R3 - AND $0xff, R3, R3 - LSLW $2, R3, R3 - ORRW R3, R11, R11 - ORRW $1, R11, R11 - MOVB R11, 0(R8) - ADD $2, R8, R8 - B inlineEmitCopyEnd - -inlineEmitCopyStep3: - // Emit the remaining copy, encoded as 3 bytes. - SUBW $1, R3, R3 - LSLW $2, R3, R3 - ORRW $2, R3, R3 - MOVB R3, 0(R8) - MOVW R11, 1(R8) - ADD $3, R8, R8 - -inlineEmitCopyEnd: - // End inline of the emitCopy call. - // ---------------------------------------- - - // nextEmit = s - MOVD R7, R10 - - // if s >= sLimit { goto emitRemainder } - MOVD R7, R3 - SUB R6, R3, R3 - CMP R3, R9 - BLS emitRemainder - - // As per the encode_other.go code: - // - // We could immediately etc. - - // x := load64(src, s-1) - MOVD -1(R7), R14 - - // prevHash := hash(uint32(x>>0), shift) - MOVW R14, R11 - MULW R16, R11, R11 - LSRW R5, R11, R11 - - // table[prevHash] = uint16(s-1) - MOVD R7, R3 - SUB R6, R3, R3 - SUB $1, R3, R3 - - MOVHU R3, 0(R17)(R11<<1) - - // currHash := hash(uint32(x>>8), shift) - LSR $8, R14, R14 - MOVW R14, R11 - MULW R16, R11, R11 - LSRW R5, R11, R11 - - // candidate = int(table[currHash]) - MOVHU 0(R17)(R11<<1), R15 - - // table[currHash] = uint16(s) - ADD $1, R3, R3 - MOVHU R3, 0(R17)(R11<<1) - - // if uint32(x>>8) == load32(src, candidate) { continue } - MOVW (R6)(R15), R4 - CMPW R4, R14 - BEQ inner1 - - // nextHash = hash(uint32(x>>16), shift) - LSR $8, R14, R14 - MOVW R14, R11 - MULW R16, R11, R11 - LSRW R5, R11, R11 - - // s++ - ADD $1, R7, R7 - - // break out of the inner1 for loop, i.e. continue the outer loop. - B outer - -emitRemainder: - // if nextEmit < len(src) { etc } - MOVD src_len+32(FP), R3 - ADD R6, R3, R3 - CMP R3, R10 - BEQ encodeBlockEnd - - // d += emitLiteral(dst[d:], src[nextEmit:]) - // - // Push args. - MOVD R8, 8(RSP) - MOVD $0, 16(RSP) // Unnecessary, as the callee ignores it, but conservative. - MOVD $0, 24(RSP) // Unnecessary, as the callee ignores it, but conservative. - MOVD R10, 32(RSP) - SUB R10, R3, R3 - MOVD R3, 40(RSP) - MOVD R3, 48(RSP) // Unnecessary, as the callee ignores it, but conservative. - - // Spill local variables (registers) onto the stack; call; unspill. 
- MOVD R8, 88(RSP) - CALL ·emitLiteral(SB) - MOVD 88(RSP), R8 - - // Finish the "d +=" part of "d += emitLiteral(etc)". - MOVD 56(RSP), R1 - ADD R1, R8, R8 - -encodeBlockEnd: - MOVD dst_base+0(FP), R3 - SUB R3, R8, R8 - MOVD R8, d+48(FP) - RET diff --git a/vendor/github.com/golang/snappy/encode_asm.go b/vendor/github.com/golang/snappy/encode_asm.go deleted file mode 100644 index 107c1e71..00000000 --- a/vendor/github.com/golang/snappy/encode_asm.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm -// +build amd64 arm64 - -package snappy - -// emitLiteral has the same semantics as in encode_other.go. -// -//go:noescape -func emitLiteral(dst, lit []byte) int - -// emitCopy has the same semantics as in encode_other.go. -// -//go:noescape -func emitCopy(dst []byte, offset, length int) int - -// extendMatch has the same semantics as in encode_other.go. -// -//go:noescape -func extendMatch(src []byte, i, j int) int - -// encodeBlock has the same semantics as in encode_other.go. -// -//go:noescape -func encodeBlock(dst, src []byte) (d int) diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go deleted file mode 100644 index 296d7f0b..00000000 --- a/vendor/github.com/golang/snappy/encode_other.go +++ /dev/null @@ -1,238 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !amd64,!arm64 appengine !gc noasm - -package snappy - -func load32(b []byte, i int) uint32 { - b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -} - -func load64(b []byte, i int) uint64 { - b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 -} - -// emitLiteral writes a literal chunk and returns the number of bytes written. -// -// It assumes that: -// dst is long enough to hold the encoded bytes -// 1 <= len(lit) && len(lit) <= 65536 -func emitLiteral(dst, lit []byte) int { - i, n := 0, uint(len(lit)-1) - switch { - case n < 60: - dst[0] = uint8(n)<<2 | tagLiteral - i = 1 - case n < 1<<8: - dst[0] = 60<<2 | tagLiteral - dst[1] = uint8(n) - i = 2 - default: - dst[0] = 61<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - i = 3 - } - return i + copy(dst[i:], lit) -} - -// emitCopy writes a copy chunk and returns the number of bytes written. -// -// It assumes that: -// dst is long enough to hold the encoded bytes -// 1 <= offset && offset <= 65535 -// 4 <= length && length <= 65535 -func emitCopy(dst []byte, offset, length int) int { - i := 0 - // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The - // threshold for this loop is a little higher (at 68 = 64 + 4), and the - // length emitted down below is is a little lower (at 60 = 64 - 4), because - // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed - // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as - // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as - // 3+3 bytes). 
The magic 4 in the 64±4 is because the minimum length for a - // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an - // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. - for length >= 68 { - // Emit a length 64 copy, encoded as 3 bytes. - dst[i+0] = 63<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= 64 - } - if length > 64 { - // Emit a length 60 copy, encoded as 3 bytes. - dst[i+0] = 59<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= 60 - } - if length >= 12 || offset >= 2048 { - // Emit the remaining copy, encoded as 3 bytes. - dst[i+0] = uint8(length-1)<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - return i + 3 - } - // Emit the remaining copy, encoded as 2 bytes. - dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 - dst[i+1] = uint8(offset) - return i + 2 -} - -// extendMatch returns the largest k such that k <= len(src) and that -// src[i:i+k-j] and src[j:k] have the same contents. -// -// It assumes that: -// 0 <= i && i < j && j <= len(src) -func extendMatch(src []byte, i, j int) int { - for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { - } - return j -} - -func hash(u, shift uint32) uint32 { - return (u * 0x1e35a7bd) >> shift -} - -// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It -// assumes that the varint-encoded length of the decompressed bytes has already -// been written. -// -// It also assumes that: -// len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize -func encodeBlock(dst, src []byte) (d int) { - // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. - // The table element type is uint16, as s < sLimit and sLimit < len(src) - // and len(src) <= maxBlockSize and maxBlockSize == 65536. - const ( - maxTableSize = 1 << 14 - // tableMask is redundant, but helps the compiler eliminate bounds - // checks. - tableMask = maxTableSize - 1 - ) - shift := uint32(32 - 8) - for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { - shift-- - } - // In Go, all array elements are zero-initialized, so there is no advantage - // to a smaller tableSize per se. However, it matches the C++ algorithm, - // and in the asm versions of this code, we can get away with zeroing only - // the first tableSize elements. - var table [maxTableSize]uint16 - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := len(src) - inputMargin - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := 0 - - // The encoded form must start with a literal, as there are no previous - // bytes to copy, so we start looking for hash matches at s == 1. - s := 1 - nextHash := hash(load32(src, s), shift) - - for { - // Copied from the C++ snappy implementation: - // - // Heuristic match skipping: If 32 bytes are scanned with no matches - // found, start looking only at every other byte. If 32 more bytes are - // scanned (or skipped), look at every third byte, etc.. When a match - // is found, immediately go back to looking at every byte. 
This is a - // small loss (~5% performance, ~0.1% density) for compressible data - // due to more bookkeeping, but for non-compressible data (such as - // JPEG) it's a huge win since the compressor quickly "realizes" the - // data is incompressible and doesn't bother looking for matches - // everywhere. - // - // The "skip" variable keeps track of how many bytes there are since - // the last match; dividing it by 32 (ie. right-shifting by five) gives - // the number of bytes to move ahead for each iteration. - skip := 32 - - nextS := s - candidate := 0 - for { - s = nextS - bytesBetweenHashLookups := skip >> 5 - nextS = s + bytesBetweenHashLookups - skip += bytesBetweenHashLookups - if nextS > sLimit { - goto emitRemainder - } - candidate = int(table[nextHash&tableMask]) - table[nextHash&tableMask] = uint16(s) - nextHash = hash(load32(src, nextS), shift) - if load32(src, s) == load32(src, candidate) { - break - } - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - d += emitLiteral(dst[d:], src[nextEmit:s]) - - // Call emitCopy, and then see if another emitCopy could be our next - // move. Repeat until we find no match for the input immediately after - // what was consumed by the last emitCopy call. - // - // If we exit this loop normally then we need to call emitLiteral next, - // though we don't yet know how big the literal will be. We handle that - // by proceeding to the next iteration of the main loop. We also can - // exit this loop via goto if we get close to exhausting the input. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - base := s - - // Extend the 4-byte match as long as possible. - // - // This is an inlined version of: - // s = extendMatch(src, candidate+4, s+4) - s += 4 - for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { - } - - d += emitCopy(dst[d:], base-candidate, s-base) - nextEmit = s - if s >= sLimit { - goto emitRemainder - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-1 and at s. If - // another emitCopy is not our next move, also calculate nextHash - // at s+1. At least on GOARCH=amd64, these three hash calculations - // are faster as one load64 call (with some shifts) instead of - // three load32 calls. - x := load64(src, s-1) - prevHash := hash(uint32(x>>0), shift) - table[prevHash&tableMask] = uint16(s - 1) - currHash := hash(uint32(x>>8), shift) - candidate = int(table[currHash&tableMask]) - table[currHash&tableMask] = uint16(s) - if uint32(x>>8) != load32(src, candidate) { - nextHash = hash(uint32(x>>16), shift) - s++ - break - } - } - } - -emitRemainder: - if nextEmit < len(src) { - d += emitLiteral(dst[d:], src[nextEmit:]) - } - return d -} diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go deleted file mode 100644 index ece692ea..00000000 --- a/vendor/github.com/golang/snappy/snappy.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package snappy implements the Snappy compression format. It aims for very -// high speeds and reasonable compression. -// -// There are actually two Snappy formats: block and stream. 
They are related, -// but different: trying to decompress block-compressed data as a Snappy stream -// will fail, and vice versa. The block format is the Decode and Encode -// functions and the stream format is the Reader and Writer types. -// -// The block format, the more common case, is used when the complete size (the -// number of bytes) of the original data is known upfront, at the time -// compression starts. The stream format, also known as the framing format, is -// for when that isn't always true. -// -// The canonical, C++ implementation is at https://github.com/google/snappy and -// it only implements the block format. -package snappy // import "github.com/golang/snappy" - -import ( - "hash/crc32" -) - -/* -Each encoded block begins with the varint-encoded length of the decoded data, -followed by a sequence of chunks. Chunks begin and end on byte boundaries. The -first byte of each chunk is broken into its 2 least and 6 most significant bits -called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. -Zero means a literal tag. All other values mean a copy tag. - -For literal tags: - - If m < 60, the next 1 + m bytes are literal bytes. - - Otherwise, let n be the little-endian unsigned integer denoted by the next - m - 59 bytes. The next 1 + n bytes after that are literal bytes. - -For copy tags, length bytes are copied from offset bytes ago, in the style of -Lempel-Ziv compression algorithms. In particular: - - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). - The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 - of the offset. The next byte is bits 0-7 of the offset. - - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). - The length is 1 + m. The offset is the little-endian unsigned integer - denoted by the next 2 bytes. - - For l == 3, this tag is a legacy format that is no longer issued by most - encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in - [1, 65). The length is 1 + m. The offset is the little-endian unsigned - integer denoted by the next 4 bytes. -*/ -const ( - tagLiteral = 0x00 - tagCopy1 = 0x01 - tagCopy2 = 0x02 - tagCopy4 = 0x03 -) - -const ( - checksumSize = 4 - chunkHeaderSize = 4 - magicChunk = "\xff\x06\x00\x00" + magicBody - magicBody = "sNaPpY" - - // maxBlockSize is the maximum size of the input to encodeBlock. It is not - // part of the wire format per se, but some parts of the encoder assume - // that an offset fits into a uint16. - // - // Also, for the framing format (Writer type instead of Encode function), - // https://github.com/google/snappy/blob/master/framing_format.txt says - // that "the uncompressed data in a chunk must be no longer than 65536 - // bytes". - maxBlockSize = 65536 - - // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is - // hard coded to be a const instead of a variable, so that obufLen can also - // be a const. Their equivalence is confirmed by - // TestMaxEncodedLenOfMaxBlockSize. 
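As a quick illustration of the chunk layout spelled out in the large comment above, the split of a chunk's first byte into l and m could be written as follows (tagOf is a hypothetical helper for illustration, not part of the vendored package):

// tagOf splits a chunk's first byte into the 2-bit chunk tag l and the
// 6-bit value m: l == 0 (tagLiteral) marks a literal chunk, while 1, 2
// and 3 (tagCopy1, tagCopy2, tagCopy4) mark copies.
func tagOf(b byte) (l, m uint8) {
	return b & 0x03, b >> 2
}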
-	maxEncodedLenOfMaxBlockSize = 76490
-
-	obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
-	obufLen       = obufHeaderLen + maxEncodedLenOfMaxBlockSize
-)
-
-const (
-	chunkTypeCompressedData   = 0x00
-	chunkTypeUncompressedData = 0x01
-	chunkTypePadding          = 0xfe
-	chunkTypeStreamIdentifier = 0xff
-)
-
-var crcTable = crc32.MakeTable(crc32.Castagnoli)
-
-// crc implements the checksum specified in section 3 of
-// https://github.com/google/snappy/blob/master/framing_format.txt
-func crc(b []byte) uint32 {
-	c := crc32.Update(0, crcTable, b)
-	return uint32(c>>15|c<<17) + 0xa282ead8
-}
diff --git a/vendor/github.com/minio/asm2plan9s/LICENSE b/vendor/github.com/google/flatbuffers/LICENSE
similarity index 100%
rename from vendor/github.com/minio/asm2plan9s/LICENSE
rename to vendor/github.com/google/flatbuffers/LICENSE
diff --git a/vendor/github.com/google/flatbuffers/go/lib.go b/vendor/github.com/google/flatbuffers/go/lib.go
index 9333d8bd..a4e99de1 100644
--- a/vendor/github.com/google/flatbuffers/go/lib.go
+++ b/vendor/github.com/google/flatbuffers/go/lib.go
@@ -28,3 +28,23 @@ func GetSizePrefix(buf []byte, offset UOffsetT) uint32 {
 func GetIndirectOffset(buf []byte, offset UOffsetT) UOffsetT {
 	return offset + GetUOffsetT(buf[offset:])
 }
+
+// GetBufferIdentifier returns the file identifier as a string
+func GetBufferIdentifier(buf []byte) string {
+	return string(buf[SizeUOffsetT:][:fileIdentifierLength])
+}
+
+// GetSizePrefixedBufferIdentifier returns the file identifier as a string for a size-prefixed buffer
+func GetSizePrefixedBufferIdentifier(buf []byte) string {
+	return string(buf[SizeUOffsetT+sizePrefixLength:][:fileIdentifierLength])
+}
+
+// BufferHasIdentifier checks if the identifier in a buffer has the expected value
+func BufferHasIdentifier(buf []byte, identifier string) bool {
+	return GetBufferIdentifier(buf) == identifier
+}
+
+// SizePrefixedBufferHasIdentifier checks if the identifier in a size-prefixed buffer has the expected value
+func SizePrefixedBufferHasIdentifier(buf []byte, identifier string) bool {
+	return GetSizePrefixedBufferIdentifier(buf) == identifier
+}
diff --git a/vendor/github.com/google/s2a-go/README.md b/vendor/github.com/google/s2a-go/README.md
index d566950f..fe0f5c1d 100644
--- a/vendor/github.com/google/s2a-go/README.md
+++ b/vendor/github.com/google/s2a-go/README.md
@@ -10,8 +10,5 @@ Session Agent during the TLS handshake, and to encrypt traffic to the peer
 after the TLS handshake is complete.
 
 This repository contains the source code for the Secure Session
-client libraries, which allow gRPC-Go applications to use the Secure Session
-Agent. This repository supports the Bazel and Golang build systems.
-
-All code in this repository is experimental and subject to change. We do not
-guarantee API stability at this time.
+client libraries, which allow gRPC and HTTP Go applications to use the Secure Session
diff --git a/vendor/github.com/google/s2a-go/internal/handshaker/service/service.go b/vendor/github.com/google/s2a-go/internal/handshaker/service/service.go index 49573af8..ed449653 100644 --- a/vendor/github.com/google/s2a-go/internal/handshaker/service/service.go +++ b/vendor/github.com/google/s2a-go/internal/handshaker/service/service.go @@ -21,50 +21,27 @@ package service import ( "context" - "net" - "os" - "strings" "sync" - "time" - "google.golang.org/appengine" - "google.golang.org/appengine/socket" grpc "google.golang.org/grpc" - "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" ) -// An environment variable, if true, opportunistically use AppEngine-specific dialer to call S2A. -const enableAppEngineDialerEnv = "S2A_ENABLE_APP_ENGINE_DIALER" - var ( - // appEngineDialerHook is an AppEngine-specific dial option that is set - // during init time. If nil, then the application is not running on Google - // AppEngine. - appEngineDialerHook func(context.Context) grpc.DialOption // mu guards hsConnMap and hsDialer. mu sync.Mutex // hsConnMap represents a mapping from an S2A handshaker service address // to a corresponding connection to an S2A handshaker service instance. hsConnMap = make(map[string]*grpc.ClientConn) // hsDialer will be reassigned in tests. - hsDialer = grpc.Dial + hsDialer = grpc.DialContext ) -func init() { - if !appengine.IsAppEngine() && !appengine.IsDevAppServer() { - return - } - appEngineDialerHook = func(ctx context.Context) grpc.DialOption { - return grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { - return socket.DialTimeout(ctx, "tcp", addr, timeout) - }) - } -} - // Dial dials the S2A handshaker service. If a connection has already been // established, this function returns it. Otherwise, a new connection is // created. -func Dial(handshakerServiceAddress string) (*grpc.ClientConn, error) { +func Dial(ctx context.Context, handshakerServiceAddress string, transportCreds credentials.TransportCredentials) (*grpc.ClientConn, error) { mu.Lock() defer mu.Unlock() @@ -72,17 +49,14 @@ func Dial(handshakerServiceAddress string) (*grpc.ClientConn, error) { if !ok { // Create a new connection to the S2A handshaker service. Note that // this connection stays open until the application is closed. - grpcOpts := []grpc.DialOption{ - grpc.WithInsecure(), - } - if enableAppEngineDialer() && appEngineDialerHook != nil { - if grpclog.V(1) { - grpclog.Info("Using AppEngine-specific dialer to talk to S2A.") - } - grpcOpts = append(grpcOpts, appEngineDialerHook(context.Background())) + var grpcOpts []grpc.DialOption + if transportCreds != nil { + grpcOpts = append(grpcOpts, grpc.WithTransportCredentials(transportCreds)) + } else { + grpcOpts = append(grpcOpts, grpc.WithTransportCredentials(insecure.NewCredentials())) } var err error - hsConn, err = hsDialer(handshakerServiceAddress, grpcOpts...) + hsConn, err = hsDialer(ctx, handshakerServiceAddress, grpcOpts...) 
if err != nil { return nil, err } @@ -90,10 +64,3 @@ func Dial(handshakerServiceAddress string) (*grpc.ClientConn, error) { } return hsConn, nil } - -func enableAppEngineDialer() bool { - if strings.ToLower(os.Getenv(enableAppEngineDialerEnv)) == "true" { - return true - } - return false -} diff --git a/vendor/github.com/google/s2a-go/internal/record/ticketsender.go b/vendor/github.com/google/s2a-go/internal/record/ticketsender.go index 33fa3c55..e51199ab 100644 --- a/vendor/github.com/google/s2a-go/internal/record/ticketsender.go +++ b/vendor/github.com/google/s2a-go/internal/record/ticketsender.go @@ -83,13 +83,15 @@ func (t *ticketSender) sendTicketsToS2A(sessionTickets [][]byte, callComplete ch t.ensureProcessSessionTickets.Done() } }() - hsConn, err := service.Dial(t.hsAddr) + ctx, cancel := context.WithTimeout(context.Background(), sessionTimeout) + defer cancel() + // The transportCreds only needs to be set when talking to S2AV2 and also + // if mTLS is required. + hsConn, err := service.Dial(ctx, t.hsAddr, nil) if err != nil { return err } client := s2apb.NewS2AServiceClient(hsConn) - ctx, cancel := context.WithTimeout(context.Background(), sessionTimeout) - defer cancel() session, err := client.SetUpSession(ctx) if err != nil { return err diff --git a/vendor/github.com/google/s2a-go/internal/v2/s2av2.go b/vendor/github.com/google/s2a-go/internal/v2/s2av2.go index ff172883..85a8379d 100644 --- a/vendor/github.com/google/s2a-go/internal/v2/s2av2.go +++ b/vendor/github.com/google/s2a-go/internal/v2/s2av2.go @@ -33,6 +33,7 @@ import ( "github.com/google/s2a-go/internal/handshaker/service" "github.com/google/s2a-go/internal/tokenmanager" "github.com/google/s2a-go/internal/v2/tlsconfigstore" + "github.com/google/s2a-go/retry" "github.com/google/s2a-go/stream" "google.golang.org/grpc" "google.golang.org/grpc/credentials" @@ -44,18 +45,19 @@ import ( const ( s2aSecurityProtocol = "tls" - defaultS2ATimeout = 3 * time.Second + defaultS2ATimeout = 6 * time.Second ) // An environment variable, which sets the timeout enforced on the connection to the S2A service for handshake. const s2aTimeoutEnv = "S2A_TIMEOUT" type s2av2TransportCreds struct { - info *credentials.ProtocolInfo - isClient bool - serverName string - s2av2Address string - tokenManager *tokenmanager.AccessTokenManager + info *credentials.ProtocolInfo + isClient bool + serverName string + s2av2Address string + transportCreds credentials.TransportCredentials + tokenManager *tokenmanager.AccessTokenManager // localIdentity should only be used by the client. localIdentity *commonpbv1.Identity // localIdentities should only be used by the server. @@ -68,7 +70,7 @@ type s2av2TransportCreds struct { // NewClientCreds returns a client-side transport credentials object that uses // the S2Av2 to establish a secure connection with a server. 
-func NewClientCreds(s2av2Address string, localIdentity *commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, fallbackClientHandshakeFunc fallback.ClientHandshake, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error), serverAuthorizationPolicy []byte) (credentials.TransportCredentials, error) { +func NewClientCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentity *commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, fallbackClientHandshakeFunc fallback.ClientHandshake, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error), serverAuthorizationPolicy []byte) (credentials.TransportCredentials, error) { // Create an AccessTokenManager instance to use to authenticate to S2Av2. accessTokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() @@ -79,6 +81,7 @@ func NewClientCreds(s2av2Address string, localIdentity *commonpbv1.Identity, ver isClient: true, serverName: "", s2av2Address: s2av2Address, + transportCreds: transportCreds, localIdentity: localIdentity, verificationMode: verificationMode, fallbackClientHandshake: fallbackClientHandshakeFunc, @@ -98,7 +101,7 @@ func NewClientCreds(s2av2Address string, localIdentity *commonpbv1.Identity, ver // NewServerCreds returns a server-side transport credentials object that uses // the S2Av2 to establish a secure connection with a client. -func NewServerCreds(s2av2Address string, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (credentials.TransportCredentials, error) { +func NewServerCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (credentials.TransportCredentials, error) { // Create an AccessTokenManager instance to use to authenticate to S2Av2. 
accessTokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() creds := &s2av2TransportCreds{ @@ -107,6 +110,7 @@ func NewServerCreds(s2av2Address string, localIdentities []*commonpbv1.Identity, }, isClient: false, s2av2Address: s2av2Address, + transportCreds: transportCreds, localIdentities: localIdentities, verificationMode: verificationMode, getS2AStream: getS2AStream, @@ -131,7 +135,13 @@ func (c *s2av2TransportCreds) ClientHandshake(ctx context.Context, serverAuthori serverName := removeServerNamePort(serverAuthority) timeoutCtx, cancel := context.WithTimeout(ctx, GetS2ATimeout()) defer cancel() - s2AStream, err := createStream(timeoutCtx, c.s2av2Address, c.getS2AStream) + var s2AStream stream.S2AStream + var err error + retry.Run(timeoutCtx, + func() error { + s2AStream, err = createStream(timeoutCtx, c.s2av2Address, c.transportCreds, c.getS2AStream) + return err + }) if err != nil { grpclog.Infof("Failed to connect to S2Av2: %v", err) if c.fallbackClientHandshake != nil { @@ -152,31 +162,34 @@ func (c *s2av2TransportCreds) ClientHandshake(ctx context.Context, serverAuthori tokenManager = *c.tokenManager } - if c.serverName == "" { - config, err = tlsconfigstore.GetTLSConfigurationForClient(serverName, s2AStream, tokenManager, c.localIdentity, c.verificationMode, c.serverAuthorizationPolicy) - if err != nil { - grpclog.Info("Failed to get client TLS config from S2Av2: %v", err) - if c.fallbackClientHandshake != nil { - return c.fallbackClientHandshake(ctx, serverAuthority, rawConn, err) - } - return nil, nil, err - } - } else { - config, err = tlsconfigstore.GetTLSConfigurationForClient(c.serverName, s2AStream, tokenManager, c.localIdentity, c.verificationMode, c.serverAuthorizationPolicy) - if err != nil { - grpclog.Info("Failed to get client TLS config from S2Av2: %v", err) - if c.fallbackClientHandshake != nil { - return c.fallbackClientHandshake(ctx, serverAuthority, rawConn, err) - } - return nil, nil, err + sn := serverName + if c.serverName != "" { + sn = c.serverName + } + retry.Run(timeoutCtx, + func() error { + config, err = tlsconfigstore.GetTLSConfigurationForClient(sn, s2AStream, tokenManager, c.localIdentity, c.verificationMode, c.serverAuthorizationPolicy) + return err + }) + if err != nil { + grpclog.Info("Failed to get client TLS config from S2Av2: %v", err) + if c.fallbackClientHandshake != nil { + return c.fallbackClientHandshake(ctx, serverAuthority, rawConn, err) } + return nil, nil, err } if grpclog.V(1) { grpclog.Infof("Got client TLS config from S2Av2.") } - creds := credentials.NewTLS(config) - conn, authInfo, err := creds.ClientHandshake(ctx, serverName, rawConn) + creds := credentials.NewTLS(config) + var conn net.Conn + var authInfo credentials.AuthInfo + retry.Run(timeoutCtx, + func() error { + conn, authInfo, err = creds.ClientHandshake(timeoutCtx, serverName, rawConn) + return err + }) if err != nil { grpclog.Infof("Failed to do client handshake using S2Av2: %v", err) if c.fallbackClientHandshake != nil { @@ -196,7 +209,13 @@ func (c *s2av2TransportCreds) ServerHandshake(rawConn net.Conn) (net.Conn, crede } ctx, cancel := context.WithTimeout(context.Background(), GetS2ATimeout()) defer cancel() - s2AStream, err := createStream(ctx, c.s2av2Address, c.getS2AStream) + var s2AStream stream.S2AStream + var err error + retry.Run(ctx, + func() error { + s2AStream, err = createStream(ctx, c.s2av2Address, c.transportCreds, c.getS2AStream) + return err + }) if err != nil { grpclog.Infof("Failed to connect to S2Av2: %v", err) return nil, nil, err @@ 
-213,7 +232,12 @@ func (c *s2av2TransportCreds) ServerHandshake(rawConn net.Conn) (net.Conn, crede tokenManager = *c.tokenManager } - config, err := tlsconfigstore.GetTLSConfigurationForServer(s2AStream, tokenManager, c.localIdentities, c.verificationMode) + var config *tls.Config + retry.Run(ctx, + func() error { + config, err = tlsconfigstore.GetTLSConfigurationForServer(s2AStream, tokenManager, c.localIdentities, c.verificationMode) + return err + }) if err != nil { grpclog.Infof("Failed to get server TLS config from S2Av2: %v", err) return nil, nil, err @@ -221,8 +245,20 @@ func (c *s2av2TransportCreds) ServerHandshake(rawConn net.Conn) (net.Conn, crede if grpclog.V(1) { grpclog.Infof("Got server TLS config from S2Av2.") } + creds := credentials.NewTLS(config) - return creds.ServerHandshake(rawConn) + var conn net.Conn + var authInfo credentials.AuthInfo + retry.Run(ctx, + func() error { + conn, authInfo, err = creds.ServerHandshake(rawConn) + return err + }) + if err != nil { + grpclog.Infof("Failed to do server handshake using S2Av2: %v", err) + return nil, nil, err + } + return conn, authInfo, err } // Info returns protocol info of s2av2TransportCreds. @@ -278,11 +314,12 @@ func (c *s2av2TransportCreds) Clone() credentials.TransportCredentials { func NewClientTLSConfig( ctx context.Context, s2av2Address string, + transportCreds credentials.TransportCredentials, tokenManager tokenmanager.AccessTokenManager, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, serverName string, serverAuthorizationPolicy []byte) (*tls.Config, error) { - s2AStream, err := createStream(ctx, s2av2Address, nil) + s2AStream, err := createStream(ctx, s2av2Address, transportCreds, nil) if err != nil { grpclog.Infof("Failed to connect to S2Av2: %v", err) return nil, err @@ -325,12 +362,12 @@ func (x s2AGrpcStream) CloseSend() error { return x.stream.CloseSend() } -func createStream(ctx context.Context, s2av2Address string, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (stream.S2AStream, error) { +func createStream(ctx context.Context, s2av2Address string, transportCreds credentials.TransportCredentials, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (stream.S2AStream, error) { if getS2AStream != nil { return getS2AStream(ctx, s2av2Address) } // TODO(rmehta19): Consider whether to close the connection to S2Av2. - conn, err := service.Dial(s2av2Address) + conn, err := service.Dial(ctx, s2av2Address, transportCreds) if err != nil { return nil, err } diff --git a/vendor/github.com/google/s2a-go/retry/retry.go b/vendor/github.com/google/s2a-go/retry/retry.go new file mode 100644 index 00000000..f7e0a237 --- /dev/null +++ b/vendor/github.com/google/s2a-go/retry/retry.go @@ -0,0 +1,144 @@ +/* + * + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package retry provides a retry helper for talking to S2A gRPC server. 
+// The implementation is modeled after
+// https://github.com/googleapis/google-cloud-go/blob/main/compute/metadata/retry.go
+package retry
+
+import (
+	"context"
+	"math/rand"
+	"time"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+const (
+	maxRetryAttempts = 5
+	maxRetryForLoops = 10
+)
+
+type defaultBackoff struct {
+	max time.Duration
+	mul float64
+	cur time.Duration
+}
+
+// Pause returns a duration, which is used as the backoff wait time
+// before the next retry.
+func (b *defaultBackoff) Pause() time.Duration {
+	d := time.Duration(1 + rand.Int63n(int64(b.cur)))
+	b.cur = time.Duration(float64(b.cur) * b.mul)
+	if b.cur > b.max {
+		b.cur = b.max
+	}
+	return d
+}
+
+// Sleep will wait for the specified duration or return on context
+// expiration.
+func Sleep(ctx context.Context, d time.Duration) error {
+	t := time.NewTimer(d)
+	select {
+	case <-ctx.Done():
+		t.Stop()
+		return ctx.Err()
+	case <-t.C:
+		return nil
+	}
+}
+
+// NewRetryer creates an instance of S2ARetryer using the defaultBackoff
+// implementation.
+var NewRetryer = func() *S2ARetryer {
+	return &S2ARetryer{bo: &defaultBackoff{
+		cur: 100 * time.Millisecond,
+		max: 30 * time.Second,
+		mul: 2,
+	}}
+}
+
+type backoff interface {
+	Pause() time.Duration
+}
+
+// S2ARetryer implements a retry helper for talking to S2A gRPC server.
+type S2ARetryer struct {
+	bo       backoff
+	attempts int
+}
+
+// Attempts returns the number of retries attempted.
+func (r *S2ARetryer) Attempts() int {
+	return r.attempts
+}
+
+// Retry returns the backoff duration and a boolean indicating whether a
+// retry should be performed.
+func (r *S2ARetryer) Retry(err error) (time.Duration, bool) {
+	if err == nil {
+		return 0, false
+	}
+	if r.attempts >= maxRetryAttempts {
+		return 0, false
+	}
+	r.attempts++
+	return r.bo.Pause(), true
+}
+
+// Run uses S2ARetryer to execute the function passed in, until success or
+// until the max number of retry attempts is reached.
+func Run(ctx context.Context, f func() error) {
+	retryer := NewRetryer()
+	forLoopCnt := 0
+	var err error
+	for {
+		err = f()
+		if bo, shouldRetry := retryer.Retry(err); shouldRetry {
+			if grpclog.V(1) {
+				grpclog.Infof("will attempt retry: %v", err)
+			}
+			if ctx.Err() != nil {
+				if grpclog.V(1) {
+					grpclog.Infof("exit retry loop due to context error: %v", ctx.Err())
+				}
+				break
+			}
+			if errSleep := Sleep(ctx, bo); errSleep != nil {
+				if grpclog.V(1) {
+					grpclog.Infof("exit retry loop due to sleep error: %v", errSleep)
+				}
+				break
+			}
+			// This shouldn't happen, just make sure we are not stuck in the for loops.
+ forLoopCnt++ + if forLoopCnt > maxRetryForLoops { + if grpclog.V(1) { + grpclog.Infof("exit the for loop after too many retries") + } + break + } + continue + } + if grpclog.V(1) { + grpclog.Infof("retry conditions not met, exit the loop") + } + break + } +} diff --git a/vendor/github.com/google/s2a-go/s2a.go b/vendor/github.com/google/s2a-go/s2a.go index 1c1349de..5ecb06f9 100644 --- a/vendor/github.com/google/s2a-go/s2a.go +++ b/vendor/github.com/google/s2a-go/s2a.go @@ -35,6 +35,7 @@ import ( "github.com/google/s2a-go/internal/handshaker/service" "github.com/google/s2a-go/internal/tokenmanager" "github.com/google/s2a-go/internal/v2" + "github.com/google/s2a-go/retry" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" @@ -111,7 +112,7 @@ func NewClientCreds(opts *ClientOptions) (credentials.TransportCredentials, erro if opts.FallbackOpts != nil && opts.FallbackOpts.FallbackClientHandshakeFunc != nil { fallbackFunc = opts.FallbackOpts.FallbackClientHandshakeFunc } - return v2.NewClientCreds(opts.S2AAddress, localIdentity, verificationMode, fallbackFunc, opts.getS2AStream, opts.serverAuthorizationPolicy) + return v2.NewClientCreds(opts.S2AAddress, opts.TransportCreds, localIdentity, verificationMode, fallbackFunc, opts.getS2AStream, opts.serverAuthorizationPolicy) } // NewServerCreds returns a server-side transport credentials object that uses @@ -146,7 +147,7 @@ func NewServerCreds(opts *ServerOptions) (credentials.TransportCredentials, erro }, nil } verificationMode := getVerificationMode(opts.VerificationMode) - return v2.NewServerCreds(opts.S2AAddress, localIdentities, verificationMode, opts.getS2AStream) + return v2.NewServerCreds(opts.S2AAddress, opts.TransportCreds, localIdentities, verificationMode, opts.getS2AStream) } // ClientHandshake initiates a client-side TLS handshake using the S2A. @@ -155,17 +156,17 @@ func (c *s2aTransportCreds) ClientHandshake(ctx context.Context, serverAuthority return nil, nil, errors.New("client handshake called using server transport credentials") } + var cancel context.CancelFunc + ctx, cancel = context.WithCancel(ctx) + defer cancel() + // Connect to the S2A. - hsConn, err := service.Dial(c.s2aAddr) + hsConn, err := service.Dial(ctx, c.s2aAddr, nil) if err != nil { grpclog.Infof("Failed to connect to S2A: %v", err) return nil, nil, err } - var cancel context.CancelFunc - ctx, cancel = context.WithCancel(ctx) - defer cancel() - opts := &handshaker.ClientHandshakerOptions{ MinTLSVersion: c.minTLSVersion, MaxTLSVersion: c.maxTLSVersion, @@ -203,16 +204,16 @@ func (c *s2aTransportCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credent return nil, nil, errors.New("server handshake called using client transport credentials") } + ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout) + defer cancel() + // Connect to the S2A. 
- hsConn, err := service.Dial(c.s2aAddr) + hsConn, err := service.Dial(ctx, c.s2aAddr, nil) if err != nil { grpclog.Infof("Failed to connect to S2A: %v", err) return nil, nil, err } - ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout) - defer cancel() - opts := &handshaker.ServerHandshakerOptions{ MinTLSVersion: c.minTLSVersion, MaxTLSVersion: c.maxTLSVersion, @@ -312,6 +313,7 @@ func NewTLSClientConfigFactory(opts *ClientOptions) (TLSClientConfigFactory, err grpclog.Infof("Access token manager not initialized: %v", err) return &s2aTLSClientConfigFactory{ s2av2Address: opts.S2AAddress, + transportCreds: opts.TransportCreds, tokenManager: nil, verificationMode: getVerificationMode(opts.VerificationMode), serverAuthorizationPolicy: opts.serverAuthorizationPolicy, @@ -319,6 +321,7 @@ func NewTLSClientConfigFactory(opts *ClientOptions) (TLSClientConfigFactory, err } return &s2aTLSClientConfigFactory{ s2av2Address: opts.S2AAddress, + transportCreds: opts.TransportCreds, tokenManager: tokenManager, verificationMode: getVerificationMode(opts.VerificationMode), serverAuthorizationPolicy: opts.serverAuthorizationPolicy, @@ -327,6 +330,7 @@ func NewTLSClientConfigFactory(opts *ClientOptions) (TLSClientConfigFactory, err type s2aTLSClientConfigFactory struct { s2av2Address string + transportCreds credentials.TransportCredentials tokenManager tokenmanager.AccessTokenManager verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode serverAuthorizationPolicy []byte @@ -338,7 +342,7 @@ func (f *s2aTLSClientConfigFactory) Build( if opts != nil && opts.ServerName != "" { serverName = opts.ServerName } - return v2.NewClientTLSConfig(ctx, f.s2av2Address, f.tokenManager, f.verificationMode, serverName, f.serverAuthorizationPolicy) + return v2.NewClientTLSConfig(ctx, f.s2av2Address, f.transportCreds, f.tokenManager, f.verificationMode, serverName, f.serverAuthorizationPolicy) } func getVerificationMode(verificationMode VerificationModeType) s2av2pb.ValidatePeerCertificateChainReq_VerificationMode { @@ -390,9 +394,15 @@ func NewS2ADialTLSContextFunc(opts *ClientOptions) func(ctx context.Context, net } timeoutCtx, cancel := context.WithTimeout(ctx, v2.GetS2ATimeout()) defer cancel() - s2aTLSConfig, err := factory.Build(timeoutCtx, &TLSClientConfigOptions{ - ServerName: serverName, - }) + + var s2aTLSConfig *tls.Config + retry.Run(timeoutCtx, + func() error { + s2aTLSConfig, err = factory.Build(timeoutCtx, &TLSClientConfigOptions{ + ServerName: serverName, + }) + return err + }) if err != nil { grpclog.Infof("error building S2A TLS config: %v", err) return fallback(err) @@ -401,7 +411,12 @@ func NewS2ADialTLSContextFunc(opts *ClientOptions) func(ctx context.Context, net s2aDialer := &tls.Dialer{ Config: s2aTLSConfig, } - c, err := s2aDialer.DialContext(ctx, network, addr) + var c net.Conn + retry.Run(timeoutCtx, + func() error { + c, err = s2aDialer.DialContext(timeoutCtx, network, addr) + return err + }) if err != nil { grpclog.Infof("error dialing with S2A to %s: %v", addr, err) return fallback(err) diff --git a/vendor/github.com/google/s2a-go/s2a_options.go b/vendor/github.com/google/s2a-go/s2a_options.go index 94feafb9..fcdbc162 100644 --- a/vendor/github.com/google/s2a-go/s2a_options.go +++ b/vendor/github.com/google/s2a-go/s2a_options.go @@ -26,6 +26,7 @@ import ( "github.com/google/s2a-go/fallback" "github.com/google/s2a-go/stream" + "google.golang.org/grpc/credentials" s2apb "github.com/google/s2a-go/internal/proto/common_go_proto" ) @@ -92,6 +93,9 @@ type 
ClientOptions struct { LocalIdentity Identity // S2AAddress is the address of the S2A. S2AAddress string + // Optional transport credentials. + // If set, this will be used for the gRPC connection to the S2A server. + TransportCreds credentials.TransportCredentials // EnsureProcessSessionTickets waits for all session tickets to be sent to // S2A before a process completes. // @@ -173,6 +177,9 @@ type ServerOptions struct { LocalIdentities []Identity // S2AAddress is the address of the S2A. S2AAddress string + // Optional transport credentials. + // If set, this will be used for the gRPC connection to the S2A server. + TransportCreds credentials.TransportCredentials // If true, enables the use of legacy S2Av1. EnableLegacyMode bool // VerificationMode specifies the mode that S2A must use to verify the diff --git a/vendor/github.com/google/s2a-go/testdata/mds_client_cert.pem b/vendor/github.com/google/s2a-go/testdata/mds_client_cert.pem new file mode 100644 index 00000000..60c4cf06 --- /dev/null +++ b/vendor/github.com/google/s2a-go/testdata/mds_client_cert.pem @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDCDCCAfACFFlYsYCFit01ZpYmfjxpo7/6wMEbMA0GCSqGSIb3DQEBCwUAMEgx +CzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEPMA0GA1UECgwGR29vZ2xlMRswGQYD +VQQDDBJ0ZXN0LXMyYS1tdGxzLXJvb3QwHhcNMjMwODIyMTY0NTE4WhcNNDMwODIy +MTY0NTE4WjA5MQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExHTAbBgNVBAMMFHRl +c3QtczJhLW10bHMtY2xpZW50MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAqrQQMyxNtmdCB+uY3szgRsfPrKC+TV9Fusnd8PfaCVuGTGcSBKM018nV2TDn +3IYFQ1HgLpGwGwOFDBb3y0o9i2/l2VJySriX1GSNX6nDmVasQlO1wuOLCP7/LRmO +7b6Kise5W0IFhYaptKyWnekn2pS0tAjimqpfn2w0U6FDGtQUqg/trQQmGtTSJHjb +A+OFd0EFC18KGP8Q+jOMaMkJRmpeEiAPyHPDoMhqQNT26RApv9j2Uzo4SuXzHH6T +cAdm1+zG+EXY/UZKX9oDkSbwIJvN+gCmNyORLalJ12gsGYOCjMd8K0mlXBqrmmbO +VHVbUm9062lhE7x59AA8DK4DoQIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQCPOvtL +dq2hxFHlIy0YUK8jp/DtwJZPwzx1id5FtWwd0CxBS1StIgmkHMxtkJGz1iyQLplI +je+Msd4sTsb5zZi/8kGKehi8Wj4lghp4oP30cpob41OvM68M9RC/wSOVk9igSww+ +l3zof6wKRIswsi5VHrL16ruIVVoDlyFbKr8yk+cp9OPOV8hNNN7ewY9xC8OgnTt8 +YtdaLe6uTplKBLW+j3GtshigRhyfkGJyPFYL4LAeDJCHlC1qmBnkyP0ijMp6vneM +E8TLavnMTMcpihWTWpyKeRkO6HDRsP4AofQAp7VAiAdSOplga+w2qgrVICV+m8MK +BTq2PBvc59T6OFLq +-----END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/testdata/mds_client_key.pem b/vendor/github.com/google/s2a-go/testdata/mds_client_key.pem new file mode 100644 index 00000000..9d112d1e --- /dev/null +++ b/vendor/github.com/google/s2a-go/testdata/mds_client_key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCqtBAzLE22Z0IH +65jezOBGx8+soL5NX0W6yd3w99oJW4ZMZxIEozTXydXZMOfchgVDUeAukbAbA4UM +FvfLSj2Lb+XZUnJKuJfUZI1fqcOZVqxCU7XC44sI/v8tGY7tvoqKx7lbQgWFhqm0 +rJad6SfalLS0COKaql+fbDRToUMa1BSqD+2tBCYa1NIkeNsD44V3QQULXwoY/xD6 +M4xoyQlGal4SIA/Ic8OgyGpA1PbpECm/2PZTOjhK5fMcfpNwB2bX7Mb4Rdj9Rkpf +2gORJvAgm836AKY3I5EtqUnXaCwZg4KMx3wrSaVcGquaZs5UdVtSb3TraWETvHn0 +ADwMrgOhAgMBAAECggEAUccupZ1ZY4OHTi0PkNk8rpwFwTFGyeFVEf2ofkr24RnA +NnUAXEllxOUUNlcoFOz9s3kTeavg3qgqgpa0QmdAIb9LMXg+ec6CKkW7trMpGho8 +LxBUWNfSoU4sKEqAvyPT0lWJVo9D/up6/avbAi6TIbOw+Djzel4ZrlHTpabxc3WT +EilXzn4q54b3MzxCQeQjcnzTieW4Q5semG2kLiXFToHIY2di01P/O8awUjgrD+uW +/Cb6H49MnHm9VPkqea1iwZeMQd6Gh5FrC7RezsBjdB1JBcfsv6PFt2ySInjB8SF+ +XR5Gr3Cc5sh9s0LfprZ9Dq0rlSWmwasPMI1COK6SswKBgQDczgeWd3erQ1JX9LEI +wollawqC9y7uJhEsw1hrPqA3uqZYiLUc7Nmi4laZ12mcGoXNDS3R3XmD58qGmGaU +lxEVTb8KDVWBgw450VoBKzSMQnCP6zn4nZxTYxeqMKjDGf6TRB6TZc843qsG3eRC +k91yxrCQ/0HV6PT48C+lieDzLwKBgQDF6aNKiyrswr457undBnM1H8q/Y6xC5ZlK 
+UtiQdhuyBnicvz0U8WPxBY/8gha0OXWuSnBqq/z77iFVNv/zT6p9K7kM7nBGd8cB +8KO6FNbyaHWFrhCI5zNzRTH4oha0hfvUOoti09vqavCtWD4L+D/63ba1wNLKPO9o +4gWbCnUCLwKBgQC/vus372csgrnvR761LLrEJ8BpGt7WUJh5luoht7DKtHvgRleB +Vu1oVcV+s2Iy/ZVUDC3OIdZ0hcWKPK5YOxfKuEk+IXYvke+4peTTPwHTC59UW6Fs +FPK8N0FFuhvT0a8RlAY5WiAp8rPysp6WcnHMSl7qi8BQUozp4Sp/RsziYQKBgBXv +r4mzoy5a53rEYGd/L4XT4EUWZyGDEVqLlDVu4eL5lKTLDZokp08vrqXuRVX0iHap +CYzJQ2EpI8iuL/BoBB2bmwcz5n3pCMXORld5t9lmeqA2it6hwbIlGUTVsm6P6zm6 +w3hQwy9YaxTLkxUAjxbfPEEo/jQsTNzzMGve3NlBAoGAbgJExpDyMDnaD2Vi5eyr +63b54BsqeLHqxJmADifyRCj7G1SJMm3zMKkNNOS0vsXgoiId973STFf1XQiojiv8 +Slbxyv5rczcY0n3LOuQYcM5OzsjzpNFZsT2dDnMfNRUF3rx3Geu/FuJ9scF1b00r +fVMrcL3jSf/W1Xh4TgtyoU8= +-----END PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/testdata/mds_root_cert.pem b/vendor/github.com/google/s2a-go/testdata/mds_root_cert.pem new file mode 100644 index 00000000..44e436f6 --- /dev/null +++ b/vendor/github.com/google/s2a-go/testdata/mds_root_cert.pem @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDcTCCAlmgAwIBAgIUDUkgI+2FZtuUHyUUi0ZBH7JvN00wDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQ8wDQYDVQQKDAZHb29nbGUx +GzAZBgNVBAMMEnRlc3QtczJhLW10bHMtcm9vdDAeFw0yMzA4MjEyMTI5MTVaFw00 +MzA4MjEyMTI5MTVaMEgxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEPMA0GA1UE +CgwGR29vZ2xlMRswGQYDVQQDDBJ0ZXN0LXMyYS1tdGxzLXJvb3QwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCbFEQfpvla27bATedrN4BAWsI9GSwSnJLW +QWzXcnAk6cKxQBAhnaKHRxHY8ttLhNTtxQeub894CLzJvHE/0xDhuMzjtCCCZ7i2 +r08tKZ1KcEzPJCPNlxlzAXPA45XU3LRlbGvju/PBPhm6n1hCEKTNI/KETJ5DEaYg +Cf2LcXVsl/zW20MwDZ+e2w/9a2a6n6DdpW1ekOR550hXAUOIxvmXRBeYeGLFvp1n +rQgZBhRaxP03UB+PQD2oMi/4mfsS96uGCXdzzX8qV46O8m132HUbnA/wagIwboEe +d7Bx237dERDyHw5GFnll7orgA0FOtoEufXdeQxWVvTjO0+PVPgsvAgMBAAGjUzBR +MB0GA1UdDgQWBBRyMtg/yutV8hw8vOq0i8x0eBQi7DAfBgNVHSMEGDAWgBRyMtg/ +yutV8hw8vOq0i8x0eBQi7DAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUA +A4IBAQArN/gdqWMxd5Rvq2eJMTp6I4RepJOT7Go4sMsRsy1caJqqcoS2EvREDZMN +XNEBcyQBB5kYd6TCcZGoLnEtWYXQ4jjEiXG1g7/+rWxyqw0ZYuP7FWzuHg3Uor/x +fApbEKwptP5ywVc+33h4qreGcqXkVCCn+sAcstGgrqubdGZW2T5gazUMyammOOuN +9IWL1PbvXmgEKD+80NUIrk09zanYyrElGdU/zw/kUbZ3Jf6WUBtJGhTzRQ1qZeKa +VnpCbLoG3vObEB8mxDUAlIzwAtfvw4U32BVIZA8xrocz6OOoAnSW1bTlo3EOIo/G +MTV7jmY9TBPtfhRuO/cG650+F+cw +-----END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/testdata/mds_server_cert.pem b/vendor/github.com/google/s2a-go/testdata/mds_server_cert.pem new file mode 100644 index 00000000..68c60613 --- /dev/null +++ b/vendor/github.com/google/s2a-go/testdata/mds_server_cert.pem @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDbjCCAlagAwIBAgIUbexZ5sZl86Al9dsI2PkOgtqKnkgwDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQ8wDQYDVQQKDAZHb29nbGUx +GzAZBgNVBAMMEnRlc3QtczJhLW10bHMtcm9vdDAeFw0yMzA4MjIwMDMyMDRaFw00 +MzA4MjIwMDMyMDRaMDkxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEdMBsGA1UE +AwwUdGVzdC1zMmEtbXRscy1zZXJ2ZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQCMEzybsGPqfh92GLwy43mt8kQDF3ztr8y06RwU1hVnY7QqYK4obpvh +HkJVnTz9gwNBF3n5nUalqRzactlf2PCydN9oSYNCO8svVmo7vw1CleKAKFAiV5Qn +H76QlqD15oJreh7nSM8R4qj5KukIHvt0cN0gD6CJQzIURDtsKJwkW3yQjYyT/FAK +GYtFrB6buDn3Eg3Hsw6z7uj7CzLBsSl7BIGrQILbpbI9nFNT3rUTUhXZKY/3UtJA +Ob66AjTmMbD16RGYZR4JsPx6CstheifJ6YSI79r5KgD37zX0jMXFWimvb2SmZmFe +LoohtC8K7uTyjm/dROx6nHXdDt5TQYXHAgMBAAGjXzBdMBsGA1UdEQQUMBKHEAAA +AAAAAAAAAAAAAAAAAAAwHQYDVR0OBBYEFI3i2+tIk6YYn0MIxC0q93jk1VsUMB8G +A1UdIwQYMBaAFHIy2D/K61XyHDy86rSLzHR4FCLsMA0GCSqGSIb3DQEBCwUAA4IB +AQAUhk+s/lrIAULBbU7E22C8f93AzTxE1mhyHGNlfPPJP3t1Dl+h4X4WkFpkz5gT 
+EcNXB//Vvoq99HbEK5/92sxsIPexKdJBdcggeHXIgLDkOrEZEb0Nnh9eaAuU2QDn +JW44hMB+aF6mEaJvOHE6DRkQw3hwFYFisFKKHtlQ3TyOhw5CHGzSExPZusdSFNIe +2E7V/0QzGPJEFnEFUNe9N8nTH2P385Paoi+5+Iizlp/nztVXfzv0Cj/i+qGgtDUs +HB+gBU2wxMw8eYyuNzACH70wqGR1Parj8/JoyYhx0S4+Gjzy3JH3CcAMaxyfH/dI +4Wcvfz/isxgmH1UqIt3oc6ad +-----END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/testdata/mds_server_key.pem b/vendor/github.com/google/s2a-go/testdata/mds_server_key.pem new file mode 100644 index 00000000..b14ad0f7 --- /dev/null +++ b/vendor/github.com/google/s2a-go/testdata/mds_server_key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCMEzybsGPqfh92 +GLwy43mt8kQDF3ztr8y06RwU1hVnY7QqYK4obpvhHkJVnTz9gwNBF3n5nUalqRza +ctlf2PCydN9oSYNCO8svVmo7vw1CleKAKFAiV5QnH76QlqD15oJreh7nSM8R4qj5 +KukIHvt0cN0gD6CJQzIURDtsKJwkW3yQjYyT/FAKGYtFrB6buDn3Eg3Hsw6z7uj7 +CzLBsSl7BIGrQILbpbI9nFNT3rUTUhXZKY/3UtJAOb66AjTmMbD16RGYZR4JsPx6 +CstheifJ6YSI79r5KgD37zX0jMXFWimvb2SmZmFeLoohtC8K7uTyjm/dROx6nHXd +Dt5TQYXHAgMBAAECggEAIB5zGdIG/yh/Z1GBqfuOFaxFGx5iJ5BVlLAVH9P9IXFz +yPnVRXEjbinFlSMSbqEBeIX9EpcVMXxHIPIP1RIGEy2IYr3kiqXyT771ahDDZh6/ +Spqz0UQatSPqyvW3H9uE0Uc12dvQm23JSCUmPRX5m7gbhDQBIChXzdzdcU4Yi59V +4xmJUvbsAcLw5CBM6kwV+1NGVH9+3mUdhrr9M6B6+sVB/xnaqMGEDfQGiwL8U7EY +QOuc46KXu3Pd/qCdVLn60IrdjSzDJKeC5UZZ+ejNAo+DfbtOovBj3qu3OCUg4XVy +0CDBJ1sTdLvUfF4Gb+crjPsd+qBbXcjVfqdadwhsoQKBgQDBF1Pys/NitW8okJwp +2fiDIASP3TiI+MthWHGyuoZGPvmXQ3H6iuLSm8c/iYI2WPTf53Xff1VcFm1GmQms +GCsYM8Ax94zCeO6Ei1sYYxwcBloEZfOeV37MPA4pjJF4Lt+n5nveNxP+lrsjksJz +wToSEgWPDT1b/xcdt4/5j9J85wKBgQC5tiLx+33mwH4DoaFRmSl0+VuSNYFw6DTQ +SQ+kWqWGH4NENc9wf4Dj2VUZQhpXNhXVSxj+aP2d/ck1NrTJAWqYEXCDtFQOGSa2 +cGPRr+Fhy5NIEaEvR7IXcMBZzx3koYmWVBHricyrXs5FvHrT3N14mGDUG8n24U3f +R799bau0IQKBgQC97UM+lHCPJCWNggiJRgSifcje9VtZp1btjoBvq/bNe74nYkjn +htsrC91Fiu1Qpdlfr50K1IXSyaB886VG6JLjAGxI+dUzqJ38M9LLvxj0G+9JKjsi +AbAQFfZcOg8QZxLJZPVsE0MQhZTXndC06VhEVAOxvPUg214Sde8hK61/+wKBgCRw +O10VhnePT2pw/VEgZ0T/ZFtEylgYB7zSiRIrgwzVBBGPKVueePC8BPmGwdpYz2Hh +cU8B1Ll6QU+Co2hJMdwSl+wPpup5PuJPHRbYlrV0lzpt0x2OyL/WrLcyb2Ab3f40 +EqwPhqwdVwXR3JvTW1U9OMqFhVQ+kuP7lPQMX8NhAoGBAJOgZ7Tokipc4Mi68Olw +SCaOPvjjy4sW2rTRuKyjc1wTAzy7SJ3vXHfGkkN99nTLJFwAyJhWUpnRdwAXGi+x +gyOa95ImsEfRSwEjbluWfF8/P0IU8GR+ZTqT4NnNCOsi8T/xst4Szd1ECJNnnZDe +1ChfPP1AH+/75MJCvu6wQBQv +-----END PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/testdata/self_signed_cert.pem b/vendor/github.com/google/s2a-go/testdata/self_signed_cert.pem new file mode 100644 index 00000000..ad1bad59 --- /dev/null +++ b/vendor/github.com/google/s2a-go/testdata/self_signed_cert.pem @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDITCCAgkCFBS8mLoytMpMWBwpAtnRaq3eIKnsMA0GCSqGSIb3DQEBCwUAME0x +CzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTENMAsGA1UECgwEVGVzdDEiMCAGA1UE +AwwZdGVzdC1zMmEtbXRscy1zZWxmLXNpZ25lZDAeFw0yMzA4MjIyMTE2MDFaFw00 +MzA4MjIyMTE2MDFaME0xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTENMAsGA1UE +CgwEVGVzdDEiMCAGA1UEAwwZdGVzdC1zMmEtbXRscy1zZWxmLXNpZ25lZDCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKFFPsYasKZeCFLEXl3RpE/ZOXFe +2lhutIalSpZvCmso+mQGoZ4cHK7At+kDjBi5CrnXkYcw7quQAhHgU0frhWdj7tsW +HUUtq7T8eaGWKBnVD9fl+MjtAl1BmhXwV9qRBbj4EesSKGDSGpKf66dOtzw83JbB +cU7XlPAH1c1zo2GXC1himcZ+SVGHVrOjn4NmeFs8g94/Dke8dWkHwv5YTMVugFK4 +5KxKgSOKkr4ka7PCBzgxCnW4wYSZNRHcxrqkiArO2HAQq0ACr7u+fVDYH//9mP2Z +ADo/zch7O5yhkiNbjXJIRrptDWEuVYMRloYDhT773h7bV/Q0Wo0NQGtasJ8CAwEA +ATANBgkqhkiG9w0BAQsFAAOCAQEAPjbH0TMyegF/MDvglkc0sXr6DqlmTxDCZZmG +lYPZ5Xy062+rxIHghMARbvO4BxepiG37KsP2agvOldm4TtU8nQ8LyswmSIFm4BQ+ 
+XQWwdsWyYyd8l0d5sXAdaN6AXwy50fvqCepmEqyreMY6dtLzlwo9gVCBFB7QuAPt +Nc14phpEUZt/KPNuY6cUlB7bz3tmnFbwxUrWj1p0KBEYsr7+KEVZxR+z0wtlU7S9 +ZBrmUvx0fq5Ef7JWtHW0w4ofg1op742sdYl+53C26GZ76ts4MmqVz2/94DScgRaU +gT0GLVuuCZXRDVeTXqTb4mditRCfzFPe9cCegYhGhSqBs8yh5A== +-----END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/testdata/self_signed_key.pem b/vendor/github.com/google/s2a-go/testdata/self_signed_key.pem new file mode 100644 index 00000000..bcf08e4f --- /dev/null +++ b/vendor/github.com/google/s2a-go/testdata/self_signed_key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQChRT7GGrCmXghS +xF5d0aRP2TlxXtpYbrSGpUqWbwprKPpkBqGeHByuwLfpA4wYuQq515GHMO6rkAIR +4FNH64VnY+7bFh1FLau0/HmhligZ1Q/X5fjI7QJdQZoV8FfakQW4+BHrEihg0hqS +n+unTrc8PNyWwXFO15TwB9XNc6NhlwtYYpnGfklRh1azo5+DZnhbPIPePw5HvHVp +B8L+WEzFboBSuOSsSoEjipK+JGuzwgc4MQp1uMGEmTUR3Ma6pIgKzthwEKtAAq+7 +vn1Q2B///Zj9mQA6P83IezucoZIjW41ySEa6bQ1hLlWDEZaGA4U++94e21f0NFqN +DUBrWrCfAgMBAAECggEAR8e8YwyqJ8KezcgdgIC5M9kp2i4v3UCZFX0or8CI0J2S +pUbWVLuKgLXCpfIwPyjNf15Vpei/spkMcsx4BQDthdFTFSzIpmvni0z9DlD5VFYj +ESOJElV7wepbHPy2/c+izmuL/ic81aturGiFyRgeMq+cN3WuaztFTXkPTrzzsZGF +p/Mx3gqm7Hoc3d2xlv+8L5GjCtEJPlQgZJV+s3ennBjOAd8CC7d9qJetE3Er46pn +r5jedV3bQRZYBzmooYNHjbAs26++wYac/jTE0/U6nKS17eWq4BQZUtlMXUw5N81B +7LKn7C03rj2KCn+Nf5uin9ALmoy888LXCDdvL/NZkQKBgQDduv1Heu+tOZuNYUdQ +Hswmd8sVNAAWGZxdxixHMv58zrgbLFXSX6K89X2l5Sj9XON8TH46MuSFdjSwwWw5 +fBrhVEhA5srcqpvVWIBE05yqPpt0s1NQktMWJKELWlG8jOhVKwM5OYDpdxtwehpz +1g70XJz+nF/LTV8RdTK+OWDDpQKBgQC6MhdbGHUz/56dY3gZpE5TXnN2hkNbZCgk +emr6z85VHhQflZbedhCzB9PUnZnCKWOGQHQdxRTtRfd46LVboZqCdYO1ZNQv6toP +ysS7dTpZZFy7CpQaW0Y6/jS65jW6xIDKR1W40vgltZ3sfpG37JaowpzWdw2WuOnw +Bg0rcJAf8wKBgQCqE+p/z97UwuF8eufWnyj9QNo382E1koOMspv4KTdnyLETtthF +vDH6O1wbykG8xmmASLRyM+NyNA+KnXNETNvZh2q8zctBpGRQK8iIAsGjHM7ln0AD +B/x+ea5GJQuZU4RK/+lDFca6TjBwAFkWDVX/PqL18kDQkxKfM4SuwRhmOQKBgDGh +eoJIsa0LnP787Z2AI3Srf4F/ZmLs/ppCm1OBotEjdF+64v0nYWonUvqgi8SqfaHi +elEZIGvis4ViGj1zhRjzNAlc+AZRxpBhDzGcnNIJI4Kj3jhsTfsZmXqcNIQ1LtM8 +Uogyi/yZPaA1WKg7Aym2vlGYaGHdplXZdxc2KOSrAoGABRkD9l2OVcwK7RyNgFxo +mjxx0tfUdDBhHIi2igih1FiHpeP9E+4/kE/K7PnU9DoDrL1jW1MTpXaYV4seOylk +k9z/9QfcRa9ePD2N4FqbHWSYp5n3aLoIcGq/9jyjTwayZbbIhWO+vNuHE9wIvecZ +8x3gNkxJRb4NaLIoNzAhCoo= +-----END PRIVATE KEY----- diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml deleted file mode 100644 index d8156a60..00000000 --- a/vendor/github.com/google/uuid/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go - -go: - - 1.4.3 - - 1.5.3 - - tip - -script: - - go test -v ./... 
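A note on the retry plumbing introduced at the top of this patch: retry.Run returns nothing, so call sites capture the last error in the closure, exactly as the reworked NewS2ADialTLSContextFunc does for both factory.Build and the TLS dial. A minimal, self-contained sketch of that pattern follows; the dialed address is illustrative only and not part of the patch.

package main

import (
	"context"
	"log"
	"net"
	"time"

	"github.com/google/s2a-go/retry"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	var conn net.Conn
	var err error
	retry.Run(ctx, func() error {
		// Each failed attempt pauses with jitter: roughly 100ms to start,
		// doubling per attempt and capped at 30s. Run stops on success,
		// after 5 retries, or when ctx expires, leaving the last err set.
		conn, err = (&net.Dialer{}).DialContext(ctx, "tcp", "localhost:61365") // illustrative address
		return err
	})
	if err != nil {
		log.Fatalf("gave up after retries: %v", err)
	}
	defer conn.Close()
}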
diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md new file mode 100644 index 00000000..7ec5ac7e --- /dev/null +++ b/vendor/github.com/google/uuid/CHANGELOG.md @@ -0,0 +1,41 @@ +# Changelog + +## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16) + + +### Features + +* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3)) + + +### Bug Fixes + +* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06)) +* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6)) + +## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12) + + +### Features + +* Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29)) + +## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26) + + +### Features + +* UUIDs slice type with Strings() convenience method ([#133](https://github.com/google/uuid/issues/133)) ([cd5fbbd](https://github.com/google/uuid/commit/cd5fbbdd02f3e3467ac18940e07e062be1f864b4)) + +### Fixes + +* Clarify that Parse's job is to parse but not necessarily validate strings. (Documents current behavior) + +## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18) + + +### Bug Fixes + +* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0)) + +## Changelog diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md index 04fdf09f..a502fdc5 100644 --- a/vendor/github.com/google/uuid/CONTRIBUTING.md +++ b/vendor/github.com/google/uuid/CONTRIBUTING.md @@ -2,6 +2,22 @@ We definitely welcome patches and contribution to this project! +### Tips + +Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org). + +Always try to include a test case! If it is not possible or not necessary, +please explain why in the pull request description. + +### Releasing + +Commits that would precipitate a SemVer change, as described in the Conventional +Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action) +to create a release candidate pull request. Once submitted, `release-please` +will create a release. + +For tips on how to work with `release-please`, see its documentation. + ### Legal requirements In order to protect both you and ourselves, you will need to sign the diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md index f765a46f..3e9a6188 100644 --- a/vendor/github.com/google/uuid/README.md +++ b/vendor/github.com/google/uuid/README.md @@ -1,6 +1,6 @@ -# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) +# uuid The uuid package generates and inspects UUIDs based on -[RFC 4122](http://tools.ietf.org/html/rfc4122) +[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122) and DCE 1.1: Authentication and Security Services. 
This package is based on the github.com/pborman/uuid package (previously named @@ -9,10 +9,12 @@ a UUID is a 16 byte array rather than a byte slice. One loss due to this change is the ability to represent an invalid UUID (vs a NIL UUID). ###### Install -`go get github.com/google/uuid` +```sh +go get github.com/google/uuid +``` ###### Documentation -[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) +[![Go Reference](https://pkg.go.dev/badge/github.com/google/uuid.svg)](https://pkg.go.dev/github.com/google/uuid) Full `go doc` style documentation for the package can be viewed online without installing this package by using the GoDoc site here: diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go index b404f4be..dc60082d 100644 --- a/vendor/github.com/google/uuid/hash.go +++ b/vendor/github.com/google/uuid/hash.go @@ -17,6 +17,12 @@ var ( NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) Nil UUID // empty UUID, all zeros + + // The Max UUID is special form of UUID that is specified to have all 128 bits set to 1. + Max = UUID{ + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + } ) // NewHash returns a new UUID derived from the hash of space concatenated with diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go index 24b78edc..b2a0bc87 100644 --- a/vendor/github.com/google/uuid/node_js.go +++ b/vendor/github.com/google/uuid/node_js.go @@ -7,6 +7,6 @@ package uuid // getHardwareInterface returns nil values for the JS version of the code. -// This remvoves the "net" dependency, because it is not used in the browser. +// This removes the "net" dependency, because it is not used in the browser. // Using the "net" library inflates the size of the transpiled JS code by 673k bytes. func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go index e6ef06cd..c3511292 100644 --- a/vendor/github.com/google/uuid/time.go +++ b/vendor/github.com/google/uuid/time.go @@ -108,12 +108,23 @@ func setClockSequence(seq int) { } // Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in -// uuid. The time is only defined for version 1 and 2 UUIDs. +// uuid. The time is only defined for version 1, 2, 6 and 7 UUIDs. func (uuid UUID) Time() Time { - time := int64(binary.BigEndian.Uint32(uuid[0:4])) - time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 - time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 - return Time(time) + var t Time + switch uuid.Version() { + case 6: + time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110 + t = Time(time) + case 7: + time := binary.BigEndian.Uint64(uuid[:8]) + t = Time((time>>16)*10000 + g1582ns100) + default: // forward compatible + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + t = Time(time) + } + return t } // ClockSequence returns the clock sequence encoded in uuid. 
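The Time() change above makes timestamp extraction uniform across versions: for v7 the first eight bytes are read, the low 16 bits (version and sequence) are shifted out, and the remaining Unix milliseconds are rescaled to 100ns ticks since 15 Oct 1582. A quick sketch using the new constructor added later in this patch together with the package's existing UnixTime helper:

package main

import (
	"fmt"
	"time"

	"github.com/google/uuid"
)

func main() {
	u, err := uuid.NewV7()
	if err != nil {
		panic(err)
	}
	// Round-trip the embedded timestamp; v7 carries millisecond precision.
	sec, nsec := u.Time().UnixTime()
	fmt.Println(time.Unix(sec, nsec).UTC())
}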
diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go index a57207ae..5232b486 100644 --- a/vendor/github.com/google/uuid/uuid.go +++ b/vendor/github.com/google/uuid/uuid.go @@ -56,11 +56,15 @@ func IsInvalidLengthError(err error) bool { return ok } -// Parse decodes s into a UUID or returns an error. Both the standard UUID -// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the -// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex -// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx. +// Parse decodes s into a UUID or returns an error if it cannot be parsed. Both +// the standard UUID forms defined in RFC 4122 +// (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) are decoded. In addition, +// Parse accepts non-standard strings such as the raw hex encoding +// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx and 38 byte "Microsoft style" encodings, +// e.g. {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}. Only the middle 36 bytes are +// examined in the latter case. Parse should not be used to validate strings as +// it parses non-standard encodings as indicated above. func Parse(s string) (UUID, error) { var uuid UUID switch len(s) { @@ -69,7 +73,7 @@ func Parse(s string) (UUID, error) { // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx case 36 + 9: - if strings.ToLower(s[:9]) != "urn:uuid:" { + if !strings.EqualFold(s[:9], "urn:uuid:") { return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) } s = s[9:] @@ -101,7 +105,8 @@ func Parse(s string) (UUID, error) { 9, 11, 14, 16, 19, 21, - 24, 26, 28, 30, 32, 34} { + 24, 26, 28, 30, 32, 34, + } { v, ok := xtob(s[x], s[x+1]) if !ok { return uuid, errors.New("invalid UUID format") @@ -117,7 +122,7 @@ func ParseBytes(b []byte) (UUID, error) { switch len(b) { case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { + if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) { return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) } b = b[9:] @@ -145,7 +150,8 @@ func ParseBytes(b []byte) (UUID, error) { 9, 11, 14, 16, 19, 21, - 24, 26, 28, 30, 32, 34} { + 24, 26, 28, 30, 32, 34, + } { v, ok := xtob(b[x], b[x+1]) if !ok { return uuid, errors.New("invalid UUID format") @@ -180,6 +186,59 @@ func Must(uuid UUID, err error) UUID { return uuid } +// Validate returns an error if s is not a properly formatted UUID in one of the following formats: +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} +// It returns an error if the format is invalid, otherwise nil. 
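A short usage sketch for Validate, whose body follows: unlike Parse it never constructs a UUID, it only reports whether s is in one of the four accepted shapes.

package main

import (
	"log"

	"github.com/google/uuid"
)

func main() {
	for _, s := range []string{
		"f47ac10b-58cc-4372-8567-0e02b2c3d479",
		"urn:uuid:f47ac10b-58cc-4372-8567-0e02b2c3d479",
		"{f47ac10b-58cc-4372-8567-0e02b2c3d479}",
		"f47ac10b58cc437285670e02b2c3d479",
	} {
		if err := uuid.Validate(s); err != nil {
			log.Fatalf("unexpected: %v", err) // all four forms validate cleanly
		}
	}
}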
+func Validate(s string) error { + switch len(s) { + // Standard UUID format + case 36: + + // UUID with "urn:uuid:" prefix + case 36 + 9: + if !strings.EqualFold(s[:9], "urn:uuid:") { + return fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + + // UUID enclosed in braces + case 36 + 2: + if s[0] != '{' || s[len(s)-1] != '}' { + return fmt.Errorf("invalid bracketed UUID format") + } + s = s[1 : len(s)-1] + + // UUID without hyphens + case 32: + for i := 0; i < len(s); i += 2 { + _, ok := xtob(s[i], s[i+1]) + if !ok { + return errors.New("invalid UUID format") + } + } + + default: + return invalidLengthError{len(s)} + } + + // Check for standard UUID format + if len(s) == 36 { + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return errors.New("invalid UUID format") + } + for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} { + if _, ok := xtob(s[x], s[x+1]); !ok { + return errors.New("invalid UUID format") + } + } + } + + return nil +} + // String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx // , or "" if uuid is invalid. func (uuid UUID) String() string { @@ -292,3 +351,15 @@ func DisableRandPool() { poolMu.Lock() poolPos = randPoolSize } + +// UUIDs is a slice of UUID types. +type UUIDs []UUID + +// Strings returns a string slice containing the string form of each UUID in uuids. +func (uuids UUIDs) Strings() []string { + var uuidStrs = make([]string, len(uuids)) + for i, uuid := range uuids { + uuidStrs[i] = uuid.String() + } + return uuidStrs +} diff --git a/vendor/github.com/google/uuid/version6.go b/vendor/github.com/google/uuid/version6.go new file mode 100644 index 00000000..339a959a --- /dev/null +++ b/vendor/github.com/google/uuid/version6.go @@ -0,0 +1,56 @@ +// Copyright 2023 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "encoding/binary" + +// UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality. +// It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs. +// Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead. +// +// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6 +// +// NewV6 returns a Version 6 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set NewV6 set NodeID is random bits automatically . If clock sequence has not been set by +// SetClockSequence then it will be set automatically. If GetTime fails to +// return the current NewV6 returns Nil and an error. 
+func NewV6() (UUID, error) { + var uuid UUID + now, seq, err := GetTime() + if err != nil { + return uuid, err + } + + /* + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | time_high | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | time_mid | time_low_and_version | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |clk_seq_hi_res | clk_seq_low | node (0-1) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | node (2-5) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ + + binary.BigEndian.PutUint64(uuid[0:], uint64(now)) + binary.BigEndian.PutUint16(uuid[8:], seq) + + uuid[6] = 0x60 | (uuid[6] & 0x0F) + uuid[8] = 0x80 | (uuid[8] & 0x3F) + + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + copy(uuid[10:], nodeID[:]) + nodeMu.Unlock() + + return uuid, nil +} diff --git a/vendor/github.com/google/uuid/version7.go b/vendor/github.com/google/uuid/version7.go new file mode 100644 index 00000000..3167b643 --- /dev/null +++ b/vendor/github.com/google/uuid/version7.go @@ -0,0 +1,104 @@ +// Copyright 2023 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// UUID version 7 features a time-ordered value field derived from the widely +// implemented and well known Unix Epoch timestamp source, +// the number of milliseconds seconds since midnight 1 Jan 1970 UTC, leap seconds excluded. +// As well as improved entropy characteristics over versions 1 or 6. +// +// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7 +// +// Implementations SHOULD utilize UUID version 7 over UUID version 1 and 6 if possible. +// +// NewV7 returns a Version 7 UUID based on the current time(Unix Epoch). +// Uses the randomness pool if it was enabled with EnableRandPool. +// On error, NewV7 returns Nil and an error +func NewV7() (UUID, error) { + uuid, err := NewRandom() + if err != nil { + return uuid, err + } + makeV7(uuid[:]) + return uuid, nil +} + +// NewV7FromReader returns a Version 7 UUID based on the current time(Unix Epoch). +// it use NewRandomFromReader fill random bits. +// On error, NewV7FromReader returns Nil and an error. 
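Because getV7Time (defined just below) guarantees that milli<<12+seq strictly increases within a process, v7 values sort in creation order when compared as raw bytes; a small sketch:

package main

import (
	"bytes"
	"fmt"

	"github.com/google/uuid"
)

func main() {
	a, _ := uuid.NewV7()
	b, _ := uuid.NewV7()
	// Bytes 0-7 hold (unix_ts_ms, ver, 12-bit seq), so byte order matches
	// creation order for UUIDs generated by the same process.
	fmt.Println(bytes.Compare(a[:], b[:]) < 0) // true
}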
+func NewV7FromReader(r io.Reader) (UUID, error) { + uuid, err := NewRandomFromReader(r) + if err != nil { + return uuid, err + } + + makeV7(uuid[:]) + return uuid, nil +} + +// makeV7 fill 48 bits time (uuid[0] - uuid[5]), set version b0111 (uuid[6]) +// uuid[8] already has the right version number (Variant is 10) +// see function NewV7 and NewV7FromReader +func makeV7(uuid []byte) { + /* + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | unix_ts_ms | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | unix_ts_ms | ver | rand_a (12 bit seq) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |var| rand_b | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | rand_b | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ + _ = uuid[15] // bounds check + + t, s := getV7Time() + + uuid[0] = byte(t >> 40) + uuid[1] = byte(t >> 32) + uuid[2] = byte(t >> 24) + uuid[3] = byte(t >> 16) + uuid[4] = byte(t >> 8) + uuid[5] = byte(t) + + uuid[6] = 0x70 | (0x0F & byte(s>>8)) + uuid[7] = byte(s) +} + +// lastV7time is the last time we returned stored as: +// +// 52 bits of time in milliseconds since epoch +// 12 bits of (fractional nanoseconds) >> 8 +var lastV7time int64 + +const nanoPerMilli = 1000000 + +// getV7Time returns the time in milliseconds and nanoseconds / 256. +// The returned (milli << 12 + seq) is guarenteed to be greater than +// (milli << 12 + seq) returned by any previous call to getV7Time. +func getV7Time() (milli, seq int64) { + timeMu.Lock() + defer timeMu.Unlock() + + nano := timeNow().UnixNano() + milli = nano / nanoPerMilli + // Sequence number is between 0 and 3906 (nanoPerMilli>>8) + seq = (nano - milli*nanoPerMilli) >> 8 + now := milli<<12 + seq + if now <= lastV7time { + now = lastV7time + 1 + milli = now >> 12 + seq = now & 0xfff + } + lastV7time = now + return milli, seq +} diff --git a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go index b3283b81..ea5beb5a 100644 --- a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go +++ b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go @@ -35,6 +35,8 @@ import ( const signAPI = "EnterpriseCertSigner.Sign" const certificateChainAPI = "EnterpriseCertSigner.CertificateChain" const publicKeyAPI = "EnterpriseCertSigner.Public" +const encryptAPI = "EnterpriseCertSigner.Encrypt" +const decryptAPI = "EnterpriseCertSigner.Decrypt" // A Connection wraps a pair of unidirectional streams as an io.ReadWriteCloser. type Connection struct { @@ -54,13 +56,28 @@ func (c *Connection) Close() error { func init() { gob.Register(crypto.SHA256) + gob.Register(crypto.SHA384) + gob.Register(crypto.SHA512) gob.Register(&rsa.PSSOptions{}) + gob.Register(&rsa.OAEPOptions{}) } -// SignArgs contains arguments to a crypto Signer.Sign method. +// SignArgs contains arguments for a Sign API call. type SignArgs struct { Digest []byte // The content to sign. - Opts crypto.SignerOpts // Options for signing, such as Hash identifier. + Opts crypto.SignerOpts // Options for signing. Must implement HashFunc(). +} + +// EncryptArgs contains arguments for an Encrypt API call. +type EncryptArgs struct { + Plaintext []byte // The plaintext to encrypt. + Opts any // Options for encryption. Ex: an instance of crypto.Hash. 
+} + +// DecryptArgs contains arguments to for a Decrypt API call. +type DecryptArgs struct { + Ciphertext []byte // The ciphertext to decrypt. + Opts crypto.DecrypterOpts // Options for decryption. Ex: an instance of *rsa.OAEPOptions. } // Key implements credential.Credential by holding the executed signer subprocess. @@ -98,7 +115,7 @@ func (k *Key) Public() crypto.PublicKey { return k.publicKey } -// Sign signs a message digest, using the specified signer options. +// Sign signs a message digest, using the specified signer opts. Implements crypto.Signer interface. func (k *Key) Sign(_ io.Reader, digest []byte, opts crypto.SignerOpts) (signed []byte, err error) { if opts != nil && opts.HashFunc() != 0 && len(digest) != opts.HashFunc().Size() { return nil, fmt.Errorf("Digest length of %v bytes does not match Hash function size of %v bytes", len(digest), opts.HashFunc().Size()) @@ -107,6 +124,18 @@ func (k *Key) Sign(_ io.Reader, digest []byte, opts crypto.SignerOpts) (signed [ return } +// Encrypt encrypts a plaintext msg into ciphertext, using the specified encrypt opts. +func (k *Key) Encrypt(_ io.Reader, msg []byte, opts any) (ciphertext []byte, err error) { + err = k.client.Call(encryptAPI, EncryptArgs{Plaintext: msg, Opts: opts}, &ciphertext) + return +} + +// Decrypt decrypts a ciphertext msg into plaintext, using the specified decrypter opts. Implements crypto.Decrypter interface. +func (k *Key) Decrypt(_ io.Reader, msg []byte, opts crypto.DecrypterOpts) (plaintext []byte, err error) { + err = k.client.Call(decryptAPI, DecryptArgs{Ciphertext: msg, Opts: opts}, &plaintext) + return +} + // ErrCredUnavailable is a sentinel error that indicates ECP Cred is unavailable, // possibly due to missing config or missing binary path. var ErrCredUnavailable = errors.New("Cred is unavailable") @@ -120,7 +149,12 @@ var ErrCredUnavailable = errors.New("Cred is unavailable") // The config file also specifies which certificate the signer should use. 
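With the additions above, *client.Key satisfies crypto.Decrypter alongside crypto.Signer, delegating to the new Encrypt/Decrypt RPCs of the signer binary. A sketch of RSA-OAEP decryption through ECP; it assumes a local certificate config that points at an RSA key and a signer binary implementing the new RPCs, neither of which this patch guarantees:

package main

import (
	"crypto"
	"crypto/rsa"
	"log"

	"github.com/googleapis/enterprise-certificate-proxy/client"
)

func main() {
	// "" resolves via $GOOGLE_API_CERTIFICATE_CONFIG first, then the default
	// path, per the Cred change in the hunk that follows.
	key, err := client.Cred("")
	if err != nil {
		log.Fatal(err)
	}
	defer key.Close()

	var ciphertext []byte // produced elsewhere with the matching public key
	plaintext, err := key.Decrypt(nil, ciphertext, &rsa.OAEPOptions{Hash: crypto.SHA256})
	if err != nil {
		log.Fatal(err)
	}
	_ = plaintext
}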
func Cred(configFilePath string) (*Key, error) { if configFilePath == "" { - configFilePath = util.GetDefaultConfigFilePath() + envFilePath := util.GetConfigFilePathFromEnv() + if envFilePath != "" { + configFilePath = envFilePath + } else { + configFilePath = util.GetDefaultConfigFilePath() + } } enterpriseCertSignerPath, err := util.LoadSignerBinaryPath(configFilePath) if err != nil { diff --git a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go index 1640ec1c..f374a7f5 100644 --- a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go +++ b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go @@ -22,6 +22,7 @@ import ( "os/user" "path/filepath" "runtime" + "strings" ) const configFileName = "certificate_config.json" @@ -63,6 +64,9 @@ func LoadSignerBinaryPath(configFilePath string) (path string, err error) { if signerBinaryPath == "" { return "", ErrConfigUnavailable } + + signerBinaryPath = strings.ReplaceAll(signerBinaryPath, "~", guessHomeDir()) + signerBinaryPath = strings.ReplaceAll(signerBinaryPath, "$HOME", guessHomeDir()) return signerBinaryPath, nil } @@ -89,3 +93,8 @@ func getDefaultConfigFileDirectory() (directory string) { func GetDefaultConfigFilePath() (path string) { return filepath.Join(getDefaultConfigFileDirectory(), configFileName) } + +// GetConfigFilePathFromEnv returns the path associated with environment variable GOOGLE_API_CERTIFICATE_CONFIG +func GetConfigFilePathFromEnv() (path string) { + return os.Getenv("GOOGLE_API_CERTIFICATE_CONFIG") +} diff --git a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json index 91d60a80..ef508417 100644 --- a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json +++ b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json @@ -1,3 +1,3 @@ { - "v2": "2.11.0" + "v2": "2.12.0" } diff --git a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md index e17b196f..ae711494 100644 --- a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md +++ b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md @@ -1,5 +1,13 @@ # Changelog +## [2.12.0](https://github.com/googleapis/gax-go/compare/v2.11.0...v2.12.0) (2023-06-26) + + +### Features + +* **v2/callctx:** add new callctx package ([#291](https://github.com/googleapis/gax-go/issues/291)) ([11503ed](https://github.com/googleapis/gax-go/commit/11503ed98df4ae1bbdedf91ff64d47e63f187d68)) +* **v2:** add BuildHeaders and InsertMetadataIntoOutgoingContext to header ([#290](https://github.com/googleapis/gax-go/issues/290)) ([6a4b89f](https://github.com/googleapis/gax-go/commit/6a4b89f5551a40262e7c3caf2e1bdc7321b76ea1)) + ## [2.11.0](https://github.com/googleapis/gax-go/compare/v2.10.0...v2.11.0) (2023-06-13) diff --git a/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go b/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go new file mode 100644 index 00000000..af15fb58 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go @@ -0,0 +1,74 @@ +// Copyright 2023, Google Inc. +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package callctx provides helpers for storing and retrieving values out of +// [context.Context]. These values are used by our client libraries in various +// ways across the stack. +package callctx + +import ( + "context" + "fmt" +) + +const ( + headerKey = contextKey("header") +) + +// contextKey is a private type used to store/retrieve context values. +type contextKey string + +// HeadersFromContext retrieves headers set from [SetHeaders]. These headers +// can then be cast to http.Header or metadata.MD to send along on requests. +func HeadersFromContext(ctx context.Context) map[string][]string { + m, ok := ctx.Value(headerKey).(map[string][]string) + if !ok { + return nil + } + return m +} + +// SetHeaders stores key value pairs in the returned context that can later +// be retrieved by [HeadersFromContext]. Values stored in this manner will +// automatically be retrieved by client libraries and sent as outgoing headers +// on all requests. keyvals should have a corresponding value for every key +// provided. If there is an odd number of keyvals this method will panic. 
+func SetHeaders(ctx context.Context, keyvals ...string) context.Context { + if len(keyvals)%2 != 0 { + panic(fmt.Sprintf("callctx: an even number of key value pairs must be provided, got %d", len(keyvals))) + } + h, ok := ctx.Value(headerKey).(map[string][]string) + if !ok { + h = make(map[string][]string) + } + for i := 0; i < len(keyvals); i = i + 2 { + h[keyvals[i]] = append(h[keyvals[i]], keyvals[i+1]) + } + return context.WithValue(ctx, headerKey, h) +} diff --git a/vendor/github.com/googleapis/gax-go/v2/header.go b/vendor/github.com/googleapis/gax-go/v2/header.go index 6488461f..453fab7e 100644 --- a/vendor/github.com/googleapis/gax-go/v2/header.go +++ b/vendor/github.com/googleapis/gax-go/v2/header.go @@ -31,9 +31,15 @@ package gax import ( "bytes" + "context" + "fmt" + "net/http" "runtime" "strings" "unicode" + + "github.com/googleapis/gax-go/v2/callctx" + "google.golang.org/grpc/metadata" ) var ( @@ -117,3 +123,46 @@ func XGoogHeader(keyval ...string) string { } return buf.String()[1:] } + +// InsertMetadataIntoOutgoingContext is for use by the Google Cloud Libraries +// only. +// +// InsertMetadataIntoOutgoingContext returns a new context that merges the +// provided keyvals metadata pairs with any existing metadata/headers in the +// provided context. keyvals should have a corresponding value for every key +// provided. If there is an odd number of keyvals this method will panic. +// Existing values for keys will not be overwritten, instead provided values +// will be appended to the list of existing values. +func InsertMetadataIntoOutgoingContext(ctx context.Context, keyvals ...string) context.Context { + return metadata.NewOutgoingContext(ctx, insertMetadata(ctx, keyvals...)) +} + +// BuildHeaders is for use by the Google Cloud Libraries only. +// +// BuildHeaders returns a new http.Header that merges the provided +// keyvals header pairs with any existing metadata/headers in the provided +// context. keyvals should have a corresponding value for every key provided. +// If there is an odd number of keyvals this method will panic. +// Existing values for keys will not be overwritten, instead provided values +// will be appended to the list of existing values. +func BuildHeaders(ctx context.Context, keyvals ...string) http.Header { + return http.Header(insertMetadata(ctx, keyvals...)) +} + +func insertMetadata(ctx context.Context, keyvals ...string) metadata.MD { + if len(keyvals)%2 != 0 { + panic(fmt.Sprintf("gax: an even number of key value pairs must be provided, got %d", len(keyvals))) + } + out, ok := metadata.FromOutgoingContext(ctx) + if !ok { + out = metadata.MD(make(map[string][]string)) + } + headers := callctx.HeadersFromContext(ctx) + for k, v := range headers { + out[k] = append(out[k], v...) + } + for i := 0; i < len(keyvals); i = i + 2 { + out[keyvals[i]] = append(out[keyvals[i]], keyvals[i+1]) + } + return out +} diff --git a/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/vendor/github.com/googleapis/gax-go/v2/internal/version.go index 374dcdb1..7425b5ff 100644 --- a/vendor/github.com/googleapis/gax-go/v2/internal/version.go +++ b/vendor/github.com/googleapis/gax-go/v2/internal/version.go @@ -30,4 +30,4 @@ package internal // Version is the current tagged release of the library. 
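Taken together, callctx.SetHeaders and the new BuildHeaders/insertMetadata above give the client libraries two merge points for headers, and values always append rather than overwrite. A small sketch (the header key is illustrative; keys are stored exactly as given, so index the map directly instead of using http.Header.Get, which canonicalizes):

package main

import (
	"context"
	"fmt"

	gax "github.com/googleapis/gax-go/v2"
	"github.com/googleapis/gax-go/v2/callctx"
)

func main() {
	ctx := callctx.SetHeaders(context.Background(), "x-example-header", "from-ctx")
	hdrs := gax.BuildHeaders(ctx, "x-example-header", "per-call")
	fmt.Println(hdrs["x-example-header"]) // [from-ctx per-call]
}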
-const Version = "2.11.0" +const Version = "2.12.0" diff --git a/vendor/github.com/jackc/pgconn/CHANGELOG.md b/vendor/github.com/jackc/pgconn/CHANGELOG.md index 3550b437..36dcdae9 100644 --- a/vendor/github.com/jackc/pgconn/CHANGELOG.md +++ b/vendor/github.com/jackc/pgconn/CHANGELOG.md @@ -1,3 +1,9 @@ +# 1.14.1 (July 19, 2023) + +* Fix: Enable failover efforts when pg_hba.conf disallows non-ssl connections (Brandon Kauffman) +* Fix: connect_timeout is not obeyed for sslmode=allow|prefer (smaher-edb) +* Optimize redundant pgpass parsing in case password is explicitly set (Aleksandr Alekseev) + # 1.14.0 (February 11, 2023) * Fix: each connection attempt to new node gets own timeout (Nathan Giardina) diff --git a/vendor/github.com/jackc/pgconn/config.go b/vendor/github.com/jackc/pgconn/config.go index 4080f2c6..36b74c4a 100644 --- a/vendor/github.com/jackc/pgconn/config.go +++ b/vendor/github.com/jackc/pgconn/config.go @@ -366,9 +366,9 @@ func ParseConfigWithOptions(connString string, options ParseConfigOptions) (*Con config.TLSConfig = fallbacks[0].TLSConfig config.Fallbacks = fallbacks[1:] - passfile, err := pgpassfile.ReadPassfile(settings["passfile"]) - if err == nil { - if config.Password == "" { + if config.Password == "" { + passfile, err := pgpassfile.ReadPassfile(settings["passfile"]) + if err == nil { host := config.Host if network, _ := NetworkAddress(config.Host, config.Port); network == "unix" { host = "localhost" diff --git a/vendor/github.com/jackc/pgconn/pgconn.go b/vendor/github.com/jackc/pgconn/pgconn.go index 6601194c..e5313030 100644 --- a/vendor/github.com/jackc/pgconn/pgconn.go +++ b/vendor/github.com/jackc/pgconn/pgconn.go @@ -156,12 +156,15 @@ func ConnectConfig(octx context.Context, config *Config) (pgConn *PgConn, err er foundBestServer := false var fallbackConfig *FallbackConfig - for _, fc := range fallbackConfigs { + for i, fc := range fallbackConfigs { // ConnectTimeout restricts the whole connection process. if config.ConnectTimeout != 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(octx, config.ConnectTimeout) - defer cancel() + // create new context first time or when previous host was different + if i == 0 || (fallbackConfigs[i].Host != fallbackConfigs[i-1].Host) { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(octx, config.ConnectTimeout) + defer cancel() + } } else { ctx = octx } @@ -176,7 +179,7 @@ func ConnectConfig(octx context.Context, config *Config) (pgConn *PgConn, err er const ERRCODE_INVALID_CATALOG_NAME = "3D000" // db does not exist const ERRCODE_INSUFFICIENT_PRIVILEGE = "42501" // missing connect privilege if pgerr.Code == ERRCODE_INVALID_PASSWORD || - pgerr.Code == ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION || + pgerr.Code == ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION && fc.TLSConfig != nil || pgerr.Code == ERRCODE_INVALID_CATALOG_NAME || pgerr.Code == ERRCODE_INSUFFICIENT_PRIVILEGE { break @@ -599,9 +602,10 @@ func (pgConn *PgConn) PID() uint32 { // TxStatus returns the current TxStatus as reported by the server in the ReadyForQuery message. // // Possible return values: -// 'I' - idle / not in transaction -// 'T' - in a transaction -// 'E' - in a failed transaction +// +// 'I' - idle / not in transaction +// 'T' - in a transaction +// 'E' - in a failed transaction // // See https://www.postgresql.org/docs/current/protocol-message-formats.html. 
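One readability note on the failover condition in the ConnectConfig hunk above: since && binds tighter than ||, the new fc.TLSConfig != nil clause narrows only the invalid_authorization_specification case, so that error over a non-TLS attempt still falls through to the next candidate (the pg_hba.conf fix from the changelog). Restated as a standalone helper with explicit grouping, using the same PostgreSQL error codes:

// shouldAbortFallback mirrors the grouping of the rewritten condition.
func shouldAbortFallback(code string, usedTLS bool) bool {
	return code == "28P01" || // invalid_password
		(code == "28000" && usedTLS) || // invalid_authorization_specification, TLS attempts only
		code == "3D000" || // invalid_catalog_name
		code == "42501" // insufficient_privilege
}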
func (pgConn *PgConn) TxStatus() byte { diff --git a/vendor/github.com/jackc/pgservicefile/.travis.yml b/vendor/github.com/jackc/pgservicefile/.travis.yml deleted file mode 100644 index e176228e..00000000 --- a/vendor/github.com/jackc/pgservicefile/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go - -go: - - 1.x - - tip - -matrix: - allow_failures: - - go: tip diff --git a/vendor/github.com/jackc/pgservicefile/README.md b/vendor/github.com/jackc/pgservicefile/README.md index e50ca126..2fc7e012 100644 --- a/vendor/github.com/jackc/pgservicefile/README.md +++ b/vendor/github.com/jackc/pgservicefile/README.md @@ -1,5 +1,6 @@ -[![](https://godoc.org/github.com/jackc/pgservicefile?status.svg)](https://godoc.org/github.com/jackc/pgservicefile) -[![Build Status](https://travis-ci.org/jackc/pgservicefile.svg)](https://travis-ci.org/jackc/pgservicefile) +[![Go Reference](https://pkg.go.dev/badge/github.com/jackc/pgservicefile.svg)](https://pkg.go.dev/github.com/jackc/pgservicefile) +[![Build Status](https://github.com/jackc/pgservicefile/actions/workflows/ci.yml/badge.svg)](https://github.com/jackc/pgservicefile/actions/workflows/ci.yml) + # pgservicefile diff --git a/vendor/github.com/jackc/pgtype/CHANGELOG.md b/vendor/github.com/jackc/pgtype/CHANGELOG.md index a362a1df..9e141f77 100644 --- a/vendor/github.com/jackc/pgtype/CHANGELOG.md +++ b/vendor/github.com/jackc/pgtype/CHANGELOG.md @@ -1,3 +1,9 @@ +# 1.14.1 (January 12, 2024) + +* Backport fix numeric to string conversion for small negative values +* Fix EncodeValueText (horpto) +* Fix JSON.UnmarshalJSON to make copy of byte slice (horpto) + # 1.14.0 (February 11, 2023) * Fix: BC timestamp text format support (jozeflami) diff --git a/vendor/github.com/jackc/pgtype/array.go b/vendor/github.com/jackc/pgtype/array.go index 174007c1..4c06686b 100644 --- a/vendor/github.com/jackc/pgtype/array.go +++ b/vendor/github.com/jackc/pgtype/array.go @@ -285,7 +285,7 @@ func arrayParseQuotedValue(buf *bytes.Buffer) (string, bool, error) { return "", false, err } case '"': - r, _, err = buf.ReadRune() + _, _, err = buf.ReadRune() if err != nil { return "", false, err } diff --git a/vendor/github.com/jackc/pgtype/composite_type.go b/vendor/github.com/jackc/pgtype/composite_type.go index 32e0aa26..c17d6a51 100644 --- a/vendor/github.com/jackc/pgtype/composite_type.go +++ b/vendor/github.com/jackc/pgtype/composite_type.go @@ -598,12 +598,10 @@ func (b *CompositeBinaryBuilder) Finish() ([]byte, error) { } type CompositeTextBuilder struct { - ci *ConnInfo - buf []byte - startIdx int - fieldCount uint32 - err error - fieldBuf [32]byte + ci *ConnInfo + buf []byte + err error + fieldBuf [32]byte } func NewCompositeTextBuilder(ci *ConnInfo, buf []byte) *CompositeTextBuilder { diff --git a/vendor/github.com/jackc/pgtype/convert.go b/vendor/github.com/jackc/pgtype/convert.go index 377fe3ea..305491c1 100644 --- a/vendor/github.com/jackc/pgtype/convert.go +++ b/vendor/github.com/jackc/pgtype/convert.go @@ -240,12 +240,7 @@ func int64AssignTo(srcVal int64, srcStatus Status, dst interface{}) error { } *v = int32(srcVal) case *int64: - if srcVal < math.MinInt64 { - return fmt.Errorf("%d is less than minimum value for int64", srcVal) - } else if srcVal > math.MaxInt64 { - return fmt.Errorf("%d is greater than maximum value for int64", srcVal) - } - *v = int64(srcVal) + *v = srcVal case *uint: if srcVal < 0 { return fmt.Errorf("%d is less than zero for uint", srcVal) @@ -262,7 +257,7 @@ func int64AssignTo(srcVal int64, srcStatus Status, dst interface{}) error { *v = 
uint8(srcVal) case *uint16: if srcVal < 0 { - return fmt.Errorf("%d is less than zero for uint32", srcVal) + return fmt.Errorf("%d is less than zero for uint16", srcVal) } else if srcVal > math.MaxUint16 { return fmt.Errorf("%d is greater than maximum value for uint16", srcVal) } diff --git a/vendor/github.com/jackc/pgtype/database_sql.go b/vendor/github.com/jackc/pgtype/database_sql.go index 9d1cf822..d0e36f84 100644 --- a/vendor/github.com/jackc/pgtype/database_sql.go +++ b/vendor/github.com/jackc/pgtype/database_sql.go @@ -30,7 +30,8 @@ func DatabaseSQLValue(ci *ConnInfo, src Value) (interface{}, error) { } func EncodeValueText(src TextEncoder) (interface{}, error) { - buf, err := src.EncodeText(nil, make([]byte, 0, 32)) + var encBuf [36]byte + buf, err := src.EncodeText(nil, encBuf[:0]) if err != nil { return nil, err } diff --git a/vendor/github.com/jackc/pgtype/hstore.go b/vendor/github.com/jackc/pgtype/hstore.go index e42b7551..e91f3da8 100644 --- a/vendor/github.com/jackc/pgtype/hstore.go +++ b/vendor/github.com/jackc/pgtype/hstore.go @@ -416,7 +416,7 @@ func parseHstore(s string) (k []string, v []Text, err error) { case end: err = errors.New("Found EOS after ',', expecting space") case (unicode.IsSpace(r)): - r, end = p.Consume() + p.Consume() state = hsKey default: err = fmt.Errorf("Invalid character '%c' after ', ', expecting \"", r) diff --git a/vendor/github.com/jackc/pgtype/int8.go b/vendor/github.com/jackc/pgtype/int8.go index 0e089979..1c1fca77 100644 --- a/vendor/github.com/jackc/pgtype/int8.go +++ b/vendor/github.com/jackc/pgtype/int8.go @@ -50,12 +50,6 @@ func (dst *Int8) Set(src interface{}) error { } *dst = Int8{Int: int64(value), Status: Present} case int: - if int64(value) < math.MinInt64 { - return fmt.Errorf("%d is greater than maximum value for Int8", value) - } - if int64(value) > math.MaxInt64 { - return fmt.Errorf("%d is greater than maximum value for Int8", value) - } *dst = Int8{Int: int64(value), Status: Present} case uint: if uint64(value) > math.MaxInt64 { diff --git a/vendor/github.com/jackc/pgtype/json.go b/vendor/github.com/jackc/pgtype/json.go index a9508bdd..0aa0805c 100644 --- a/vendor/github.com/jackc/pgtype/json.go +++ b/vendor/github.com/jackc/pgtype/json.go @@ -202,8 +202,9 @@ func (dst *JSON) UnmarshalJSON(b []byte) error { if b == nil || string(b) == "null" { *dst = JSON{Status: Null} } else { - *dst = JSON{Bytes: b, Status: Present} + bCopy := make([]byte, len(b)) + copy(bCopy, b) + *dst = JSON{Bytes: bCopy, Status: Present} } return nil - } diff --git a/vendor/github.com/jackc/pgtype/numeric.go b/vendor/github.com/jackc/pgtype/numeric.go index 1f32b36b..5999eaf5 100644 --- a/vendor/github.com/jackc/pgtype/numeric.go +++ b/vendor/github.com/jackc/pgtype/numeric.go @@ -448,11 +448,15 @@ func (src *Numeric) toFloat64() (float64, error) { return math.Inf(-1), nil } + if src.Exp == 1 { + return float64(src.Int.Int64()), nil + } + buf := make([]byte, 0, 32) - buf = append(buf, src.Int.String()...) + buf = src.Int.Append(buf, 10) buf = append(buf, 'e') - buf = append(buf, strconv.FormatInt(int64(src.Exp), 10)...) 
+ buf = strconv.AppendInt(buf, int64(src.Exp), 10) f, err := strconv.ParseFloat(string(buf), 64) if err != nil { @@ -488,20 +492,20 @@ func (dst *Numeric) DecodeText(ci *ConnInfo, src []byte) error { } func parseNumericString(str string) (n *big.Int, exp int32, err error) { - parts := strings.SplitN(str, ".", 2) - digits := strings.Join(parts, "") + idx := strings.IndexByte(str, '.') - if len(parts) > 1 { - exp = int32(-len(parts[1])) - } else { - for len(digits) > 1 && digits[len(digits)-1] == '0' && digits[len(digits)-2] != '-' { - digits = digits[:len(digits)-1] + if idx == -1 { + for len(str) > 1 && str[len(str)-1] == '0' && str[len(str)-2] != '-' { + str = str[:len(str)-1] exp++ } + } else { + exp = int32(-(len(str) - idx - 1)) + str = str[:idx] + str[idx+1:] } accum := &big.Int{} - if _, ok := accum.SetString(digits, 10); !ok { + if _, ok := accum.SetString(str, 10); !ok { return nil, 0, fmt.Errorf("%s is not a number", str) } @@ -825,6 +829,12 @@ func encodeNumericText(n Numeric, buf []byte) (newBuf []byte, err error) { func (n Numeric) numberTextBytes() []byte { intStr := n.Int.String() buf := &bytes.Buffer{} + + if len(intStr) > 0 && intStr[:1] == "-" { + intStr = intStr[1:] + buf.WriteByte('-') + } + exp := int(n.Exp) if exp > 0 { buf.WriteString(intStr) diff --git a/vendor/github.com/jackc/pgtype/point.go b/vendor/github.com/jackc/pgtype/point.go index 0c799106..debc16e5 100644 --- a/vendor/github.com/jackc/pgtype/point.go +++ b/vendor/github.com/jackc/pgtype/point.go @@ -45,7 +45,7 @@ func (dst *Point) Set(src interface{}) error { } func parsePoint(src []byte) (*Point, error) { - if src == nil || bytes.Compare(src, []byte("null")) == 0 { + if src == nil || bytes.Equal(src, []byte("null")) { return &Point{Status: Null}, nil } diff --git a/vendor/github.com/jackc/pgtype/uuid.go b/vendor/github.com/jackc/pgtype/uuid.go index 6839c052..03cf23b2 100644 --- a/vendor/github.com/jackc/pgtype/uuid.go +++ b/vendor/github.com/jackc/pgtype/uuid.go @@ -84,7 +84,7 @@ func (src *UUID) AssignTo(dst interface{}) error { copy(*v, src.Bytes[:]) return nil case *string: - *v = encodeUUID(src.Bytes) + *v = string(encodeUUID(src.Bytes)) return nil default: if nextDst, retry := GetAssignToDstType(v); retry { @@ -100,28 +100,48 @@ func (src *UUID) AssignTo(dst interface{}) error { // parseUUID converts a string UUID in standard form to a byte array. func parseUUID(src string) (dst [16]byte, err error) { + var uuidBuf [32]byte + srcBuf := uuidBuf[:] + switch len(src) { case 36: - src = src[0:8] + src[9:13] + src[14:18] + src[19:23] + src[24:] + copy(srcBuf[0:8], src[:8]) + copy(srcBuf[8:12], src[9:13]) + copy(srcBuf[12:16], src[14:18]) + copy(srcBuf[16:20], src[19:23]) + copy(srcBuf[20:], src[24:]) case 32: // dashes already stripped, assume valid + copy(srcBuf, src) + default: // assume invalid. return dst, fmt.Errorf("cannot parse UUID %v", src) } - buf, err := hex.DecodeString(src) + _, err = hex.Decode(dst[:], srcBuf) if err != nil { return dst, err } - - copy(dst[:], buf) return dst, err } // encodeUUID converts a uuid byte array to UUID standard string form. 
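For reference, the rewritten parseNumericString above represents a value as coefficient * 10^exp without the old SplitN/Join allocations. A standalone sketch of the same logic with two worked inputs:

package main

import (
	"fmt"
	"math/big"
	"strings"
)

// parseNumeric mirrors the rewritten parser (a sketch, not the vendored code).
func parseNumeric(str string) (*big.Int, int32) {
	var exp int32
	if idx := strings.IndexByte(str, '.'); idx == -1 {
		// No fraction: fold trailing zeros into the exponent.
		for len(str) > 1 && str[len(str)-1] == '0' && str[len(str)-2] != '-' {
			str = str[:len(str)-1]
			exp++
		}
	} else {
		// Fraction: drop the dot and count its digits as a negative exponent.
		exp = int32(-(len(str) - idx - 1))
		str = str[:idx] + str[idx+1:]
	}
	n, _ := new(big.Int).SetString(str, 10)
	return n, exp
}

func main() {
	fmt.Println(parseNumeric("42.53")) // 4253 -2  (4253 * 10^-2)
	fmt.Println(parseNumeric("4200"))  // 42 2     (42 * 10^2)
}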
-func encodeUUID(src [16]byte) string { - return fmt.Sprintf("%x-%x-%x-%x-%x", src[0:4], src[4:6], src[6:8], src[8:10], src[10:16]) +func encodeUUID(src [16]byte) (dst []byte) { + var buf [36]byte + dst = buf[:] + + hex.Encode(dst, src[:4]) + buf[8] = '-' + hex.Encode(dst[9:13], src[4:6]) + buf[13] = '-' + hex.Encode(dst[14:18], src[6:8]) + buf[18] = '-' + hex.Encode(dst[19:23], src[8:10]) + buf[23] = '-' + hex.Encode(dst[24:], src[10:]) + + return } func (dst *UUID) DecodeText(ci *ConnInfo, src []byte) error { @@ -209,7 +229,7 @@ func (src UUID) MarshalJSON() ([]byte, error) { case Present: var buff bytes.Buffer buff.WriteByte('"') - buff.WriteString(encodeUUID(src.Bytes)) + buff.Write(encodeUUID(src.Bytes)) buff.WriteByte('"') return buff.Bytes(), nil case Null: @@ -221,7 +241,7 @@ func (src UUID) MarshalJSON() ([]byte, error) { } func (dst *UUID) UnmarshalJSON(src []byte) error { - if bytes.Compare(src, []byte("null")) == 0 { + if bytes.Equal(src, []byte("null")) { return dst.Set(nil) } if len(src) != 38 { diff --git a/vendor/github.com/klauspost/asmfmt/.gitignore b/vendor/github.com/klauspost/asmfmt/.gitignore deleted file mode 100644 index daf913b1..00000000 --- a/vendor/github.com/klauspost/asmfmt/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/klauspost/asmfmt/.goreleaser.yml b/vendor/github.com/klauspost/asmfmt/.goreleaser.yml deleted file mode 100644 index 1ce056a7..00000000 --- a/vendor/github.com/klauspost/asmfmt/.goreleaser.yml +++ /dev/null @@ -1,77 +0,0 @@ -builds: - - - id: "asmfmt" - binary: asmfmt - main: ./cmd/asmfmt/main.go - env: - - CGO_ENABLED=0 - flags: - - -ldflags=-s -w - - -trimpath - goos: - - aix - - linux - - freebsd - - netbsd - - windows - - darwin - goarch: - - 386 - - amd64 - - arm - - arm64 - - ppc64 - - ppc64le - - mips64 - - mips64le - goarm: - - 7 - -archives: - - - id: asmfmt - name_template: "asmfmt-{{ .Os }}_{{ .Arch }}_{{ .Version }}" - replacements: - aix: AIX - darwin: OSX - linux: Linux - windows: Windows - 386: i386 - amd64: x86_64 - freebsd: FreeBSD - netbsd: NetBSD - format_overrides: - - goos: windows - format: zip - files: - - LICENSE -checksum: - name_template: 'checksums.txt' -snapshot: - name_template: "{{ .Tag }}-next" -changelog: - sort: asc - filters: - exclude: - - '^doc:' - - '^docs:' - - '^test:' - - '^tests:' - - '^Update\sREADME.md' - -nfpms: - - - file_name_template: "asmfmt_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}" - vendor: Klaus Post - homepage: https://github.com/klauspost/asmfmt - maintainer: Klaus Post - description: asmfmt tool - license: BSD 3-Clause - formats: - - deb - - rpm - replacements: - darwin: Darwin - linux: Linux - freebsd: FreeBSD - amd64: x86_64 diff --git a/vendor/github.com/klauspost/asmfmt/LICENSE b/vendor/github.com/klauspost/asmfmt/LICENSE deleted file mode 100644 index 5cec7ee9..00000000 --- a/vendor/github.com/klauspost/asmfmt/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Klaus Post - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, 
publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
diff --git a/vendor/github.com/klauspost/asmfmt/README.md b/vendor/github.com/klauspost/asmfmt/README.md
deleted file mode 100644
index f81f4a5c..00000000
--- a/vendor/github.com/klauspost/asmfmt/README.md
+++ /dev/null
@@ -1,113 +0,0 @@
-# asmfmt
-Go Assembler Formatter
-
-This will format your assembler code in a similar way that `gofmt` formats your Go code.
-
-Read the introduction: [asmfmt: Go Assembler Formatter](https://blog.klauspost.com/asmfmt-assembler-formatter/)
-
-[![Go Reference](https://pkg.go.dev/badge/klauspost/asmfmt.svg)](https://pkg.go.dev/klauspost/asmfmt)
-[![Go](https://github.com/klauspost/asmfmt/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/asmfmt/actions/workflows/go.yml)
-
-See [Example 1](https://files.klauspost.com/diff.html), [Example 2](https://files.klauspost.com/diff2.html), [Example 3](https://files.klauspost.com/diff3.html), or compare files in the [testdata folder](https://github.com/klauspost/asmfmt/tree/master/testdata).
-
-Status: STABLE. The format will only change if bugs are found. Please report any feedback in the issue section.
-
-# install
-
-Binaries can be downloaded from [Releases](https://github.com/klauspost/asmfmt/releases). Unpack the file into your executable path.
-
-To install the standalone formatter from source using the Go toolchain: `go install github.com/klauspost/asmfmt/cmd/asmfmt@latest`.
-
-# updates
-
-* Apr 8, 2021: Add modules info and remove tools other than the main one.
-* Jan 6, 2021: Fix C comments before line comments like `VPCMPEQB Y8/*(DI)*/, Y0, Y1 // comment...`
-* Aug 8, 2016: Don't indent comments before non-indented instruction.
-* Jun 10, 2016: Fixed crash with end-of-line comments that contained an end-of-block `/*` part.
-* Apr 14, 2016: Fix end of multiline comments in macro definitions.
-* Apr 14, 2016: Updated tools to Go 1.5+
-* Dec 21, 2015: Space before semi-colons in macro definitions is now trimmed.
-* Dec 21, 2015: Fix line comments in macro definitions (only valid with Go 1.5).
-* Dec 17, 2015: Comments are better aligned to the following section.
-* Dec 17, 2015: Clean semi-colons in multiple instructions per line.
-
-# goland
-
-To set up a custom File Watcher in Goland,
-
-* Go to Settings -> Tools -> File Watchers
-* Press **+** and choose the `<custom>` template.
-* Name it `asmfmt`
-* File Type, Select `x86 Plan 9 Assembly file` (it will apply to all platforms)
-* Scope: `Project Files`
-* Arguments: `$FilePath$`.
-* Output Paths to Refresh: `$FilePath$`
-* Working Directory: `$ProjectFileDir$`
-
-Advanced options, Enable:
-
-* [x] Trigger the watcher regardless of syntax errors (IMPORTANT)
-* [x] Create output file from stdout
-
-Disable the rest.
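(Editor's aside: besides the CLI described above, the package being removed here also exposes a one-function library API, `Format(in io.Reader) ([]byte, error)`; see the deleted asmfmt.go further down. A minimal sketch of calling it directly; the sample assembly input is invented:)

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/klauspost/asmfmt"
)

func main() {
	// Deliberately messy input: stray semicolon, uneven spacing.
	src := []byte("TEXT ·add(SB), $0-24\nMOVQ  a+0(FP),AX ;\nRET\n")
	out, err := asmfmt.Format(bytes.NewReader(src))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", out) // prints the canonically formatted assembly
}
```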
- -![Goland Configuration](https://user-images.githubusercontent.com/5663952/114158973-96eebc80-9925-11eb-9aea-703ce474a7bb.png) - - -# emacs - -To automatically format assembler, in `.emacs` add: - -``` -(defun asm-mode-setup () - (set (make-local-variable 'gofmt-command) "asmfmt") - (add-hook 'before-save-hook 'gofmt nil t) -) - -(add-hook 'asm-mode-hook 'asm-mode-setup) -``` - -# usage - -`asmfmt [flags] [path ...]` - -The flags are similar to `gofmt`, except it will only process `.s` files: -``` - -d - Do not print reformatted sources to standard output. - If a file's formatting is different than asmfmt's, print diffs - to standard output. - -e - Print all (including spurious) errors. - -l - Do not print reformatted sources to standard output. - If a file's formatting is different from asmfmt's, print its name - to standard output. - -w - Do not print reformatted sources to standard output. - If a file's formatting is different from asmfmt's, overwrite it - with asmfmt's version. -``` -You should only run `asmfmt` on files that are assembler files. Assembler files cannot be positively identified, so it will mangle non-assembler files. - -# formatting - -* Automatic indentation. -* It uses tabs for indentation and blanks for alignment. -* It will remove trailing whitespace. -* It will align the first parameter. -* It will align all comments in a block. -* It will eliminate multiple blank lines. -* Removes `;` at end of line. -* Forced newline before comments, except when preceded by label or another comment. -* Forced newline before labels, except when preceded by comment. -* Labels are on a separate lines, except for comments. -* Retains block breaks (newline between blocks). -* It will convert single line block comments to line comments. -* Line comments have a space after `//`, except if comment starts with `+`. -* There is always a space between parameters. -* Macros in the same file are tracked, and not included in parameter indentation. -* `TEXT`, `DATA` and `GLOBL`, `FUNCDATA`, `PCDATA` and labels are level 0 indentation. -* Aligns `\` in multiline macros. -* Whitespace before separating `;` is removed. Space is inserted after, if followed by another instruction. - diff --git a/vendor/github.com/klauspost/asmfmt/asmfmt.go b/vendor/github.com/klauspost/asmfmt/asmfmt.go deleted file mode 100644 index d191745b..00000000 --- a/vendor/github.com/klauspost/asmfmt/asmfmt.go +++ /dev/null @@ -1,652 +0,0 @@ -package asmfmt - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strings" - "unicode" -) - -// Format the input and return the formatted data. -// If any error is encountered, no data will be returned. -func Format(in io.Reader) ([]byte, error) { - src := bufio.NewReaderSize(in, 512<<10) - dst := &bytes.Buffer{} - state := fstate{out: dst, defines: make(map[string]struct{})} - for { - data, _, err := src.ReadLine() - if err == io.EOF { - state.flush() - break - } - if err != nil { - return nil, err - } - err = state.addLine(data) - if err != nil { - return nil, err - } - } - return dst.Bytes(), nil -} - -type fstate struct { - out *bytes.Buffer - insideBlock bool // Block comment - indentation int // Indentation level - lastEmpty bool - lastComment bool - lastStar bool // Block comment, last line started with a star. 
- lastLabel bool - anyContents bool - lastContinued bool // Last line continued - queued []statement - comments []string - defines map[string]struct{} -} - -type statement struct { - instruction string - params []string // Parameters - comment string // Without slashes - function bool // Probably define call - continued bool // Multiline statement, continues on next line - contComment bool // Multiline statement, comment only -} - -// Add a new input line. -// Since you are looking at ths code: -// This code has grown over a considerable amount of time, -// and deserves a rewrite with proper parsing instead of this hodgepodge. -// Its output is stable, and could be used as reference for a rewrite. -func (f *fstate) addLine(b []byte) error { - if bytes.Contains(b, []byte{0}) { - return fmt.Errorf("zero (0) byte in input. file is unlikely an assembler file") - } - s := string(b) - // Inside block comment - if f.insideBlock { - defer func() { - f.lastComment = true - }() - if strings.Contains(s, "*/") { - ends := strings.Index(s, "*/") - end := s[:ends] - if strings.HasPrefix(strings.TrimSpace(s), "*") && f.lastStar { - end = strings.TrimSpace(end) + " " - } - end = end + "*/" - f.insideBlock = false - s = strings.TrimSpace(s[ends+2:]) - if strings.HasSuffix(s, "\\") { - end = end + " \\" - if len(s) == 1 { - s = "" - } - } - f.out.WriteString(end + "\n") - if len(s) == 0 { - return nil - } - } else { - // Insert a space on lines that begin with '*' - if strings.HasPrefix(strings.TrimSpace(s), "*") { - s = strings.TrimSpace(s) - f.out.WriteByte(' ') - f.lastStar = true - } else { - f.lastStar = false - } - fmt.Fprintln(f.out, s) - return nil - } - } - s = strings.TrimSpace(s) - - // Comment is the the only line content. - if strings.HasPrefix(s, "//") { - // Non-comment content is now added. - defer func() { - f.anyContents = true - f.lastEmpty = false - f.lastStar = false - }() - - s = strings.TrimPrefix(s, "//") - if len(f.queued) > 0 { - f.flush() - } - // Newline before comments - if len(f.comments) == 0 { - f.newLine() - } - - // Preserve whitespace if the first character after the comment - // is a whitespace - ts := strings.TrimSpace(s) - var q string - if (ts != s && len(ts) > 0) || (len(s) > 0 && strings.ContainsAny(string(s[0]), `+/`)) || (len(s) >= 8 && s[:8] == "go:build") { - q = fmt.Sprint("//" + s) - } else if len(ts) > 0 { - // Insert a space before the comment - q = fmt.Sprint("// " + s) - } else { - q = fmt.Sprint("//") - } - f.comments = append(f.comments, q) - f.lastComment = true - return nil - } - - // Handle end-of blockcomments. - if strings.Contains(s, "/*") && !strings.HasSuffix(s, `\`) { - starts := strings.Index(s, "/*") - ends := strings.Index(s, "*/") - lineComment := strings.Index(s, "//") - if lineComment >= 0 { - if lineComment < starts { - goto exitcomm - } - if lineComment < ends && !f.insideBlock { - goto exitcomm - } - if ends > starts && ends < lineComment { - // If there is something left between the end and the line comment, keep it. - if len(strings.TrimSpace(s[ends:lineComment])) > 0 { - goto exitcomm - } - } - } - pre := s[:starts] - pre = strings.TrimSpace(pre) - - if len(pre) > 0 { - if strings.HasSuffix(s, `\`) { - goto exitcomm - } - // Add items before the comment section as a line. 
- if ends > starts && ends >= len(s)-2 { - comm := strings.TrimSpace(s[starts+2 : ends]) - return f.addLine([]byte(pre + " //" + comm)) - } - err := f.addLine([]byte(pre)) - if err != nil { - return err - } - } - - f.flush() - - // Convert single line /* comment */ to // Comment - if ends > starts && ends >= len(s)-2 { - return f.addLine([]byte("// " + strings.TrimSpace(s[starts+2:ends]))) - } - - // Comments inside multiline defines. - if strings.HasSuffix(s, `\`) { - f.indent() - s = strings.TrimSpace(strings.TrimSuffix(s, `\`)) + ` \` - } - - // Otherwise output - fmt.Fprint(f.out, "/*") - s = strings.TrimSpace(s[starts+2:]) - f.insideBlock = ends < 0 - f.lastComment = true - f.lastStar = true - if len(s) == 0 { - f.out.WriteByte('\n') - return nil - } - f.out.WriteByte(' ') - f.out.WriteString(s + "\n") - return nil - } -exitcomm: - - if len(s) == 0 { - f.flush() - - // No more than two empty lines in a row - // cannot start with NL - if f.lastEmpty || !f.anyContents { - return nil - } - if f.lastContinued { - f.indentation = 0 - f.lastContinued = false - } - f.lastEmpty = true - return f.out.WriteByte('\n') - } - - // Non-comment content is now added. - defer func() { - f.anyContents = true - f.lastEmpty = false - f.lastStar = false - f.lastComment = false - }() - - st := newStatement(s, f.defines) - if st == nil { - return nil - } - if def := st.define(); def != "" { - f.defines[def] = struct{}{} - } - if st.instruction == "package" { - if _, ok := f.defines["package"]; !ok { - return fmt.Errorf("package instruction found. Go files are not supported") - } - } - - // Move anything that isn't a comment to the next line - if st.isLabel() && len(st.params) > 0 && !st.continued { - idx := strings.Index(s, ":") - st = newStatement(s[:idx+1], f.defines) - defer f.addLine([]byte(s[idx+1:])) - } - - // Should this line be at level 0? - if st.level0() && !(st.continued && f.lastContinued) { - if st.isTEXT() && len(f.queued) == 0 && len(f.comments) > 0 { - f.indentation = 0 - } - f.flush() - - // Add newline before jump target. - f.newLine() - - f.indentation = 0 - f.queued = append(f.queued, *st) - f.flush() - - if !st.isPreProcessor() && !st.isGlobal() { - f.indentation = 1 - } - f.lastLabel = true - return nil - } - - defer func() { - f.lastLabel = false - }() - f.queued = append(f.queued, *st) - if st.isTerminator() || (f.lastContinued && !st.continued) { - // Terminators should always be at level 1 - f.indentation = 1 - f.flush() - f.indentation = 0 - } else if st.isCommand() { - // handles cases where a JMP/RET isn't a terminator - f.indentation = 1 - } - f.lastContinued = st.continued - return nil -} - -// indent the current line with current indentation. -func (f *fstate) indent() { - for i := 0; i < f.indentation; i++ { - f.out.WriteByte('\t') - } -} - -// flush any queued comments and commands -func (f *fstate) flush() { - for _, line := range f.comments { - f.indent() - fmt.Fprintln(f.out, line) - } - f.comments = nil - s := formatStatements(f.queued) - for _, line := range s { - f.indent() - fmt.Fprintln(f.out, line) - } - f.queued = nil -} - -// Add a newline, unless last line was empty or a comment -func (f *fstate) newLine() { - // Always newline before comment-only line. - if !f.lastEmpty && !f.lastComment && !f.lastLabel && f.anyContents { - f.out.WriteByte('\n') - } -} - -// newStatement will parse a line and return it as a statement. -// Will return nil if the line is empty after whitespace removal. 
-func newStatement(s string, defs map[string]struct{}) *statement { - s = strings.TrimSpace(s) - st := statement{} - - // Fix where a comment start if any - // We need to make sure that the comment isn't embedded in a string literal - startcom := strings.Index(s, "//") - startstr := strings.Index(s, "\"") - for endstr := 0; startcom > startstr && startstr > endstr; { - // This does not check for any escaping (i.e. "\"") - endstr = startstr + 1 + strings.Index(s[startstr+1:], "\"") - startcom = endstr + strings.Index(s[endstr:], "//") - if startcom < endstr { - startcom = 0 - } - startstr = endstr + 1 + strings.Index(s[endstr+1:], "\"") - } - if startcom > 0 { - st.comment = strings.TrimSpace(s[startcom+2:]) - s = strings.TrimSpace(s[:startcom]) - } - - // Split into fields - fields := strings.Fields(s) - if len(fields) < 1 { - return nil - } - st.instruction = fields[0] - - // Handle defined macro calls - if len(defs) > 0 { - inst := strings.Split(st.instruction, "(")[0] - if _, ok := defs[inst]; ok { - st.function = true - } - } - if strings.HasPrefix(s, "/*") { - st.function = true - } - // We may not have it defined as a macro, if defined in an external - // .h file, so we try to detect the remaining ones. - if strings.ContainsAny(st.instruction, "(_") { - st.function = true - } - if len(st.params) > 0 && strings.HasPrefix(st.params[0], "(") { - st.function = true - } - if st.function { - st.instruction = s - } - - if st.instruction == "\\" && len(st.comment) > 0 { - st.instruction = fmt.Sprintf("\\ // %s", st.comment) - st.comment = "" - st.function = true - st.continued = true - st.contComment = true - } - - s = strings.TrimPrefix(s, st.instruction) - st.instruction = strings.Replace(st.instruction, "\t", " ", -1) - s = strings.TrimSpace(s) - - st.setParams(s) - - // Remove trailing ; - if len(st.params) > 0 { - st.params[len(st.params)-1] = strings.TrimSuffix(st.params[len(st.params)-1], ";") - } else { - st.instruction = strings.TrimSuffix(st.instruction, ";") - } - - // Register line continuations. - if len(st.params) > 0 { - p := st.params[len(st.params)-1] - if st.willContinue() { - p = strings.TrimSuffix(st.params[len(st.params)-1], `\`) - p = strings.TrimSpace(p) - if len(p) > 0 { - st.params[len(st.params)-1] = p - } else { - st.params = st.params[:len(st.params)-1] - } - st.continued = true - } - } - if strings.HasSuffix(st.instruction, `\`) && !st.contComment { - i := strings.TrimSuffix(st.instruction, `\`) - st.instruction = strings.TrimSpace(i) - st.continued = true - } - - if len(st.params) == 0 && !st.isLabel() { - st.function = true - } - - return &st -} - -// setParams will add the string given as parameters. -// Inline comments are retained. -// There will be a space after ",", unless inside a comment. -// A tab is replaced by a space for consistent indentation. 
-func (st *statement) setParams(s string) { - st.params = make([]string, 0) - runes := []rune(s) - last := '\n' - inComment := false - inStringLiteral := false - inCharLiteral := false - out := make([]rune, 0, len(runes)) - for _, r := range runes { - switch r { - case '"': - if last != '\\' && inStringLiteral { - inStringLiteral = false - } else if last != '\\' && !inStringLiteral { - inStringLiteral = true - } - case '\'': - if last != '\\' && inCharLiteral { - inCharLiteral = false - } else if last != '\\' && !inCharLiteral { - inCharLiteral = true - } - case ',': - if inComment || inStringLiteral || inCharLiteral { - break - } - c := strings.TrimSpace(string(out)) - if len(c) > 0 { - st.params = append(st.params, c) - } - out = out[0:0] - continue - case '/': - if last == '*' && inComment { - inComment = false - } - case '*': - if last == '/' { - inComment = true - } - case '\t': - if !st.isPreProcessor() { - r = ' ' - } - case ';': - if inComment || inStringLiteral || inCharLiteral { - break - } - out = []rune(strings.TrimSpace(string(out)) + "; ") - last = r - continue - } - if last == ';' && unicode.IsSpace(r) { - continue - } - last = r - out = append(out, r) - } - c := strings.TrimSpace(string(out)) - if len(c) > 0 { - st.params = append(st.params, c) - } -} - -// Return true if this line should be at indentation level 0. -func (st statement) level0() bool { - return st.isLabel() || st.isTEXT() || st.isPreProcessor() -} - -// Will return true if the statement is a label. -func (st statement) isLabel() bool { - return strings.HasSuffix(st.instruction, ":") -} - -// isPreProcessor will return if the statement is a preprocessor statement. -func (st statement) isPreProcessor() bool { - return strings.HasPrefix(st.instruction, "#") -} - -// isGlobal returns true if the current instruction is -// a global. Currently that is DATA, GLOBL, FUNCDATA and PCDATA -func (st statement) isGlobal() bool { - up := strings.ToUpper(st.instruction) - switch up { - case "DATA", "GLOBL", "FUNCDATA", "PCDATA": - return true - default: - return false - } -} - -// isTEXT returns true if the instruction is "TEXT" -// or one of the "isGlobal" types -func (st statement) isTEXT() bool { - up := strings.ToUpper(st.instruction) - return up == "TEXT" || st.isGlobal() -} - -// We attempt to identify "terminators", after which -// indentation is likely to be level 0. -func (st statement) isTerminator() bool { - up := strings.ToUpper(st.instruction) - return up == "RET" || up == "JMP" -} - -// Detects commands based on case. -func (st statement) isCommand() bool { - if st.isLabel() { - return false - } - up := strings.ToUpper(st.instruction) - return up == st.instruction -} - -// Detect if last character is '\', indicating a multiline statement. -func (st statement) willContinue() bool { - if st.continued { - return true - } - if len(st.params) == 0 { - return false - } - return strings.HasSuffix(st.params[len(st.params)-1], `\`) -} - -// define returns the macro defined in this line. -// if none is defined "" is returned. 
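// (Editor's aside: the define() helper below is the other half of the
// formatter's macro tracking. Every "#define FOO(x) ..." line registers
// "FOO" in fstate.defines, and newStatement above then marks later
// "FOO(...)" lines as function-style statements, which formatStatements
// excludes from parameter alignment.)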
-func (st statement) define() string { - if st.instruction == "#define" && len(st.params) > 0 { - r := strings.TrimSpace(strings.Split(st.params[0], "(")[0]) - r = strings.Trim(r, `\`) - return r - } - return "" -} - -func (st *statement) cleanParams() { - // Remove whitespace before semicolons - if strings.HasSuffix(st.instruction, ";") { - s := strings.TrimSuffix(st.instruction, ";") - st.instruction = strings.TrimSpace(s) + ";" - } -} - -// formatStatements will format a slice of statements and return each line -// as a separate string. -// Comments and line-continuation (\) are aligned with spaces. -func formatStatements(s []statement) []string { - res := make([]string, len(s)) - maxParam := 0 // Length of longest parameter - maxInstr := 0 // Length of longest instruction WITH parameters. - maxAlone := 0 // Length of longest instruction without parameters. - for i, x := range s { - // Clean up and store - x.cleanParams() - s[i] = x - - il := len([]rune(x.instruction)) + 1 // Instruction length - l := il - // Ignore length if we are a define "function" - // or we are a parameterless instruction. - if l > maxInstr && !x.function && !(x.isCommand() && len(x.params) == 0) { - maxInstr = l - } - if x.function && il > maxAlone { - maxAlone = il - } - if len(x.params) > 1 { - l = 2 * (len(x.params) - 1) // Spaces between parameters - } else { - l = 0 - } - // Add parameters - for _, y := range x.params { - l += len([]rune(y)) - } - l++ - if l > maxParam { - maxParam = l - } - } - - maxParam += maxInstr - if maxInstr == 0 { - maxInstr = maxAlone - } - - for i, x := range s { - r := x.instruction - if x.contComment { - res[i] = x.instruction - continue - } - p := strings.Join(x.params, ", ") - if len(x.params) > 0 || len(x.comment) > 0 { - for len(r) < maxInstr { - r += " " - } - } - r = r + p - if len(x.comment) > 0 && !x.continued { - it := maxParam - len([]rune(r)) - for i := 0; i < it; i++ { - r = r + " " - } - r += fmt.Sprintf("// %s", x.comment) - } - - if x.continued { - // Find continuation placement. - it := maxParam - len([]rune(r)) - if maxAlone > maxParam { - it = maxAlone - len([]rune(r)) - } - for i := 0; i < it; i++ { - r = r + " " - } - r += `\` - // Add comment, if any. - if len(x.comment) > 0 { - r += " // " + x.comment - } - } - res[i] = r - } - return res -} diff --git a/vendor/github.com/klauspost/asmfmt/cmd/asmfmt/LICENSE b/vendor/github.com/klauspost/asmfmt/cmd/asmfmt/LICENSE deleted file mode 100644 index 74487567..00000000 --- a/vendor/github.com/klauspost/asmfmt/cmd/asmfmt/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/klauspost/asmfmt/cmd/asmfmt/doc.go b/vendor/github.com/klauspost/asmfmt/cmd/asmfmt/doc.go deleted file mode 100644 index f0ce966f..00000000 --- a/vendor/github.com/klauspost/asmfmt/cmd/asmfmt/doc.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Asmfmt formats Go Assembler files. -It uses tabs for indentation and blanks for alignment. - -Without an explicit path, it processes the standard input. Given a file, -it operates on that file; given a directory, it operates on all .go files in -that directory, recursively. (Files starting with a period are ignored.) -By default, asmfmt prints the reformatted sources to standard output. - -Usage: - asmfmt [flags] [path ...] - -The flags are: - -d - Do not print reformatted sources to standard output. - If a file's formatting is different than asmfmt's, print diffs - to standard output. - -e - Print all (including spurious) errors. - -l - Do not print reformatted sources to standard output. - If a file's formatting is different from asmfmt's, print its name - to standard output. - -w - Do not print reformatted sources to standard output. - If a file's formatting is different from asmfmt's, overwrite it - with asmfmt's version. - -Debugging support: - -cpuprofile filename - Write cpu profile to the specified file. - - -When asmfmt reads from standard input, it accepts either a full Assembler file -or a program fragment. A program fragment must be a syntactically -valid declaration list, statement list, or expression. - -*/ -package main diff --git a/vendor/github.com/klauspost/asmfmt/cmd/asmfmt/main.go b/vendor/github.com/klauspost/asmfmt/cmd/asmfmt/main.go deleted file mode 100644 index 1d68a3c2..00000000 --- a/vendor/github.com/klauspost/asmfmt/cmd/asmfmt/main.go +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
-// Modified by Klaus Post 2015 for asmfmt -package main - -import ( - "bytes" - "flag" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "runtime/pprof" - "strings" - - "github.com/klauspost/asmfmt" -) - -var ( - // main operation modes - list = flag.Bool("l", false, "list files whose formatting differs from asmfmt's") - write = flag.Bool("w", false, "write result to (source) file instead of stdout") - doDiff = flag.Bool("d", false, "display diffs instead of rewriting files") - allErrors = flag.Bool("e", false, "report all errors (not just the first 10 on different lines)") - - // debugging - cpuprofile = flag.String("cpuprofile", "", "write cpu profile to this file") -) - -const ( - tabWidth = 8 -) - -var ( - exitCode = 0 - errors = 0 -) - -func report(err error) { - fmt.Fprintln(os.Stderr, err) - errors++ - if !*allErrors && errors >= 10 { - os.Exit(2) - } - exitCode = 2 -} - -func usage() { - fmt.Fprintf(os.Stderr, "usage: asmfmt [flags] [path ...]\n") - flag.PrintDefaults() - os.Exit(2) -} - -func isAsmFile(f os.FileInfo) bool { - // ignore non-Asm files - name := f.Name() - return !f.IsDir() && !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".s") -} - -// If in == nil, the source is the contents of the file with the given filename. -func processFile(filename string, in io.Reader, out io.Writer, stdin bool) error { - if in == nil { - f, err := os.Open(filename) - if err != nil { - return err - } - defer f.Close() - in = f - } - - src, err := ioutil.ReadAll(in) - if err != nil { - return err - } - - res, err := asmfmt.Format(bytes.NewBuffer(src)) - if err != nil { - return err - } - - if !bytes.Equal(src, res) { - // formatting has changed - if *list { - fmt.Fprintln(out, filename) - } - if *write { - err = ioutil.WriteFile(filename, res, 0644) - if err != nil { - return err - } - } - if *doDiff { - data, err := diff(src, res) - if err != nil { - return fmt.Errorf("computing diff: %s", err) - } - fmt.Printf("diff %s asmfmt/%s\n", filename, filename) - out.Write(data) - } - } - - if !*list && !*write && !*doDiff { - _, err = out.Write(res) - } - - return err -} - -func visitFile(path string, f os.FileInfo, err error) error { - if err == nil && isAsmFile(f) { - err = processFile(path, nil, os.Stdout, false) - } - if err != nil { - report(err) - } - return nil -} - -func walkDir(path string) { - filepath.Walk(path, visitFile) -} - -func main() { - // call gofmtMain in a separate function - // so that it can use defer and have them - // run before the exit. 
- gofmtMain() - os.Exit(exitCode) -} - -func gofmtMain() { - flag.Usage = usage - flag.Parse() - - if *cpuprofile != "" { - f, err := os.Create(*cpuprofile) - if err != nil { - fmt.Fprintf(os.Stderr, "creating cpu profile: %s\n", err) - exitCode = 2 - return - } - defer f.Close() - pprof.StartCPUProfile(f) - defer pprof.StopCPUProfile() - } - - if flag.NArg() == 0 { - if *write { - fmt.Fprintln(os.Stderr, "error: cannot use -w with standard input") - exitCode = 2 - return - } - if err := processFile("", os.Stdin, os.Stdout, true); err != nil { - report(err) - } - return - } - - for i := 0; i < flag.NArg(); i++ { - path := flag.Arg(i) - switch dir, err := os.Stat(path); { - case err != nil: - report(err) - case dir.IsDir(): - walkDir(path) - default: - if err := processFile(path, nil, os.Stdout, false); err != nil { - report(err) - } - } - } -} - -func diff(b1, b2 []byte) (data []byte, err error) { - f1, err := ioutil.TempFile("", "asmfmt") - if err != nil { - return - } - defer os.Remove(f1.Name()) - defer f1.Close() - - f2, err := ioutil.TempFile("", "asmfmt") - if err != nil { - return - } - defer os.Remove(f2.Name()) - defer f2.Close() - - f1.Write(b1) - f2.Write(b2) - - data, err = exec.Command("diff", "-u", f1.Name(), f2.Name()).CombinedOutput() - if len(data) > 0 { - // diff exits with a non-zero status when the files don't match. - // Ignore that failure as long as we get output. - err = nil - } - return - -} diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml index a2bf06e9..4c28dff4 100644 --- a/vendor/github.com/klauspost/compress/.goreleaser.yml +++ b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -3,7 +3,7 @@ before: hooks: - ./gen.sh - - go install mvdan.cc/garble@v0.7.2 + - go install mvdan.cc/garble@v0.10.1 builds: - @@ -92,16 +92,7 @@ builds: archives: - id: s2-binaries - name_template: "s2-{{ .Os }}_{{ .Arch }}_{{ .Version }}" - replacements: - aix: AIX - darwin: OSX - linux: Linux - windows: Windows - 386: i386 - amd64: x86_64 - freebsd: FreeBSD - netbsd: NetBSD + name_template: "s2-{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" format_overrides: - goos: windows format: zip @@ -125,7 +116,7 @@ changelog: nfpms: - - file_name_template: "s2_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}" + file_name_template: "s2_package__{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" vendor: Klaus Post homepage: https://github.com/klauspost/compress maintainer: Klaus Post @@ -134,8 +125,3 @@ nfpms: formats: - deb - rpm - replacements: - darwin: Darwin - linux: Linux - freebsd: FreeBSD - amd64: x86_64 diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 63f2cd5b..c918f11d 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -16,6 +16,81 @@ This package provides various compression algorithms. 
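(Editor's aside: for orientation, a hedged sketch of a round trip through one of those algorithms, the `zstd` sub-package; the calls are the package's documented buffer-to-buffer EncodeAll/DecodeAll helpers, the payload is invented:)

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// nil writer/reader: only the buffer-to-buffer helpers are used.
	enc, _ := zstd.NewWriter(nil)
	defer enc.Close()
	dec, _ := zstd.NewReader(nil)
	defer dec.Close()

	payload := bytes.Repeat([]byte("compressible "), 64)
	compressed := enc.EncodeAll(payload, nil)
	restored, err := dec.DecodeAll(compressed, nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d -> %d bytes, round-trip ok: %v\n",
		len(payload), len(compressed), bytes.Equal(payload, restored))
}
```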
# changelog +* Dec 1st, 2023 - [v1.17.4](https://github.com/klauspost/compress/releases/tag/v1.17.4) + * huff0: Speed up symbol counting by @greatroar in https://github.com/klauspost/compress/pull/887 + * huff0: Remove byteReader by @greatroar in https://github.com/klauspost/compress/pull/886 + * gzhttp: Allow overriding decompression on transport https://github.com/klauspost/compress/pull/892 + * gzhttp: Clamp compression level https://github.com/klauspost/compress/pull/890 + * gzip: Error out if reserved bits are set https://github.com/klauspost/compress/pull/891 + +* Nov 15th, 2023 - [v1.17.3](https://github.com/klauspost/compress/releases/tag/v1.17.3) + * fse: Fix max header size https://github.com/klauspost/compress/pull/881 + * zstd: Improve better/best compression https://github.com/klauspost/compress/pull/877 + * gzhttp: Fix missing content type on Close https://github.com/klauspost/compress/pull/883 + +* Oct 22nd, 2023 - [v1.17.2](https://github.com/klauspost/compress/releases/tag/v1.17.2) + * zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876 + +* Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1) + * s2: Fix S2 "best" dictionary wrong encoding by @klauspost in https://github.com/klauspost/compress/pull/871 + * flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869 + * s2: Fix EstimateBlockSize on 6&7 length input by @klauspost in https://github.com/klauspost/compress/pull/867 + +* Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0) + * Add experimental dictionary builder https://github.com/klauspost/compress/pull/853 + * Add xerial snappy read/writer https://github.com/klauspost/compress/pull/838 + * flate: Add limited window compression https://github.com/klauspost/compress/pull/843 + * s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839 + * flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837 + * gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860 + +* July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7) + * zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829 + * s2: add GetBufferCapacity() method by @GiedriusS in https://github.com/klauspost/compress/pull/832 + +* June 13, 2023 - [v1.16.6](https://github.com/klauspost/compress/releases/tag/v1.16.6) + * zstd: correctly ignore WithEncoderPadding(1) by @ianlancetaylor in https://github.com/klauspost/compress/pull/806 + * zstd: Add amd64 match length assembly https://github.com/klauspost/compress/pull/824 + * gzhttp: Handle informational headers by @rtribotte in https://github.com/klauspost/compress/pull/815 + * s2: Improve Better compression slightly https://github.com/klauspost/compress/pull/663 + +* Apr 16, 2023 - [v1.16.5](https://github.com/klauspost/compress/releases/tag/v1.16.5) + * zstd: readByte needs to use io.ReadFull by @jnoxon in https://github.com/klauspost/compress/pull/802 + * gzip: Fix WriterTo after initial read https://github.com/klauspost/compress/pull/804 + +* Apr 5, 2023 - [v1.16.4](https://github.com/klauspost/compress/releases/tag/v1.16.4) + * zstd: Improve zstd best efficiency by @greatroar and @klauspost in https://github.com/klauspost/compress/pull/784 + * zstd: Respect WithAllLitEntropyCompression 
https://github.com/klauspost/compress/pull/792
+ * zstd: Fix amd64 not always detecting corrupt data https://github.com/klauspost/compress/pull/785
+ * zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795
+ * s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779
+ * s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780
+ * gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799
+
+* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1)
+ * zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776
+ * gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767
+ * s2: Add Intel LZ4s converter https://github.com/klauspost/compress/pull/766
+ * zstd: Minor bug fixes https://github.com/klauspost/compress/pull/771 https://github.com/klauspost/compress/pull/772 https://github.com/klauspost/compress/pull/773
+ * huff0: Speed up compress1xDo by @greatroar in https://github.com/klauspost/compress/pull/774
+
+* Feb 26, 2023 - [v1.16.0](https://github.com/klauspost/compress/releases/tag/v1.16.0)
+ * s2: Add [Dictionary](https://github.com/klauspost/compress/tree/master/s2#dictionaries) support. https://github.com/klauspost/compress/pull/685
+ * s2: Add Compression Size Estimate. https://github.com/klauspost/compress/pull/752
+ * s2: Add support for custom stream encoder. https://github.com/klauspost/compress/pull/755
+ * s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748
+ * s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747
+ * s2c/s2sx: Use concurrent decoding. https://github.com/klauspost/compress/pull/746
+
+<details>
+ <summary>See changes to v1.15.x</summary>
+
+* Jan 21st, 2023 (v1.15.15)
+ * deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739
+ * zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728
+ * zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745
+ * gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740
+
* Jan 3rd, 2023 (v1.15.14)
 * flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718
@@ -136,6 +211,8 @@ Stream decompression is now faster on asynchronous, since the goroutine allocati

While the release has been extensively tested, it is recommended to test when upgrading.

+</details>
+
<details>
 <summary>See changes to v1.14.x</summary>
@@ -489,7 +566,7 @@ For direct deflate use, NewStatelessWriter and StatelessDeflate are available. S
A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer:

-```
+```go
// replace 'ioutil.Discard' with your output.
gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression)
if err != nil {
@@ -594,6 +671,10 @@ Here are other packages of good quality and pure Go (no cgo wrappers or autoconv
* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression.
* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression.
* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer.
+* [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression.
+* [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression.
+* [github.com/minio/zipindex](https://github.com/minio/zipindex) - External ZIP directory index.
+* [github.com/ybirader/pzip](https://github.com/ybirader/pzip) - Fast concurrent zip archiver and extractor.

# license
diff --git a/vendor/github.com/klauspost/compress/SECURITY.md b/vendor/github.com/klauspost/compress/SECURITY.md
new file mode 100644
index 00000000..ca6685e2
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/SECURITY.md
@@ -0,0 +1,25 @@
+# Security Policy
+
+## Supported Versions
+
+Security updates are applied only to the latest release.
+
+## Vulnerability Definition
+
+A security vulnerability is a bug that with certain input triggers a crash or an infinite loop. Most calls will have varying execution time and only in rare cases will slow operation be considered a security vulnerability.
+
+Corrupted output generally is not considered a security vulnerability, unless independent operations are able to affect each other. Note that not all functionality is re-entrant and safe to use concurrently.
+
+Out-of-memory crashes only apply if the en/decoder uses an abnormal amount of memory, with appropriate options applied to limit maximum window size, concurrency, etc. However, if you are in doubt you are welcome to file a security issue.
+
+It is assumed that all callers are trusted, meaning internal data exposed through reflection or inspection of returned data structures is not considered a vulnerability.
+
+Vulnerabilities resulting from compiler/assembler errors should be reported upstream. Depending on the severity this package may or may not implement a workaround.
+
+## Reporting a Vulnerability
+
+If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released.
+
+Please disclose it at [security advisory](https://github.com/klauspost/compress/security/advisories/new). If possible please provide a minimal reproducer. If the issue only applies to a single platform, it would be helpful to provide access to that.
+
+This project is maintained by a team of volunteers on a reasonable-effort basis. As such, vulnerabilities will be disclosed on a best-effort basis.
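(Editor's aside: the README hunk above mentions `NewStatelessWriter` and `StatelessDeflate` for direct stateless deflate. A small sketch of the writer variant, assuming `flate.NewStatelessWriter(dst io.Writer) io.WriteCloser` from the package's stateless API; buffering follows the README's 4KB suggestion:)

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"

	"github.com/klauspost/compress/flate"
)

func main() {
	var out bytes.Buffer
	sw := flate.NewStatelessWriter(&out) // keeps no window between Writes
	bw := bufio.NewWriterSize(sw, 4<<10) // hand it decent-sized blocks

	fmt.Fprintln(bw, "stateless deflate: every Write is an independent block")
	if err := bw.Flush(); err != nil {
		panic(err)
	}
	if err := sw.Close(); err != nil { // terminates the deflate stream
		panic(err)
	}
	fmt.Println("compressed size:", out.Len())
}
```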
diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go deleted file mode 100644 index 82882961..00000000 --- a/vendor/github.com/klauspost/compress/flate/deflate.go +++ /dev/null @@ -1,989 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Copyright (c) 2015 Klaus Post -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flate - -import ( - "encoding/binary" - "fmt" - "io" - "math" -) - -const ( - NoCompression = 0 - BestSpeed = 1 - BestCompression = 9 - DefaultCompression = -1 - - // HuffmanOnly disables Lempel-Ziv match searching and only performs Huffman - // entropy encoding. This mode is useful in compressing data that has - // already been compressed with an LZ style algorithm (e.g. Snappy or LZ4) - // that lacks an entropy encoder. Compression gains are achieved when - // certain bytes in the input stream occur more frequently than others. - // - // Note that HuffmanOnly produces a compressed output that is - // RFC 1951 compliant. That is, any valid DEFLATE decompressor will - // continue to be able to decompress this output. - HuffmanOnly = -2 - ConstantCompression = HuffmanOnly // compatibility alias. - - logWindowSize = 15 - windowSize = 1 << logWindowSize - windowMask = windowSize - 1 - logMaxOffsetSize = 15 // Standard DEFLATE - minMatchLength = 4 // The smallest match that the compressor looks for - maxMatchLength = 258 // The longest match for the compressor - minOffsetSize = 1 // The shortest offset that makes any sense - - // The maximum number of tokens we will encode at the time. - // Smaller sizes usually creates less optimal blocks. - // Bigger can make context switching slow. - // We use this for levels 7-9, so we make it big. - maxFlateBlockTokens = 1 << 15 - maxStoreBlockSize = 65535 - hashBits = 17 // After 17 performance degrades - hashSize = 1 << hashBits - hashMask = (1 << hashBits) - 1 - hashShift = (hashBits + minMatchLength - 1) / minMatchLength - maxHashOffset = 1 << 28 - - skipNever = math.MaxInt32 - - debugDeflate = false -) - -type compressionLevel struct { - good, lazy, nice, chain, fastSkipHashing, level int -} - -// Compression levels have been rebalanced from zlib deflate defaults -// to give a bigger spread in speed and compression. -// See https://blog.klauspost.com/rebalancing-deflate-compression-levels/ -var levels = []compressionLevel{ - {}, // 0 - // Level 1-6 uses specialized algorithm - values not used - {0, 0, 0, 0, 0, 1}, - {0, 0, 0, 0, 0, 2}, - {0, 0, 0, 0, 0, 3}, - {0, 0, 0, 0, 0, 4}, - {0, 0, 0, 0, 0, 5}, - {0, 0, 0, 0, 0, 6}, - // Levels 7-9 use increasingly more lazy matching - // and increasingly stringent conditions for "good enough". - {8, 12, 16, 24, skipNever, 7}, - {16, 30, 40, 64, skipNever, 8}, - {32, 258, 258, 1024, skipNever, 9}, -} - -// advancedState contains state for the advanced levels, with bigger hash tables, etc. -type advancedState struct { - // deflate state - length int - offset int - maxInsertIndex int - chainHead int - hashOffset int - - ii uint16 // position of last match, intended to overflow to reset. 
- - // input window: unprocessed data is window[index:windowEnd] - index int - estBitsPerByte int - hashMatch [maxMatchLength + minMatchLength]uint32 - - // Input hash chains - // hashHead[hashValue] contains the largest inputIndex with the specified hash value - // If hashHead[hashValue] is within the current window, then - // hashPrev[hashHead[hashValue] & windowMask] contains the previous index - // with the same hash value. - hashHead [hashSize]uint32 - hashPrev [windowSize]uint32 -} - -type compressor struct { - compressionLevel - - h *huffmanEncoder - w *huffmanBitWriter - - // compression algorithm - fill func(*compressor, []byte) int // copy data to window - step func(*compressor) // process window - - window []byte - windowEnd int - blockStart int // window index where current tokens start - err error - - // queued output tokens - tokens tokens - fast fastEnc - state *advancedState - - sync bool // requesting flush - byteAvailable bool // if true, still need to process window[index-1]. -} - -func (d *compressor) fillDeflate(b []byte) int { - s := d.state - if s.index >= 2*windowSize-(minMatchLength+maxMatchLength) { - // shift the window by windowSize - //copy(d.window[:], d.window[windowSize:2*windowSize]) - *(*[windowSize]byte)(d.window) = *(*[windowSize]byte)(d.window[windowSize:]) - s.index -= windowSize - d.windowEnd -= windowSize - if d.blockStart >= windowSize { - d.blockStart -= windowSize - } else { - d.blockStart = math.MaxInt32 - } - s.hashOffset += windowSize - if s.hashOffset > maxHashOffset { - delta := s.hashOffset - 1 - s.hashOffset -= delta - s.chainHead -= delta - // Iterate over slices instead of arrays to avoid copying - // the entire table onto the stack (Issue #18625). - for i, v := range s.hashPrev[:] { - if int(v) > delta { - s.hashPrev[i] = uint32(int(v) - delta) - } else { - s.hashPrev[i] = 0 - } - } - for i, v := range s.hashHead[:] { - if int(v) > delta { - s.hashHead[i] = uint32(int(v) - delta) - } else { - s.hashHead[i] = 0 - } - } - } - } - n := copy(d.window[d.windowEnd:], b) - d.windowEnd += n - return n -} - -func (d *compressor) writeBlock(tok *tokens, index int, eof bool) error { - if index > 0 || eof { - var window []byte - if d.blockStart <= index { - window = d.window[d.blockStart:index] - } - d.blockStart = index - //d.w.writeBlock(tok, eof, window) - d.w.writeBlockDynamic(tok, eof, window, d.sync) - return d.w.err - } - return nil -} - -// writeBlockSkip writes the current block and uses the number of tokens -// to determine if the block should be stored on no matches, or -// only huffman encoded. -func (d *compressor) writeBlockSkip(tok *tokens, index int, eof bool) error { - if index > 0 || eof { - if d.blockStart <= index { - window := d.window[d.blockStart:index] - // If we removed less than a 64th of all literals - // we huffman compress the block. - if int(tok.n) > len(window)-int(tok.n>>6) { - d.w.writeBlockHuff(eof, window, d.sync) - } else { - // Write a dynamic huffman block. - d.w.writeBlockDynamic(tok, eof, window, d.sync) - } - } else { - d.w.writeBlock(tok, eof, nil) - } - d.blockStart = index - return d.w.err - } - return nil -} - -// fillWindow will fill the current window with the supplied -// dictionary and calculate all hashes. -// This is much faster than doing a full encode. -// Should only be used after a start/reset. -func (d *compressor) fillWindow(b []byte) { - // Do not fill window if we are in store-only or huffman mode. 
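// (Editor's aside on the hashHead/hashPrev pair documented in
// advancedState above: inserting a position is two writes, with the old
// bucket head linked behind the new one before the head moves,
//
//	s.hashPrev[pos&windowMask] = s.hashHead[h]
//	s.hashHead[h] = uint32(pos + s.hashOffset)
//
// so candidate matches are walked newest to oldest until the index
// falls out of the 32KB window. This fillWindow and deflateLazy further
// down both repeat exactly this pattern.)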
- if d.level <= 0 { - return - } - if d.fast != nil { - // encode the last data, but discard the result - if len(b) > maxMatchOffset { - b = b[len(b)-maxMatchOffset:] - } - d.fast.Encode(&d.tokens, b) - d.tokens.Reset() - return - } - s := d.state - // If we are given too much, cut it. - if len(b) > windowSize { - b = b[len(b)-windowSize:] - } - // Add all to window. - n := copy(d.window[d.windowEnd:], b) - - // Calculate 256 hashes at the time (more L1 cache hits) - loops := (n + 256 - minMatchLength) / 256 - for j := 0; j < loops; j++ { - startindex := j * 256 - end := startindex + 256 + minMatchLength - 1 - if end > n { - end = n - } - tocheck := d.window[startindex:end] - dstSize := len(tocheck) - minMatchLength + 1 - - if dstSize <= 0 { - continue - } - - dst := s.hashMatch[:dstSize] - bulkHash4(tocheck, dst) - var newH uint32 - for i, val := range dst { - di := i + startindex - newH = val & hashMask - // Get previous value with the same hash. - // Our chain should point to the previous value. - s.hashPrev[di&windowMask] = s.hashHead[newH] - // Set the head of the hash chain to us. - s.hashHead[newH] = uint32(di + s.hashOffset) - } - } - // Update window information. - d.windowEnd += n - s.index = n -} - -// Try to find a match starting at index whose length is greater than prevSize. -// We only look at chainCount possibilities before giving up. -// pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead -func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, offset int, ok bool) { - minMatchLook := maxMatchLength - if lookahead < minMatchLook { - minMatchLook = lookahead - } - - win := d.window[0 : pos+minMatchLook] - - // We quit when we get a match that's at least nice long - nice := len(win) - pos - if d.nice < nice { - nice = d.nice - } - - // If we've got a match that's good enough, only look in 1/4 the chain. - tries := d.chain - length = minMatchLength - 1 - - wEnd := win[pos+length] - wPos := win[pos:] - minIndex := pos - windowSize - if minIndex < 0 { - minIndex = 0 - } - offset = 0 - - if d.chain < 100 { - for i := prevHead; tries > 0; tries-- { - if wEnd == win[i+length] { - n := matchLen(win[i:i+minMatchLook], wPos) - if n > length { - length = n - offset = pos - i - ok = true - if n >= nice { - // The match is good enough that we don't try to find a better one. - break - } - wEnd = win[pos+n] - } - } - if i <= minIndex { - // hashPrev[i & windowMask] has already been overwritten, so stop now. - break - } - i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset - if i < minIndex { - break - } - } - return - } - - // Minimum gain to accept a match. - cGain := 4 - - // Some like it higher (CSV), some like it lower (JSON) - const baseCost = 3 - // Base is 4 bytes at with an additional cost. - // Matches must be better than this. - - for i := prevHead; tries > 0; tries-- { - if wEnd == win[i+length] { - n := matchLen(win[i:i+minMatchLook], wPos) - if n > length { - // Calculate gain. Estimate - newGain := d.h.bitLengthRaw(wPos[:n]) - int(offsetExtraBits[offsetCode(uint32(pos-i))]) - baseCost - int(lengthExtraBits[lengthCodes[(n-3)&255]]) - - //fmt.Println("gain:", newGain, "prev:", cGain, "raw:", d.h.bitLengthRaw(wPos[:n]), "this-len:", n, "prev-len:", length) - if newGain > cGain { - length = n - offset = pos - i - cGain = newGain - ok = true - if n >= nice { - // The match is good enough that we don't try to find a better one. 
- break - } - wEnd = win[pos+n] - } - } - } - if i <= minIndex { - // hashPrev[i & windowMask] has already been overwritten, so stop now. - break - } - i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset - if i < minIndex { - break - } - } - return -} - -func (d *compressor) writeStoredBlock(buf []byte) error { - if d.w.writeStoredHeader(len(buf), false); d.w.err != nil { - return d.w.err - } - d.w.writeBytes(buf) - return d.w.err -} - -// hash4 returns a hash representation of the first 4 bytes -// of the supplied slice. -// The caller must ensure that len(b) >= 4. -func hash4(b []byte) uint32 { - return hash4u(binary.LittleEndian.Uint32(b), hashBits) -} - -// hash4 returns the hash of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <32. -func hash4u(u uint32, h uint8) uint32 { - return (u * prime4bytes) >> (32 - h) -} - -// bulkHash4 will compute hashes using the same -// algorithm as hash4 -func bulkHash4(b []byte, dst []uint32) { - if len(b) < 4 { - return - } - hb := binary.LittleEndian.Uint32(b) - - dst[0] = hash4u(hb, hashBits) - end := len(b) - 4 + 1 - for i := 1; i < end; i++ { - hb = (hb >> 8) | uint32(b[i+3])<<24 - dst[i] = hash4u(hb, hashBits) - } -} - -func (d *compressor) initDeflate() { - d.window = make([]byte, 2*windowSize) - d.byteAvailable = false - d.err = nil - if d.state == nil { - return - } - s := d.state - s.index = 0 - s.hashOffset = 1 - s.length = minMatchLength - 1 - s.offset = 0 - s.chainHead = -1 -} - -// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever, -// meaning it always has lazy matching on. -func (d *compressor) deflateLazy() { - s := d.state - // Sanity enables additional runtime tests. - // It's intended to be used during development - // to supplement the currently ad-hoc unit tests. - const sanity = debugDeflate - - if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync { - return - } - if d.windowEnd != s.index && d.chain > 100 { - // Get literal huffman coder. - if d.h == nil { - d.h = newHuffmanEncoder(maxFlateBlockTokens) - } - var tmp [256]uint16 - for _, v := range d.window[s.index:d.windowEnd] { - tmp[v]++ - } - d.h.generate(tmp[:], 15) - } - - s.maxInsertIndex = d.windowEnd - (minMatchLength - 1) - - for { - if sanity && s.index > d.windowEnd { - panic("index > windowEnd") - } - lookahead := d.windowEnd - s.index - if lookahead < minMatchLength+maxMatchLength { - if !d.sync { - return - } - if sanity && s.index > d.windowEnd { - panic("index > windowEnd") - } - if lookahead == 0 { - // Flush current output block if any. 
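// (Editor's aside: the byteAvailable flag used just below is the heart
// of the lazy matching this function implements: each match is held
// back one byte, the literal stays pending, and the previous match is
// only emitted once the match starting at the next position fails to
// beat it (the s.length <= prevLength test further down). The lazy
// column of the levels table caps how long a match may be deferred.)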
- if d.byteAvailable { - // There is still one pending token that needs to be flushed - d.tokens.AddLiteral(d.window[s.index-1]) - d.byteAvailable = false - } - if d.tokens.n > 0 { - if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.Reset() - } - return - } - } - if s.index < s.maxInsertIndex { - // Update the hash - hash := hash4(d.window[s.index:]) - ch := s.hashHead[hash] - s.chainHead = int(ch) - s.hashPrev[s.index&windowMask] = ch - s.hashHead[hash] = uint32(s.index + s.hashOffset) - } - prevLength := s.length - prevOffset := s.offset - s.length = minMatchLength - 1 - s.offset = 0 - minIndex := s.index - windowSize - if minIndex < 0 { - minIndex = 0 - } - - if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy { - if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, lookahead); ok { - s.length = newLength - s.offset = newOffset - } - } - - if prevLength >= minMatchLength && s.length <= prevLength { - // No better match, but check for better match at end... - // - // Skip forward a number of bytes. - // Offset of 2 seems to yield best results. 3 is sometimes better. - const checkOff = 2 - - // Check all, except full length - if prevLength < maxMatchLength-checkOff { - prevIndex := s.index - 1 - if prevIndex+prevLength < s.maxInsertIndex { - end := lookahead - if lookahead > maxMatchLength+checkOff { - end = maxMatchLength + checkOff - } - end += prevIndex - - // Hash at match end. - h := hash4(d.window[prevIndex+prevLength:]) - ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength - if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff { - length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:]) - // It seems like a pure length metric is best. - if length > prevLength { - prevLength = length - prevOffset = prevIndex - ch2 - - // Extend back... - for i := checkOff - 1; i >= 0; i-- { - if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i] { - // Emit tokens we "owe" - for j := 0; j <= i; j++ { - d.tokens.AddLiteral(d.window[prevIndex+j]) - if d.tokens.n == maxFlateBlockTokens { - // The block includes the current character - if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.Reset() - } - s.index++ - if s.index < s.maxInsertIndex { - h := hash4(d.window[s.index:]) - ch := s.hashHead[h] - s.chainHead = int(ch) - s.hashPrev[s.index&windowMask] = ch - s.hashHead[h] = uint32(s.index + s.hashOffset) - } - } - break - } else { - prevLength++ - } - } - } else if false { - // Check one further ahead. - // Only rarely better, disabled for now. - prevIndex++ - h := hash4(d.window[prevIndex+prevLength:]) - ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength - if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff { - length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:]) - // It seems like a pure length metric is best. - if length > prevLength+checkOff { - prevLength = length - prevOffset = prevIndex - ch2 - prevIndex-- - - // Extend back... 
- for i := checkOff; i >= 0; i-- { - if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i-1] { - // Emit tokens we "owe" - for j := 0; j <= i; j++ { - d.tokens.AddLiteral(d.window[prevIndex+j]) - if d.tokens.n == maxFlateBlockTokens { - // The block includes the current character - if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.Reset() - } - s.index++ - if s.index < s.maxInsertIndex { - h := hash4(d.window[s.index:]) - ch := s.hashHead[h] - s.chainHead = int(ch) - s.hashPrev[s.index&windowMask] = ch - s.hashHead[h] = uint32(s.index + s.hashOffset) - } - } - break - } else { - prevLength++ - } - } - } - } - } - } - } - } - // There was a match at the previous step, and the current match is - // not better. Output the previous match. - d.tokens.AddMatch(uint32(prevLength-3), uint32(prevOffset-minOffsetSize)) - - // Insert in the hash table all strings up to the end of the match. - // index and index-1 are already inserted. If there is not enough - // lookahead, the last two strings are not inserted into the hash - // table. - newIndex := s.index + prevLength - 1 - // Calculate missing hashes - end := newIndex - if end > s.maxInsertIndex { - end = s.maxInsertIndex - } - end += minMatchLength - 1 - startindex := s.index + 1 - if startindex > s.maxInsertIndex { - startindex = s.maxInsertIndex - } - tocheck := d.window[startindex:end] - dstSize := len(tocheck) - minMatchLength + 1 - if dstSize > 0 { - dst := s.hashMatch[:dstSize] - bulkHash4(tocheck, dst) - var newH uint32 - for i, val := range dst { - di := i + startindex - newH = val & hashMask - // Get previous value with the same hash. - // Our chain should point to the previous value. - s.hashPrev[di&windowMask] = s.hashHead[newH] - // Set the head of the hash chain to us. - s.hashHead[newH] = uint32(di + s.hashOffset) - } - } - - s.index = newIndex - d.byteAvailable = false - s.length = minMatchLength - 1 - if d.tokens.n == maxFlateBlockTokens { - // The block includes the current character - if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.Reset() - } - s.ii = 0 - } else { - // Reset, if we got a match this run. - if s.length >= minMatchLength { - s.ii = 0 - } - // We have a byte waiting. Emit it. - if d.byteAvailable { - s.ii++ - d.tokens.AddLiteral(d.window[s.index-1]) - if d.tokens.n == maxFlateBlockTokens { - if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.Reset() - } - s.index++ - - // If we have a long run of no matches, skip additional bytes - // Resets when s.ii overflows after 64KB. - if n := int(s.ii) - d.chain; n > 0 { - n = 1 + int(n>>6) - for j := 0; j < n; j++ { - if s.index >= d.windowEnd-1 { - break - } - d.tokens.AddLiteral(d.window[s.index-1]) - if d.tokens.n == maxFlateBlockTokens { - if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil { - return - } - d.tokens.Reset() - } - // Index... 
- if s.index < s.maxInsertIndex {
- h := hash4(d.window[s.index:])
- ch := s.hashHead[h]
- s.chainHead = int(ch)
- s.hashPrev[s.index&windowMask] = ch
- s.hashHead[h] = uint32(s.index + s.hashOffset)
- }
- s.index++
- }
- // Flush last byte
- d.tokens.AddLiteral(d.window[s.index-1])
- d.byteAvailable = false
- // s.length = minMatchLength - 1 // not needed, since s.ii is reset above, so it should never be > minMatchLength
- if d.tokens.n == maxFlateBlockTokens {
- if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
- return
- }
- d.tokens.Reset()
- }
- }
- } else {
- s.index++
- d.byteAvailable = true
- }
- }
- }
-}
-
-func (d *compressor) store() {
- if d.windowEnd > 0 && (d.windowEnd == maxStoreBlockSize || d.sync) {
- d.err = d.writeStoredBlock(d.window[:d.windowEnd])
- d.windowEnd = 0
- }
-}
-
-// fillBlock will fill the window with data for huffman-only compression.
-// The number of bytes copied is returned.
-func (d *compressor) fillBlock(b []byte) int {
- n := copy(d.window[d.windowEnd:], b)
- d.windowEnd += n
- return n
-}
-
-// storeHuff will compress and store the currently added data,
-// if enough has been accumulated or we are at the end of the stream.
-// Any error that occurred will be in d.err
-func (d *compressor) storeHuff() {
- if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 {
- return
- }
- d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync)
- d.err = d.w.err
- d.windowEnd = 0
-}
-
-// storeFast will compress and store the currently added data,
-// if enough has been accumulated or we are at the end of the stream.
-// Any error that occurred will be in d.err
-func (d *compressor) storeFast() {
- // We only compress if we have maxStoreBlockSize.
- if d.windowEnd < len(d.window) {
- if !d.sync {
- return
- }
- // Handle extremely small sizes.
- if d.windowEnd < 128 {
- if d.windowEnd == 0 {
- return
- }
- if d.windowEnd <= 32 {
- d.err = d.writeStoredBlock(d.window[:d.windowEnd])
- } else {
- d.w.writeBlockHuff(false, d.window[:d.windowEnd], true)
- d.err = d.w.err
- }
- d.tokens.Reset()
- d.windowEnd = 0
- d.fast.Reset()
- return
- }
- }
-
- d.fast.Encode(&d.tokens, d.window[:d.windowEnd])
- // If we made zero matches, store the block as is.
- if d.tokens.n == 0 {
- d.err = d.writeStoredBlock(d.window[:d.windowEnd])
- // If we removed less than 1/16th, huffman compress the block.
- } else if int(d.tokens.n) > d.windowEnd-(d.windowEnd>>4) {
- d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync)
- d.err = d.w.err
- } else {
- d.w.writeBlockDynamic(&d.tokens, false, d.window[:d.windowEnd], d.sync)
- d.err = d.w.err
- }
- d.tokens.Reset()
- d.windowEnd = 0
-}
-
-// write will add input bytes to the stream.
-// Unless an error occurs all bytes will be consumed.
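```go
// Aside: storeFast above routes a finished window three ways depending on how
// many tokens the fast encoder produced. The thresholds below are copied from
// that code; choose is a hypothetical helper name for illustration only.
package main

import "fmt"

func choose(nTokens, winLen int) string {
	switch {
	case nTokens == 0:
		return "stored" // no matches at all, raw bytes are cheapest
	case nTokens > winLen-winLen>>4:
		return "huffman-only" // matches removed less than 1/16th of the input
	default:
		return "dynamic" // enough matches to justify dynamic tables
	}
}

func main() {
	fmt.Println(choose(0, 4096))    // stored
	fmt.Println(choose(4000, 4096)) // huffman-only
	fmt.Println(choose(900, 4096))  // dynamic
}
```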
-func (d *compressor) write(b []byte) (n int, err error) { - if d.err != nil { - return 0, d.err - } - n = len(b) - for len(b) > 0 { - if d.windowEnd == len(d.window) || d.sync { - d.step(d) - } - b = b[d.fill(d, b):] - if d.err != nil { - return 0, d.err - } - } - return n, d.err -} - -func (d *compressor) syncFlush() error { - d.sync = true - if d.err != nil { - return d.err - } - d.step(d) - if d.err == nil { - d.w.writeStoredHeader(0, false) - d.w.flush() - d.err = d.w.err - } - d.sync = false - return d.err -} - -func (d *compressor) init(w io.Writer, level int) (err error) { - d.w = newHuffmanBitWriter(w) - - switch { - case level == NoCompression: - d.window = make([]byte, maxStoreBlockSize) - d.fill = (*compressor).fillBlock - d.step = (*compressor).store - case level == ConstantCompression: - d.w.logNewTablePenalty = 10 - d.window = make([]byte, 32<<10) - d.fill = (*compressor).fillBlock - d.step = (*compressor).storeHuff - case level == DefaultCompression: - level = 5 - fallthrough - case level >= 1 && level <= 6: - d.w.logNewTablePenalty = 7 - d.fast = newFastEnc(level) - d.window = make([]byte, maxStoreBlockSize) - d.fill = (*compressor).fillBlock - d.step = (*compressor).storeFast - case 7 <= level && level <= 9: - d.w.logNewTablePenalty = 8 - d.state = &advancedState{} - d.compressionLevel = levels[level] - d.initDeflate() - d.fill = (*compressor).fillDeflate - d.step = (*compressor).deflateLazy - default: - return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level) - } - d.level = level - return nil -} - -// reset the state of the compressor. -func (d *compressor) reset(w io.Writer) { - d.w.reset(w) - d.sync = false - d.err = nil - // We only need to reset a few things for Snappy. - if d.fast != nil { - d.fast.Reset() - d.windowEnd = 0 - d.tokens.Reset() - return - } - switch d.compressionLevel.chain { - case 0: - // level was NoCompression or ConstantCompresssion. - d.windowEnd = 0 - default: - s := d.state - s.chainHead = -1 - for i := range s.hashHead { - s.hashHead[i] = 0 - } - for i := range s.hashPrev { - s.hashPrev[i] = 0 - } - s.hashOffset = 1 - s.index, d.windowEnd = 0, 0 - d.blockStart, d.byteAvailable = 0, false - d.tokens.Reset() - s.length = minMatchLength - 1 - s.offset = 0 - s.ii = 0 - s.maxInsertIndex = 0 - } -} - -func (d *compressor) close() error { - if d.err != nil { - return d.err - } - d.sync = true - d.step(d) - if d.err != nil { - return d.err - } - if d.w.writeStoredHeader(0, true); d.w.err != nil { - return d.w.err - } - d.w.flush() - d.w.reset(nil) - return d.w.err -} - -// NewWriter returns a new Writer compressing data at the given level. -// Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression); -// higher levels typically run slower but compress more. -// Level 0 (NoCompression) does not attempt any compression; it only adds the -// necessary DEFLATE framing. -// Level -1 (DefaultCompression) uses the default compression level. -// Level -2 (ConstantCompression) will use Huffman compression only, giving -// a very fast compression for all types of input, but sacrificing considerable -// compression efficiency. -// -// If level is in the range [-2, 9] then the error returned will be nil. -// Otherwise the error returned will be non-nil. -func NewWriter(w io.Writer, level int) (*Writer, error) { - var dw Writer - if err := dw.d.init(w, level); err != nil { - return nil, err - } - return &dw, nil -} - -// NewWriterDict is like NewWriter but initializes the new -// Writer with a preset dictionary. 
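```go
// Aside on the constructor just defined: typical use of NewWriter, assuming
// the package keeps its upstream import path github.com/klauspost/compress/flate.
// The input text and level 6 are arbitrary; per the init switch above,
// levels 1-6 use the fast encoders and 7-9 the lazy matcher.
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/flate"
)

func main() {
	var buf bytes.Buffer
	w, err := flate.NewWriter(&buf, 6)
	if err != nil {
		panic(err)
	}
	io.WriteString(w, "repeated text, repeated text, repeated text")
	if err := w.Close(); err != nil {
		panic(err)
	}
	fmt.Println("compressed to", buf.Len(), "bytes")
}
```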
The returned Writer behaves -// as if the dictionary had been written to it without producing -// any compressed output. The compressed data written to w -// can only be decompressed by a Reader initialized with the -// same dictionary. -func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) { - zw, err := NewWriter(w, level) - if err != nil { - return nil, err - } - zw.d.fillWindow(dict) - zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method. - return zw, err -} - -// A Writer takes data written to it and writes the compressed -// form of that data to an underlying writer (see NewWriter). -type Writer struct { - d compressor - dict []byte -} - -// Write writes data to w, which will eventually write the -// compressed form of data to its underlying writer. -func (w *Writer) Write(data []byte) (n int, err error) { - return w.d.write(data) -} - -// Flush flushes any pending data to the underlying writer. -// It is useful mainly in compressed network protocols, to ensure that -// a remote reader has enough data to reconstruct a packet. -// Flush does not return until the data has been written. -// Calling Flush when there is no pending data still causes the Writer -// to emit a sync marker of at least 4 bytes. -// If the underlying writer returns an error, Flush returns that error. -// -// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH. -func (w *Writer) Flush() error { - // For more about flushing: - // http://www.bolet.org/~pornin/deflate-flush.html - return w.d.syncFlush() -} - -// Close flushes and closes the writer. -func (w *Writer) Close() error { - return w.d.close() -} - -// Reset discards the writer's state and makes it equivalent to -// the result of NewWriter or NewWriterDict called with dst -// and w's level and dictionary. -func (w *Writer) Reset(dst io.Writer) { - if len(w.dict) > 0 { - // w was created with NewWriterDict - w.d.reset(dst) - if dst != nil { - w.d.fillWindow(w.dict) - } - } else { - // w was created with NewWriter - w.d.reset(dst) - } -} - -// ResetDict discards the writer's state and makes it equivalent to -// the result of NewWriter or NewWriterDict called with dst -// and w's level, but sets a specific dictionary. -func (w *Writer) ResetDict(dst io.Writer, dict []byte) { - w.dict = dict - w.d.reset(dst) - w.d.fillWindow(w.dict) -} diff --git a/vendor/github.com/klauspost/compress/flate/dict_decoder.go b/vendor/github.com/klauspost/compress/flate/dict_decoder.go deleted file mode 100644 index bb36351a..00000000 --- a/vendor/github.com/klauspost/compress/flate/dict_decoder.go +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flate - -// dictDecoder implements the LZ77 sliding dictionary as used in decompression. -// LZ77 decompresses data through sequences of two forms of commands: -// -// - Literal insertions: Runs of one or more symbols are inserted into the data -// stream as is. This is accomplished through the writeByte method for a -// single symbol, or combinations of writeSlice/writeMark for multiple symbols. -// Any valid stream must start with a literal insertion if no preset dictionary -// is used. -// -// - Backward copies: Runs of one or more symbols are copied from previously -// emitted data. 
Backward copies come as the tuple (dist, length) where dist -// determines how far back in the stream to copy from and length determines how -// many bytes to copy. Note that it is valid for the length to be greater than -// the distance. Since LZ77 uses forward copies, that situation is used to -// perform a form of run-length encoding on repeated runs of symbols. -// The writeCopy and tryWriteCopy are used to implement this command. -// -// For performance reasons, this implementation performs little to no sanity -// checks about the arguments. As such, the invariants documented for each -// method call must be respected. -type dictDecoder struct { - hist []byte // Sliding window history - - // Invariant: 0 <= rdPos <= wrPos <= len(hist) - wrPos int // Current output position in buffer - rdPos int // Have emitted hist[:rdPos] already - full bool // Has a full window length been written yet? -} - -// init initializes dictDecoder to have a sliding window dictionary of the given -// size. If a preset dict is provided, it will initialize the dictionary with -// the contents of dict. -func (dd *dictDecoder) init(size int, dict []byte) { - *dd = dictDecoder{hist: dd.hist} - - if cap(dd.hist) < size { - dd.hist = make([]byte, size) - } - dd.hist = dd.hist[:size] - - if len(dict) > len(dd.hist) { - dict = dict[len(dict)-len(dd.hist):] - } - dd.wrPos = copy(dd.hist, dict) - if dd.wrPos == len(dd.hist) { - dd.wrPos = 0 - dd.full = true - } - dd.rdPos = dd.wrPos -} - -// histSize reports the total amount of historical data in the dictionary. -func (dd *dictDecoder) histSize() int { - if dd.full { - return len(dd.hist) - } - return dd.wrPos -} - -// availRead reports the number of bytes that can be flushed by readFlush. -func (dd *dictDecoder) availRead() int { - return dd.wrPos - dd.rdPos -} - -// availWrite reports the available amount of output buffer space. -func (dd *dictDecoder) availWrite() int { - return len(dd.hist) - dd.wrPos -} - -// writeSlice returns a slice of the available buffer to write data to. -// -// This invariant will be kept: len(s) <= availWrite() -func (dd *dictDecoder) writeSlice() []byte { - return dd.hist[dd.wrPos:] -} - -// writeMark advances the writer pointer by cnt. -// -// This invariant must be kept: 0 <= cnt <= availWrite() -func (dd *dictDecoder) writeMark(cnt int) { - dd.wrPos += cnt -} - -// writeByte writes a single byte to the dictionary. -// -// This invariant must be kept: 0 < availWrite() -func (dd *dictDecoder) writeByte(c byte) { - dd.hist[dd.wrPos] = c - dd.wrPos++ -} - -// writeCopy copies a string at a given (dist, length) to the output. -// This returns the number of bytes copied and may be less than the requested -// length if the available space in the output buffer is too small. -// -// This invariant must be kept: 0 < dist <= histSize() -func (dd *dictDecoder) writeCopy(dist, length int) int { - dstBase := dd.wrPos - dstPos := dstBase - srcPos := dstPos - dist - endPos := dstPos + length - if endPos > len(dd.hist) { - endPos = len(dd.hist) - } - - // Copy non-overlapping section after destination position. - // - // This section is non-overlapping in that the copy length for this section - // is always less than or equal to the backwards distance. This can occur - // if a distance refers to data that wraps-around in the buffer. - // Thus, a backwards copy is performed here; that is, the exact bytes in - // the source prior to the copy is placed in the destination. 
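```go
// Aside: the forward, possibly self-overlapping copy described above is what
// lets a (dist, length) pair with length > dist expand a short seed into a
// long repeated run. A minimal demonstration of the same loop shape:
package main

import "fmt"

func main() {
	hist := make([]byte, 16)
	n := copy(hist, "ab") // seed two bytes; dist = 2
	dst, src, end := n, 0, 10
	for dst < end {
		// Each chunk copied makes the next chunk's source available.
		dst += copy(hist[dst:end], hist[src:dst])
	}
	fmt.Printf("%q\n", hist[:end]) // "ababababab"
}
```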
- if srcPos < 0 { - srcPos += len(dd.hist) - dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:]) - srcPos = 0 - } - - // Copy possibly overlapping section before destination position. - // - // This section can overlap if the copy length for this section is larger - // than the backwards distance. This is allowed by LZ77 so that repeated - // strings can be succinctly represented using (dist, length) pairs. - // Thus, a forwards copy is performed here; that is, the bytes copied is - // possibly dependent on the resulting bytes in the destination as the copy - // progresses along. This is functionally equivalent to the following: - // - // for i := 0; i < endPos-dstPos; i++ { - // dd.hist[dstPos+i] = dd.hist[srcPos+i] - // } - // dstPos = endPos - // - for dstPos < endPos { - dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos]) - } - - dd.wrPos = dstPos - return dstPos - dstBase -} - -// tryWriteCopy tries to copy a string at a given (distance, length) to the -// output. This specialized version is optimized for short distances. -// -// This method is designed to be inlined for performance reasons. -// -// This invariant must be kept: 0 < dist <= histSize() -func (dd *dictDecoder) tryWriteCopy(dist, length int) int { - dstPos := dd.wrPos - endPos := dstPos + length - if dstPos < dist || endPos > len(dd.hist) { - return 0 - } - dstBase := dstPos - srcPos := dstPos - dist - - // Copy possibly overlapping section before destination position. -loop: - dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos]) - if dstPos < endPos { - goto loop // Avoid for-loop so that this function can be inlined - } - - dd.wrPos = dstPos - return dstPos - dstBase -} - -// readFlush returns a slice of the historical buffer that is ready to be -// emitted to the user. The data returned by readFlush must be fully consumed -// before calling any other dictDecoder methods. -func (dd *dictDecoder) readFlush() []byte { - toRead := dd.hist[dd.rdPos:dd.wrPos] - dd.rdPos = dd.wrPos - if dd.wrPos == len(dd.hist) { - dd.wrPos, dd.rdPos = 0, 0 - dd.full = true - } - return toRead -} diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go deleted file mode 100644 index 24caf5f7..00000000 --- a/vendor/github.com/klauspost/compress/flate/fast_encoder.go +++ /dev/null @@ -1,216 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Modified for deflate by Klaus Post (c) 2015. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flate - -import ( - "encoding/binary" - "fmt" - "math/bits" -) - -type fastEnc interface { - Encode(dst *tokens, src []byte) - Reset() -} - -func newFastEnc(level int) fastEnc { - switch level { - case 1: - return &fastEncL1{fastGen: fastGen{cur: maxStoreBlockSize}} - case 2: - return &fastEncL2{fastGen: fastGen{cur: maxStoreBlockSize}} - case 3: - return &fastEncL3{fastGen: fastGen{cur: maxStoreBlockSize}} - case 4: - return &fastEncL4{fastGen: fastGen{cur: maxStoreBlockSize}} - case 5: - return &fastEncL5{fastGen: fastGen{cur: maxStoreBlockSize}} - case 6: - return &fastEncL6{fastGen: fastGen{cur: maxStoreBlockSize}} - default: - panic("invalid level specified") - } -} - -const ( - tableBits = 15 // Bits used in the table - tableSize = 1 << tableBits // Size of the table - tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32. 
- baseMatchOffset = 1 // The smallest match offset - baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5 - maxMatchOffset = 1 << 15 // The largest match offset - - bTableBits = 17 // Bits used in the big tables - bTableSize = 1 << bTableBits // Size of the table - allocHistory = maxStoreBlockSize * 5 // Size to preallocate for history. - bufferReset = (1 << 31) - allocHistory - maxStoreBlockSize - 1 // Reset the buffer offset when reaching this. -) - -const ( - prime3bytes = 506832829 - prime4bytes = 2654435761 - prime5bytes = 889523592379 - prime6bytes = 227718039650203 - prime7bytes = 58295818150454627 - prime8bytes = 0xcf1bbcdcb7a56463 -) - -func load3232(b []byte, i int32) uint32 { - return binary.LittleEndian.Uint32(b[i:]) -} - -func load6432(b []byte, i int32) uint64 { - return binary.LittleEndian.Uint64(b[i:]) -} - -type tableEntry struct { - offset int32 -} - -// fastGen maintains the table for matches, -// and the previous byte block for level 2. -// This is the generic implementation. -type fastGen struct { - hist []byte - cur int32 -} - -func (e *fastGen) addBlock(src []byte) int32 { - // check if we have space already - if len(e.hist)+len(src) > cap(e.hist) { - if cap(e.hist) == 0 { - e.hist = make([]byte, 0, allocHistory) - } else { - if cap(e.hist) < maxMatchOffset*2 { - panic("unexpected buffer size") - } - // Move down - offset := int32(len(e.hist)) - maxMatchOffset - // copy(e.hist[0:maxMatchOffset], e.hist[offset:]) - *(*[maxMatchOffset]byte)(e.hist) = *(*[maxMatchOffset]byte)(e.hist[offset:]) - e.cur += offset - e.hist = e.hist[:maxMatchOffset] - } - } - s := int32(len(e.hist)) - e.hist = append(e.hist, src...) - return s -} - -type tableEntryPrev struct { - Cur tableEntry - Prev tableEntry -} - -// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits. -// Preferably h should be a constant and should always be <64. -func hash7(u uint64, h uint8) uint32 { - return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & reg8SizeMask64)) -} - -// hashLen returns a hash of the lowest mls bytes of with length output bits. -// mls must be >=3 and <=8. Any other value will return hash for 4 bytes. -// length should always be < 32. -// Preferably length and mls should be a constant for inlining. -func hashLen(u uint64, length, mls uint8) uint32 { - switch mls { - case 3: - return (uint32(u<<8) * prime3bytes) >> (32 - length) - case 5: - return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length)) - case 6: - return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length)) - case 7: - return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length)) - case 8: - return uint32((u * prime8bytes) >> (64 - length)) - default: - return (uint32(u) * prime4bytes) >> (32 - length) - } -} - -// matchlen will return the match length between offsets and t in src. -// The maximum length returned is maxMatchLength - 4. -// It is assumed that s > t, that t >=0 and s < len(src). -func (e *fastGen) matchlen(s, t int32, src []byte) int32 { - if debugDecode { - if t >= s { - panic(fmt.Sprint("t >=s:", t, s)) - } - if int(s) >= len(src) { - panic(fmt.Sprint("s >= len(src):", s, len(src))) - } - if t < 0 { - panic(fmt.Sprint("t < 0:", t)) - } - if s-t > maxMatchOffset { - panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) - } - } - s1 := int(s) + maxMatchLength - 4 - if s1 > len(src) { - s1 = len(src) - } - - // Extend the match to be as long as possible. 
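```go
// Aside: matchLen, used just below and defined later in this file, finds the
// first mismatching byte eight bytes at a time: XOR two little-endian 64-bit
// loads, and the lowest set bit of the difference lies inside the first
// differing byte. A reduced, runnable version of that core step:
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// firstDiff assumes len(a) >= 8 and len(b) >= 8 (hypothetical helper).
func firstDiff(a, b []byte) int {
	x := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
	if x == 0 {
		return 8 // first eight bytes are equal
	}
	return bits.TrailingZeros64(x) >> 3 // bit index / 8 = byte index
}

func main() {
	fmt.Println(firstDiff([]byte("deflate!"), []byte("deflatez"))) // 7
	fmt.Println(firstDiff([]byte("aaaaaaaa"), []byte("aaaaaaaa"))) // 8
}
```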
- return int32(matchLen(src[s:s1], src[t:])) -} - -// matchlenLong will return the match length between offsets and t in src. -// It is assumed that s > t, that t >=0 and s < len(src). -func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 { - if debugDeflate { - if t >= s { - panic(fmt.Sprint("t >=s:", t, s)) - } - if int(s) >= len(src) { - panic(fmt.Sprint("s >= len(src):", s, len(src))) - } - if t < 0 { - panic(fmt.Sprint("t < 0:", t)) - } - if s-t > maxMatchOffset { - panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) - } - } - // Extend the match to be as long as possible. - return int32(matchLen(src[s:], src[t:])) -} - -// Reset the encoding table. -func (e *fastGen) Reset() { - if cap(e.hist) < allocHistory { - e.hist = make([]byte, 0, allocHistory) - } - // We offset current position so everything will be out of reach. - // If we are above the buffer reset it will be cleared anyway since len(hist) == 0. - if e.cur <= bufferReset { - e.cur += maxMatchOffset + int32(len(e.hist)) - } - e.hist = e.hist[:0] -} - -// matchLen returns the maximum length. -// 'a' must be the shortest of the two. -func matchLen(a, b []byte) int { - var checked int - - for len(a) >= 8 { - if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 { - return checked + (bits.TrailingZeros64(diff) >> 3) - } - checked += 8 - a = a[8:] - b = b[8:] - } - b = b[:len(a)] - for i := range a { - if a[i] != b[i] { - return i + checked - } - } - return len(a) + checked -} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go deleted file mode 100644 index 89a5dd89..00000000 --- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go +++ /dev/null @@ -1,1187 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flate - -import ( - "encoding/binary" - "fmt" - "io" - "math" -) - -const ( - // The largest offset code. - offsetCodeCount = 30 - - // The special code used to mark the end of a block. - endBlockMarker = 256 - - // The first length code. - lengthCodesStart = 257 - - // The number of codegen codes. - codegenCodeCount = 19 - badCode = 255 - - // maxPredefinedTokens is the maximum number of tokens - // where we check if fixed size is smaller. - maxPredefinedTokens = 250 - - // bufferFlushSize indicates the buffer size - // after which bytes are flushed to the writer. - // Should preferably be a multiple of 6, since - // we accumulate 6 bytes between writes to the buffer. - bufferFlushSize = 246 - - // bufferSize is the actual output byte buffer size. - // It must have additional headroom for a flush - // which can contain up to 8 bytes. - bufferSize = bufferFlushSize + 8 -) - -// Minimum length code that emits bits. -const lengthExtraBitsMinCode = 8 - -// The number of extra bits needed by length code X - LENGTH_CODES_START. -var lengthExtraBits = [32]uint8{ - /* 257 */ 0, 0, 0, - /* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, - /* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, - /* 280 */ 4, 5, 5, 5, 5, 0, -} - -// The length indicated by length code X - LENGTH_CODES_START. -var lengthBase = [32]uint8{ - 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, - 12, 14, 16, 20, 24, 28, 32, 40, 48, 56, - 64, 80, 96, 112, 128, 160, 192, 224, 255, -} - -// Minimum offset code that emits bits. -const offsetExtraBitsMinCode = 4 - -// offset code word extra bits. 
-var offsetExtraBits = [32]int8{ - 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, - 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, - 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, - /* extended window */ - 14, 14, -} - -var offsetCombined = [32]uint32{} - -func init() { - var offsetBase = [32]uint32{ - /* normal deflate */ - 0x000000, 0x000001, 0x000002, 0x000003, 0x000004, - 0x000006, 0x000008, 0x00000c, 0x000010, 0x000018, - 0x000020, 0x000030, 0x000040, 0x000060, 0x000080, - 0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300, - 0x000400, 0x000600, 0x000800, 0x000c00, 0x001000, - 0x001800, 0x002000, 0x003000, 0x004000, 0x006000, - - /* extended window */ - 0x008000, 0x00c000, - } - - for i := range offsetCombined[:] { - // Don't use extended window values... - if offsetExtraBits[i] == 0 || offsetBase[i] > 0x006000 { - continue - } - offsetCombined[i] = uint32(offsetExtraBits[i]) | (offsetBase[i] << 8) - } -} - -// The odd order in which the codegen code sizes are written. -var codegenOrder = []uint32{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15} - -type huffmanBitWriter struct { - // writer is the underlying writer. - // Do not use it directly; use the write method, which ensures - // that Write errors are sticky. - writer io.Writer - - // Data waiting to be written is bytes[0:nbytes] - // and then the low nbits of bits. - bits uint64 - nbits uint8 - nbytes uint8 - lastHuffMan bool - literalEncoding *huffmanEncoder - tmpLitEncoding *huffmanEncoder - offsetEncoding *huffmanEncoder - codegenEncoding *huffmanEncoder - err error - lastHeader int - // Set between 0 (reused block can be up to 2x the size) - logNewTablePenalty uint - bytes [256 + 8]byte - literalFreq [lengthCodesStart + 32]uint16 - offsetFreq [32]uint16 - codegenFreq [codegenCodeCount]uint16 - - // codegen must have an extra space for the final symbol. - codegen [literalCount + offsetCodeCount + 1]uint8 -} - -// Huffman reuse. -// -// The huffmanBitWriter supports reusing huffman tables and thereby combining block sections. -// -// This is controlled by several variables: -// -// If lastHeader is non-zero the Huffman table can be reused. -// This also indicates that a Huffman table has been generated that can output all -// possible symbols. -// It also indicates that an EOB has not yet been emitted, so if a new tabel is generated -// an EOB with the previous table must be written. -// -// If lastHuffMan is set, a table for outputting literals has been generated and offsets are invalid. -// -// An incoming block estimates the output size of a new table using a 'fresh' by calculating the -// optimal size and adding a penalty in 'logNewTablePenalty'. -// A Huffman table is not optimal, which is why we add a penalty, and generating a new table -// is slower both for compression and decompression. 
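```go
// Aside: the reuse heuristic described above charges a freshly generated
// table a small fraction of its own optimal size before comparing it to the
// cost of keeping the previous table. A sketch of that comparison; the bit
// counts are invented for illustration.
package main

import "fmt"

func main() {
	const logNewTablePenalty = 7
	newSize := 5000   // optimal bits for a fresh table plus data
	reuseSize := 5030 // bits when reusing the previous table
	newSize += newSize >> logNewTablePenalty // roughly a 0.8% penalty
	if newSize < reuseSize {
		fmt.Println("generate a new table")
	} else {
		fmt.Println("reuse the previous table") // wins here: 5039 >= 5030
	}
}
```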
- -func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter { - return &huffmanBitWriter{ - writer: w, - literalEncoding: newHuffmanEncoder(literalCount), - tmpLitEncoding: newHuffmanEncoder(literalCount), - codegenEncoding: newHuffmanEncoder(codegenCodeCount), - offsetEncoding: newHuffmanEncoder(offsetCodeCount), - } -} - -func (w *huffmanBitWriter) reset(writer io.Writer) { - w.writer = writer - w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil - w.lastHeader = 0 - w.lastHuffMan = false -} - -func (w *huffmanBitWriter) canReuse(t *tokens) (ok bool) { - a := t.offHist[:offsetCodeCount] - b := w.offsetEncoding.codes - b = b[:len(a)] - for i, v := range a { - if v != 0 && b[i].zero() { - return false - } - } - - a = t.extraHist[:literalCount-256] - b = w.literalEncoding.codes[256:literalCount] - b = b[:len(a)] - for i, v := range a { - if v != 0 && b[i].zero() { - return false - } - } - - a = t.litHist[:256] - b = w.literalEncoding.codes[:len(a)] - for i, v := range a { - if v != 0 && b[i].zero() { - return false - } - } - return true -} - -func (w *huffmanBitWriter) flush() { - if w.err != nil { - w.nbits = 0 - return - } - if w.lastHeader > 0 { - // We owe an EOB - w.writeCode(w.literalEncoding.codes[endBlockMarker]) - w.lastHeader = 0 - } - n := w.nbytes - for w.nbits != 0 { - w.bytes[n] = byte(w.bits) - w.bits >>= 8 - if w.nbits > 8 { // Avoid underflow - w.nbits -= 8 - } else { - w.nbits = 0 - } - n++ - } - w.bits = 0 - w.write(w.bytes[:n]) - w.nbytes = 0 -} - -func (w *huffmanBitWriter) write(b []byte) { - if w.err != nil { - return - } - _, w.err = w.writer.Write(b) -} - -func (w *huffmanBitWriter) writeBits(b int32, nb uint8) { - w.bits |= uint64(b) << (w.nbits & 63) - w.nbits += nb - if w.nbits >= 48 { - w.writeOutBits() - } -} - -func (w *huffmanBitWriter) writeBytes(bytes []byte) { - if w.err != nil { - return - } - n := w.nbytes - if w.nbits&7 != 0 { - w.err = InternalError("writeBytes with unfinished bits") - return - } - for w.nbits != 0 { - w.bytes[n] = byte(w.bits) - w.bits >>= 8 - w.nbits -= 8 - n++ - } - if n != 0 { - w.write(w.bytes[:n]) - } - w.nbytes = 0 - w.write(bytes) -} - -// RFC 1951 3.2.7 specifies a special run-length encoding for specifying -// the literal and offset lengths arrays (which are concatenated into a single -// array). This method generates that run-length encoding. -// -// The result is written into the codegen array, and the frequencies -// of each code is written into the codegenFreq array. -// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional -// information. Code badCode is an end marker -// -// numLiterals The number of literals in literalEncoding -// numOffsets The number of offsets in offsetEncoding -// litenc, offenc The literal and offset encoder to use -func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) { - for i := range w.codegenFreq { - w.codegenFreq[i] = 0 - } - // Note that we are using codegen both as a temporary variable for holding - // a copy of the frequencies, and as the place where we put the result. - // This is fine because the output is always shorter than the input used - // so far. - codegen := w.codegen[:] // cache - // Copy the concatenated code sizes to codegen. Put a marker at the end. 
- cgnl := codegen[:numLiterals] - for i := range cgnl { - cgnl[i] = litEnc.codes[i].len() - } - - cgnl = codegen[numLiterals : numLiterals+numOffsets] - for i := range cgnl { - cgnl[i] = offEnc.codes[i].len() - } - codegen[numLiterals+numOffsets] = badCode - - size := codegen[0] - count := 1 - outIndex := 0 - for inIndex := 1; size != badCode; inIndex++ { - // INVARIANT: We have seen "count" copies of size that have not yet - // had output generated for them. - nextSize := codegen[inIndex] - if nextSize == size { - count++ - continue - } - // We need to generate codegen indicating "count" of size. - if size != 0 { - codegen[outIndex] = size - outIndex++ - w.codegenFreq[size]++ - count-- - for count >= 3 { - n := 6 - if n > count { - n = count - } - codegen[outIndex] = 16 - outIndex++ - codegen[outIndex] = uint8(n - 3) - outIndex++ - w.codegenFreq[16]++ - count -= n - } - } else { - for count >= 11 { - n := 138 - if n > count { - n = count - } - codegen[outIndex] = 18 - outIndex++ - codegen[outIndex] = uint8(n - 11) - outIndex++ - w.codegenFreq[18]++ - count -= n - } - if count >= 3 { - // count >= 3 && count <= 10 - codegen[outIndex] = 17 - outIndex++ - codegen[outIndex] = uint8(count - 3) - outIndex++ - w.codegenFreq[17]++ - count = 0 - } - } - count-- - for ; count >= 0; count-- { - codegen[outIndex] = size - outIndex++ - w.codegenFreq[size]++ - } - // Set up invariant for next time through the loop. - size = nextSize - count = 1 - } - // Marker indicating the end of the codegen. - codegen[outIndex] = badCode -} - -func (w *huffmanBitWriter) codegens() int { - numCodegens := len(w.codegenFreq) - for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 { - numCodegens-- - } - return numCodegens -} - -func (w *huffmanBitWriter) headerSize() (size, numCodegens int) { - numCodegens = len(w.codegenFreq) - for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 { - numCodegens-- - } - return 3 + 5 + 5 + 4 + (3 * numCodegens) + - w.codegenEncoding.bitLength(w.codegenFreq[:]) + - int(w.codegenFreq[16])*2 + - int(w.codegenFreq[17])*3 + - int(w.codegenFreq[18])*7, numCodegens -} - -// dynamicSize returns the size of dynamically encoded data in bits. -func (w *huffmanBitWriter) dynamicReuseSize(litEnc, offEnc *huffmanEncoder) (size int) { - size = litEnc.bitLength(w.literalFreq[:]) + - offEnc.bitLength(w.offsetFreq[:]) - return size -} - -// dynamicSize returns the size of dynamically encoded data in bits. -func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) { - header, numCodegens := w.headerSize() - size = header + - litEnc.bitLength(w.literalFreq[:]) + - offEnc.bitLength(w.offsetFreq[:]) + - extraBits - return size, numCodegens -} - -// extraBitSize will return the number of bits that will be written -// as "extra" bits on matches. -func (w *huffmanBitWriter) extraBitSize() int { - total := 0 - for i, n := range w.literalFreq[257:literalCount] { - total += int(n) * int(lengthExtraBits[i&31]) - } - for i, n := range w.offsetFreq[:offsetCodeCount] { - total += int(n) * int(offsetExtraBits[i&31]) - } - return total -} - -// fixedSize returns the size of dynamically encoded data in bits. -func (w *huffmanBitWriter) fixedSize(extraBits int) int { - return 3 + - fixedLiteralEncoding.bitLength(w.literalFreq[:]) + - fixedOffsetEncoding.bitLength(w.offsetFreq[:]) + - extraBits -} - -// storedSize calculates the stored size, including header. 
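```go
// Aside: generateCodegen above implements the RFC 1951 section 3.2.7
// run-length scheme, where code 16 repeats the previous length 3-6 times,
// 17 encodes 3-10 zeros and 18 encodes 11-138 zeros. This sketch covers only
// the zero-run half; encodeZeros is a hypothetical helper name.
package main

import "fmt"

func encodeZeros(n int, out []int) []int {
	for n >= 11 {
		r := n
		if r > 138 {
			r = 138
		}
		out = append(out, 18, r-11) // code 18 plus its 7-bit repeat count
		n -= r
	}
	if n >= 3 {
		out = append(out, 17, n-3) // code 17 plus its 3-bit repeat count
		n = 0
	}
	for ; n > 0; n-- {
		out = append(out, 0) // too short for a run: emit literally
	}
	return out
}

func main() {
	fmt.Println(encodeZeros(140, nil)) // [18 127 0 0]
}
```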
-// The function returns the size in bits and whether the block -// fits inside a single block. -func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) { - if in == nil { - return 0, false - } - if len(in) <= maxStoreBlockSize { - return (len(in) + 5) * 8, true - } - return 0, false -} - -func (w *huffmanBitWriter) writeCode(c hcode) { - // The function does not get inlined if we "& 63" the shift. - w.bits |= c.code64() << (w.nbits & 63) - w.nbits += c.len() - if w.nbits >= 48 { - w.writeOutBits() - } -} - -// writeOutBits will write bits to the buffer. -func (w *huffmanBitWriter) writeOutBits() { - bits := w.bits - w.bits >>= 48 - w.nbits -= 48 - n := w.nbytes - - // We over-write, but faster... - binary.LittleEndian.PutUint64(w.bytes[n:], bits) - n += 6 - - if n >= bufferFlushSize { - if w.err != nil { - n = 0 - return - } - w.write(w.bytes[:n]) - n = 0 - } - - w.nbytes = n -} - -// Write the header of a dynamic Huffman block to the output stream. -// -// numLiterals The number of literals specified in codegen -// numOffsets The number of offsets specified in codegen -// numCodegens The number of codegens used in codegen -func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) { - if w.err != nil { - return - } - var firstBits int32 = 4 - if isEof { - firstBits = 5 - } - w.writeBits(firstBits, 3) - w.writeBits(int32(numLiterals-257), 5) - w.writeBits(int32(numOffsets-1), 5) - w.writeBits(int32(numCodegens-4), 4) - - for i := 0; i < numCodegens; i++ { - value := uint(w.codegenEncoding.codes[codegenOrder[i]].len()) - w.writeBits(int32(value), 3) - } - - i := 0 - for { - var codeWord = uint32(w.codegen[i]) - i++ - if codeWord == badCode { - break - } - w.writeCode(w.codegenEncoding.codes[codeWord]) - - switch codeWord { - case 16: - w.writeBits(int32(w.codegen[i]), 2) - i++ - case 17: - w.writeBits(int32(w.codegen[i]), 3) - i++ - case 18: - w.writeBits(int32(w.codegen[i]), 7) - i++ - } - } -} - -// writeStoredHeader will write a stored header. -// If the stored block is only used for EOF, -// it is replaced with a fixed huffman block. -func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) { - if w.err != nil { - return - } - if w.lastHeader > 0 { - // We owe an EOB - w.writeCode(w.literalEncoding.codes[endBlockMarker]) - w.lastHeader = 0 - } - - // To write EOF, use a fixed encoding block. 10 bits instead of 5 bytes. - if length == 0 && isEof { - w.writeFixedHeader(isEof) - // EOB: 7 bits, value: 0 - w.writeBits(0, 7) - w.flush() - return - } - - var flag int32 - if isEof { - flag = 1 - } - w.writeBits(flag, 3) - w.flush() - w.writeBits(int32(length), 16) - w.writeBits(int32(^uint16(length)), 16) -} - -func (w *huffmanBitWriter) writeFixedHeader(isEof bool) { - if w.err != nil { - return - } - if w.lastHeader > 0 { - // We owe an EOB - w.writeCode(w.literalEncoding.codes[endBlockMarker]) - w.lastHeader = 0 - } - - // Indicate that we are a fixed Huffman block - var value int32 = 2 - if isEof { - value = 3 - } - w.writeBits(value, 3) -} - -// writeBlock will write a block of tokens with the smallest encoding. -// The original input can be supplied, and if the huffman encoded data -// is larger than the original bytes, the data will be written as a -// stored block. -// If the input is nil, the tokens will always be Huffman encoded. 
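```go
// Aside: writeBlock (next) picks the cheapest of the three DEFLATE block
// types. A schematic of that comparison in bits; the stored size follows the
// (len(input) + 5) * 8 rule from storedSize above, while the fixed and
// dynamic figures are invented for illustration.
package main

import "fmt"

func main() {
	inputLen := 1000
	stored := (inputLen + 5) * 8 // five header bytes per stored block
	fixed := 7300                // tokens under the fixed Huffman tables
	dynamic := 6900              // dynamic tables, including the table header

	best, size := "fixed", fixed
	if dynamic < size {
		best, size = "dynamic", dynamic
	}
	if stored <= size { // like the code above, ties go to the stored form
		best, size = "stored", stored
	}
	fmt.Println(best, "block,", size, "bits")
}
```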
-func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) { - if w.err != nil { - return - } - - tokens.AddEOB() - if w.lastHeader > 0 { - // We owe an EOB - w.writeCode(w.literalEncoding.codes[endBlockMarker]) - w.lastHeader = 0 - } - numLiterals, numOffsets := w.indexTokens(tokens, false) - w.generate() - var extraBits int - storedSize, storable := w.storedSize(input) - if storable { - extraBits = w.extraBitSize() - } - - // Figure out smallest code. - // Fixed Huffman baseline. - var literalEncoding = fixedLiteralEncoding - var offsetEncoding = fixedOffsetEncoding - var size = math.MaxInt32 - if tokens.n < maxPredefinedTokens { - size = w.fixedSize(extraBits) - } - - // Dynamic Huffman? - var numCodegens int - - // Generate codegen and codegenFrequencies, which indicates how to encode - // the literalEncoding and the offsetEncoding. - w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) - w.codegenEncoding.generate(w.codegenFreq[:], 7) - dynamicSize, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits) - - if dynamicSize < size { - size = dynamicSize - literalEncoding = w.literalEncoding - offsetEncoding = w.offsetEncoding - } - - // Stored bytes? - if storable && storedSize <= size { - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) - return - } - - // Huffman. - if literalEncoding == fixedLiteralEncoding { - w.writeFixedHeader(eof) - } else { - w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) - } - - // Write the tokens. - w.writeTokens(tokens.Slice(), literalEncoding.codes, offsetEncoding.codes) -} - -// writeBlockDynamic encodes a block using a dynamic Huffman table. -// This should be used if the symbols used have a disproportionate -// histogram distribution. -// If input is supplied and the compression savings are below 1/16th of the -// input size the block is stored. -func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []byte, sync bool) { - if w.err != nil { - return - } - - sync = sync || eof - if sync { - tokens.AddEOB() - } - - // We cannot reuse pure huffman table, and must mark as EOF. - if (w.lastHuffMan || eof) && w.lastHeader > 0 { - // We will not try to reuse. - w.writeCode(w.literalEncoding.codes[endBlockMarker]) - w.lastHeader = 0 - w.lastHuffMan = false - } - - // fillReuse enables filling of empty values. - // This will make encodings always reusable without testing. - // However, this does not appear to benefit on most cases. - const fillReuse = false - - // Check if we can reuse... - if !fillReuse && w.lastHeader > 0 && !w.canReuse(tokens) { - w.writeCode(w.literalEncoding.codes[endBlockMarker]) - w.lastHeader = 0 - } - - numLiterals, numOffsets := w.indexTokens(tokens, !sync) - extraBits := 0 - ssize, storable := w.storedSize(input) - - const usePrefs = true - if storable || w.lastHeader > 0 { - extraBits = w.extraBitSize() - } - - var size int - - // Check if we should reuse. - if w.lastHeader > 0 { - // Estimate size for using a new table. - // Use the previous header size as the best estimate. - newSize := w.lastHeader + tokens.EstimatedBits() - newSize += int(w.literalEncoding.codes[endBlockMarker].len()) + newSize>>w.logNewTablePenalty - - // The estimated size is calculated as an optimal table. - // We add a penalty to make it more realistic and re-use a bit more. - reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + extraBits - - // Check if a new table is better. 
- if newSize < reuseSize { - // Write the EOB we owe. - w.writeCode(w.literalEncoding.codes[endBlockMarker]) - size = newSize - w.lastHeader = 0 - } else { - size = reuseSize - } - - if tokens.n < maxPredefinedTokens { - if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size { - // Check if we get a reasonable size decrease. - if storable && ssize <= size { - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) - return - } - w.writeFixedHeader(eof) - if !sync { - tokens.AddEOB() - } - w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) - return - } - } - // Check if we get a reasonable size decrease. - if storable && ssize <= size { - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) - return - } - } - - // We want a new block/table - if w.lastHeader == 0 { - if fillReuse && !sync { - w.fillTokens() - numLiterals, numOffsets = maxNumLit, maxNumDist - } else { - w.literalFreq[endBlockMarker] = 1 - } - - w.generate() - // Generate codegen and codegenFrequencies, which indicates how to encode - // the literalEncoding and the offsetEncoding. - w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) - w.codegenEncoding.generate(w.codegenFreq[:], 7) - - var numCodegens int - if fillReuse && !sync { - // Reindex for accurate size... - w.indexTokens(tokens, true) - } - size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits) - - // Store predefined, if we don't get a reasonable improvement. - if tokens.n < maxPredefinedTokens { - if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size { - // Store bytes, if we don't get an improvement. - if storable && ssize <= preSize { - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) - return - } - w.writeFixedHeader(eof) - if !sync { - tokens.AddEOB() - } - w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes) - return - } - } - - if storable && ssize <= size { - // Store bytes, if we don't get an improvement. - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) - return - } - - // Write Huffman table. - w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) - if !sync { - w.lastHeader, _ = w.headerSize() - } - w.lastHuffMan = false - } - - if sync { - w.lastHeader = 0 - } - // Write the tokens. - w.writeTokens(tokens.Slice(), w.literalEncoding.codes, w.offsetEncoding.codes) -} - -func (w *huffmanBitWriter) fillTokens() { - for i, v := range w.literalFreq[:literalCount] { - if v == 0 { - w.literalFreq[i] = 1 - } - } - for i, v := range w.offsetFreq[:offsetCodeCount] { - if v == 0 { - w.offsetFreq[i] = 1 - } - } -} - -// indexTokens indexes a slice of tokens, and updates -// literalFreq and offsetFreq, and generates literalEncoding -// and offsetEncoding. -// The number of literal and offset tokens is returned. 
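```go
// Aside: indexTokens (below) trims trailing zero frequencies so the encoded
// tables only cover symbols that actually occur. A minimal version of that
// backwards scan; trim is a hypothetical helper name.
package main

import "fmt"

func trim(freq []uint16) int {
	n := len(freq)
	for n > 0 && freq[n-1] == 0 {
		n--
	}
	return n
}

func main() {
	fmt.Println(trim([]uint16{3, 0, 7, 0, 0})) // 3: the last two entries drop
}
```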
-func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, numOffsets int) { - //copy(w.literalFreq[:], t.litHist[:]) - *(*[256]uint16)(w.literalFreq[:]) = t.litHist - //copy(w.literalFreq[256:], t.extraHist[:]) - *(*[32]uint16)(w.literalFreq[256:]) = t.extraHist - w.offsetFreq = t.offHist - - if t.n == 0 { - return - } - if filled { - return maxNumLit, maxNumDist - } - // get the number of literals - numLiterals = len(w.literalFreq) - for w.literalFreq[numLiterals-1] == 0 { - numLiterals-- - } - // get the number of offsets - numOffsets = len(w.offsetFreq) - for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 { - numOffsets-- - } - if numOffsets == 0 { - // We haven't found a single match. If we want to go with the dynamic encoding, - // we should count at least one offset to be sure that the offset huffman tree could be encoded. - w.offsetFreq[0] = 1 - numOffsets = 1 - } - return -} - -func (w *huffmanBitWriter) generate() { - w.literalEncoding.generate(w.literalFreq[:literalCount], 15) - w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15) -} - -// writeTokens writes a slice of tokens to the output. -// codes for literal and offset encoding must be supplied. -func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) { - if w.err != nil { - return - } - if len(tokens) == 0 { - return - } - - // Only last token should be endBlockMarker. - var deferEOB bool - if tokens[len(tokens)-1] == endBlockMarker { - tokens = tokens[:len(tokens)-1] - deferEOB = true - } - - // Create slices up to the next power of two to avoid bounds checks. - lits := leCodes[:256] - offs := oeCodes[:32] - lengths := leCodes[lengthCodesStart:] - lengths = lengths[:32] - - // Go 1.16 LOVES having these on stack. - bits, nbits, nbytes := w.bits, w.nbits, w.nbytes - - for _, t := range tokens { - if t < 256 { - //w.writeCode(lits[t.literal()]) - c := lits[t] - bits |= c.code64() << (nbits & 63) - nbits += c.len() - if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits - bits >>= 48 - nbits -= 48 - nbytes += 6 - if nbytes >= bufferFlushSize { - if w.err != nil { - nbytes = 0 - return - } - _, w.err = w.writer.Write(w.bytes[:nbytes]) - nbytes = 0 - } - } - continue - } - - // Write the length - length := t.length() - lengthCode := lengthCode(length) & 31 - if false { - w.writeCode(lengths[lengthCode]) - } else { - // inlined - c := lengths[lengthCode] - bits |= c.code64() << (nbits & 63) - nbits += c.len() - if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits - bits >>= 48 - nbits -= 48 - nbytes += 6 - if nbytes >= bufferFlushSize { - if w.err != nil { - nbytes = 0 - return - } - _, w.err = w.writer.Write(w.bytes[:nbytes]) - nbytes = 0 - } - } - } - - if lengthCode >= lengthExtraBitsMinCode { - extraLengthBits := lengthExtraBits[lengthCode] - //w.writeBits(extraLength, extraLengthBits) - extraLength := int32(length - lengthBase[lengthCode]) - bits |= uint64(extraLength) << (nbits & 63) - nbits += extraLengthBits - if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits - bits >>= 48 - nbits -= 48 - nbytes += 6 - if nbytes >= bufferFlushSize { - if w.err != nil { - nbytes = 0 - return - } - _, w.err = w.writer.Write(w.bytes[:nbytes]) - nbytes = 0 - } - } - } - // Write the offset - offset := t.offset() - offsetCode := (offset >> 16) & 31 - if false { 
- w.writeCode(offs[offsetCode]) - } else { - // inlined - c := offs[offsetCode] - bits |= c.code64() << (nbits & 63) - nbits += c.len() - if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits - bits >>= 48 - nbits -= 48 - nbytes += 6 - if nbytes >= bufferFlushSize { - if w.err != nil { - nbytes = 0 - return - } - _, w.err = w.writer.Write(w.bytes[:nbytes]) - nbytes = 0 - } - } - } - - if offsetCode >= offsetExtraBitsMinCode { - offsetComb := offsetCombined[offsetCode] - //w.writeBits(extraOffset, extraOffsetBits) - bits |= uint64((offset-(offsetComb>>8))&matchOffsetOnlyMask) << (nbits & 63) - nbits += uint8(offsetComb) - if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits - bits >>= 48 - nbits -= 48 - nbytes += 6 - if nbytes >= bufferFlushSize { - if w.err != nil { - nbytes = 0 - return - } - _, w.err = w.writer.Write(w.bytes[:nbytes]) - nbytes = 0 - } - } - } - } - // Restore... - w.bits, w.nbits, w.nbytes = bits, nbits, nbytes - - if deferEOB { - w.writeCode(leCodes[endBlockMarker]) - } -} - -// huffOffset is a static offset encoder used for huffman only encoding. -// It can be reused since we will not be encoding offset values. -var huffOffset *huffmanEncoder - -func init() { - w := newHuffmanBitWriter(nil) - w.offsetFreq[0] = 1 - huffOffset = newHuffmanEncoder(offsetCodeCount) - huffOffset.generate(w.offsetFreq[:offsetCodeCount], 15) -} - -// writeBlockHuff encodes a block of bytes as either -// Huffman encoded literals or uncompressed bytes if the -// results only gains very little from compression. -func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { - if w.err != nil { - return - } - - // Clear histogram - for i := range w.literalFreq[:] { - w.literalFreq[i] = 0 - } - if !w.lastHuffMan { - for i := range w.offsetFreq[:] { - w.offsetFreq[i] = 0 - } - } - - const numLiterals = endBlockMarker + 1 - const numOffsets = 1 - - // Add everything as literals - // We have to estimate the header size. - // Assume header is around 70 bytes: - // https://stackoverflow.com/a/25454430 - const guessHeaderSizeBits = 70 * 8 - histogram(input, w.literalFreq[:numLiterals]) - ssize, storable := w.storedSize(input) - if storable && len(input) > 1024 { - // Quick check for incompressible content. - abs := float64(0) - avg := float64(len(input)) / 256 - max := float64(len(input) * 2) - for _, v := range w.literalFreq[:256] { - diff := float64(v) - avg - abs += diff * diff - if abs > max { - break - } - } - if abs < max { - if debugDeflate { - fmt.Println("stored", abs, "<", max) - } - // No chance we can compress this... - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) - return - } - } - w.literalFreq[endBlockMarker] = 1 - w.tmpLitEncoding.generate(w.literalFreq[:numLiterals], 15) - estBits := w.tmpLitEncoding.canReuseBits(w.literalFreq[:numLiterals]) - if estBits < math.MaxInt32 { - estBits += w.lastHeader - if w.lastHeader == 0 { - estBits += guessHeaderSizeBits - } - estBits += estBits >> w.logNewTablePenalty - } - - // Store bytes, if we don't get a reasonable improvement. 
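```go
// Aside: the quick incompressibility probe earlier in writeBlockHuff compares
// the byte histogram's squared deviation from a flat distribution against a
// threshold of 2*len(input). Skewed input blows past it almost immediately;
// looksCompressible is a hypothetical name for the same test.
package main

import (
	"bytes"
	"fmt"
)

func looksCompressible(input []byte) bool {
	var freq [256]int
	for _, b := range input {
		freq[b]++
	}
	avg := float64(len(input)) / 256
	max := float64(len(input) * 2)
	var abs float64
	for _, v := range freq {
		d := float64(v) - avg
		abs += d * d
		if abs > max {
			return true // histogram is skewed: worth compressing
		}
	}
	return false // near-uniform histogram: likely incompressible
}

func main() {
	fmt.Println(looksCompressible(bytes.Repeat([]byte("ab"), 1024))) // true
}
```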
- if storable && ssize <= estBits { - if debugDeflate { - fmt.Println("stored,", ssize, "<=", estBits) - } - w.writeStoredHeader(len(input), eof) - w.writeBytes(input) - return - } - - if w.lastHeader > 0 { - reuseSize := w.literalEncoding.canReuseBits(w.literalFreq[:256]) - - if estBits < reuseSize { - if debugDeflate { - fmt.Println("NOT reusing, reuse:", reuseSize/8, "> new:", estBits/8, "header est:", w.lastHeader/8, "bytes") - } - // We owe an EOB - w.writeCode(w.literalEncoding.codes[endBlockMarker]) - w.lastHeader = 0 - } else if debugDeflate { - fmt.Println("reusing, reuse:", reuseSize/8, "> new:", estBits/8, "- header est:", w.lastHeader/8) - } - } - - count := 0 - if w.lastHeader == 0 { - // Use the temp encoding, so swap. - w.literalEncoding, w.tmpLitEncoding = w.tmpLitEncoding, w.literalEncoding - // Generate codegen and codegenFrequencies, which indicates how to encode - // the literalEncoding and the offsetEncoding. - w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset) - w.codegenEncoding.generate(w.codegenFreq[:], 7) - numCodegens := w.codegens() - - // Huffman. - w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) - w.lastHuffMan = true - w.lastHeader, _ = w.headerSize() - if debugDeflate { - count += w.lastHeader - fmt.Println("header:", count/8) - } - } - - encoding := w.literalEncoding.codes[:256] - // Go 1.16 LOVES having these on stack. At least 1.5x the speed. - bits, nbits, nbytes := w.bits, w.nbits, w.nbytes - - if debugDeflate { - count -= int(nbytes)*8 + int(nbits) - } - // Unroll, write 3 codes/loop. - // Fastest number of unrolls. - for len(input) > 3 { - // We must have at least 48 bits free. - if nbits >= 8 { - n := nbits >> 3 - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) - bits >>= (n * 8) & 63 - nbits -= n * 8 - nbytes += n - } - if nbytes >= bufferFlushSize { - if w.err != nil { - nbytes = 0 - return - } - if debugDeflate { - count += int(nbytes) * 8 - } - _, w.err = w.writer.Write(w.bytes[:nbytes]) - nbytes = 0 - } - a, b := encoding[input[0]], encoding[input[1]] - bits |= a.code64() << (nbits & 63) - bits |= b.code64() << ((nbits + a.len()) & 63) - c := encoding[input[2]] - nbits += b.len() + a.len() - bits |= c.code64() << (nbits & 63) - nbits += c.len() - input = input[3:] - } - - // Remaining... - for _, t := range input { - if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits - bits >>= 48 - nbits -= 48 - nbytes += 6 - if nbytes >= bufferFlushSize { - if w.err != nil { - nbytes = 0 - return - } - if debugDeflate { - count += int(nbytes) * 8 - } - _, w.err = w.writer.Write(w.bytes[:nbytes]) - nbytes = 0 - } - } - // Bitwriting inlined, ~30% speedup - c := encoding[t] - bits |= c.code64() << (nbits & 63) - - nbits += c.len() - if debugDeflate { - count += int(c.len()) - } - } - // Restore... - w.bits, w.nbits, w.nbytes = bits, nbits, nbytes - - if debugDeflate { - nb := count + int(nbytes)*8 + int(nbits) - fmt.Println("wrote", nb, "bits,", nb/8, "bytes.") - } - // Flush if needed to have space. 
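```go
// Aside: the flush pattern used throughout this writer packs variable-length
// codes into a 64-bit accumulator and spills six bytes whenever 48 bits have
// accumulated. A reduced, runnable version with a fictitious 9-bit code:
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	var (
		bits  uint64
		nbits uint8
		out   []byte
		buf   [8]byte
	)
	emit := func(code uint64, n uint8) {
		bits |= code << (nbits & 63)
		nbits += n
		if nbits >= 48 {
			// Over-write eight bytes but only keep six, as writeOutBits does.
			binary.LittleEndian.PutUint64(buf[:], bits)
			out = append(out, buf[:6]...)
			bits >>= 48
			nbits -= 48
		}
	}
	for i := 0; i < 12; i++ {
		emit(0x155, 9)
	}
	fmt.Println(len(out), "bytes spilled,", nbits, "bits pending") // 12 and 12
}
```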
-	if w.nbits >= 48 {
-		w.writeOutBits()
-	}
-
-	if eof || sync {
-		w.writeCode(w.literalEncoding.codes[endBlockMarker])
-		w.lastHeader = 0
-		w.lastHuffMan = false
-	}
-}
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go
deleted file mode 100644
index be7b58b4..00000000
--- a/vendor/github.com/klauspost/compress/flate/huffman_code.go
+++ /dev/null
@@ -1,417 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package flate
-
-import (
-	"math"
-	"math/bits"
-)
-
-const (
-	maxBitsLimit = 16
-	// number of valid literals
-	literalCount = 286
-)
-
-// hcode is a huffman code with a bit code and bit length.
-type hcode uint32
-
-func (h hcode) len() uint8 {
-	return uint8(h)
-}
-
-func (h hcode) code64() uint64 {
-	return uint64(h >> 8)
-}
-
-func (h hcode) zero() bool {
-	return h == 0
-}
-
-type huffmanEncoder struct {
-	codes    []hcode
-	bitCount [17]int32
-
-	// Allocate a reusable buffer with the longest possible frequency table.
-	// Possible lengths are codegenCodeCount, offsetCodeCount and literalCount.
-	// The largest of these is literalCount, so we allocate for that case.
-	freqcache [literalCount + 1]literalNode
-}
-
-type literalNode struct {
-	literal uint16
-	freq    uint16
-}
-
-// A levelInfo describes the state of the constructed tree for a given depth.
-type levelInfo struct {
-	// Our level. for better printing
-	level int32
-
-	// The frequency of the last node at this level
-	lastFreq int32
-
-	// The frequency of the next character to add to this level
-	nextCharFreq int32
-
-	// The frequency of the next pair (from level below) to add to this level.
-	// Only valid if the "needed" value of the next lower level is 0.
-	nextPairFreq int32
-
-	// The number of chains remaining to generate for this level before moving
-	// up to the next level
-	needed int32
-}
-
-// set sets the code and length of an hcode.
-func (h *hcode) set(code uint16, length uint8) {
-	*h = hcode(length) | (hcode(code) << 8)
-}
-
-func newhcode(code uint16, length uint8) hcode {
-	return hcode(length) | (hcode(code) << 8)
-}
-
-func reverseBits(number uint16, bitLength byte) uint16 {
-	return bits.Reverse16(number << ((16 - bitLength) & 15))
-}
-
-func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxUint16} }
-
-func newHuffmanEncoder(size int) *huffmanEncoder {
-	// Make capacity to next power of two.
-	c := uint(bits.Len32(uint32(size - 1)))
-	return &huffmanEncoder{codes: make([]hcode, size, 1<<c)}
-}
-
-// Generates a HuffmanCode corresponding to the fixed literal table
-func generateFixedLiteralEncoding() *huffmanEncoder {
-	h := newHuffmanEncoder(literalCount)
-	codes := h.codes
-	var ch uint16
-	for ch = 0; ch < literalCount; ch++ {
-		var bits uint16
-		var size uint8
-		switch {
-		case ch < 144:
-			// size 8, 000110000 .. 10111111
-			bits = ch + 48
-			size = 8
-		case ch < 256:
-			// size 9, 110010000 .. 111111111
-			bits = ch + 400 - 144
-			size = 9
-		case ch < 280:
-			// size 7, 0000000 .. 0010111
-			bits = ch - 256
-			size = 7
-		default:
-			// size 8, 11000000 .. 11000111
-			bits = ch + 192 - 280
-			size = 8
-		}
-		codes[ch] = newhcode(reverseBits(bits, size), size)
-	}
-	return h
-}
-
-func generateFixedOffsetEncoding() *huffmanEncoder {
-	h := newHuffmanEncoder(30)
-	codes := h.codes
-	for ch := range codes {
-		codes[ch] = newhcode(reverseBits(uint16(ch), 5), 5)
-	}
-	return h
-}
-
-var fixedLiteralEncoding = generateFixedLiteralEncoding()
-var fixedOffsetEncoding = generateFixedOffsetEncoding()
-
-func (h *huffmanEncoder) bitLength(freq []uint16) int {
-	var total int
-	for i, f := range freq {
-		if f != 0 {
-			total += int(f) * int(h.codes[i].len())
-		}
-	}
-	return total
-}
-
-func (h *huffmanEncoder) bitLengthRaw(b []byte) int {
-	var total int
-	for _, f := range b {
-		total += int(h.codes[f].len())
-	}
-	return total
-}
-
-// canReuseBits returns the number of bits or math.MaxInt32 if the encoder cannot be reused.
-func (h *huffmanEncoder) canReuseBits(freq []uint16) int {
-	var total int
-	for i, f := range freq {
-		if f != 0 {
-			code := h.codes[i]
-			if code.zero() {
-				return math.MaxInt32
-			}
-			total += int(f) * int(code.len())
-		}
-	}
-	return total
-}
-
-// Return the number of literals assigned to each bit size in the Huffman encoding
-//
-// This method is only called when list.length >= 3
-// The cases of 0, 1, and 2 literals are handled by special case code.
-//
-// list An array of the literals with non-zero frequencies
-//
-// and their associated frequencies. The array is in order of increasing
-// frequency, and has as its last element a special element with frequency
-// MaxInt32
-//
-// maxBits The maximum number of bits that should be used to encode any literal.
-//
-// Must be less than 16.
-//
-// return An integer array in which array[i] indicates the number of literals
-//
-// that should be encoded in i bits.
-func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
-	if maxBits >= maxBitsLimit {
-		panic("flate: maxBits too large")
-	}
-	n := int32(len(list))
-	list = list[0 : n+1]
-	list[n] = maxNode()
-
-	// The tree can't have greater depth than n - 1, no matter what.
This - // saves a little bit of work in some small cases - if maxBits > n-1 { - maxBits = n - 1 - } - - // Create information about each of the levels. - // A bogus "Level 0" whose sole purpose is so that - // level1.prev.needed==0. This makes level1.nextPairFreq - // be a legitimate value that never gets chosen. - var levels [maxBitsLimit]levelInfo - // leafCounts[i] counts the number of literals at the left - // of ancestors of the rightmost node at level i. - // leafCounts[i][j] is the number of literals at the left - // of the level j ancestor. - var leafCounts [maxBitsLimit][maxBitsLimit]int32 - - // Descending to only have 1 bounds check. - l2f := int32(list[2].freq) - l1f := int32(list[1].freq) - l0f := int32(list[0].freq) + int32(list[1].freq) - - for level := int32(1); level <= maxBits; level++ { - // For every level, the first two items are the first two characters. - // We initialize the levels as if we had already figured this out. - levels[level] = levelInfo{ - level: level, - lastFreq: l1f, - nextCharFreq: l2f, - nextPairFreq: l0f, - } - leafCounts[level][level] = 2 - if level == 1 { - levels[level].nextPairFreq = math.MaxInt32 - } - } - - // We need a total of 2*n - 2 items at top level and have already generated 2. - levels[maxBits].needed = 2*n - 4 - - level := uint32(maxBits) - for level < 16 { - l := &levels[level] - if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 { - // We've run out of both leafs and pairs. - // End all calculations for this level. - // To make sure we never come back to this level or any lower level, - // set nextPairFreq impossibly large. - l.needed = 0 - levels[level+1].nextPairFreq = math.MaxInt32 - level++ - continue - } - - prevFreq := l.lastFreq - if l.nextCharFreq < l.nextPairFreq { - // The next item on this row is a leaf node. - n := leafCounts[level][level] + 1 - l.lastFreq = l.nextCharFreq - // Lower leafCounts are the same of the previous node. - leafCounts[level][level] = n - e := list[n] - if e.literal < math.MaxUint16 { - l.nextCharFreq = int32(e.freq) - } else { - l.nextCharFreq = math.MaxInt32 - } - } else { - // The next item on this row is a pair from the previous row. - // nextPairFreq isn't valid until we generate two - // more values in the level below - l.lastFreq = l.nextPairFreq - // Take leaf counts from the lower level, except counts[level] remains the same. - if true { - save := leafCounts[level][level] - leafCounts[level] = leafCounts[level-1] - leafCounts[level][level] = save - } else { - copy(leafCounts[level][:level], leafCounts[level-1][:level]) - } - levels[l.level-1].needed = 2 - } - - if l.needed--; l.needed == 0 { - // We've done everything we need to do for this level. - // Continue calculating one level up. Fill in nextPairFreq - // of that level with the sum of the two nodes we've just calculated on - // this level. - if l.level == maxBits { - // All done! - break - } - levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq - level++ - } else { - // If we stole from below, move down temporarily to replenish it. - for levels[level-1].needed > 0 { - level-- - } - } - } - - // Somethings is wrong if at the end, the top level is null or hasn't used - // all of the leaves. - if leafCounts[maxBits][maxBits] != n { - panic("leafCounts[maxBits][maxBits] != n") - } - - bitCount := h.bitCount[:maxBits+1] - bits := 1 - counts := &leafCounts[maxBits] - for level := maxBits; level > 0; level-- { - // chain.leafCount gives the number of literals requiring at least "bits" - // bits to encode. 
- bitCount[bits] = counts[level] - counts[level-1] - bits++ - } - return bitCount -} - -// Look at the leaves and assign them a bit count and an encoding as specified -// in RFC 1951 3.2.2 -func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) { - code := uint16(0) - for n, bits := range bitCount { - code <<= 1 - if n == 0 || bits == 0 { - continue - } - // The literals list[len(list)-bits] .. list[len(list)-bits] - // are encoded using "bits" bits, and get the values - // code, code + 1, .... The code values are - // assigned in literal order (not frequency order). - chunk := list[len(list)-int(bits):] - - sortByLiteral(chunk) - for _, node := range chunk { - h.codes[node.literal] = newhcode(reverseBits(code, uint8(n)), uint8(n)) - code++ - } - list = list[0 : len(list)-int(bits)] - } -} - -// Update this Huffman Code object to be the minimum code for the specified frequency count. -// -// freq An array of frequencies, in which frequency[i] gives the frequency of literal i. -// maxBits The maximum number of bits to use for any literal. -func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) { - list := h.freqcache[:len(freq)+1] - codes := h.codes[:len(freq)] - // Number of non-zero literals - count := 0 - // Set list to be the set of all non-zero literals and their frequencies - for i, f := range freq { - if f != 0 { - list[count] = literalNode{uint16(i), f} - count++ - } else { - codes[i] = 0 - } - } - list[count] = literalNode{} - - list = list[:count] - if count <= 2 { - // Handle the small cases here, because they are awkward for the general case code. With - // two or fewer literals, everything has bit length 1. - for i, node := range list { - // "list" is in order of increasing literal value. - h.codes[node.literal].set(uint16(i), 1) - } - return - } - sortByFreq(list) - - // Get the number of literals for each bit count - bitCount := h.bitCounts(list, maxBits) - // And do the assignment - h.assignEncodingAndSize(bitCount, list) -} - -// atLeastOne clamps the result between 1 and 15. -func atLeastOne(v float32) float32 { - if v < 1 { - return 1 - } - if v > 15 { - return 15 - } - return v -} - -func histogram(b []byte, h []uint16) { - if true && len(b) >= 8<<10 { - // Split for bigger inputs - histogramSplit(b, h) - } else { - h = h[:256] - for _, t := range b { - h[t]++ - } - } -} - -func histogramSplit(b []byte, h []uint16) { - // Tested, and slightly faster than 2-way. - // Writing to separate arrays and combining is also slightly slower. - h = h[:256] - for len(b)&3 != 0 { - h[b[0]]++ - b = b[1:] - } - n := len(b) / 4 - x, y, z, w := b[:n], b[n:], b[n+n:], b[n+n+n:] - y, z, w = y[:len(x)], z[:len(x)], w[:len(x)] - for i, t := range x { - v0 := &h[t] - v1 := &h[y[i]] - v3 := &h[w[i]] - v2 := &h[z[i]] - *v0++ - *v1++ - *v2++ - *v3++ - } -} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go b/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go deleted file mode 100644 index 20778029..00000000 --- a/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flate - -// Sort sorts data. -// It makes one call to data.Len to determine n, and O(n*log(n)) calls to -// data.Less and data.Swap. The sort is not guaranteed to be stable. 
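bitCounts and assignEncodingAndSize above together implement canonical Huffman assignment: bitCount[i] literals get i-bit codes, codes within one length are consecutive integers in symbol order, and each code is bit-reversed because DEFLATE emits codes LSB first. The same construction written directly from RFC 1951 section 3.2.2; this is an equivalent standalone sketch (hypothetical name), not a transcription of the deleted functions:

    package main

    import (
        "fmt"
        "math/bits"
    )

    // canonicalCodes derives LSB-first canonical codes from per-symbol
    // code lengths, matching what generate/assignEncodingAndSize produce.
    func canonicalCodes(lengths []uint8) []uint16 {
        var count [16]int
        for _, l := range lengths {
            count[l]++
        }
        count[0] = 0 // zero-length symbols get no code
        var next [16]uint16
        code := uint16(0)
        for n := 1; n < 16; n++ {
            code = (code + uint16(count[n-1])) << 1
            next[n] = code
        }
        out := make([]uint16, len(lengths))
        for sym, l := range lengths {
            if l == 0 {
                continue
            }
            c := next[l]
            next[l]++
            // reverse into LSB-first bit order, as the encoder stores it
            out[sym] = bits.Reverse16(c << (16 - l))
        }
        return out
    }

    func main() {
        // lengths 2,1,3,3 reproduce the RFC 1951 example (B=0, A=10, C=110, D=111)
        for sym, c := range canonicalCodes([]uint8{2, 1, 3, 3}) {
            fmt.Printf("sym %d: code %b\n", sym, c)
        }
    }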
-func sortByFreq(data []literalNode) { - n := len(data) - quickSortByFreq(data, 0, n, maxDepth(n)) -} - -func quickSortByFreq(data []literalNode, a, b, maxDepth int) { - for b-a > 12 { // Use ShellSort for slices <= 12 elements - if maxDepth == 0 { - heapSort(data, a, b) - return - } - maxDepth-- - mlo, mhi := doPivotByFreq(data, a, b) - // Avoiding recursion on the larger subproblem guarantees - // a stack depth of at most lg(b-a). - if mlo-a < b-mhi { - quickSortByFreq(data, a, mlo, maxDepth) - a = mhi // i.e., quickSortByFreq(data, mhi, b) - } else { - quickSortByFreq(data, mhi, b, maxDepth) - b = mlo // i.e., quickSortByFreq(data, a, mlo) - } - } - if b-a > 1 { - // Do ShellSort pass with gap 6 - // It could be written in this simplified form cause b-a <= 12 - for i := a + 6; i < b; i++ { - if data[i].freq == data[i-6].freq && data[i].literal < data[i-6].literal || data[i].freq < data[i-6].freq { - data[i], data[i-6] = data[i-6], data[i] - } - } - insertionSortByFreq(data, a, b) - } -} - -// siftDownByFreq implements the heap property on data[lo, hi). -// first is an offset into the array where the root of the heap lies. -func siftDownByFreq(data []literalNode, lo, hi, first int) { - root := lo - for { - child := 2*root + 1 - if child >= hi { - break - } - if child+1 < hi && (data[first+child].freq == data[first+child+1].freq && data[first+child].literal < data[first+child+1].literal || data[first+child].freq < data[first+child+1].freq) { - child++ - } - if data[first+root].freq == data[first+child].freq && data[first+root].literal > data[first+child].literal || data[first+root].freq > data[first+child].freq { - return - } - data[first+root], data[first+child] = data[first+child], data[first+root] - root = child - } -} -func doPivotByFreq(data []literalNode, lo, hi int) (midlo, midhi int) { - m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow. - if hi-lo > 40 { - // Tukey's ``Ninther,'' median of three medians of three. - s := (hi - lo) / 8 - medianOfThreeSortByFreq(data, lo, lo+s, lo+2*s) - medianOfThreeSortByFreq(data, m, m-s, m+s) - medianOfThreeSortByFreq(data, hi-1, hi-1-s, hi-1-2*s) - } - medianOfThreeSortByFreq(data, lo, m, hi-1) - - // Invariants are: - // data[lo] = pivot (set up by ChoosePivot) - // data[lo < i < a] < pivot - // data[a <= i < b] <= pivot - // data[b <= i < c] unexamined - // data[c <= i < hi-1] > pivot - // data[hi-1] >= pivot - pivot := lo - a, c := lo+1, hi-1 - - for ; a < c && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ { - } - b := a - for { - for ; b < c && (data[pivot].freq == data[b].freq && data[pivot].literal > data[b].literal || data[pivot].freq > data[b].freq); b++ { // data[b] <= pivot - } - for ; b < c && (data[pivot].freq == data[c-1].freq && data[pivot].literal < data[c-1].literal || data[pivot].freq < data[c-1].freq); c-- { // data[c-1] > pivot - } - if b >= c { - break - } - // data[b] > pivot; data[c-1] <= pivot - data[b], data[c-1] = data[c-1], data[b] - b++ - c-- - } - // If hi-c<3 then there are duplicates (by property of median of nine). - // Let's be a bit more conservative, and set border to 5. 
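The repeated three-way comparisons in this file all encode a single ordering: ascending frequency, with ties broken by literal value, hand-inlined to avoid sort.Interface call overhead. The equivalent, stated once with sort.Slice for readability (a sketch; the vendored code deliberately avoids this form for speed):

    package main

    import (
        "fmt"
        "sort"
    )

    type literalNode struct {
        literal uint16
        freq    uint16
    }

    // sortByFreqSimple is the readable equivalent of the specialized
    // sorter above: order by frequency, break ties by literal.
    func sortByFreqSimple(data []literalNode) {
        sort.Slice(data, func(i, j int) bool {
            if data[i].freq == data[j].freq {
                return data[i].literal < data[j].literal
            }
            return data[i].freq < data[j].freq
        })
    }

    func main() {
        data := []literalNode{{literal: 2, freq: 5}, {literal: 1, freq: 5}, {literal: 9, freq: 1}}
        sortByFreqSimple(data)
        fmt.Println(data) // [{9 1} {1 5} {2 5}]
    }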
- protect := hi-c < 5 - if !protect && hi-c < (hi-lo)/4 { - // Lets test some points for equality to pivot - dups := 0 - if data[pivot].freq == data[hi-1].freq && data[pivot].literal > data[hi-1].literal || data[pivot].freq > data[hi-1].freq { // data[hi-1] = pivot - data[c], data[hi-1] = data[hi-1], data[c] - c++ - dups++ - } - if data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq { // data[b-1] = pivot - b-- - dups++ - } - // m-lo = (hi-lo)/2 > 6 - // b-lo > (hi-lo)*3/4-1 > 8 - // ==> m < b ==> data[m] <= pivot - if data[m].freq == data[pivot].freq && data[m].literal > data[pivot].literal || data[m].freq > data[pivot].freq { // data[m] = pivot - data[m], data[b-1] = data[b-1], data[m] - b-- - dups++ - } - // if at least 2 points are equal to pivot, assume skewed distribution - protect = dups > 1 - } - if protect { - // Protect against a lot of duplicates - // Add invariant: - // data[a <= i < b] unexamined - // data[b <= i < c] = pivot - for { - for ; a < b && (data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq); b-- { // data[b] == pivot - } - for ; a < b && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ { // data[a] < pivot - } - if a >= b { - break - } - // data[a] == pivot; data[b-1] < pivot - data[a], data[b-1] = data[b-1], data[a] - a++ - b-- - } - } - // Swap pivot into middle - data[pivot], data[b-1] = data[b-1], data[pivot] - return b - 1, c -} - -// Insertion sort -func insertionSortByFreq(data []literalNode, a, b int) { - for i := a + 1; i < b; i++ { - for j := i; j > a && (data[j].freq == data[j-1].freq && data[j].literal < data[j-1].literal || data[j].freq < data[j-1].freq); j-- { - data[j], data[j-1] = data[j-1], data[j] - } - } -} - -// quickSortByFreq, loosely following Bentley and McIlroy, -// ``Engineering a Sort Function,'' SP&E November 1993. - -// medianOfThreeSortByFreq moves the median of the three values data[m0], data[m1], data[m2] into data[m1]. -func medianOfThreeSortByFreq(data []literalNode, m1, m0, m2 int) { - // sort 3 elements - if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq { - data[m1], data[m0] = data[m0], data[m1] - } - // data[m0] <= data[m1] - if data[m2].freq == data[m1].freq && data[m2].literal < data[m1].literal || data[m2].freq < data[m1].freq { - data[m2], data[m1] = data[m1], data[m2] - // data[m0] <= data[m2] && data[m1] < data[m2] - if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq { - data[m1], data[m0] = data[m0], data[m1] - } - } - // now data[m0] <= data[m1] <= data[m2] -} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go b/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go deleted file mode 100644 index 93f1aea1..00000000 --- a/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flate - -// Sort sorts data. -// It makes one call to data.Len to determine n, and O(n*log(n)) calls to -// data.Less and data.Swap. The sort is not guaranteed to be stable. 
-func sortByLiteral(data []literalNode) { - n := len(data) - quickSort(data, 0, n, maxDepth(n)) -} - -func quickSort(data []literalNode, a, b, maxDepth int) { - for b-a > 12 { // Use ShellSort for slices <= 12 elements - if maxDepth == 0 { - heapSort(data, a, b) - return - } - maxDepth-- - mlo, mhi := doPivot(data, a, b) - // Avoiding recursion on the larger subproblem guarantees - // a stack depth of at most lg(b-a). - if mlo-a < b-mhi { - quickSort(data, a, mlo, maxDepth) - a = mhi // i.e., quickSort(data, mhi, b) - } else { - quickSort(data, mhi, b, maxDepth) - b = mlo // i.e., quickSort(data, a, mlo) - } - } - if b-a > 1 { - // Do ShellSort pass with gap 6 - // It could be written in this simplified form cause b-a <= 12 - for i := a + 6; i < b; i++ { - if data[i].literal < data[i-6].literal { - data[i], data[i-6] = data[i-6], data[i] - } - } - insertionSort(data, a, b) - } -} -func heapSort(data []literalNode, a, b int) { - first := a - lo := 0 - hi := b - a - - // Build heap with greatest element at top. - for i := (hi - 1) / 2; i >= 0; i-- { - siftDown(data, i, hi, first) - } - - // Pop elements, largest first, into end of data. - for i := hi - 1; i >= 0; i-- { - data[first], data[first+i] = data[first+i], data[first] - siftDown(data, lo, i, first) - } -} - -// siftDown implements the heap property on data[lo, hi). -// first is an offset into the array where the root of the heap lies. -func siftDown(data []literalNode, lo, hi, first int) { - root := lo - for { - child := 2*root + 1 - if child >= hi { - break - } - if child+1 < hi && data[first+child].literal < data[first+child+1].literal { - child++ - } - if data[first+root].literal > data[first+child].literal { - return - } - data[first+root], data[first+child] = data[first+child], data[first+root] - root = child - } -} -func doPivot(data []literalNode, lo, hi int) (midlo, midhi int) { - m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow. - if hi-lo > 40 { - // Tukey's ``Ninther,'' median of three medians of three. - s := (hi - lo) / 8 - medianOfThree(data, lo, lo+s, lo+2*s) - medianOfThree(data, m, m-s, m+s) - medianOfThree(data, hi-1, hi-1-s, hi-1-2*s) - } - medianOfThree(data, lo, m, hi-1) - - // Invariants are: - // data[lo] = pivot (set up by ChoosePivot) - // data[lo < i < a] < pivot - // data[a <= i < b] <= pivot - // data[b <= i < c] unexamined - // data[c <= i < hi-1] > pivot - // data[hi-1] >= pivot - pivot := lo - a, c := lo+1, hi-1 - - for ; a < c && data[a].literal < data[pivot].literal; a++ { - } - b := a - for { - for ; b < c && data[pivot].literal > data[b].literal; b++ { // data[b] <= pivot - } - for ; b < c && data[pivot].literal < data[c-1].literal; c-- { // data[c-1] > pivot - } - if b >= c { - break - } - // data[b] > pivot; data[c-1] <= pivot - data[b], data[c-1] = data[c-1], data[b] - b++ - c-- - } - // If hi-c<3 then there are duplicates (by property of median of nine). - // Let's be a bit more conservative, and set border to 5. 
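quickSort above is an introsort: maxDepth (defined later in this file) gives it a recursion budget of 2*ceil(lg(n+1)), after which it switches to heapSort, capping the worst case at O(n log n). Reproduced with a small demonstration of the budget:

    package main

    import "fmt"

    // maxDepth returns 2*ceil(lg(n+1)); quickSort decrements this
    // budget per partition and falls back to heapSort at zero.
    func maxDepth(n int) int {
        var depth int
        for i := n; i > 0; i >>= 1 {
            depth++
        }
        return depth * 2
    }

    func main() {
        for _, n := range []int{1, 12, 1000, 1 << 20} {
            fmt.Printf("n=%d depth budget=%d\n", n, maxDepth(n))
        }
        // n=1 depth budget=2
        // n=12 depth budget=8
        // n=1000 depth budget=20
        // n=1048576 depth budget=42
    }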
- protect := hi-c < 5 - if !protect && hi-c < (hi-lo)/4 { - // Lets test some points for equality to pivot - dups := 0 - if data[pivot].literal > data[hi-1].literal { // data[hi-1] = pivot - data[c], data[hi-1] = data[hi-1], data[c] - c++ - dups++ - } - if data[b-1].literal > data[pivot].literal { // data[b-1] = pivot - b-- - dups++ - } - // m-lo = (hi-lo)/2 > 6 - // b-lo > (hi-lo)*3/4-1 > 8 - // ==> m < b ==> data[m] <= pivot - if data[m].literal > data[pivot].literal { // data[m] = pivot - data[m], data[b-1] = data[b-1], data[m] - b-- - dups++ - } - // if at least 2 points are equal to pivot, assume skewed distribution - protect = dups > 1 - } - if protect { - // Protect against a lot of duplicates - // Add invariant: - // data[a <= i < b] unexamined - // data[b <= i < c] = pivot - for { - for ; a < b && data[b-1].literal > data[pivot].literal; b-- { // data[b] == pivot - } - for ; a < b && data[a].literal < data[pivot].literal; a++ { // data[a] < pivot - } - if a >= b { - break - } - // data[a] == pivot; data[b-1] < pivot - data[a], data[b-1] = data[b-1], data[a] - a++ - b-- - } - } - // Swap pivot into middle - data[pivot], data[b-1] = data[b-1], data[pivot] - return b - 1, c -} - -// Insertion sort -func insertionSort(data []literalNode, a, b int) { - for i := a + 1; i < b; i++ { - for j := i; j > a && data[j].literal < data[j-1].literal; j-- { - data[j], data[j-1] = data[j-1], data[j] - } - } -} - -// maxDepth returns a threshold at which quicksort should switch -// to heapsort. It returns 2*ceil(lg(n+1)). -func maxDepth(n int) int { - var depth int - for i := n; i > 0; i >>= 1 { - depth++ - } - return depth * 2 -} - -// medianOfThree moves the median of the three values data[m0], data[m1], data[m2] into data[m1]. -func medianOfThree(data []literalNode, m1, m0, m2 int) { - // sort 3 elements - if data[m1].literal < data[m0].literal { - data[m1], data[m0] = data[m0], data[m1] - } - // data[m0] <= data[m1] - if data[m2].literal < data[m1].literal { - data[m2], data[m1] = data[m1], data[m2] - // data[m0] <= data[m2] && data[m1] < data[m2] - if data[m1].literal < data[m0].literal { - data[m1], data[m0] = data[m0], data[m1] - } - } - // now data[m0] <= data[m1] <= data[m2] -} diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go deleted file mode 100644 index 414c0bea..00000000 --- a/vendor/github.com/klauspost/compress/flate/inflate.go +++ /dev/null @@ -1,793 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package flate implements the DEFLATE compressed data format, described in -// RFC 1951. The gzip and zlib packages implement access to DEFLATE-based file -// formats. -package flate - -import ( - "bufio" - "compress/flate" - "fmt" - "io" - "math/bits" - "sync" -) - -const ( - maxCodeLen = 16 // max length of Huffman code - maxCodeLenMask = 15 // mask for max length of Huffman code - // The next three numbers come from the RFC section 3.2.7, with the - // additional proviso in section 3.2.5 which implies that distance codes - // 30 and 31 should never occur in compressed data. - maxNumLit = 286 - maxNumDist = 30 - numCodes = 19 // number of codes in Huffman meta-code - - debugDecode = false -) - -// Value of length - 3 and extra bits. 
-type lengthExtra struct { - length, extra uint8 -} - -var decCodeToLen = [32]lengthExtra{{length: 0x0, extra: 0x0}, {length: 0x1, extra: 0x0}, {length: 0x2, extra: 0x0}, {length: 0x3, extra: 0x0}, {length: 0x4, extra: 0x0}, {length: 0x5, extra: 0x0}, {length: 0x6, extra: 0x0}, {length: 0x7, extra: 0x0}, {length: 0x8, extra: 0x1}, {length: 0xa, extra: 0x1}, {length: 0xc, extra: 0x1}, {length: 0xe, extra: 0x1}, {length: 0x10, extra: 0x2}, {length: 0x14, extra: 0x2}, {length: 0x18, extra: 0x2}, {length: 0x1c, extra: 0x2}, {length: 0x20, extra: 0x3}, {length: 0x28, extra: 0x3}, {length: 0x30, extra: 0x3}, {length: 0x38, extra: 0x3}, {length: 0x40, extra: 0x4}, {length: 0x50, extra: 0x4}, {length: 0x60, extra: 0x4}, {length: 0x70, extra: 0x4}, {length: 0x80, extra: 0x5}, {length: 0xa0, extra: 0x5}, {length: 0xc0, extra: 0x5}, {length: 0xe0, extra: 0x5}, {length: 0xff, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}} - -var bitMask32 = [32]uint32{ - 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, - 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, - 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, - 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, -} // up to 32 bits - -// Initialize the fixedHuffmanDecoder only once upon first use. -var fixedOnce sync.Once -var fixedHuffmanDecoder huffmanDecoder - -// A CorruptInputError reports the presence of corrupt input at a given offset. -type CorruptInputError = flate.CorruptInputError - -// An InternalError reports an error in the flate code itself. -type InternalError string - -func (e InternalError) Error() string { return "flate: internal error: " + string(e) } - -// A ReadError reports an error encountered while reading input. -// -// Deprecated: No longer returned. -type ReadError = flate.ReadError - -// A WriteError reports an error encountered while writing output. -// -// Deprecated: No longer returned. -type WriteError = flate.WriteError - -// Resetter resets a ReadCloser returned by NewReader or NewReaderDict to -// to switch to a new underlying Reader. This permits reusing a ReadCloser -// instead of allocating a new one. -type Resetter interface { - // Reset discards any buffered data and resets the Resetter as if it was - // newly initialized with the given reader. - Reset(r io.Reader, dict []byte) error -} - -// The data structure for decoding Huffman tables is based on that of -// zlib. There is a lookup table of a fixed bit width (huffmanChunkBits), -// For codes smaller than the table width, there are multiple entries -// (each combination of trailing bits has the same value). For codes -// larger than the table width, the table contains a link to an overflow -// table. The width of each entry in the link table is the maximum code -// size minus the chunk width. -// -// Note that you can do a lookup in the table even without all bits -// filled. Since the extra bits are zero, and the DEFLATE Huffman codes -// have the property that shorter codes come before longer ones, the -// bit length estimate in the result is a lower bound on the actual -// number of bits. 
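decCodeToLen above flattens the RFC 1951 length table: length symbol v in [257, 285] yields a base length of decCodeToLen[v-257].length+3, and 'extra' further bits are read from the stream and added. A few rows decoded by hand (values copied from the table above and cross-checked against the RFC):

    package main

    import "fmt"

    type lengthExtra struct{ length, extra uint8 }

    func main() {
        rows := []struct {
            sym int
            val lengthExtra
        }{
            {257, lengthExtra{length: 0x0, extra: 0}},  // length 3
            {266, lengthExtra{length: 0xa, extra: 1}},  // lengths 13-14
            {282, lengthExtra{length: 0xa0, extra: 5}}, // lengths 163-194
            {285, lengthExtra{length: 0xff, extra: 0}}, // length 258 exactly
        }
        for _, r := range rows {
            lo := int(r.val.length) + 3
            hi := lo + 1<<r.val.extra - 1
            fmt.Printf("symbol %d: lengths %d..%d (%d extra bits)\n", r.sym, lo, hi, r.val.extra)
        }
    }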
-//
-// See the following:
-// http://www.gzip.org/algorithm.txt
-
-// chunk & 15 is number of bits
-// chunk >> 4 is value, including table link
-
-const (
-	huffmanChunkBits  = 9
-	huffmanNumChunks  = 1 << huffmanChunkBits
-	huffmanCountMask  = 15
-	huffmanValueShift = 4
-)
-
-type huffmanDecoder struct {
-	maxRead  int                       // the maximum number of bits we can read and not overread
-	chunks   *[huffmanNumChunks]uint16 // chunks as described above
-	links    [][]uint16                // overflow links
-	linkMask uint32                    // mask the width of the link table
-}
-
-// Initialize Huffman decoding tables from array of code lengths.
-// Following this function, h is guaranteed to be initialized into a complete
-// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a
-// degenerate case where the tree has only a single symbol with length 1. Empty
-// trees are permitted.
-func (h *huffmanDecoder) init(lengths []int) bool {
-	// Sanity enables additional runtime tests during Huffman
-	// table construction. It's intended to be used during
-	// development to supplement the currently ad-hoc unit tests.
-	const sanity = false
-
-	if h.chunks == nil {
-		h.chunks = &[huffmanNumChunks]uint16{}
-	}
-	if h.maxRead != 0 {
-		*h = huffmanDecoder{chunks: h.chunks, links: h.links}
-	}
-
-	// Count number of codes of each length,
-	// compute maxRead and max length.
-	var count [maxCodeLen]int
-	var min, max int
-	for _, n := range lengths {
-		if n == 0 {
-			continue
-		}
-		if min == 0 || n < min {
-			min = n
-		}
-		if n > max {
-			max = n
-		}
-		count[n&maxCodeLenMask]++
-	}
-
-	// Empty tree. The decompressor.huffSym function will fail later if the tree
-	// is used. Technically, an empty tree is only valid for the HDIST tree and
-	// not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree
-	// is guaranteed to fail since it will attempt to use the tree to decode the
-	// codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is
-	// guaranteed to fail later since the compressed data section must be
-	// composed of at least one symbol (the end-of-block marker).
-	if max == 0 {
-		return true
-	}
-
-	code := 0
-	var nextcode [maxCodeLen]int
-	for i := min; i <= max; i++ {
-		code <<= 1
-		nextcode[i&maxCodeLenMask] = code
-		code += count[i&maxCodeLenMask]
-	}
-
-	// Check that the coding is complete (i.e., that we've
-	// assigned all 2-to-the-max possible bit sequences).
-	// Exception: To be compatible with zlib, we also need to
-	// accept degenerate single-code codings. See also
-	// TestDegenerateHuffmanCoding.
-	if code != 1<<uint(max) && !(code == 1 && max == 1) {
-		if debugDecode {
-			fmt.Println("coding failed, code, max:", code, max, code == 1<<uint(max), code == 1 && max == 1, "(one should be true)")
-		}
-		return false
-	}
-
-	h.maxRead = min
-
-	chunks := h.chunks[:]
-	for i := range chunks {
-		chunks[i] = 0
-	}
-
-	if max > huffmanChunkBits {
-		numLinks := 1 << (uint(max) - huffmanChunkBits)
-		h.linkMask = uint32(numLinks - 1)
-
-		// create link tables
-		link := nextcode[huffmanChunkBits+1] >> 1
-		if cap(h.links) < huffmanNumChunks-link {
-			h.links = make([][]uint16, huffmanNumChunks-link)
-		} else {
-			h.links = h.links[:huffmanNumChunks-link]
-		}
-		for j := uint(link); j < huffmanNumChunks; j++ {
-			reverse := int(bits.Reverse16(uint16(j)))
-			reverse >>= uint(16 - huffmanChunkBits)
-			off := j - uint(link)
-			if sanity && h.chunks[reverse] != 0 {
-				panic("impossible: overwriting existing chunk")
-			}
-			h.chunks[reverse] = uint16(off<<huffmanValueShift | (huffmanChunkBits + 1))
-			if cap(h.links[off]) < numLinks {
-				h.links[off] = make([]uint16, numLinks)
-			} else {
-				links := h.links[off][:0]
-				h.links[off] = links[:numLinks]
-			}
-		}
-	} else {
-		h.links = h.links[:0]
-	}
-
-	for i, n := range lengths {
-		if n == 0 {
-			continue
-		}
-		code := nextcode[n]
-		nextcode[n]++
-		chunk := uint16(i<<huffmanValueShift | n)
-		reverse := int(bits.Reverse16(uint16(code)))
-		reverse >>= uint(16 - n)
-		if n <= huffmanChunkBits {
-			for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
-				// We should never need to overwrite
-				// an existing chunk. Also, 0 is
-				// never a valid chunk, because the
-				// lower 4 "count" bits should be
-				// between 1 and 15.
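A sketch of the table format described above: the low huffmanChunkBits bits of pending input select a chunk, whose low 4 bits give the code width to consume and whose high bits give the decoded value. Codes shorter than 9 bits are replicated at a stride of 1<<width so any trailing bits still hit the right entry. The lookup helper and symbol choice are ours, and the overflow link-table case is omitted:

    package main

    import "fmt"

    const (
        huffmanChunkBits  = 9
        huffmanNumChunks  = 1 << huffmanChunkBits
        huffmanCountMask  = 15
        huffmanValueShift = 4
    )

    // lookup resolves one symbol from the primary table only; widths
    // above huffmanChunkBits would require the link table.
    func lookup(chunks *[huffmanNumChunks]uint16, bitstream uint32) (value, width uint16, ok bool) {
        chunk := chunks[bitstream&(huffmanNumChunks-1)]
        width = chunk & huffmanCountMask
        if width == 0 || width > huffmanChunkBits {
            return 0, 0, false // invalid code, or needs the link table
        }
        return chunk >> huffmanValueShift, width, true
    }

    func main() {
        var chunks [huffmanNumChunks]uint16
        // Pretend symbol 65 ('A') has the 3-bit code 101 (LSB first on the
        // wire): every 9-bit index whose low 3 bits are 101 decodes to it.
        for off := uint32(0b101); off < huffmanNumChunks; off += 1 << 3 {
            chunks[off] = 65<<huffmanValueShift | 3
        }
        v, w, ok := lookup(&chunks, 0b110101101) // low bits ...101
        fmt.Println(v, w, ok)                    // 65 3 true
    }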
- if sanity && h.chunks[off] != 0 { - panic("impossible: overwriting existing chunk") - } - h.chunks[off] = chunk - } - } else { - j := reverse & (huffmanNumChunks - 1) - if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 { - // Longer codes should have been - // associated with a link table above. - panic("impossible: not an indirect chunk") - } - value := h.chunks[j] >> huffmanValueShift - linktab := h.links[value] - reverse >>= huffmanChunkBits - for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) { - if sanity && linktab[off] != 0 { - panic("impossible: overwriting existing chunk") - } - linktab[off] = chunk - } - } - } - - if sanity { - // Above we've sanity checked that we never overwrote - // an existing entry. Here we additionally check that - // we filled the tables completely. - for i, chunk := range h.chunks { - if chunk == 0 { - // As an exception, in the degenerate - // single-code case, we allow odd - // chunks to be missing. - if code == 1 && i%2 == 1 { - continue - } - panic("impossible: missing chunk") - } - } - for _, linktab := range h.links { - for _, chunk := range linktab { - if chunk == 0 { - panic("impossible: missing chunk") - } - } - } - } - - return true -} - -// The actual read interface needed by NewReader. -// If the passed in io.Reader does not also have ReadByte, -// the NewReader will introduce its own buffering. -type Reader interface { - io.Reader - io.ByteReader -} - -// Decompress state. -type decompressor struct { - // Input source. - r Reader - roffset int64 - - // Huffman decoders for literal/length, distance. - h1, h2 huffmanDecoder - - // Length arrays used to define Huffman codes. - bits *[maxNumLit + maxNumDist]int - codebits *[numCodes]int - - // Output history, buffer. - dict dictDecoder - - // Next step in the decompression, - // and decompression state. - step func(*decompressor) - stepState int - err error - toRead []byte - hl, hd *huffmanDecoder - copyLen int - copyDist int - - // Temporary buffer (avoids repeated allocation). - buf [4]byte - - // Input bits, in top of b. - b uint32 - - nb uint - final bool -} - -func (f *decompressor) nextBlock() { - for f.nb < 1+2 { - if f.err = f.moreBits(); f.err != nil { - return - } - } - f.final = f.b&1 == 1 - f.b >>= 1 - typ := f.b & 3 - f.b >>= 2 - f.nb -= 1 + 2 - switch typ { - case 0: - f.dataBlock() - if debugDecode { - fmt.Println("stored block") - } - case 1: - // compressed, fixed Huffman tables - f.hl = &fixedHuffmanDecoder - f.hd = nil - f.huffmanBlockDecoder()() - if debugDecode { - fmt.Println("predefinied huffman block") - } - case 2: - // compressed, dynamic Huffman tables - if f.err = f.readHuffman(); f.err != nil { - break - } - f.hl = &f.h1 - f.hd = &f.h2 - f.huffmanBlockDecoder()() - if debugDecode { - fmt.Println("dynamic huffman block") - } - default: - // 3 is reserved. - if debugDecode { - fmt.Println("reserved data block encountered") - } - f.err = CorruptInputError(f.roffset) - } -} - -func (f *decompressor) Read(b []byte) (int, error) { - for { - if len(f.toRead) > 0 { - n := copy(b, f.toRead) - f.toRead = f.toRead[n:] - if len(f.toRead) == 0 { - return n, f.err - } - return n, nil - } - if f.err != nil { - return 0, f.err - } - f.step(f) - if f.err != nil && len(f.toRead) == 0 { - f.toRead = f.dict.readFlush() // Flush what's left in case of error - } - } -} - -// Support the io.WriteTo interface for io.Copy and friends. 
-func (f *decompressor) WriteTo(w io.Writer) (int64, error) {
-	total := int64(0)
-	flushed := false
-	for {
-		if len(f.toRead) > 0 {
-			n, err := w.Write(f.toRead)
-			total += int64(n)
-			if err != nil {
-				f.err = err
-				return total, err
-			}
-			if n != len(f.toRead) {
-				return total, io.ErrShortWrite
-			}
-			f.toRead = f.toRead[:0]
-		}
-		if f.err != nil && flushed {
-			if f.err == io.EOF {
-				return total, nil
-			}
-			return total, f.err
-		}
-		if f.err == nil {
-			f.step(f)
-		}
-		if len(f.toRead) == 0 && f.err != nil && !flushed {
-			f.toRead = f.dict.readFlush() // Flush what's left in case of error
-			flushed = true
-		}
-	}
-}
-
-func (f *decompressor) Close() error {
-	if f.err == io.EOF {
-		return nil
-	}
-	return f.err
-}
-
-// RFC 1951 section 3.2.7.
-// Compression with dynamic Huffman codes
-
-var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
-
-func (f *decompressor) readHuffman() error {
-	// HLIT[5], HDIST[5], HCLEN[4].
-	for f.nb < 5+5+4 {
-		if err := f.moreBits(); err != nil {
-			return err
-		}
-	}
-	nlit := int(f.b&0x1F) + 257
-	if nlit > maxNumLit {
-		if debugDecode {
-			fmt.Println("nlit > maxNumLit", nlit)
-		}
-		return CorruptInputError(f.roffset)
-	}
-	f.b >>= 5
-	ndist := int(f.b&0x1F) + 1
-	if ndist > maxNumDist {
-		if debugDecode {
-			fmt.Println("ndist > maxNumDist", ndist)
-		}
-		return CorruptInputError(f.roffset)
-	}
-	f.b >>= 5
-	nclen := int(f.b&0xF) + 4
-	// numCodes is 19, so nclen is always valid.
-	f.b >>= 4
-	f.nb -= 5 + 5 + 4
-
-	// (HCLEN+4)*3 bits: code lengths in the magic codeOrder order.
-	for i := 0; i < nclen; i++ {
-		for f.nb < 3 {
-			if err := f.moreBits(); err != nil {
-				return err
-			}
-		}
-		f.codebits[codeOrder[i]] = int(f.b & 0x7)
-		f.b >>= 3
-		f.nb -= 3
-	}
-	for i := nclen; i < len(codeOrder); i++ {
-		f.codebits[codeOrder[i]] = 0
-	}
-	if !f.h1.init(f.codebits[0:]) {
-		if debugDecode {
-			fmt.Println("init codebits failed")
-		}
-		return CorruptInputError(f.roffset)
-	}
-
-	// HLIT + 257 code lengths, HDIST + 1 code lengths,
-	// using the code length Huffman code.
-	for i, n := 0, nlit+ndist; i < n; {
-		x, err := f.huffSym(&f.h1)
-		if err != nil {
-			return err
-		}
-		if x < 16 {
-			// Actual length.
-			f.bits[i] = x
-			i++
-			continue
-		}
-		// Repeat previous length or zero.
-		var rep int
-		var nb uint
-		var b int
-		switch x {
-		default:
-			return InternalError("unexpected length code")
-		case 16:
-			rep = 3
-			nb = 2
-			if i == 0 {
-				if debugDecode {
-					fmt.Println("i==0")
-				}
-				return CorruptInputError(f.roffset)
-			}
-			b = f.bits[i-1]
-		case 17:
-			rep = 3
-			nb = 3
-			b = 0
-		case 18:
-			rep = 11
-			nb = 7
-			b = 0
-		}
-		for f.nb < nb {
-			if err := f.moreBits(); err != nil {
-				if debugDecode {
-					fmt.Println("morebits:", err)
-				}
-				return err
-			}
-		}
-		rep += int(f.b & uint32(1<<(nb&regSizeMaskUint32)-1))
-		f.b >>= nb & regSizeMaskUint32
-		f.nb -= nb
-		if i+rep > n {
-			if debugDecode {
-				fmt.Println("i+rep > n", i, rep, n)
-			}
-			return CorruptInputError(f.roffset)
-		}
-		for j := 0; j < rep; j++ {
-			f.bits[i] = b
-			i++
-		}
-	}
-
-	if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) {
-		if debugDecode {
-			fmt.Println("init2 failed")
-		}
-		return CorruptInputError(f.roffset)
-	}
-
-	// As an optimization, we can initialize the maxRead bits to read at a time
-	// for the HLIT tree to the length of the EOB marker since we know that
-	// every block must terminate with one. This preserves the property that
-	// we never read any extra bytes after the end of the DEFLATE stream.
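readHuffman above reads the (HCLEN+4)*3 bits of code-length codes in the shuffled codeOrder, which puts the most common symbols first so that short headers can truncate the tail to zeros. The unshuffling step in isolation, fed from a canned slice instead of the bit reader (the wire values are hypothetical):

    package main

    import "fmt"

    var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}

    func main() {
        // Hypothetical header with nclen=5 three-bit lengths as they
        // appear on the wire, in codeOrder.
        wire := []int{2, 0, 0, 3, 3}
        var codebits [19]int
        for i, v := range wire {
            codebits[codeOrder[i]] = v
        }
        for i := len(wire); i < len(codeOrder); i++ {
            codebits[codeOrder[i]] = 0 // absent tail entries default to zero
        }
        fmt.Println(codebits) // lengths indexed by the actual symbols 0..18
    }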
- if f.h1.maxRead < f.bits[endBlockMarker] { - f.h1.maxRead = f.bits[endBlockMarker] - } - if !f.final { - // If not the final block, the smallest block possible is - // a predefined table, BTYPE=01, with a single EOB marker. - // This will take up 3 + 7 bits. - f.h1.maxRead += 10 - } - - return nil -} - -// Copy a single uncompressed data block from input to output. -func (f *decompressor) dataBlock() { - // Uncompressed. - // Discard current half-byte. - left := (f.nb) & 7 - f.nb -= left - f.b >>= left - - offBytes := f.nb >> 3 - // Unfilled values will be overwritten. - f.buf[0] = uint8(f.b) - f.buf[1] = uint8(f.b >> 8) - f.buf[2] = uint8(f.b >> 16) - f.buf[3] = uint8(f.b >> 24) - - f.roffset += int64(offBytes) - f.nb, f.b = 0, 0 - - // Length then ones-complement of length. - nr, err := io.ReadFull(f.r, f.buf[offBytes:4]) - f.roffset += int64(nr) - if err != nil { - f.err = noEOF(err) - return - } - n := uint16(f.buf[0]) | uint16(f.buf[1])<<8 - nn := uint16(f.buf[2]) | uint16(f.buf[3])<<8 - if nn != ^n { - if debugDecode { - ncomp := ^n - fmt.Println("uint16(nn) != uint16(^n)", nn, ncomp) - } - f.err = CorruptInputError(f.roffset) - return - } - - if n == 0 { - f.toRead = f.dict.readFlush() - f.finishBlock() - return - } - - f.copyLen = int(n) - f.copyData() -} - -// copyData copies f.copyLen bytes from the underlying reader into f.hist. -// It pauses for reads when f.hist is full. -func (f *decompressor) copyData() { - buf := f.dict.writeSlice() - if len(buf) > f.copyLen { - buf = buf[:f.copyLen] - } - - cnt, err := io.ReadFull(f.r, buf) - f.roffset += int64(cnt) - f.copyLen -= cnt - f.dict.writeMark(cnt) - if err != nil { - f.err = noEOF(err) - return - } - - if f.dict.availWrite() == 0 || f.copyLen > 0 { - f.toRead = f.dict.readFlush() - f.step = (*decompressor).copyData - return - } - f.finishBlock() -} - -func (f *decompressor) finishBlock() { - if f.final { - if f.dict.availRead() > 0 { - f.toRead = f.dict.readFlush() - } - f.err = io.EOF - } - f.step = (*decompressor).nextBlock -} - -// noEOF returns err, unless err == io.EOF, in which case it returns io.ErrUnexpectedEOF. -func noEOF(e error) error { - if e == io.EOF { - return io.ErrUnexpectedEOF - } - return e -} - -func (f *decompressor) moreBits() error { - c, err := f.r.ReadByte() - if err != nil { - return noEOF(err) - } - f.roffset++ - f.b |= uint32(c) << (f.nb & regSizeMaskUint32) - f.nb += 8 - return nil -} - -// Read the next Huffman-encoded symbol from f according to h. -func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) { - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(h.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. 
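dataBlock above validates a stored block's 4-byte header: two little-endian 16-bit fields, LEN and NLEN, where NLEN must be the ones' complement of LEN. The check in isolation (the helper name is ours):

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // storedLen extracts LEN and reports whether NLEN is its ones'
    // complement, as dataBlock requires before copying the payload.
    func storedLen(hdr [4]byte) (int, bool) {
        n := binary.LittleEndian.Uint16(hdr[0:2])
        nn := binary.LittleEndian.Uint16(hdr[2:4])
        return int(n), nn == ^n
    }

    func main() {
        fmt.Println(storedLen([4]byte{0x05, 0x00, 0xfa, 0xff})) // 5 true
        fmt.Println(storedLen([4]byte{0x05, 0x00, 0xfa, 0xfe})) // 5 false: corrupt header
    }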
- nb, b := f.nb, f.b - for { - for nb < n { - c, err := f.r.ReadByte() - if err != nil { - f.b = b - f.nb = nb - return 0, noEOF(err) - } - f.roffset++ - b |= uint32(c) << (nb & regSizeMaskUint32) - nb += 8 - } - chunk := h.chunks[b&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = h.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&h.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= nb { - if n == 0 { - f.b = b - f.nb = nb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return 0, f.err - } - f.b = b >> (n & regSizeMaskUint32) - f.nb = nb - n - return int(chunk >> huffmanValueShift), nil - } - } -} - -func makeReader(r io.Reader) Reader { - if rr, ok := r.(Reader); ok { - return rr - } - return bufio.NewReader(r) -} - -func fixedHuffmanDecoderInit() { - fixedOnce.Do(func() { - // These come from the RFC section 3.2.6. - var bits [288]int - for i := 0; i < 144; i++ { - bits[i] = 8 - } - for i := 144; i < 256; i++ { - bits[i] = 9 - } - for i := 256; i < 280; i++ { - bits[i] = 7 - } - for i := 280; i < 288; i++ { - bits[i] = 8 - } - fixedHuffmanDecoder.init(bits[:]) - }) -} - -func (f *decompressor) Reset(r io.Reader, dict []byte) error { - *f = decompressor{ - r: makeReader(r), - bits: f.bits, - codebits: f.codebits, - h1: f.h1, - h2: f.h2, - dict: f.dict, - step: (*decompressor).nextBlock, - } - f.dict.init(maxMatchOffset, dict) - return nil -} - -// NewReader returns a new ReadCloser that can be used -// to read the uncompressed version of r. -// If r does not also implement io.ByteReader, -// the decompressor may read more data than necessary from r. -// It is the caller's responsibility to call Close on the ReadCloser -// when finished reading. -// -// The ReadCloser returned by NewReader also implements Resetter. -func NewReader(r io.Reader) io.ReadCloser { - fixedHuffmanDecoderInit() - - var f decompressor - f.r = makeReader(r) - f.bits = new([maxNumLit + maxNumDist]int) - f.codebits = new([numCodes]int) - f.step = (*decompressor).nextBlock - f.dict.init(maxMatchOffset, nil) - return &f -} - -// NewReaderDict is like NewReader but initializes the reader -// with a preset dictionary. The returned Reader behaves as if -// the uncompressed data stream started with the given dictionary, -// which has already been read. NewReaderDict is typically used -// to read data compressed by NewWriterDict. -// -// The ReadCloser returned by NewReader also implements Resetter. -func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser { - fixedHuffmanDecoderInit() - - var f decompressor - f.r = makeReader(r) - f.bits = new([maxNumLit + maxNumDist]int) - f.codebits = new([numCodes]int) - f.step = (*decompressor).nextBlock - f.dict.init(maxMatchOffset, dict) - return &f -} diff --git a/vendor/github.com/klauspost/compress/flate/inflate_gen.go b/vendor/github.com/klauspost/compress/flate/inflate_gen.go deleted file mode 100644 index 61342b6b..00000000 --- a/vendor/github.com/klauspost/compress/flate/inflate_gen.go +++ /dev/null @@ -1,1283 +0,0 @@ -// Code generated by go generate gen_inflate.go. DO NOT EDIT. - -package flate - -import ( - "bufio" - "bytes" - "fmt" - "math/bits" - "strings" -) - -// Decode a single Huffman block from f. -// hl and hd are the Huffman states for the lit/length values -// and the distance values, respectively. If hd == nil, using the -// fixed distance encoding associated with fixed Huffman blocks. 
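For reference, typical use of the reader API being removed here, assuming the package's stdlib-compatible NewWriter (not shown in this hunk, but provided by the same vendored package). The Resetter assertion lets one ReadCloser decode several streams without reallocating its tables; error handling is elided for brevity:

    package main

    import (
        "bytes"
        "fmt"
        "io"

        "github.com/klauspost/compress/flate"
    )

    func main() {
        var buf bytes.Buffer
        fw, _ := flate.NewWriter(&buf, flate.BestSpeed)
        fw.Write([]byte("hello, deflate"))
        fw.Close()
        data := buf.Bytes()

        fr := flate.NewReader(bytes.NewReader(data))
        out, _ := io.ReadAll(fr)
        fmt.Println(string(out)) // hello, deflate

        // Decode a second stream with the same reader via Resetter.
        fr.(flate.Resetter).Reset(bytes.NewReader(data), nil)
        out, _ = io.ReadAll(fr)
        fr.Close()
        fmt.Println(string(out)) // hello, deflate
    }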
-func (f *decompressor) huffmanBytesBuffer() { - const ( - stateInit = iota // Zero value must be stateInit - stateDict - ) - fr := f.r.(*bytes.Buffer) - - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - fnb, fb, dict := f.nb, f.b, &f.dict - - switch f.stepState { - case stateInit: - goto readLiteral - case stateDict: - goto copyHistory - } - -readLiteral: - // Read literal and/or (length, distance) according to RFC section 3.2.3. - { - var v int - { - // Inlined v, err := f.huffSym(f.hl) - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hl.maxRead) - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - v = int(chunk >> huffmanValueShift) - break - } - } - } - - var length int - switch { - case v < 256: - dict.writeByte(byte(v)) - if dict.availWrite() == 0 { - f.toRead = dict.readFlush() - f.step = (*decompressor).huffmanBytesBuffer - f.stepState = stateInit - f.b, f.nb = fb, fnb - return - } - goto readLiteral - case v == 256: - f.b, f.nb = fb, fnb - f.finishBlock() - return - // otherwise, reference to older data - case v < 265: - length = v - (257 - 3) - case v < maxNumLit: - val := decCodeToLen[(v - 257)] - length = int(val.length) + 3 - n := uint(val.extra) - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits n>0:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - length += int(fb & bitMask32[n]) - fb >>= n & regSizeMaskUint32 - fnb -= n - default: - if debugDecode { - fmt.Println(v, ">= maxNumLit") - } - f.err = CorruptInputError(f.roffset) - f.b, f.nb = fb, fnb - return - } - - var dist uint32 - if f.hd == nil { - for fnb < 5 { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb<5:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) - fb >>= 5 - fnb -= 5 - } else { - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hd.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. 
-			for {
-				for fnb < n {
-					c, err := fr.ReadByte()
-					if err != nil {
-						f.b, f.nb = fb, fnb
-						f.err = noEOF(err)
-						return
-					}
-					f.roffset++
-					fb |= uint32(c) << (fnb & regSizeMaskUint32)
-					fnb += 8
-				}
-				chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
-				n = uint(chunk & huffmanCountMask)
-				if n > huffmanChunkBits {
-					chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
-					n = uint(chunk & huffmanCountMask)
-				}
-				if n <= fnb {
-					if n == 0 {
-						f.b, f.nb = fb, fnb
-						if debugDecode {
-							fmt.Println("huffsym: n==0")
-						}
-						f.err = CorruptInputError(f.roffset)
-						return
-					}
-					fb = fb >> (n & regSizeMaskUint32)
-					fnb = fnb - n
-					dist = uint32(chunk >> huffmanValueShift)
-					break
-				}
-			}
-		}
-
-		switch {
-		case dist < 4:
-			dist++
-		case dist < maxNumDist:
-			nb := uint(dist-2) >> 1
-			// have 1 bit in bottom of dist, need nb more.
-			extra := (dist & 1) << (nb & regSizeMaskUint32)
-			for fnb < nb {
-				c, err := fr.ReadByte()
-				if err != nil {
-					f.b, f.nb = fb, fnb
-					if debugDecode {
-						fmt.Println("morebits f.nb<nb:", err)
-					}
-					f.err = err
-					return
-				}
-				f.roffset++
-				fb |= uint32(c) << (fnb & regSizeMaskUint32)
-				fnb += 8
-			}
-			extra |= fb & bitMask32[nb]
-			fb >>= nb & regSizeMaskUint32
-			fnb -= nb
-			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
-			// slower: dist = bitMask32[nb+1] + 2 + extra
-		default:
-			f.b, f.nb = fb, fnb
-			if debugDecode {
-				fmt.Println("dist too big:", dist, maxNumDist)
-			}
-			f.err = CorruptInputError(f.roffset)
-			return
-		}
-
-		// No check on length; encoding can be prescient.
-		if dist > uint32(dict.histSize()) {
-			f.b, f.nb = fb, fnb
-			if debugDecode {
-				fmt.Println("dist > dict.histSize():", dist, dict.histSize())
-			}
-			f.err = CorruptInputError(f.roffset)
-			return
-		}
-
-		f.copyLen, f.copyDist = length, int(dist)
-		goto copyHistory
-	}
-
-copyHistory:
-	// Perform a backwards copy according to RFC section 3.2.3.
-	{
-		cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
-		if cnt == 0 {
-			cnt = dict.writeCopy(f.copyDist, f.copyLen)
-		}
-		f.copyLen -= cnt
-
-		if dict.availWrite() == 0 || f.copyLen > 0 {
-			f.toRead = dict.readFlush()
-			f.step = (*decompressor).huffmanBytesBuffer // We need to continue this work
-			f.stepState = stateDict
-			f.b, f.nb = fb, fnb
-			return
-		}
-		goto readLiteral
-	}
-	// Not reached
-}
-
-// Decode a single Huffman block from f.
-// hl and hd are the Huffman states for the lit/length values
-// and the distance values, respectively. If hd == nil, using the
-// fixed distance encoding associated with fixed Huffman blocks.
-func (f *decompressor) huffmanBytesReader() {
-	const (
-		stateInit = iota // Zero value must be stateInit
-		stateDict
-	)
-	fr := f.r.(*bytes.Reader)
-
-	// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
-	// but is smart enough to keep local variables in registers, so use nb and b,
-	// inline call to moreBits and reassign b,nb back to f on return.
-	fnb, fb, dict := f.nb, f.b, &f.dict
-
-	switch f.stepState {
-	case stateInit:
-		goto readLiteral
-	case stateDict:
-		goto copyHistory
-	}
-
-readLiteral:
-	// Read literal and/or (length, distance) according to RFC section 3.2.3.
-	{
-		var v int
-		{
-			// Inlined v, err := f.huffSym(f.hl)
-			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
-			// with single element, huffSym must error on these two edge cases. In both
-			// cases, the chunks slice will be 0 for the invalid sequence, leading it
-			// satisfy the n == 0 check below.
- n := uint(f.hl.maxRead) - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - v = int(chunk >> huffmanValueShift) - break - } - } - } - - var length int - switch { - case v < 256: - dict.writeByte(byte(v)) - if dict.availWrite() == 0 { - f.toRead = dict.readFlush() - f.step = (*decompressor).huffmanBytesReader - f.stepState = stateInit - f.b, f.nb = fb, fnb - return - } - goto readLiteral - case v == 256: - f.b, f.nb = fb, fnb - f.finishBlock() - return - // otherwise, reference to older data - case v < 265: - length = v - (257 - 3) - case v < maxNumLit: - val := decCodeToLen[(v - 257)] - length = int(val.length) + 3 - n := uint(val.extra) - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits n>0:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - length += int(fb & bitMask32[n]) - fb >>= n & regSizeMaskUint32 - fnb -= n - default: - if debugDecode { - fmt.Println(v, ">= maxNumLit") - } - f.err = CorruptInputError(f.roffset) - f.b, f.nb = fb, fnb - return - } - - var dist uint32 - if f.hd == nil { - for fnb < 5 { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb<5:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) - fb >>= 5 - fnb -= 5 - } else { - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hd.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - dist = uint32(chunk >> huffmanValueShift) - break - } - } - } - - switch { - case dist < 4: - dist++ - case dist < maxNumDist: - nb := uint(dist-2) >> 1 - // have 1 bit in bottom of dist, need nb more. 
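The 'dist < maxNumDist' arithmetic that follows in each of these generated functions expands a distance symbol into its base and extra bits: nb = (sym-2)>>1 extra bits, the symbol's low bit becomes the top bit of the extra value, and the distance is 1<<(nb+1) + 1 + extra. In isolation (helper name is ours; cross-checked against the RFC 1951 distance table):

    package main

    import "fmt"

    // distRange returns the distance range covered by a symbol and the
    // number of extra bits its entries consume.
    func distRange(sym uint32) (lo, hi uint32, extraBits uint) {
        if sym < 4 {
            return sym + 1, sym + 1, 0 // symbols 0..3 are literal distances 1..4
        }
        nb := uint(sym-2) >> 1
        base := uint32(1)<<(nb+1) + 1 + (sym&1)<<nb
        return base, base + 1<<nb - 1, nb
    }

    func main() {
        for sym := uint32(0); sym < 8; sym++ {
            lo, hi, nb := distRange(sym)
            fmt.Printf("symbol %d: distances %d..%d (%d extra bits)\n", sym, lo, hi, nb)
        }
        // symbol 4: distances 5..6 (1 extra bit), symbol 7: 13..16 (2), etc.
    }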
-			extra := (dist & 1) << (nb & regSizeMaskUint32)
-			for fnb < nb {
-				c, err := fr.ReadByte()
-				if err != nil {
-					f.b, f.nb = fb, fnb
-					if debugDecode {
-						fmt.Println("morebits f.nb<nb:", err)
-					}
-					f.err = err
-					return
-				}
-				f.roffset++
-				fb |= uint32(c) << (fnb & regSizeMaskUint32)
-				fnb += 8
-			}
-			extra |= fb & bitMask32[nb]
-			fb >>= nb & regSizeMaskUint32
-			fnb -= nb
-			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
-			// slower: dist = bitMask32[nb+1] + 2 + extra
-		default:
-			f.b, f.nb = fb, fnb
-			if debugDecode {
-				fmt.Println("dist too big:", dist, maxNumDist)
-			}
-			f.err = CorruptInputError(f.roffset)
-			return
-		}
-
-		// No check on length; encoding can be prescient.
-		if dist > uint32(dict.histSize()) {
-			f.b, f.nb = fb, fnb
-			if debugDecode {
-				fmt.Println("dist > dict.histSize():", dist, dict.histSize())
-			}
-			f.err = CorruptInputError(f.roffset)
-			return
-		}
-
-		f.copyLen, f.copyDist = length, int(dist)
-		goto copyHistory
-	}
-
-copyHistory:
-	// Perform a backwards copy according to RFC section 3.2.3.
-	{
-		cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
-		if cnt == 0 {
-			cnt = dict.writeCopy(f.copyDist, f.copyLen)
-		}
-		f.copyLen -= cnt
-
-		if dict.availWrite() == 0 || f.copyLen > 0 {
-			f.toRead = dict.readFlush()
-			f.step = (*decompressor).huffmanBytesReader // We need to continue this work
-			f.stepState = stateDict
-			f.b, f.nb = fb, fnb
-			return
-		}
-		goto readLiteral
-	}
-	// Not reached
-}
-
-// Decode a single Huffman block from f.
-// hl and hd are the Huffman states for the lit/length values
-// and the distance values, respectively. If hd == nil, using the
-// fixed distance encoding associated with fixed Huffman blocks.
-func (f *decompressor) huffmanBufioReader() {
-	const (
-		stateInit = iota // Zero value must be stateInit
-		stateDict
-	)
-	fr := f.r.(*bufio.Reader)
-
-	// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
-	// but is smart enough to keep local variables in registers, so use nb and b,
-	// inline call to moreBits and reassign b,nb back to f on return.
-	fnb, fb, dict := f.nb, f.b, &f.dict
-
-	switch f.stepState {
-	case stateInit:
-		goto readLiteral
-	case stateDict:
-		goto copyHistory
-	}
-
-readLiteral:
-	// Read literal and/or (length, distance) according to RFC section 3.2.3.
-	{
-		var v int
-		{
-			// Inlined v, err := f.huffSym(f.hl)
-			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
-			// with single element, huffSym must error on these two edge cases. In both
-			// cases, the chunks slice will be 0 for the invalid sequence, leading it
-			// satisfy the n == 0 check below.
- n := uint(f.hl.maxRead) - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - v = int(chunk >> huffmanValueShift) - break - } - } - } - - var length int - switch { - case v < 256: - dict.writeByte(byte(v)) - if dict.availWrite() == 0 { - f.toRead = dict.readFlush() - f.step = (*decompressor).huffmanBufioReader - f.stepState = stateInit - f.b, f.nb = fb, fnb - return - } - goto readLiteral - case v == 256: - f.b, f.nb = fb, fnb - f.finishBlock() - return - // otherwise, reference to older data - case v < 265: - length = v - (257 - 3) - case v < maxNumLit: - val := decCodeToLen[(v - 257)] - length = int(val.length) + 3 - n := uint(val.extra) - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits n>0:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - length += int(fb & bitMask32[n]) - fb >>= n & regSizeMaskUint32 - fnb -= n - default: - if debugDecode { - fmt.Println(v, ">= maxNumLit") - } - f.err = CorruptInputError(f.roffset) - f.b, f.nb = fb, fnb - return - } - - var dist uint32 - if f.hd == nil { - for fnb < 5 { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb<5:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) - fb >>= 5 - fnb -= 5 - } else { - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hd.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - dist = uint32(chunk >> huffmanValueShift) - break - } - } - } - - switch { - case dist < 4: - dist++ - case dist < maxNumDist: - nb := uint(dist-2) >> 1 - // have 1 bit in bottom of dist, need nb more. 
-			extra := (dist & 1) << (nb & regSizeMaskUint32)
-			for fnb < nb {
-				c, err := fr.ReadByte()
-				if err != nil {
-					f.b, f.nb = fb, fnb
-					if debugDecode {
-						fmt.Println("morebits f.nb<nb:", err)
-					}
-					f.err = err
-					return
-				}
-				f.roffset++
-				fb |= uint32(c) << (fnb & regSizeMaskUint32)
-				fnb += 8
-			}
-			extra |= fb & bitMask32[nb]
-			fb >>= nb & regSizeMaskUint32
-			fnb -= nb
-			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
-			// slower: dist = bitMask32[nb+1] + 2 + extra
-		default:
-			f.b, f.nb = fb, fnb
-			if debugDecode {
-				fmt.Println("dist too big:", dist, maxNumDist)
-			}
-			f.err = CorruptInputError(f.roffset)
-			return
-		}
-
-		// No check on length; encoding can be prescient.
-		if dist > uint32(dict.histSize()) {
-			f.b, f.nb = fb, fnb
-			if debugDecode {
-				fmt.Println("dist > dict.histSize():", dist, dict.histSize())
-			}
-			f.err = CorruptInputError(f.roffset)
-			return
-		}
-
-		f.copyLen, f.copyDist = length, int(dist)
-		goto copyHistory
-	}
-
-copyHistory:
-	// Perform a backwards copy according to RFC section 3.2.3.
-	{
-		cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
-		if cnt == 0 {
-			cnt = dict.writeCopy(f.copyDist, f.copyLen)
-		}
-		f.copyLen -= cnt
-
-		if dict.availWrite() == 0 || f.copyLen > 0 {
-			f.toRead = dict.readFlush()
-			f.step = (*decompressor).huffmanBufioReader // We need to continue this work
-			f.stepState = stateDict
-			f.b, f.nb = fb, fnb
-			return
-		}
-		goto readLiteral
-	}
-	// Not reached
-}
-
-// Decode a single Huffman block from f.
-// hl and hd are the Huffman states for the lit/length values
-// and the distance values, respectively. If hd == nil, using the
-// fixed distance encoding associated with fixed Huffman blocks.
-func (f *decompressor) huffmanStringsReader() {
-	const (
-		stateInit = iota // Zero value must be stateInit
-		stateDict
-	)
-	fr := f.r.(*strings.Reader)
-
-	// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
-	// but is smart enough to keep local variables in registers, so use nb and b,
-	// inline call to moreBits and reassign b,nb back to f on return.
-	fnb, fb, dict := f.nb, f.b, &f.dict
-
-	switch f.stepState {
-	case stateInit:
-		goto readLiteral
-	case stateDict:
-		goto copyHistory
-	}
-
-readLiteral:
-	// Read literal and/or (length, distance) according to RFC section 3.2.3.
-	{
-		var v int
-		{
-			// Inlined v, err := f.huffSym(f.hl)
-			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
-			// with single element, huffSym must error on these two edge cases. In both
-			// cases, the chunks slice will be 0 for the invalid sequence, leading it
-			// satisfy the n == 0 check below.
- n := uint(f.hl.maxRead) - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - v = int(chunk >> huffmanValueShift) - break - } - } - } - - var length int - switch { - case v < 256: - dict.writeByte(byte(v)) - if dict.availWrite() == 0 { - f.toRead = dict.readFlush() - f.step = (*decompressor).huffmanStringsReader - f.stepState = stateInit - f.b, f.nb = fb, fnb - return - } - goto readLiteral - case v == 256: - f.b, f.nb = fb, fnb - f.finishBlock() - return - // otherwise, reference to older data - case v < 265: - length = v - (257 - 3) - case v < maxNumLit: - val := decCodeToLen[(v - 257)] - length = int(val.length) + 3 - n := uint(val.extra) - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits n>0:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - length += int(fb & bitMask32[n]) - fb >>= n & regSizeMaskUint32 - fnb -= n - default: - if debugDecode { - fmt.Println(v, ">= maxNumLit") - } - f.err = CorruptInputError(f.roffset) - f.b, f.nb = fb, fnb - return - } - - var dist uint32 - if f.hd == nil { - for fnb < 5 { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb<5:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) - fb >>= 5 - fnb -= 5 - } else { - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hd.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - dist = uint32(chunk >> huffmanValueShift) - break - } - } - } - - switch { - case dist < 4: - dist++ - case dist < maxNumDist: - nb := uint(dist-2) >> 1 - // have 1 bit in bottom of dist, need nb more. 
-			extra := (dist & 1) << (nb & regSizeMaskUint32)
-			for fnb < nb {
-				c, err := fr.ReadByte()
-				if err != nil {
-					f.b, f.nb = fb, fnb
-					if debugDecode {
-						fmt.Println("morebits f.nb<nb:", err)
-					}
-					f.err = err
-					return
-				}
-				f.roffset++
-				fb |= uint32(c) << (fnb & regSizeMaskUint32)
-				fnb += 8
-			}
-			extra |= fb & bitMask32[nb]
-			fb >>= nb & regSizeMaskUint32
-			fnb -= nb
-			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
-			// slower: dist = bitMask32[nb+1] + 2 + extra
-		default:
-			f.b, f.nb = fb, fnb
-			if debugDecode {
-				fmt.Println("dist too big:", dist, maxNumDist)
-			}
-			f.err = CorruptInputError(f.roffset)
-			return
-		}
-
-		// No check on length; encoding can be prescient.
-		if dist > uint32(dict.histSize()) {
-			f.b, f.nb = fb, fnb
-			if debugDecode {
-				fmt.Println("dist > dict.histSize():", dist, dict.histSize())
-			}
-			f.err = CorruptInputError(f.roffset)
-			return
-		}
-
-		f.copyLen, f.copyDist = length, int(dist)
-		goto copyHistory
-	}
-
-copyHistory:
-	// Perform a backwards copy according to RFC section 3.2.3.
-	{
-		cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
-		if cnt == 0 {
-			cnt = dict.writeCopy(f.copyDist, f.copyLen)
-		}
-		f.copyLen -= cnt
-
-		if dict.availWrite() == 0 || f.copyLen > 0 {
-			f.toRead = dict.readFlush()
-			f.step = (*decompressor).huffmanStringsReader // We need to continue this work
-			f.stepState = stateDict
-			f.b, f.nb = fb, fnb
-			return
-		}
-		goto readLiteral
-	}
-	// Not reached
-}
-
-// Decode a single Huffman block from f.
-// hl and hd are the Huffman states for the lit/length values
-// and the distance values, respectively. If hd == nil, using the
-// fixed distance encoding associated with fixed Huffman blocks.
-func (f *decompressor) huffmanGenericReader() {
-	const (
-		stateInit = iota // Zero value must be stateInit
-		stateDict
-	)
-	fr := f.r.(Reader)
-
-	// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
-	// but is smart enough to keep local variables in registers, so use nb and b,
-	// inline call to moreBits and reassign b,nb back to f on return.
-	fnb, fb, dict := f.nb, f.b, &f.dict
-
-	switch f.stepState {
-	case stateInit:
-		goto readLiteral
-	case stateDict:
-		goto copyHistory
-	}
-
-readLiteral:
-	// Read literal and/or (length, distance) according to RFC section 3.2.3.
-	{
-		var v int
-		{
-			// Inlined v, err := f.huffSym(f.hl)
-			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
-			// with single element, huffSym must error on these two edge cases. In both
-			// cases, the chunks slice will be 0 for the invalid sequence, leading it
-			// satisfy the n == 0 check below.
- n := uint(f.hl.maxRead) - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hl.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - v = int(chunk >> huffmanValueShift) - break - } - } - } - - var length int - switch { - case v < 256: - dict.writeByte(byte(v)) - if dict.availWrite() == 0 { - f.toRead = dict.readFlush() - f.step = (*decompressor).huffmanGenericReader - f.stepState = stateInit - f.b, f.nb = fb, fnb - return - } - goto readLiteral - case v == 256: - f.b, f.nb = fb, fnb - f.finishBlock() - return - // otherwise, reference to older data - case v < 265: - length = v - (257 - 3) - case v < maxNumLit: - val := decCodeToLen[(v - 257)] - length = int(val.length) + 3 - n := uint(val.extra) - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits n>0:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - length += int(fb & bitMask32[n]) - fb >>= n & regSizeMaskUint32 - fnb -= n - default: - if debugDecode { - fmt.Println(v, ">= maxNumLit") - } - f.err = CorruptInputError(f.roffset) - f.b, f.nb = fb, fnb - return - } - - var dist uint32 - if f.hd == nil { - for fnb < 5 { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("morebits f.nb<5:", err) - } - f.err = err - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3))) - fb >>= 5 - fnb -= 5 - } else { - // Since a huffmanDecoder can be empty or be composed of a degenerate tree - // with single element, huffSym must error on these two edge cases. In both - // cases, the chunks slice will be 0 for the invalid sequence, leading it - // satisfy the n == 0 check below. - n := uint(f.hd.maxRead) - // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, - // but is smart enough to keep local variables in registers, so use nb and b, - // inline call to moreBits and reassign b,nb back to f on return. - for { - for fnb < n { - c, err := fr.ReadByte() - if err != nil { - f.b, f.nb = fb, fnb - f.err = noEOF(err) - return - } - f.roffset++ - fb |= uint32(c) << (fnb & regSizeMaskUint32) - fnb += 8 - } - chunk := f.hd.chunks[fb&(huffmanNumChunks-1)] - n = uint(chunk & huffmanCountMask) - if n > huffmanChunkBits { - chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask] - n = uint(chunk & huffmanCountMask) - } - if n <= fnb { - if n == 0 { - f.b, f.nb = fb, fnb - if debugDecode { - fmt.Println("huffsym: n==0") - } - f.err = CorruptInputError(f.roffset) - return - } - fb = fb >> (n & regSizeMaskUint32) - fnb = fnb - n - dist = uint32(chunk >> huffmanValueShift) - break - } - } - } - - switch { - case dist < 4: - dist++ - case dist < maxNumDist: - nb := uint(dist-2) >> 1 - // have 1 bit in bottom of dist, need nb more. 
-			extra := (dist & 1) << (nb & regSizeMaskUint32)
-			for fnb < nb {
-				c, err := fr.ReadByte()
-				if err != nil {
-					f.b, f.nb = fb, fnb
-					if debugDecode {
-						fmt.Println("morebits f.nb<nb:", err)
-					}
-					f.err = err
-					return
-				}
-				f.roffset++
-				fb |= uint32(c) << (fnb & regSizeMaskUint32)
-				fnb += 8
-			}
-			extra |= fb & bitMask32[nb]
-			fb >>= nb & regSizeMaskUint32
-			fnb -= nb
-			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
-			// slower: dist = bitMask32[nb+1] + 2 + extra
-		default:
-			f.b, f.nb = fb, fnb
-			if debugDecode {
-				fmt.Println("dist too big:", dist, maxNumDist)
-			}
-			f.err = CorruptInputError(f.roffset)
-			return
-		}
-
-		// No check on length; encoding can be prescient.
-		if dist > uint32(dict.histSize()) {
-			f.b, f.nb = fb, fnb
-			if debugDecode {
-				fmt.Println("dist > dict.histSize():", dist, dict.histSize())
-			}
-			f.err = CorruptInputError(f.roffset)
-			return
-		}
-
-		f.copyLen, f.copyDist = length, int(dist)
-		goto copyHistory
-	}
-
-copyHistory:
-	// Perform a backwards copy according to RFC section 3.2.3.
-	{
-		cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
-		if cnt == 0 {
-			cnt = dict.writeCopy(f.copyDist, f.copyLen)
-		}
-		f.copyLen -= cnt
-
-		if dict.availWrite() == 0 || f.copyLen > 0 {
-			f.toRead = dict.readFlush()
-			f.step = (*decompressor).huffmanGenericReader // We need to continue this work
-			f.stepState = stateDict
-			f.b, f.nb = fb, fnb
-			return
-		}
-		goto readLiteral
-	}
-	// Not reached
-}
-
-func (f *decompressor) huffmanBlockDecoder() func() {
-	switch f.r.(type) {
-	case *bytes.Buffer:
-		return f.huffmanBytesBuffer
-	case *bytes.Reader:
-		return f.huffmanBytesReader
-	case *bufio.Reader:
-		return f.huffmanBufioReader
-	case *strings.Reader:
-		return f.huffmanStringsReader
-	case Reader:
-		return f.huffmanGenericReader
-	default:
-		return f.huffmanGenericReader
-	}
-}
diff --git a/vendor/github.com/klauspost/compress/flate/level1.go b/vendor/github.com/klauspost/compress/flate/level1.go
deleted file mode 100644
index 703b9a89..00000000
--- a/vendor/github.com/klauspost/compress/flate/level1.go
+++ /dev/null
@@ -1,241 +0,0 @@
-package flate
-
-import (
-	"encoding/binary"
-	"fmt"
-	"math/bits"
-)
-
-// fastGen maintains the table for matches,
-// and the previous byte block for level 2.
-// This is the generic implementation.
-type fastEncL1 struct {
-	fastGen
-	table [tableSize]tableEntry
-}
-
-// EncodeL1 uses a similar algorithm to level 1
-func (e *fastEncL1) Encode(dst *tokens, src []byte) {
-	const (
-		inputMargin            = 12 - 1
-		minNonLiteralBlockSize = 1 + 1 + inputMargin
-		hashBytes              = 5
-	)
-	if debugDeflate && e.cur < 0 {
-		panic(fmt.Sprint("e.cur < 0: ", e.cur))
-	}
-
-	// Protect against e.cur wraparound.
-	for e.cur >= bufferReset {
-		if len(e.hist) == 0 {
-			for i := range e.table[:] {
-				e.table[i] = tableEntry{}
-			}
-			e.cur = maxMatchOffset
-			break
-		}
-		// Shift down everything in the table that isn't already too far away.
-		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
-		for i := range e.table[:] {
-			v := e.table[i].offset
-			if v <= minOff {
-				v = 0
-			} else {
-				v = v - e.cur + maxMatchOffset
-			}
-			e.table[i].offset = v
-		}
-		e.cur = maxMatchOffset
-	}
-
-	s := e.addBlock(src)
-
-	// This check isn't in the Snappy implementation, but there, the caller
-	// instead of the callee handles this case.
-	if len(src) < minNonLiteralBlockSize {
-		// We do not fill the token table.
-		// This will be picked up by caller.
-		dst.n = uint16(len(src))
-		return
-	}
-
-	// Override src
-	src = e.hist
-	nextEmit := s
-
-	// sLimit is when to stop looking for offset/length copies. The inputMargin
-	// lets us use a fast path for emitLiteral in the main loop, while we are
-	// looking for copies.
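The huffmanBlockDecoder switch deleted above exists so that each decode loop is compiled against a concrete reader type: calling ReadByte on *bytes.Reader or *bufio.Reader directly avoids the interface-method dispatch a single generic loop would pay on every byte. A rough sketch of the same specialization idea; byteSum is a made-up example, not code from this package:

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"strings"
)

// byteSum switches once on the concrete reader type so the hot loop uses
// a direct, devirtualized method call instead of io.ByteReader dispatch.
func byteSum(r io.Reader) (sum int) {
	switch fr := r.(type) {
	case *bytes.Reader:
		for {
			c, err := fr.ReadByte() // direct call into *bytes.Reader
			if err != nil {
				return sum
			}
			sum += int(c)
		}
	case *strings.Reader:
		for {
			c, err := fr.ReadByte() // direct call into *strings.Reader
			if err != nil {
				return sum
			}
			sum += int(c)
		}
	default:
		// Generic fallback: wrap in a bufio.Reader to get ReadByte.
		br := bufio.NewReader(fr)
		for {
			c, err := br.ReadByte()
			if err != nil {
				return sum
			}
			sum += int(c)
		}
	}
}

func main() {
	fmt.Println(byteSum(strings.NewReader("abc"))) // 97+98+99 = 294
}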
-	sLimit := int32(len(src) - inputMargin)
-
-	// nextEmit is where in src the next emitLiteral should start from.
-	cv := load6432(src, s)
-
-	for {
-		const skipLog = 5
-		const doEvery = 2
-
-		nextS := s
-		var candidate tableEntry
-		for {
-			nextHash := hashLen(cv, tableBits, hashBytes)
-			candidate = e.table[nextHash]
-			nextS = s + doEvery + (s-nextEmit)>>skipLog
-			if nextS > sLimit {
-				goto emitRemainder
-			}
-
-			now := load6432(src, nextS)
-			e.table[nextHash] = tableEntry{offset: s + e.cur}
-			nextHash = hashLen(now, tableBits, hashBytes)
-
-			offset := s - (candidate.offset - e.cur)
-			if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
-				e.table[nextHash] = tableEntry{offset: nextS + e.cur}
-				break
-			}
-
-			// Do one right away...
-			cv = now
-			s = nextS
-			nextS++
-			candidate = e.table[nextHash]
-			now >>= 8
-			e.table[nextHash] = tableEntry{offset: s + e.cur}
-
-			offset = s - (candidate.offset - e.cur)
-			if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
-				e.table[nextHash] = tableEntry{offset: nextS + e.cur}
-				break
-			}
-			cv = now
-			s = nextS
-		}
-
-		// A 4-byte match has been found. We'll later see if more than 4 bytes
-		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
-		// them as literal bytes.
-		for {
-			// Invariant: we have a 4-byte match at s, and no need to emit any
-			// literal bytes prior to s.
-
-			// Extend the 4-byte match as long as possible.
-			t := candidate.offset - e.cur
-			var l = int32(4)
-			if false {
-				l = e.matchlenLong(s+4, t+4, src) + 4
-			} else {
-				// inlined:
-				a := src[s+4:]
-				b := src[t+4:]
-				for len(a) >= 8 {
-					if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
-						l += int32(bits.TrailingZeros64(diff) >> 3)
-						break
-					}
-					l += 8
-					a = a[8:]
-					b = b[8:]
-				}
-				if len(a) < 8 {
-					b = b[:len(a)]
-					for i := range a {
-						if a[i] != b[i] {
-							break
-						}
-						l++
-					}
-				}
-			}
-
-			// Extend backwards
-			for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
-				s--
-				t--
-				l++
-			}
-			if nextEmit < s {
-				if false {
-					emitLiteral(dst, src[nextEmit:s])
-				} else {
-					for _, v := range src[nextEmit:s] {
-						dst.tokens[dst.n] = token(v)
-						dst.litHist[v]++
-						dst.n++
-					}
-				}
-			}
-
-			// Save the match found
-			if false {
-				dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
-			} else {
-				// Inlined...
-				xoffset := uint32(s - t - baseMatchOffset)
-				xlength := l
-				oc := offsetCode(xoffset)
-				xoffset |= oc << 16
-				for xlength > 0 {
-					xl := xlength
-					if xl > 258 {
-						if xl > 258+baseMatchLength {
-							xl = 258
-						} else {
-							xl = 258 - baseMatchLength
-						}
-					}
-					xlength -= xl
-					xl -= baseMatchLength
-					dst.extraHist[lengthCodes1[uint8(xl)]]++
-					dst.offHist[oc]++
-					dst.tokens[dst.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
-					dst.n++
-				}
-			}
-			s += l
-			nextEmit = s
-			if nextS >= s {
-				s = nextS + 1
-			}
-			if s >= sLimit {
-				// Index first pair after match end.
-				if int(s+l+8) < len(src) {
-					cv := load6432(src, s)
-					e.table[hashLen(cv, tableBits, hashBytes)] = tableEntry{offset: s + e.cur}
-				}
-				goto emitRemainder
-			}
-
-			// We could immediately start working at s now, but to improve
-			// compression we first update the hash table at s-2 and at s. If
-			// another emitCopy is not our next move, also calculate nextHash
-			// at s+1. At least on GOARCH=amd64, these three hash calculations
-			// are faster as one load64 call (with some shifts) instead of
-			// three load32 calls.
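The inlined match extension above compares eight bytes per iteration: XOR two 64-bit little-endian loads, and if they differ, the index of the first mismatching byte falls out of the trailing-zero count (each byte contributes 8 bits). A self-contained version of that idea; matchLen here is a simplified stand-in, not the package's matchlenLong:

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// matchLen returns the length of the common prefix of a and b, comparing
// a word at a time and locating the first differing byte via the trailing
// zeros of the XOR of the two words.
func matchLen(a, b []byte) (n int) {
	for len(a) >= 8 && len(b) >= 8 {
		if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
			return n + bits.TrailingZeros64(diff)>>3
		}
		n += 8
		a, b = a[8:], b[8:]
	}
	// Tail: byte-by-byte.
	for i := range a {
		if i >= len(b) || a[i] != b[i] {
			break
		}
		n++
	}
	return n
}

func main() {
	fmt.Println(matchLen([]byte("deflate stream"), []byte("deflate block"))) // 8
}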
- x := load6432(src, s-2) - o := e.cur + s - 2 - prevHash := hashLen(x, tableBits, hashBytes) - e.table[prevHash] = tableEntry{offset: o} - x >>= 16 - currHash := hashLen(x, tableBits, hashBytes) - candidate = e.table[currHash] - e.table[currHash] = tableEntry{offset: o + 2} - - offset := s - (candidate.offset - e.cur) - if offset > maxMatchOffset || uint32(x) != load3232(src, candidate.offset-e.cur) { - cv = x >> 8 - s++ - break - } - } - } - -emitRemainder: - if int(nextEmit) < len(src) { - // If nothing was added, don't encode literals. - if dst.n == 0 { - return - } - emitLiteral(dst, src[nextEmit:]) - } -} diff --git a/vendor/github.com/klauspost/compress/flate/level2.go b/vendor/github.com/klauspost/compress/flate/level2.go deleted file mode 100644 index 876dfbe3..00000000 --- a/vendor/github.com/klauspost/compress/flate/level2.go +++ /dev/null @@ -1,214 +0,0 @@ -package flate - -import "fmt" - -// fastGen maintains the table for matches, -// and the previous byte block for level 2. -// This is the generic implementation. -type fastEncL2 struct { - fastGen - table [bTableSize]tableEntry -} - -// EncodeL2 uses a similar algorithm to level 1, but is capable -// of matching across blocks giving better compression at a small slowdown. -func (e *fastEncL2) Encode(dst *tokens, src []byte) { - const ( - inputMargin = 12 - 1 - minNonLiteralBlockSize = 1 + 1 + inputMargin - hashBytes = 5 - ) - - if debugDeflate && e.cur < 0 { - panic(fmt.Sprint("e.cur < 0: ", e.cur)) - } - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - e.cur = maxMatchOffset - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - maxMatchOffset - for i := range e.table[:] { - v := e.table[i].offset - if v <= minOff { - v = 0 - } else { - v = v - e.cur + maxMatchOffset - } - e.table[i].offset = v - } - e.cur = maxMatchOffset - } - - s := e.addBlock(src) - - // This check isn't in the Snappy implementation, but there, the caller - // instead of the callee handles this case. - if len(src) < minNonLiteralBlockSize { - // We do not fill the token table. - // This will be picked up by caller. - dst.n = uint16(len(src)) - return - } - - // Override src - src = e.hist - nextEmit := s - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := int32(len(src) - inputMargin) - - // nextEmit is where in src the next emitLiteral should start from. - cv := load6432(src, s) - for { - // When should we start skipping if we haven't found matches in a long while. - const skipLog = 5 - const doEvery = 2 - - nextS := s - var candidate tableEntry - for { - nextHash := hashLen(cv, bTableBits, hashBytes) - s = nextS - nextS = s + doEvery + (s-nextEmit)>>skipLog - if nextS > sLimit { - goto emitRemainder - } - candidate = e.table[nextHash] - now := load6432(src, nextS) - e.table[nextHash] = tableEntry{offset: s + e.cur} - nextHash = hashLen(now, bTableBits, hashBytes) - - offset := s - (candidate.offset - e.cur) - if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { - e.table[nextHash] = tableEntry{offset: nextS + e.cur} - break - } - - // Do one right away... 
- cv = now - s = nextS - nextS++ - candidate = e.table[nextHash] - now >>= 8 - e.table[nextHash] = tableEntry{offset: s + e.cur} - - offset = s - (candidate.offset - e.cur) - if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { - break - } - cv = now - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - - // Call emitCopy, and then see if another emitCopy could be our next - // move. Repeat until we find no match for the input immediately after - // what was consumed by the last emitCopy call. - // - // If we exit this loop normally then we need to call emitLiteral next, - // though we don't yet know how big the literal will be. We handle that - // by proceeding to the next iteration of the main loop. We also can - // exit this loop via goto if we get close to exhausting the input. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - - // Extend the 4-byte match as long as possible. - t := candidate.offset - e.cur - l := e.matchlenLong(s+4, t+4, src) + 4 - - // Extend backwards - for t > 0 && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - if nextEmit < s { - if false { - emitLiteral(dst, src[nextEmit:s]) - } else { - for _, v := range src[nextEmit:s] { - dst.tokens[dst.n] = token(v) - dst.litHist[v]++ - dst.n++ - } - } - } - - dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) - s += l - nextEmit = s - if nextS >= s { - s = nextS + 1 - } - - if s >= sLimit { - // Index first pair after match end. - if int(s+l+8) < len(src) { - cv := load6432(src, s) - e.table[hashLen(cv, bTableBits, hashBytes)] = tableEntry{offset: s + e.cur} - } - goto emitRemainder - } - - // Store every second hash in-between, but offset by 1. - for i := s - l + 2; i < s-5; i += 7 { - x := load6432(src, i) - nextHash := hashLen(x, bTableBits, hashBytes) - e.table[nextHash] = tableEntry{offset: e.cur + i} - // Skip one - x >>= 16 - nextHash = hashLen(x, bTableBits, hashBytes) - e.table[nextHash] = tableEntry{offset: e.cur + i + 2} - // Skip one - x >>= 16 - nextHash = hashLen(x, bTableBits, hashBytes) - e.table[nextHash] = tableEntry{offset: e.cur + i + 4} - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-2 to s. If - // another emitCopy is not our next move, also calculate nextHash - // at s+1. At least on GOARCH=amd64, these three hash calculations - // are faster as one load64 call (with some shifts) instead of - // three load32 calls. - x := load6432(src, s-2) - o := e.cur + s - 2 - prevHash := hashLen(x, bTableBits, hashBytes) - prevHash2 := hashLen(x>>8, bTableBits, hashBytes) - e.table[prevHash] = tableEntry{offset: o} - e.table[prevHash2] = tableEntry{offset: o + 1} - currHash := hashLen(x>>16, bTableBits, hashBytes) - candidate = e.table[currHash] - e.table[currHash] = tableEntry{offset: o + 2} - - offset := s - (candidate.offset - e.cur) - if offset > maxMatchOffset || uint32(x>>16) != load3232(src, candidate.offset-e.cur) { - cv = x >> 24 - s++ - break - } - } - } - -emitRemainder: - if int(nextEmit) < len(src) { - // If nothing was added, don't encode literals. 
- if dst.n == 0 { - return - } - - emitLiteral(dst, src[nextEmit:]) - } -} diff --git a/vendor/github.com/klauspost/compress/flate/level3.go b/vendor/github.com/klauspost/compress/flate/level3.go deleted file mode 100644 index 7aa2b72a..00000000 --- a/vendor/github.com/klauspost/compress/flate/level3.go +++ /dev/null @@ -1,241 +0,0 @@ -package flate - -import "fmt" - -// fastEncL3 -type fastEncL3 struct { - fastGen - table [1 << 16]tableEntryPrev -} - -// Encode uses a similar algorithm to level 2, will check up to two candidates. -func (e *fastEncL3) Encode(dst *tokens, src []byte) { - const ( - inputMargin = 12 - 1 - minNonLiteralBlockSize = 1 + 1 + inputMargin - tableBits = 16 - tableSize = 1 << tableBits - hashBytes = 5 - ) - - if debugDeflate && e.cur < 0 { - panic(fmt.Sprint("e.cur < 0: ", e.cur)) - } - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntryPrev{} - } - e.cur = maxMatchOffset - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - maxMatchOffset - for i := range e.table[:] { - v := e.table[i] - if v.Cur.offset <= minOff { - v.Cur.offset = 0 - } else { - v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset - } - if v.Prev.offset <= minOff { - v.Prev.offset = 0 - } else { - v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset - } - e.table[i] = v - } - e.cur = maxMatchOffset - } - - s := e.addBlock(src) - - // Skip if too small. - if len(src) < minNonLiteralBlockSize { - // We do not fill the token table. - // This will be picked up by caller. - dst.n = uint16(len(src)) - return - } - - // Override src - src = e.hist - nextEmit := s - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := int32(len(src) - inputMargin) - - // nextEmit is where in src the next emitLiteral should start from. - cv := load6432(src, s) - for { - const skipLog = 7 - nextS := s - var candidate tableEntry - for { - nextHash := hashLen(cv, tableBits, hashBytes) - s = nextS - nextS = s + 1 + (s-nextEmit)>>skipLog - if nextS > sLimit { - goto emitRemainder - } - candidates := e.table[nextHash] - now := load6432(src, nextS) - - // Safe offset distance until s + 4... - minOffset := e.cur + s - (maxMatchOffset - 4) - e.table[nextHash] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur}} - - // Check both candidates - candidate = candidates.Cur - if candidate.offset < minOffset { - cv = now - // Previous will also be invalid, we have nothing. - continue - } - - if uint32(cv) == load3232(src, candidate.offset-e.cur) { - if candidates.Prev.offset < minOffset || uint32(cv) != load3232(src, candidates.Prev.offset-e.cur) { - break - } - // Both match and are valid, pick longest. - offset := s - (candidate.offset - e.cur) - o2 := s - (candidates.Prev.offset - e.cur) - l1, l2 := matchLen(src[s+4:], src[s-offset+4:]), matchLen(src[s+4:], src[s-o2+4:]) - if l2 > l1 { - candidate = candidates.Prev - } - break - } else { - // We only check if value mismatches. - // Offset will always be invalid in other cases. - candidate = candidates.Prev - if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { - break - } - } - cv = now - } - - // Call emitCopy, and then see if another emitCopy could be our next - // move. 
Repeat until we find no match for the input immediately after - // what was consumed by the last emitCopy call. - // - // If we exit this loop normally then we need to call emitLiteral next, - // though we don't yet know how big the literal will be. We handle that - // by proceeding to the next iteration of the main loop. We also can - // exit this loop via goto if we get close to exhausting the input. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - - // Extend the 4-byte match as long as possible. - // - t := candidate.offset - e.cur - l := e.matchlenLong(s+4, t+4, src) + 4 - - // Extend backwards - for t > 0 && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - if nextEmit < s { - if false { - emitLiteral(dst, src[nextEmit:s]) - } else { - for _, v := range src[nextEmit:s] { - dst.tokens[dst.n] = token(v) - dst.litHist[v]++ - dst.n++ - } - } - } - - dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) - s += l - nextEmit = s - if nextS >= s { - s = nextS + 1 - } - - if s >= sLimit { - t += l - // Index first pair after match end. - if int(t+8) < len(src) && t > 0 { - cv = load6432(src, t) - nextHash := hashLen(cv, tableBits, hashBytes) - e.table[nextHash] = tableEntryPrev{ - Prev: e.table[nextHash].Cur, - Cur: tableEntry{offset: e.cur + t}, - } - } - goto emitRemainder - } - - // Store every 5th hash in-between. - for i := s - l + 2; i < s-5; i += 6 { - nextHash := hashLen(load6432(src, i), tableBits, hashBytes) - e.table[nextHash] = tableEntryPrev{ - Prev: e.table[nextHash].Cur, - Cur: tableEntry{offset: e.cur + i}} - } - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-2 to s. - x := load6432(src, s-2) - prevHash := hashLen(x, tableBits, hashBytes) - - e.table[prevHash] = tableEntryPrev{ - Prev: e.table[prevHash].Cur, - Cur: tableEntry{offset: e.cur + s - 2}, - } - x >>= 8 - prevHash = hashLen(x, tableBits, hashBytes) - - e.table[prevHash] = tableEntryPrev{ - Prev: e.table[prevHash].Cur, - Cur: tableEntry{offset: e.cur + s - 1}, - } - x >>= 8 - currHash := hashLen(x, tableBits, hashBytes) - candidates := e.table[currHash] - cv = x - e.table[currHash] = tableEntryPrev{ - Prev: candidates.Cur, - Cur: tableEntry{offset: s + e.cur}, - } - - // Check both candidates - candidate = candidates.Cur - minOffset := e.cur + s - (maxMatchOffset - 4) - - if candidate.offset > minOffset { - if uint32(cv) == load3232(src, candidate.offset-e.cur) { - // Found a match... - continue - } - candidate = candidates.Prev - if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { - // Match at prev... - continue - } - } - cv = x >> 8 - s++ - break - } - } - -emitRemainder: - if int(nextEmit) < len(src) { - // If nothing was added, don't encode literals. 
- if dst.n == 0 { - return - } - - emitLiteral(dst, src[nextEmit:]) - } -} diff --git a/vendor/github.com/klauspost/compress/flate/level4.go b/vendor/github.com/klauspost/compress/flate/level4.go deleted file mode 100644 index 23c08b32..00000000 --- a/vendor/github.com/klauspost/compress/flate/level4.go +++ /dev/null @@ -1,221 +0,0 @@ -package flate - -import "fmt" - -type fastEncL4 struct { - fastGen - table [tableSize]tableEntry - bTable [tableSize]tableEntry -} - -func (e *fastEncL4) Encode(dst *tokens, src []byte) { - const ( - inputMargin = 12 - 1 - minNonLiteralBlockSize = 1 + 1 + inputMargin - hashShortBytes = 4 - ) - if debugDeflate && e.cur < 0 { - panic(fmt.Sprint("e.cur < 0: ", e.cur)) - } - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.bTable[:] { - e.bTable[i] = tableEntry{} - } - e.cur = maxMatchOffset - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - maxMatchOffset - for i := range e.table[:] { - v := e.table[i].offset - if v <= minOff { - v = 0 - } else { - v = v - e.cur + maxMatchOffset - } - e.table[i].offset = v - } - for i := range e.bTable[:] { - v := e.bTable[i].offset - if v <= minOff { - v = 0 - } else { - v = v - e.cur + maxMatchOffset - } - e.bTable[i].offset = v - } - e.cur = maxMatchOffset - } - - s := e.addBlock(src) - - // This check isn't in the Snappy implementation, but there, the caller - // instead of the callee handles this case. - if len(src) < minNonLiteralBlockSize { - // We do not fill the token table. - // This will be picked up by caller. - dst.n = uint16(len(src)) - return - } - - // Override src - src = e.hist - nextEmit := s - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := int32(len(src) - inputMargin) - - // nextEmit is where in src the next emitLiteral should start from. - cv := load6432(src, s) - for { - const skipLog = 6 - const doEvery = 1 - - nextS := s - var t int32 - for { - nextHashS := hashLen(cv, tableBits, hashShortBytes) - nextHashL := hash7(cv, tableBits) - - s = nextS - nextS = s + doEvery + (s-nextEmit)>>skipLog - if nextS > sLimit { - goto emitRemainder - } - // Fetch a short+long candidate - sCandidate := e.table[nextHashS] - lCandidate := e.bTable[nextHashL] - next := load6432(src, nextS) - entry := tableEntry{offset: s + e.cur} - e.table[nextHashS] = entry - e.bTable[nextHashL] = entry - - t = lCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.offset-e.cur) { - // We got a long match. Use that. - break - } - - t = sCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { - // Found a 4 match... - lCandidate = e.bTable[hash7(next, tableBits)] - - // If the next long is a candidate, check if we should use that instead... - lOff := nextS - (lCandidate.offset - e.cur) - if lOff < maxMatchOffset && load3232(src, lCandidate.offset-e.cur) == uint32(next) { - l1, l2 := matchLen(src[s+4:], src[t+4:]), matchLen(src[nextS+4:], src[nextS-lOff+4:]) - if l2 > l1 { - s = nextS - t = lCandidate.offset - e.cur - } - } - break - } - cv = next - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. 
Emit - // them as literal bytes. - - // Extend the 4-byte match as long as possible. - l := e.matchlenLong(s+4, t+4, src) + 4 - - // Extend backwards - for t > 0 && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - if nextEmit < s { - if false { - emitLiteral(dst, src[nextEmit:s]) - } else { - for _, v := range src[nextEmit:s] { - dst.tokens[dst.n] = token(v) - dst.litHist[v]++ - dst.n++ - } - } - } - if debugDeflate { - if t >= s { - panic("s-t") - } - if (s - t) > maxMatchOffset { - panic(fmt.Sprintln("mmo", t)) - } - if l < baseMatchLength { - panic("bml") - } - } - - dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) - s += l - nextEmit = s - if nextS >= s { - s = nextS + 1 - } - - if s >= sLimit { - // Index first pair after match end. - if int(s+8) < len(src) { - cv := load6432(src, s) - e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: s + e.cur} - e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur} - } - goto emitRemainder - } - - // Store every 3rd hash in-between - if true { - i := nextS - if i < s-1 { - cv := load6432(src, i) - t := tableEntry{offset: i + e.cur} - t2 := tableEntry{offset: t.offset + 1} - e.bTable[hash7(cv, tableBits)] = t - e.bTable[hash7(cv>>8, tableBits)] = t2 - e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 - - i += 3 - for ; i < s-1; i += 3 { - cv := load6432(src, i) - t := tableEntry{offset: i + e.cur} - t2 := tableEntry{offset: t.offset + 1} - e.bTable[hash7(cv, tableBits)] = t - e.bTable[hash7(cv>>8, tableBits)] = t2 - e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 - } - } - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-1 and at s. - x := load6432(src, s-1) - o := e.cur + s - 1 - prevHashS := hashLen(x, tableBits, hashShortBytes) - prevHashL := hash7(x, tableBits) - e.table[prevHashS] = tableEntry{offset: o} - e.bTable[prevHashL] = tableEntry{offset: o} - cv = x >> 8 - } - -emitRemainder: - if int(nextEmit) < len(src) { - // If nothing was added, don't encode literals. - if dst.n == 0 { - return - } - - emitLiteral(dst, src[nextEmit:]) - } -} diff --git a/vendor/github.com/klauspost/compress/flate/level5.go b/vendor/github.com/klauspost/compress/flate/level5.go deleted file mode 100644 index 83ef50ba..00000000 --- a/vendor/github.com/klauspost/compress/flate/level5.go +++ /dev/null @@ -1,310 +0,0 @@ -package flate - -import "fmt" - -type fastEncL5 struct { - fastGen - table [tableSize]tableEntry - bTable [tableSize]tableEntryPrev -} - -func (e *fastEncL5) Encode(dst *tokens, src []byte) { - const ( - inputMargin = 12 - 1 - minNonLiteralBlockSize = 1 + 1 + inputMargin - hashShortBytes = 4 - ) - if debugDeflate && e.cur < 0 { - panic(fmt.Sprint("e.cur < 0: ", e.cur)) - } - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.bTable[:] { - e.bTable[i] = tableEntryPrev{} - } - e.cur = maxMatchOffset - break - } - // Shift down everything in the table that isn't already too far away. 
- minOff := e.cur + int32(len(e.hist)) - maxMatchOffset - for i := range e.table[:] { - v := e.table[i].offset - if v <= minOff { - v = 0 - } else { - v = v - e.cur + maxMatchOffset - } - e.table[i].offset = v - } - for i := range e.bTable[:] { - v := e.bTable[i] - if v.Cur.offset <= minOff { - v.Cur.offset = 0 - v.Prev.offset = 0 - } else { - v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset - if v.Prev.offset <= minOff { - v.Prev.offset = 0 - } else { - v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset - } - } - e.bTable[i] = v - } - e.cur = maxMatchOffset - } - - s := e.addBlock(src) - - // This check isn't in the Snappy implementation, but there, the caller - // instead of the callee handles this case. - if len(src) < minNonLiteralBlockSize { - // We do not fill the token table. - // This will be picked up by caller. - dst.n = uint16(len(src)) - return - } - - // Override src - src = e.hist - nextEmit := s - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := int32(len(src) - inputMargin) - - // nextEmit is where in src the next emitLiteral should start from. - cv := load6432(src, s) - for { - const skipLog = 6 - const doEvery = 1 - - nextS := s - var l int32 - var t int32 - for { - nextHashS := hashLen(cv, tableBits, hashShortBytes) - nextHashL := hash7(cv, tableBits) - - s = nextS - nextS = s + doEvery + (s-nextEmit)>>skipLog - if nextS > sLimit { - goto emitRemainder - } - // Fetch a short+long candidate - sCandidate := e.table[nextHashS] - lCandidate := e.bTable[nextHashL] - next := load6432(src, nextS) - entry := tableEntry{offset: s + e.cur} - e.table[nextHashS] = entry - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = entry, eLong.Cur - - nextHashS = hashLen(next, tableBits, hashShortBytes) - nextHashL = hash7(next, tableBits) - - t = lCandidate.Cur.offset - e.cur - if s-t < maxMatchOffset { - if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { - // Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur} - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur - - t2 := lCandidate.Prev.offset - e.cur - if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { - l = e.matchlen(s+4, t+4, src) + 4 - ml1 := e.matchlen(s+4, t2+4, src) + 4 - if ml1 > l { - t = t2 - l = ml1 - break - } - } - break - } - t = lCandidate.Prev.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { - // Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur} - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur - break - } - } - - t = sCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { - // Found a 4 match... - l = e.matchlen(s+4, t+4, src) + 4 - lCandidate = e.bTable[nextHashL] - // Store the next match - - e.table[nextHashS] = tableEntry{offset: nextS + e.cur} - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur - - // If the next long is a candidate, use that... 
- t2 := lCandidate.Cur.offset - e.cur - if nextS-t2 < maxMatchOffset { - if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { - ml := e.matchlen(nextS+4, t2+4, src) + 4 - if ml > l { - t = t2 - s = nextS - l = ml - break - } - } - // If the previous long is a candidate, use that... - t2 = lCandidate.Prev.offset - e.cur - if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { - ml := e.matchlen(nextS+4, t2+4, src) + 4 - if ml > l { - t = t2 - s = nextS - l = ml - break - } - } - } - break - } - cv = next - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - - if l == 0 { - // Extend the 4-byte match as long as possible. - l = e.matchlenLong(s+4, t+4, src) + 4 - } else if l == maxMatchLength { - l += e.matchlenLong(s+l, t+l, src) - } - - // Try to locate a better match by checking the end of best match... - if sAt := s + l; l < 30 && sAt < sLimit { - // Allow some bytes at the beginning to mismatch. - // Sweet spot is 2/3 bytes depending on input. - // 3 is only a little better when it is but sometimes a lot worse. - // The skipped bytes are tested in Extend backwards, - // and still picked up as part of the match if they do. - const skipBeginning = 2 - eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset - t2 := eLong - e.cur - l + skipBeginning - s2 := s + skipBeginning - off := s2 - t2 - if t2 >= 0 && off < maxMatchOffset && off > 0 { - if l2 := e.matchlenLong(s2, t2, src); l2 > l { - t = t2 - l = l2 - s = s2 - } - } - } - - // Extend backwards - for t > 0 && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - if nextEmit < s { - if false { - emitLiteral(dst, src[nextEmit:s]) - } else { - for _, v := range src[nextEmit:s] { - dst.tokens[dst.n] = token(v) - dst.litHist[v]++ - dst.n++ - } - } - } - if debugDeflate { - if t >= s { - panic(fmt.Sprintln("s-t", s, t)) - } - if (s - t) > maxMatchOffset { - panic(fmt.Sprintln("mmo", s-t)) - } - if l < baseMatchLength { - panic("bml") - } - } - - dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) - s += l - nextEmit = s - if nextS >= s { - s = nextS + 1 - } - - if s >= sLimit { - goto emitRemainder - } - - // Store every 3rd hash in-between. - if true { - const hashEvery = 3 - i := s - l + 1 - if i < s-1 { - cv := load6432(src, i) - t := tableEntry{offset: i + e.cur} - e.table[hashLen(cv, tableBits, hashShortBytes)] = t - eLong := &e.bTable[hash7(cv, tableBits)] - eLong.Cur, eLong.Prev = t, eLong.Cur - - // Do an long at i+1 - cv >>= 8 - t = tableEntry{offset: t.offset + 1} - eLong = &e.bTable[hash7(cv, tableBits)] - eLong.Cur, eLong.Prev = t, eLong.Cur - - // We only have enough bits for a short entry at i+2 - cv >>= 8 - t = tableEntry{offset: t.offset + 1} - e.table[hashLen(cv, tableBits, hashShortBytes)] = t - - // Skip one - otherwise we risk hitting 's' - i += 4 - for ; i < s-1; i += hashEvery { - cv := load6432(src, i) - t := tableEntry{offset: i + e.cur} - t2 := tableEntry{offset: t.offset + 1} - eLong := &e.bTable[hash7(cv, tableBits)] - eLong.Cur, eLong.Prev = t, eLong.Cur - e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2 - } - } - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-1 and at s. 
- x := load6432(src, s-1) - o := e.cur + s - 1 - prevHashS := hashLen(x, tableBits, hashShortBytes) - prevHashL := hash7(x, tableBits) - e.table[prevHashS] = tableEntry{offset: o} - eLong := &e.bTable[prevHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur - cv = x >> 8 - } - -emitRemainder: - if int(nextEmit) < len(src) { - // If nothing was added, don't encode literals. - if dst.n == 0 { - return - } - - emitLiteral(dst, src[nextEmit:]) - } -} diff --git a/vendor/github.com/klauspost/compress/flate/level6.go b/vendor/github.com/klauspost/compress/flate/level6.go deleted file mode 100644 index f1e9d98f..00000000 --- a/vendor/github.com/klauspost/compress/flate/level6.go +++ /dev/null @@ -1,325 +0,0 @@ -package flate - -import "fmt" - -type fastEncL6 struct { - fastGen - table [tableSize]tableEntry - bTable [tableSize]tableEntryPrev -} - -func (e *fastEncL6) Encode(dst *tokens, src []byte) { - const ( - inputMargin = 12 - 1 - minNonLiteralBlockSize = 1 + 1 + inputMargin - hashShortBytes = 4 - ) - if debugDeflate && e.cur < 0 { - panic(fmt.Sprint("e.cur < 0: ", e.cur)) - } - - // Protect against e.cur wraparound. - for e.cur >= bufferReset { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.bTable[:] { - e.bTable[i] = tableEntryPrev{} - } - e.cur = maxMatchOffset - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - maxMatchOffset - for i := range e.table[:] { - v := e.table[i].offset - if v <= minOff { - v = 0 - } else { - v = v - e.cur + maxMatchOffset - } - e.table[i].offset = v - } - for i := range e.bTable[:] { - v := e.bTable[i] - if v.Cur.offset <= minOff { - v.Cur.offset = 0 - v.Prev.offset = 0 - } else { - v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset - if v.Prev.offset <= minOff { - v.Prev.offset = 0 - } else { - v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset - } - } - e.bTable[i] = v - } - e.cur = maxMatchOffset - } - - s := e.addBlock(src) - - // This check isn't in the Snappy implementation, but there, the caller - // instead of the callee handles this case. - if len(src) < minNonLiteralBlockSize { - // We do not fill the token table. - // This will be picked up by caller. - dst.n = uint16(len(src)) - return - } - - // Override src - src = e.hist - nextEmit := s - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := int32(len(src) - inputMargin) - - // nextEmit is where in src the next emitLiteral should start from. 
- cv := load6432(src, s) - // Repeat MUST be > 1 and within range - repeat := int32(1) - for { - const skipLog = 7 - const doEvery = 1 - - nextS := s - var l int32 - var t int32 - for { - nextHashS := hashLen(cv, tableBits, hashShortBytes) - nextHashL := hash7(cv, tableBits) - s = nextS - nextS = s + doEvery + (s-nextEmit)>>skipLog - if nextS > sLimit { - goto emitRemainder - } - // Fetch a short+long candidate - sCandidate := e.table[nextHashS] - lCandidate := e.bTable[nextHashL] - next := load6432(src, nextS) - entry := tableEntry{offset: s + e.cur} - e.table[nextHashS] = entry - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = entry, eLong.Cur - - // Calculate hashes of 'next' - nextHashS = hashLen(next, tableBits, hashShortBytes) - nextHashL = hash7(next, tableBits) - - t = lCandidate.Cur.offset - e.cur - if s-t < maxMatchOffset { - if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { - // Long candidate matches at least 4 bytes. - - // Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur} - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur - - // Check the previous long candidate as well. - t2 := lCandidate.Prev.offset - e.cur - if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { - l = e.matchlen(s+4, t+4, src) + 4 - ml1 := e.matchlen(s+4, t2+4, src) + 4 - if ml1 > l { - t = t2 - l = ml1 - break - } - } - break - } - // Current value did not match, but check if previous long value does. - t = lCandidate.Prev.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { - // Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur} - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur - break - } - } - - t = sCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { - // Found a 4 match... - l = e.matchlen(s+4, t+4, src) + 4 - - // Look up next long candidate (at nextS) - lCandidate = e.bTable[nextHashL] - - // Store the next match - e.table[nextHashS] = tableEntry{offset: nextS + e.cur} - eLong := &e.bTable[nextHashL] - eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur - - // Check repeat at s + repOff - const repOff = 1 - t2 := s - repeat + repOff - if load3232(src, t2) == uint32(cv>>(8*repOff)) { - ml := e.matchlen(s+4+repOff, t2+4, src) + 4 - if ml > l { - t = t2 - l = ml - s += repOff - // Not worth checking more. - break - } - } - - // If the next long is a candidate, use that... - t2 = lCandidate.Cur.offset - e.cur - if nextS-t2 < maxMatchOffset { - if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { - ml := e.matchlen(nextS+4, t2+4, src) + 4 - if ml > l { - t = t2 - s = nextS - l = ml - // This is ok, but check previous as well. - } - } - // If the previous long is a candidate, use that... - t2 = lCandidate.Prev.offset - e.cur - if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { - ml := e.matchlen(nextS+4, t2+4, src) + 4 - if ml > l { - t = t2 - s = nextS - l = ml - break - } - } - } - break - } - cv = next - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - - // Extend the 4-byte match as long as possible. 
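Level 6 above additionally tracks repeat, the distance of the last emitted match, and probes s-repeat before the regular candidates: repeated distances are common in real data and cheap to confirm. A toy version of that probe; tryRepeat is an invented name, and the real code also offsets the probe by repOff and bounds-checks against the window:

package main

import "fmt"

// tryRepeat measures how long src[s:] matches src[s-repeat:], the cheap
// "same distance as last time" check done before any hash-table lookup.
func tryRepeat(src []byte, s, repeat int) (length int) {
	t := s - repeat
	if t < 0 {
		return 0
	}
	for s+length < len(src) && src[t+length] == src[s+length] {
		length++
	}
	return length
}

func main() {
	src := []byte("abcabcabcabc")
	fmt.Println(tryRepeat(src, 6, 3)) // 6: the 3-byte period keeps matching
}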
- if l == 0 { - l = e.matchlenLong(s+4, t+4, src) + 4 - } else if l == maxMatchLength { - l += e.matchlenLong(s+l, t+l, src) - } - - // Try to locate a better match by checking the end-of-match... - if sAt := s + l; sAt < sLimit { - // Allow some bytes at the beginning to mismatch. - // Sweet spot is 2/3 bytes depending on input. - // 3 is only a little better when it is but sometimes a lot worse. - // The skipped bytes are tested in Extend backwards, - // and still picked up as part of the match if they do. - const skipBeginning = 2 - eLong := &e.bTable[hash7(load6432(src, sAt), tableBits)] - // Test current - t2 := eLong.Cur.offset - e.cur - l + skipBeginning - s2 := s + skipBeginning - off := s2 - t2 - if off < maxMatchOffset { - if off > 0 && t2 >= 0 { - if l2 := e.matchlenLong(s2, t2, src); l2 > l { - t = t2 - l = l2 - s = s2 - } - } - // Test next: - t2 = eLong.Prev.offset - e.cur - l + skipBeginning - off := s2 - t2 - if off > 0 && off < maxMatchOffset && t2 >= 0 { - if l2 := e.matchlenLong(s2, t2, src); l2 > l { - t = t2 - l = l2 - s = s2 - } - } - } - } - - // Extend backwards - for t > 0 && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - if nextEmit < s { - if false { - emitLiteral(dst, src[nextEmit:s]) - } else { - for _, v := range src[nextEmit:s] { - dst.tokens[dst.n] = token(v) - dst.litHist[v]++ - dst.n++ - } - } - } - if false { - if t >= s { - panic(fmt.Sprintln("s-t", s, t)) - } - if (s - t) > maxMatchOffset { - panic(fmt.Sprintln("mmo", s-t)) - } - if l < baseMatchLength { - panic("bml") - } - } - - dst.AddMatchLong(l, uint32(s-t-baseMatchOffset)) - repeat = s - t - s += l - nextEmit = s - if nextS >= s { - s = nextS + 1 - } - - if s >= sLimit { - // Index after match end. - for i := nextS + 1; i < int32(len(src))-8; i += 2 { - cv := load6432(src, i) - e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: i + e.cur} - eLong := &e.bTable[hash7(cv, tableBits)] - eLong.Cur, eLong.Prev = tableEntry{offset: i + e.cur}, eLong.Cur - } - goto emitRemainder - } - - // Store every long hash in-between and every second short. - if true { - for i := nextS + 1; i < s-1; i += 2 { - cv := load6432(src, i) - t := tableEntry{offset: i + e.cur} - t2 := tableEntry{offset: t.offset + 1} - eLong := &e.bTable[hash7(cv, tableBits)] - eLong2 := &e.bTable[hash7(cv>>8, tableBits)] - e.table[hashLen(cv, tableBits, hashShortBytes)] = t - eLong.Cur, eLong.Prev = t, eLong.Cur - eLong2.Cur, eLong2.Prev = t2, eLong2.Cur - } - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-1 and at s. - cv = load6432(src, s) - } - -emitRemainder: - if int(nextEmit) < len(src) { - // If nothing was added, don't encode literals. - if dst.n == 0 { - return - } - - emitLiteral(dst, src[nextEmit:]) - } -} diff --git a/vendor/github.com/klauspost/compress/flate/regmask_amd64.go b/vendor/github.com/klauspost/compress/flate/regmask_amd64.go deleted file mode 100644 index 6ed28061..00000000 --- a/vendor/github.com/klauspost/compress/flate/regmask_amd64.go +++ /dev/null @@ -1,37 +0,0 @@ -package flate - -const ( - // Masks for shifts with register sizes of the shift value. - // This can be used to work around the x86 design of shifting by mod register size. - // It can be used when a variable shift is always smaller than the register size. 
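The comment above describes the purpose of these constants: Go defines over-wide shifts to yield 0, so on x86 the compiler normally emits a compare and branch around every variable shift, while the hardware shift instruction already operates modulo the register size. Masking the count with 31 (or with ^uint(0) on other architectures, where the mask is a no-op) proves the count is in range and lets the compiler emit the bare shift. A small illustration; the constant value used is the amd64 one, assumed here:

package main

import "fmt"

// regSizeMaskUint32 mirrors the amd64 constant being deleted below. The
// mask never changes the result when the shift count is already < 32; it
// only removes the compiler's out-of-range handling around the shift.
const regSizeMaskUint32 = 31

func shiftDemo(x uint32, n uint) uint32 {
	return x << (n & regSizeMaskUint32) // compiles to a single SHL on amd64
}

func main() {
	fmt.Println(shiftDemo(1, 5)) // 32, same as 1<<5
}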
- - // reg8SizeMaskX - shift value is 8 bits, shifted is X - reg8SizeMask8 = 7 - reg8SizeMask16 = 15 - reg8SizeMask32 = 31 - reg8SizeMask64 = 63 - - // reg16SizeMaskX - shift value is 16 bits, shifted is X - reg16SizeMask8 = reg8SizeMask8 - reg16SizeMask16 = reg8SizeMask16 - reg16SizeMask32 = reg8SizeMask32 - reg16SizeMask64 = reg8SizeMask64 - - // reg32SizeMaskX - shift value is 32 bits, shifted is X - reg32SizeMask8 = reg8SizeMask8 - reg32SizeMask16 = reg8SizeMask16 - reg32SizeMask32 = reg8SizeMask32 - reg32SizeMask64 = reg8SizeMask64 - - // reg64SizeMaskX - shift value is 64 bits, shifted is X - reg64SizeMask8 = reg8SizeMask8 - reg64SizeMask16 = reg8SizeMask16 - reg64SizeMask32 = reg8SizeMask32 - reg64SizeMask64 = reg8SizeMask64 - - // regSizeMaskUintX - shift value is uint, shifted is X - regSizeMaskUint8 = reg8SizeMask8 - regSizeMaskUint16 = reg8SizeMask16 - regSizeMaskUint32 = reg8SizeMask32 - regSizeMaskUint64 = reg8SizeMask64 -) diff --git a/vendor/github.com/klauspost/compress/flate/regmask_other.go b/vendor/github.com/klauspost/compress/flate/regmask_other.go deleted file mode 100644 index 1b7a2cbd..00000000 --- a/vendor/github.com/klauspost/compress/flate/regmask_other.go +++ /dev/null @@ -1,40 +0,0 @@ -//go:build !amd64 -// +build !amd64 - -package flate - -const ( - // Masks for shifts with register sizes of the shift value. - // This can be used to work around the x86 design of shifting by mod register size. - // It can be used when a variable shift is always smaller than the register size. - - // reg8SizeMaskX - shift value is 8 bits, shifted is X - reg8SizeMask8 = 0xff - reg8SizeMask16 = 0xff - reg8SizeMask32 = 0xff - reg8SizeMask64 = 0xff - - // reg16SizeMaskX - shift value is 16 bits, shifted is X - reg16SizeMask8 = 0xffff - reg16SizeMask16 = 0xffff - reg16SizeMask32 = 0xffff - reg16SizeMask64 = 0xffff - - // reg32SizeMaskX - shift value is 32 bits, shifted is X - reg32SizeMask8 = 0xffffffff - reg32SizeMask16 = 0xffffffff - reg32SizeMask32 = 0xffffffff - reg32SizeMask64 = 0xffffffff - - // reg64SizeMaskX - shift value is 64 bits, shifted is X - reg64SizeMask8 = 0xffffffffffffffff - reg64SizeMask16 = 0xffffffffffffffff - reg64SizeMask32 = 0xffffffffffffffff - reg64SizeMask64 = 0xffffffffffffffff - - // regSizeMaskUintX - shift value is uint, shifted is X - regSizeMaskUint8 = ^uint(0) - regSizeMaskUint16 = ^uint(0) - regSizeMaskUint32 = ^uint(0) - regSizeMaskUint64 = ^uint(0) -) diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go deleted file mode 100644 index f3d4139e..00000000 --- a/vendor/github.com/klauspost/compress/flate/stateless.go +++ /dev/null @@ -1,318 +0,0 @@ -package flate - -import ( - "io" - "math" - "sync" -) - -const ( - maxStatelessBlock = math.MaxInt16 - // dictionary will be taken from maxStatelessBlock, so limit it. 
- maxStatelessDict = 8 << 10 - - slTableBits = 13 - slTableSize = 1 << slTableBits - slTableShift = 32 - slTableBits -) - -type statelessWriter struct { - dst io.Writer - closed bool -} - -func (s *statelessWriter) Close() error { - if s.closed { - return nil - } - s.closed = true - // Emit EOF block - return StatelessDeflate(s.dst, nil, true, nil) -} - -func (s *statelessWriter) Write(p []byte) (n int, err error) { - err = StatelessDeflate(s.dst, p, false, nil) - if err != nil { - return 0, err - } - return len(p), nil -} - -func (s *statelessWriter) Reset(w io.Writer) { - s.dst = w - s.closed = false -} - -// NewStatelessWriter will do compression but without maintaining any state -// between Write calls. -// There will be no memory kept between Write calls, -// but compression and speed will be suboptimal. -// Because of this, the size of actual Write calls will affect output size. -func NewStatelessWriter(dst io.Writer) io.WriteCloser { - return &statelessWriter{dst: dst} -} - -// bitWriterPool contains bit writers that can be reused. -var bitWriterPool = sync.Pool{ - New: func() interface{} { - return newHuffmanBitWriter(nil) - }, -} - -// StatelessDeflate allows compressing directly to a Writer without retaining state. -// When returning everything will be flushed. -// Up to 8KB of an optional dictionary can be given which is presumed to precede the block. -// Longer dictionaries will be truncated and will still produce valid output. -// Sending nil dictionary is perfectly fine. -func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error { - var dst tokens - bw := bitWriterPool.Get().(*huffmanBitWriter) - bw.reset(out) - defer func() { - // don't keep a reference to our output - bw.reset(nil) - bitWriterPool.Put(bw) - }() - if eof && len(in) == 0 { - // Just write an EOF block. - // Could be faster... - bw.writeStoredHeader(0, true) - bw.flush() - return bw.err - } - - // Truncate dict - if len(dict) > maxStatelessDict { - dict = dict[len(dict)-maxStatelessDict:] - } - - // For subsequent loops, keep shallow dict reference to avoid alloc+copy. - var inDict []byte - - for len(in) > 0 { - todo := in - if len(inDict) > 0 { - if len(todo) > maxStatelessBlock-maxStatelessDict { - todo = todo[:maxStatelessBlock-maxStatelessDict] - } - } else if len(todo) > maxStatelessBlock-len(dict) { - todo = todo[:maxStatelessBlock-len(dict)] - } - inOrg := in - in = in[len(todo):] - uncompressed := todo - if len(dict) > 0 { - // combine dict and source - bufLen := len(todo) + len(dict) - combined := make([]byte, bufLen) - copy(combined, dict) - copy(combined[len(dict):], todo) - todo = combined - } - // Compress - if len(inDict) == 0 { - statelessEnc(&dst, todo, int16(len(dict))) - } else { - statelessEnc(&dst, inDict[:maxStatelessDict+len(todo)], maxStatelessDict) - } - isEof := eof && len(in) == 0 - - if dst.n == 0 { - bw.writeStoredHeader(len(uncompressed), isEof) - if bw.err != nil { - return bw.err - } - bw.writeBytes(uncompressed) - } else if int(dst.n) > len(uncompressed)-len(uncompressed)>>4 { - // If we removed less than 1/16th, huffman compress the block. - bw.writeBlockHuff(isEof, uncompressed, len(in) == 0) - } else { - bw.writeBlockDynamic(&dst, isEof, uncompressed, len(in) == 0) - } - if len(in) > 0 { - // Retain a dict if we have more - inDict = inOrg[len(uncompressed)-maxStatelessDict:] - dict = nil - dst.Reset() - } - if bw.err != nil { - return bw.err - } - } - if !eof { - // Align, only a stored block can do that. 
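
// Editorial usage sketch for NewStatelessWriter above: since no state is kept
// between Write calls, each Write becomes an independent deflate block, so
// batching writes matters. Wrapping the stateless writer in a bufio.Writer
// (buffer size chosen arbitrarily here) is one way to do that. Assumes the
// bufio and io imports alongside this package; compressStateless is a
// hypothetical helper.
func compressStateless(dst io.Writer, src io.Reader) error {
	sw := NewStatelessWriter(dst)
	bw := bufio.NewWriterSize(sw, 32<<10) // ~32 KiB per deflate block
	if _, err := io.Copy(bw, src); err != nil {
		return err
	}
	if err := bw.Flush(); err != nil {
		return err
	}
	return sw.Close() // emits the final EOF block
}
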
- bw.writeStoredHeader(0, false) - } - bw.flush() - return bw.err -} - -func hashSL(u uint32) uint32 { - return (u * 0x1e35a7bd) >> slTableShift -} - -func load3216(b []byte, i int16) uint32 { - // Help the compiler eliminate bounds checks on the read so it can be done in a single read. - b = b[i:] - b = b[:4] - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -} - -func load6416(b []byte, i int16) uint64 { - // Help the compiler eliminate bounds checks on the read so it can be done in a single read. - b = b[i:] - b = b[:8] - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 -} - -func statelessEnc(dst *tokens, src []byte, startAt int16) { - const ( - inputMargin = 12 - 1 - minNonLiteralBlockSize = 1 + 1 + inputMargin - ) - - type tableEntry struct { - offset int16 - } - - var table [slTableSize]tableEntry - - // This check isn't in the Snappy implementation, but there, the caller - // instead of the callee handles this case. - if len(src)-int(startAt) < minNonLiteralBlockSize { - // We do not fill the token table. - // This will be picked up by caller. - dst.n = 0 - return - } - // Index until startAt - if startAt > 0 { - cv := load3232(src, 0) - for i := int16(0); i < startAt; i++ { - table[hashSL(cv)] = tableEntry{offset: i} - cv = (cv >> 8) | (uint32(src[i+4]) << 24) - } - } - - s := startAt + 1 - nextEmit := startAt - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := int16(len(src) - inputMargin) - - // nextEmit is where in src the next emitLiteral should start from. - cv := load3216(src, s) - - for { - const skipLog = 5 - const doEvery = 2 - - nextS := s - var candidate tableEntry - for { - nextHash := hashSL(cv) - candidate = table[nextHash] - nextS = s + doEvery + (s-nextEmit)>>skipLog - if nextS > sLimit || nextS <= 0 { - goto emitRemainder - } - - now := load6416(src, nextS) - table[nextHash] = tableEntry{offset: s} - nextHash = hashSL(uint32(now)) - - if cv == load3216(src, candidate.offset) { - table[nextHash] = tableEntry{offset: nextS} - break - } - - // Do one right away... - cv = uint32(now) - s = nextS - nextS++ - candidate = table[nextHash] - now >>= 8 - table[nextHash] = tableEntry{offset: s} - - if cv == load3216(src, candidate.offset) { - table[nextHash] = tableEntry{offset: nextS} - break - } - cv = uint32(now) - s = nextS - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - - // Extend the 4-byte match as long as possible. 
- t := candidate.offset
- l := int16(matchLen(src[s+4:], src[t+4:]) + 4)
-
- // Extend backwards
- for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
- s--
- t--
- l++
- }
- if nextEmit < s {
- if false {
- emitLiteral(dst, src[nextEmit:s])
- } else {
- for _, v := range src[nextEmit:s] {
- dst.tokens[dst.n] = token(v)
- dst.litHist[v]++
- dst.n++
- }
- }
- }
-
- // Save the match found
- dst.AddMatchLong(int32(l), uint32(s-t-baseMatchOffset))
- s += l
- nextEmit = s
- if nextS >= s {
- s = nextS + 1
- }
- if s >= sLimit {
- goto emitRemainder
- }
-
- // We could immediately start working at s now, but to improve
- // compression we first update the hash table at s-2 and at s. If
- // another emitCopy is not our next move, also calculate nextHash
- // at s+1. At least on GOARCH=amd64, these three hash calculations
- // are faster as one load64 call (with some shifts) instead of
- // three load32 calls.
- x := load6416(src, s-2)
- o := s - 2
- prevHash := hashSL(uint32(x))
- table[prevHash] = tableEntry{offset: o}
- x >>= 16
- currHash := hashSL(uint32(x))
- candidate = table[currHash]
- table[currHash] = tableEntry{offset: o + 2}
-
- if uint32(x) != load3216(src, candidate.offset) {
- cv = uint32(x >> 8)
- s++
- break
- }
- }
- }
-
-emitRemainder:
- if int(nextEmit) < len(src) {
- // If nothing was added, don't encode literals.
- if dst.n == 0 {
- return
- }
- emitLiteral(dst, src[nextEmit:])
- }
-}
diff --git a/vendor/github.com/klauspost/compress/flate/token.go b/vendor/github.com/klauspost/compress/flate/token.go
deleted file mode 100644
index d818790c..00000000
--- a/vendor/github.com/klauspost/compress/flate/token.go
+++ /dev/null
@@ -1,379 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package flate
-
-import (
- "bytes"
- "encoding/binary"
- "fmt"
- "io"
- "math"
-)
-
-const (
- // bits 0-16 xoffset = offset - MIN_OFFSET_SIZE, or literal - 16 bits
- // bits 16-22 offsetcode - 5 bits
- // bits 22-30 xlength = length - MIN_MATCH_LENGTH - 8 bits
- // bits 30-32 type 0 = literal 1=EOF 2=Match 3=Unused - 2 bits
- lengthShift = 22
- offsetMask = 1<<lengthShift - 1
- typeMask = 3 << 30
- literalType = 0 << 30
- matchType = 1 << 30
- matchOffsetOnlyMask = 0xffff
-)
-
-type token uint32
-
-type tokens struct {
- extraHist [32]uint16 // codes 256->maxnumlit
- offHist [32]uint16 // offset codes
- litHist [256]uint16 // codes 0->255
- nFilled int
- n uint16 // Must be able to contain maxStoreBlockSize
- tokens [maxStoreBlockSize + 1]token
-}
-
-func (t *tokens) Reset() {
- if t.n == 0 {
- return
- }
- t.n = 0
- t.nFilled = 0
- for i := range t.litHist[:] {
- t.litHist[i] = 0
- }
- for i := range t.extraHist[:] {
- t.extraHist[i] = 0
- }
- for i := range t.offHist[:] {
- t.offHist[i] = 0
- }
-}
-
-func (t *tokens) Fill() {
- if t.n == 0 {
- return
- }
- for i, v := range t.litHist[:] {
- if v == 0 {
- t.litHist[i] = 1
- t.nFilled++
- }
- }
- for i, v := range t.extraHist[:literalCount-256] {
- if v == 0 {
- t.nFilled++
- t.extraHist[i] = 1
- }
- }
- for i, v := range t.offHist[:offsetCodeCount] {
- if v == 0 {
- t.offHist[i] = 1
- }
- }
-}
-
-func indexTokens(in []token) tokens {
- var t tokens
- t.indexTokens(in)
- return t
-}
-
-func (t *tokens) indexTokens(in []token) {
- t.Reset()
- for _, tok := range in {
- if tok < matchType {
- t.AddLiteral(tok.literal())
- continue
- }
- t.AddMatch(uint32(tok.length()), tok.offset()&matchOffsetOnlyMask)
- }
-}
-
-// emitLiteral writes a literal chunk.
-func emitLiteral(dst *tokens, lit []byte) {
- for _, v := range lit {
- dst.tokens[dst.n] = token(v)
- dst.litHist[v]++
- dst.n++
- }
-}
-
-func (t *tokens) AddLiteral(lit byte) {
- t.tokens[t.n] = token(lit)
- t.litHist[lit]++
- t.n++
-}
-
-// from https://stackoverflow.com/a/28730362
-func mFastLog2(val float32) float32 {
- ux := int32(math.Float32bits(val))
- log2 := (float32)(((ux >> 23) & 255) - 128)
- ux &= -0x7f800001
- ux += 127 << 23
- uval := math.Float32frombits(uint32(ux))
- log2 += ((-0.34484843)*uval+2.02466578)*uval - 0.67487759
- return log2
-}
-
-// EstimatedBits will return a minimum size estimated by an *optimal*
-// compression of the block.
-// The size of the block
-func (t *tokens) EstimatedBits() int {
- shannon := float32(0)
- bits := int(0)
- nMatches := 0
- total := int(t.n) + t.nFilled
- if total > 0 {
- invTotal := 1.0 / float32(total)
- for _, v := range t.litHist[:] {
- if v > 0 {
- n := float32(v)
- shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
- }
- }
- // Just add 15 for EOB
- shannon += 15
- for i, v := range t.extraHist[1 : literalCount-256] {
- if v > 0 {
- n := float32(v)
- shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
- bits += int(lengthExtraBits[i&31]) * int(v)
- nMatches += int(v)
- }
- }
- }
- if nMatches > 0 {
- invTotal := 1.0 / float32(nMatches)
- for i, v := range t.offHist[:offsetCodeCount] {
- if v > 0 {
- n := float32(v)
- shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
- bits += int(offsetExtraBits[i&31]) * int(v)
- }
- }
- }
- return int(shannon) + bits
-}
-
-// AddMatch adds a match to the tokens.
-// This function is very sensitive to inlining and right on the border.
-func (t *tokens) AddMatch(xlength uint32, xoffset uint32) {
- if debugDeflate {
- if xlength >= maxMatchLength+baseMatchLength {
- panic(fmt.Errorf("invalid length: %v", xlength))
- }
- if xoffset >= maxMatchOffset+baseMatchOffset {
- panic(fmt.Errorf("invalid offset: %v", xoffset))
- }
- }
- oCode := offsetCode(xoffset)
- xoffset |= oCode << 16
-
- t.extraHist[lengthCodes1[uint8(xlength)]]++
- t.offHist[oCode&31]++
- t.tokens[t.n] = token(matchType | xlength<<lengthShift | xoffset)
- t.n++
-}
-
-// AddMatchLong adds a match to the tokens, potentially longer than max match length.
-// Length should NOT have the base subtracted, only offset should.
-func (t *tokens) AddMatchLong(xlength int32, xoffset uint32) {
- if debugDeflate {
- if xoffset >= maxMatchOffset+baseMatchOffset {
- panic(fmt.Errorf("invalid offset: %v", xoffset))
- }
- }
- oc := offsetCode(xoffset)
- xoffset |= oc << 16
- for xlength > 0 {
- xl := xlength
- if xl > 258 {
- // We need to have at least baseMatchLength left over for next loop.
- if xl > 258+baseMatchLength {
- xl = 258
- } else {
- xl = 258 - baseMatchLength
- }
- }
- xlength -= xl
- xl -= baseMatchLength
- t.extraHist[lengthCodes1[uint8(xl)]]++
- t.offHist[oc&31]++
- t.tokens[t.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
- t.n++
- }
-}
-
-func (t token) length() uint8 { return uint8(t >> lengthShift) }
-
-// Convert length to code.
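
// Editorial sketch of the entropy arithmetic in EstimatedBits above: a symbol
// seen n times out of total contributes n*(-log2(n/total)) bits (Shannon),
// clamped to at least one bit per occurrence, as atLeastOne does. shannonBits
// is a hypothetical standalone version using exact math.Log2 instead of the
// mFastLog2 approximation.
func shannonBits(hist []int) float64 {
	total := 0
	for _, v := range hist {
		total += v
	}
	var estBits float64
	for _, v := range hist {
		if v > 0 {
			p := float64(v) / float64(total)
			estBits += float64(v) * math.Max(1, -math.Log2(p))
		}
	}
	return estBits
}
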
-func lengthCode(len uint8) uint8 { return lengthCodes[len] } - -// Returns the offset code corresponding to a specific offset -func offsetCode(off uint32) uint32 { - if false { - if off < uint32(len(offsetCodes)) { - return offsetCodes[off&255] - } else if off>>7 < uint32(len(offsetCodes)) { - return offsetCodes[(off>>7)&255] + 14 - } else { - return offsetCodes[(off>>14)&255] + 28 - } - } - if off < uint32(len(offsetCodes)) { - return offsetCodes[uint8(off)] - } - return offsetCodes14[uint8(off>>7)] -} diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go index 43e46361..e82fa3bb 100644 --- a/vendor/github.com/klauspost/compress/fse/bitwriter.go +++ b/vendor/github.com/klauspost/compress/fse/bitwriter.go @@ -152,12 +152,11 @@ func (b *bitWriter) flushAlign() { // close will write the alignment bit and write the final byte(s) // to the output. -func (b *bitWriter) close() error { +func (b *bitWriter) close() { // End mark b.addBits16Clean(1, 1) // flush until next byte. b.flushAlign() - return nil } // reset and continue writing by appending to out. diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go index dac97e58..074018d8 100644 --- a/vendor/github.com/klauspost/compress/fse/compress.go +++ b/vendor/github.com/klauspost/compress/fse/compress.go @@ -199,7 +199,8 @@ func (s *Scratch) compress(src []byte) error { c2.flush(s.actualTableLog) c1.flush(s.actualTableLog) - return s.bw.close() + s.bw.close() + return nil } // writeCount will write the normalized histogram count to header. @@ -211,7 +212,7 @@ func (s *Scratch) writeCount() error { previous0 bool charnum uint16 - maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 + maxHeaderSize = ((int(s.symbolLen)*int(tableLog) + 4 + 2) >> 3) + 3 // Write Table Size bitStream = uint32(tableLog - minTablelog) diff --git a/vendor/github.com/klauspost/compress/fse/decompress.go b/vendor/github.com/klauspost/compress/fse/decompress.go index 926f5f15..cc05d0f7 100644 --- a/vendor/github.com/klauspost/compress/fse/decompress.go +++ b/vendor/github.com/klauspost/compress/fse/decompress.go @@ -260,7 +260,9 @@ func (s *Scratch) buildDtable() error { // If the buffer is over-read an error is returned. func (s *Scratch) decompress() error { br := &s.bits - br.init(s.br.unread()) + if err := br.init(s.br.unread()); err != nil { + return err + } var s1, s2 decoder // Initialize and decode first state and symbol. diff --git a/vendor/github.com/klauspost/compress/gzip/gunzip.go b/vendor/github.com/klauspost/compress/gzip/gunzip.go deleted file mode 100644 index 66fe5ddf..00000000 --- a/vendor/github.com/klauspost/compress/gzip/gunzip.go +++ /dev/null @@ -1,349 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package gzip implements reading and writing of gzip format compressed files, -// as specified in RFC 1952. -package gzip - -import ( - "bufio" - "compress/gzip" - "encoding/binary" - "hash/crc32" - "io" - "time" - - "github.com/klauspost/compress/flate" -) - -const ( - gzipID1 = 0x1f - gzipID2 = 0x8b - gzipDeflate = 8 - flagText = 1 << 0 - flagHdrCrc = 1 << 1 - flagExtra = 1 << 2 - flagName = 1 << 3 - flagComment = 1 << 4 -) - -var ( - // ErrChecksum is returned when reading GZIP data that has an invalid checksum. 
- ErrChecksum = gzip.ErrChecksum - // ErrHeader is returned when reading GZIP data that has an invalid header. - ErrHeader = gzip.ErrHeader -) - -var le = binary.LittleEndian - -// noEOF converts io.EOF to io.ErrUnexpectedEOF. -func noEOF(err error) error { - if err == io.EOF { - return io.ErrUnexpectedEOF - } - return err -} - -// The gzip file stores a header giving metadata about the compressed file. -// That header is exposed as the fields of the Writer and Reader structs. -// -// Strings must be UTF-8 encoded and may only contain Unicode code points -// U+0001 through U+00FF, due to limitations of the GZIP file format. -type Header struct { - Comment string // comment - Extra []byte // "extra data" - ModTime time.Time // modification time - Name string // file name - OS byte // operating system type -} - -// A Reader is an io.Reader that can be read to retrieve -// uncompressed data from a gzip-format compressed file. -// -// In general, a gzip file can be a concatenation of gzip files, -// each with its own header. Reads from the Reader -// return the concatenation of the uncompressed data of each. -// Only the first header is recorded in the Reader fields. -// -// Gzip files store a length and checksum of the uncompressed data. -// The Reader will return a ErrChecksum when Read -// reaches the end of the uncompressed data if it does not -// have the expected length or checksum. Clients should treat data -// returned by Read as tentative until they receive the io.EOF -// marking the end of the data. -type Reader struct { - Header // valid after NewReader or Reader.Reset - r flate.Reader - br *bufio.Reader - decompressor io.ReadCloser - digest uint32 // CRC-32, IEEE polynomial (section 8) - size uint32 // Uncompressed size (section 2.3.1) - buf [512]byte - err error - multistream bool -} - -// NewReader creates a new Reader reading the given reader. -// If r does not also implement io.ByteReader, -// the decompressor may read more data than necessary from r. -// -// It is the caller's responsibility to call Close on the Reader when done. -// -// The Reader.Header fields will be valid in the Reader returned. -func NewReader(r io.Reader) (*Reader, error) { - z := new(Reader) - if err := z.Reset(r); err != nil { - return nil, err - } - return z, nil -} - -// Reset discards the Reader z's state and makes it equivalent to the -// result of its original state from NewReader, but reading from r instead. -// This permits reusing a Reader rather than allocating a new one. -func (z *Reader) Reset(r io.Reader) error { - *z = Reader{ - decompressor: z.decompressor, - multistream: true, - } - if rr, ok := r.(flate.Reader); ok { - z.r = rr - } else { - // Reuse if we can. - if z.br != nil { - z.br.Reset(r) - } else { - z.br = bufio.NewReader(r) - } - z.r = z.br - } - z.Header, z.err = z.readHeader() - return z.err -} - -// Multistream controls whether the reader supports multistream files. -// -// If enabled (the default), the Reader expects the input to be a sequence -// of individually gzipped data streams, each with its own header and -// trailer, ending at EOF. The effect is that the concatenation of a sequence -// of gzipped files is treated as equivalent to the gzip of the concatenation -// of the sequence. This is standard behavior for gzip readers. -// -// Calling Multistream(false) disables this behavior; disabling the behavior -// can be useful when reading file formats that distinguish individual gzip -// data streams or mix gzip data streams with other data streams. 
-// In this mode, when the Reader reaches the end of the data stream, -// Read returns io.EOF. If the underlying reader implements io.ByteReader, -// it will be left positioned just after the gzip stream. -// To start the next stream, call z.Reset(r) followed by z.Multistream(false). -// If there is no next stream, z.Reset(r) will return io.EOF. -func (z *Reader) Multistream(ok bool) { - z.multistream = ok -} - -// readString reads a NUL-terminated string from z.r. -// It treats the bytes read as being encoded as ISO 8859-1 (Latin-1) and -// will output a string encoded using UTF-8. -// This method always updates z.digest with the data read. -func (z *Reader) readString() (string, error) { - var err error - needConv := false - for i := 0; ; i++ { - if i >= len(z.buf) { - return "", ErrHeader - } - z.buf[i], err = z.r.ReadByte() - if err != nil { - return "", err - } - if z.buf[i] > 0x7f { - needConv = true - } - if z.buf[i] == 0 { - // Digest covers the NUL terminator. - z.digest = crc32.Update(z.digest, crc32.IEEETable, z.buf[:i+1]) - - // Strings are ISO 8859-1, Latin-1 (RFC 1952, section 2.3.1). - if needConv { - s := make([]rune, 0, i) - for _, v := range z.buf[:i] { - s = append(s, rune(v)) - } - return string(s), nil - } - return string(z.buf[:i]), nil - } - } -} - -// readHeader reads the GZIP header according to section 2.3.1. -// This method does not set z.err. -func (z *Reader) readHeader() (hdr Header, err error) { - if _, err = io.ReadFull(z.r, z.buf[:10]); err != nil { - // RFC 1952, section 2.2, says the following: - // A gzip file consists of a series of "members" (compressed data sets). - // - // Other than this, the specification does not clarify whether a - // "series" is defined as "one or more" or "zero or more". To err on the - // side of caution, Go interprets this to mean "zero or more". - // Thus, it is okay to return io.EOF here. - return hdr, err - } - if z.buf[0] != gzipID1 || z.buf[1] != gzipID2 || z.buf[2] != gzipDeflate { - return hdr, ErrHeader - } - flg := z.buf[3] - hdr.ModTime = time.Unix(int64(le.Uint32(z.buf[4:8])), 0) - // z.buf[8] is XFL and is currently ignored. - hdr.OS = z.buf[9] - z.digest = crc32.ChecksumIEEE(z.buf[:10]) - - if flg&flagExtra != 0 { - if _, err = io.ReadFull(z.r, z.buf[:2]); err != nil { - return hdr, noEOF(err) - } - z.digest = crc32.Update(z.digest, crc32.IEEETable, z.buf[:2]) - data := make([]byte, le.Uint16(z.buf[:2])) - if _, err = io.ReadFull(z.r, data); err != nil { - return hdr, noEOF(err) - } - z.digest = crc32.Update(z.digest, crc32.IEEETable, data) - hdr.Extra = data - } - - var s string - if flg&flagName != 0 { - if s, err = z.readString(); err != nil { - return hdr, err - } - hdr.Name = s - } - - if flg&flagComment != 0 { - if s, err = z.readString(); err != nil { - return hdr, err - } - hdr.Comment = s - } - - if flg&flagHdrCrc != 0 { - if _, err = io.ReadFull(z.r, z.buf[:2]); err != nil { - return hdr, noEOF(err) - } - digest := le.Uint16(z.buf[:2]) - if digest != uint16(z.digest) { - return hdr, ErrHeader - } - } - - z.digest = 0 - if z.decompressor == nil { - z.decompressor = flate.NewReader(z.r) - } else { - z.decompressor.(flate.Resetter).Reset(z.r, nil) - } - return hdr, nil -} - -// Read implements io.Reader, reading uncompressed bytes from its underlying Reader. 
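
// Editorial sketch of the per-member pattern described in the Multistream
// comment above: disable multistream mode, consume one member, then Reset to
// position at the next header; Reset returns io.EOF when no member follows.
// handleMember is a hypothetical callback.
func eachMember(r io.Reader, handleMember func(io.Reader) error) error {
	zr, err := NewReader(r)
	if err != nil {
		return err
	}
	for {
		zr.Multistream(false)
		if err := handleMember(zr); err != nil {
			return err
		}
		if err := zr.Reset(r); err == io.EOF {
			return nil // no further members
		} else if err != nil {
			return err
		}
	}
}
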
-func (z *Reader) Read(p []byte) (n int, err error) { - if z.err != nil { - return 0, z.err - } - - for n == 0 { - n, z.err = z.decompressor.Read(p) - z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n]) - z.size += uint32(n) - if z.err != io.EOF { - // In the normal case we return here. - return n, z.err - } - - // Finished file; check checksum and size. - if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil { - z.err = noEOF(err) - return n, z.err - } - digest := le.Uint32(z.buf[:4]) - size := le.Uint32(z.buf[4:8]) - if digest != z.digest || size != z.size { - z.err = ErrChecksum - return n, z.err - } - z.digest, z.size = 0, 0 - - // File is ok; check if there is another. - if !z.multistream { - return n, io.EOF - } - z.err = nil // Remove io.EOF - - if _, z.err = z.readHeader(); z.err != nil { - return n, z.err - } - } - - return n, nil -} - -// Support the io.WriteTo interface for io.Copy and friends. -func (z *Reader) WriteTo(w io.Writer) (int64, error) { - total := int64(0) - crcWriter := crc32.NewIEEE() - for { - if z.err != nil { - if z.err == io.EOF { - return total, nil - } - return total, z.err - } - - // We write both to output and digest. - mw := io.MultiWriter(w, crcWriter) - n, err := z.decompressor.(io.WriterTo).WriteTo(mw) - total += n - z.size += uint32(n) - if err != nil { - z.err = err - return total, z.err - } - - // Finished file; check checksum + size. - if _, err := io.ReadFull(z.r, z.buf[0:8]); err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - z.err = err - return total, err - } - z.digest = crcWriter.Sum32() - digest := le.Uint32(z.buf[:4]) - size := le.Uint32(z.buf[4:8]) - if digest != z.digest || size != z.size { - z.err = ErrChecksum - return total, z.err - } - z.digest, z.size = 0, 0 - - // File is ok; check if there is another. - if !z.multistream { - return total, nil - } - crcWriter.Reset() - z.err = nil // Remove io.EOF - - if _, z.err = z.readHeader(); z.err != nil { - if z.err == io.EOF { - return total, nil - } - return total, z.err - } - } -} - -// Close closes the Reader. It does not close the underlying io.Reader. -// In order for the GZIP checksum to be verified, the reader must be -// fully consumed until the io.EOF. -func (z *Reader) Close() error { return z.decompressor.Close() } diff --git a/vendor/github.com/klauspost/compress/gzip/gzip.go b/vendor/github.com/klauspost/compress/gzip/gzip.go deleted file mode 100644 index 26203851..00000000 --- a/vendor/github.com/klauspost/compress/gzip/gzip.go +++ /dev/null @@ -1,269 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gzip - -import ( - "errors" - "fmt" - "hash/crc32" - "io" - - "github.com/klauspost/compress/flate" -) - -// These constants are copied from the flate package, so that code that imports -// "compress/gzip" does not also have to import "compress/flate". -const ( - NoCompression = flate.NoCompression - BestSpeed = flate.BestSpeed - BestCompression = flate.BestCompression - DefaultCompression = flate.DefaultCompression - ConstantCompression = flate.ConstantCompression - HuffmanOnly = flate.HuffmanOnly - - // StatelessCompression will do compression but without maintaining any state - // between Write calls. - // There will be no memory kept between Write calls, - // but compression and speed will be suboptimal. - // Because of this, the size of actual Write calls will affect output size. 
- StatelessCompression = -3 -) - -// A Writer is an io.WriteCloser. -// Writes to a Writer are compressed and written to w. -type Writer struct { - Header // written at first call to Write, Flush, or Close - w io.Writer - level int - err error - compressor *flate.Writer - digest uint32 // CRC-32, IEEE polynomial (section 8) - size uint32 // Uncompressed size (section 2.3.1) - wroteHeader bool - closed bool - buf [10]byte -} - -// NewWriter returns a new Writer. -// Writes to the returned writer are compressed and written to w. -// -// It is the caller's responsibility to call Close on the WriteCloser when done. -// Writes may be buffered and not flushed until Close. -// -// Callers that wish to set the fields in Writer.Header must do so before -// the first call to Write, Flush, or Close. -func NewWriter(w io.Writer) *Writer { - z, _ := NewWriterLevel(w, DefaultCompression) - return z -} - -// NewWriterLevel is like NewWriter but specifies the compression level instead -// of assuming DefaultCompression. -// -// The compression level can be DefaultCompression, NoCompression, or any -// integer value between BestSpeed and BestCompression inclusive. The error -// returned will be nil if the level is valid. -func NewWriterLevel(w io.Writer, level int) (*Writer, error) { - if level < StatelessCompression || level > BestCompression { - return nil, fmt.Errorf("gzip: invalid compression level: %d", level) - } - z := new(Writer) - z.init(w, level) - return z, nil -} - -func (z *Writer) init(w io.Writer, level int) { - compressor := z.compressor - if level != StatelessCompression { - if compressor != nil { - compressor.Reset(w) - } - } - - *z = Writer{ - Header: Header{ - OS: 255, // unknown - }, - w: w, - level: level, - compressor: compressor, - } -} - -// Reset discards the Writer z's state and makes it equivalent to the -// result of its original state from NewWriter or NewWriterLevel, but -// writing to w instead. This permits reusing a Writer rather than -// allocating a new one. -func (z *Writer) Reset(w io.Writer) { - z.init(w, z.level) -} - -// writeBytes writes a length-prefixed byte slice to z.w. -func (z *Writer) writeBytes(b []byte) error { - if len(b) > 0xffff { - return errors.New("gzip.Write: Extra data is too large") - } - le.PutUint16(z.buf[:2], uint16(len(b))) - _, err := z.w.Write(z.buf[:2]) - if err != nil { - return err - } - _, err = z.w.Write(b) - return err -} - -// writeString writes a UTF-8 string s in GZIP's format to z.w. -// GZIP (RFC 1952) specifies that strings are NUL-terminated ISO 8859-1 (Latin-1). -func (z *Writer) writeString(s string) (err error) { - // GZIP stores Latin-1 strings; error if non-Latin-1; convert if non-ASCII. - needconv := false - for _, v := range s { - if v == 0 || v > 0xff { - return errors.New("gzip.Write: non-Latin-1 header string") - } - if v > 0x7f { - needconv = true - } - } - if needconv { - b := make([]byte, 0, len(s)) - for _, v := range s { - b = append(b, byte(v)) - } - _, err = z.w.Write(b) - } else { - _, err = io.WriteString(z.w, s) - } - if err != nil { - return err - } - // GZIP strings are NUL-terminated. - z.buf[0] = 0 - _, err = z.w.Write(z.buf[:1]) - return err -} - -// Write writes a compressed form of p to the underlying io.Writer. The -// compressed bytes are not necessarily flushed until the Writer is closed. -func (z *Writer) Write(p []byte) (int, error) { - if z.err != nil { - return 0, z.err - } - var n int - // Write the GZIP header lazily. 
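
// Editorial usage sketch for the StatelessCompression level defined above: it
// is selected through the ordinary NewWriterLevel API, after which every
// Write is routed through flate.StatelessDeflate instead of a stateful
// compressor. writeStateless is a hypothetical helper.
func writeStateless(w io.Writer, data []byte) error {
	zw, err := NewWriterLevel(w, StatelessCompression)
	if err != nil {
		return err
	}
	if _, err := zw.Write(data); err != nil {
		return err
	}
	return zw.Close()
}
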
- if !z.wroteHeader { - z.wroteHeader = true - z.buf[0] = gzipID1 - z.buf[1] = gzipID2 - z.buf[2] = gzipDeflate - z.buf[3] = 0 - if z.Extra != nil { - z.buf[3] |= 0x04 - } - if z.Name != "" { - z.buf[3] |= 0x08 - } - if z.Comment != "" { - z.buf[3] |= 0x10 - } - le.PutUint32(z.buf[4:8], uint32(z.ModTime.Unix())) - if z.level == BestCompression { - z.buf[8] = 2 - } else if z.level == BestSpeed { - z.buf[8] = 4 - } else { - z.buf[8] = 0 - } - z.buf[9] = z.OS - n, z.err = z.w.Write(z.buf[:10]) - if z.err != nil { - return n, z.err - } - if z.Extra != nil { - z.err = z.writeBytes(z.Extra) - if z.err != nil { - return n, z.err - } - } - if z.Name != "" { - z.err = z.writeString(z.Name) - if z.err != nil { - return n, z.err - } - } - if z.Comment != "" { - z.err = z.writeString(z.Comment) - if z.err != nil { - return n, z.err - } - } - - if z.compressor == nil && z.level != StatelessCompression { - z.compressor, _ = flate.NewWriter(z.w, z.level) - } - } - z.size += uint32(len(p)) - z.digest = crc32.Update(z.digest, crc32.IEEETable, p) - if z.level == StatelessCompression { - return len(p), flate.StatelessDeflate(z.w, p, false, nil) - } - n, z.err = z.compressor.Write(p) - return n, z.err -} - -// Flush flushes any pending compressed data to the underlying writer. -// -// It is useful mainly in compressed network protocols, to ensure that -// a remote reader has enough data to reconstruct a packet. Flush does -// not return until the data has been written. If the underlying -// writer returns an error, Flush returns that error. -// -// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH. -func (z *Writer) Flush() error { - if z.err != nil { - return z.err - } - if z.closed || z.level == StatelessCompression { - return nil - } - if !z.wroteHeader { - z.Write(nil) - if z.err != nil { - return z.err - } - } - z.err = z.compressor.Flush() - return z.err -} - -// Close closes the Writer, flushing any unwritten data to the underlying -// io.Writer, but does not close the underlying io.Writer. -func (z *Writer) Close() error { - if z.err != nil { - return z.err - } - if z.closed { - return nil - } - z.closed = true - if !z.wroteHeader { - z.Write(nil) - if z.err != nil { - return z.err - } - } - if z.level == StatelessCompression { - z.err = flate.StatelessDeflate(z.w, nil, true, nil) - } else { - z.err = z.compressor.Close() - } - if z.err != nil { - return z.err - } - le.PutUint32(z.buf[:4], z.digest) - le.PutUint32(z.buf[4:8], z.size) - _, z.err = z.w.Write(z.buf[:8]) - return z.err -} diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go index ec71f7a3..0ebc9aaa 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go +++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -13,14 +13,6 @@ type bitWriter struct { out []byte } -// bitMask16 is bitmasks. Has extra to avoid bounds check. -var bitMask16 = [32]uint16{ - 0, 1, 3, 7, 0xF, 0x1F, - 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, - 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF} /* up to 16 bits */ - // addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. // It will not check if there is space for them, so the caller must ensure that it has flushed recently. 
func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { @@ -60,6 +52,22 @@ func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { b.nBits += encA.nBits + encB.nBits } +// encFourSymbols adds up to 32 bits from four symbols. +// It will not check if there is space for them, +// so the caller must ensure that b has been flushed recently. +func (b *bitWriter) encFourSymbols(encA, encB, encC, encD cTableEntry) { + bitsA := encA.nBits + bitsB := bitsA + encB.nBits + bitsC := bitsB + encC.nBits + bitsD := bitsC + encD.nBits + combined := uint64(encA.val) | + (uint64(encB.val) << (bitsA & 63)) | + (uint64(encC.val) << (bitsB & 63)) | + (uint64(encD.val) << (bitsC & 63)) + b.bitContainer |= combined << (b.nBits & 63) + b.nBits += bitsD +} + // flush32 will flush out, so there are at least 32 bits available for writing. func (b *bitWriter) flush32() { if b.nBits < 32 { @@ -86,10 +94,9 @@ func (b *bitWriter) flushAlign() { // close will write the alignment bit and write the final byte(s) // to the output. -func (b *bitWriter) close() error { +func (b *bitWriter) close() { // End mark b.addBits16Clean(1, 1) // flush until next byte. b.flushAlign() - return nil } diff --git a/vendor/github.com/klauspost/compress/huff0/bytereader.go b/vendor/github.com/klauspost/compress/huff0/bytereader.go deleted file mode 100644 index 4dcab8d2..00000000 --- a/vendor/github.com/klauspost/compress/huff0/bytereader.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package huff0 - -// byteReader provides a byte reader that reads -// little endian values from a byte stream. -// The input stream is manually advanced. -// The reader performs no bounds checks. -type byteReader struct { - b []byte - off int -} - -// init will initialize the reader and set the input. -func (b *byteReader) init(in []byte) { - b.b = in - b.off = 0 -} - -// Int32 returns a little endian int32 starting at current offset. -func (b byteReader) Int32() int32 { - v3 := int32(b.b[b.off+3]) - v2 := int32(b.b[b.off+2]) - v1 := int32(b.b[b.off+1]) - v0 := int32(b.b[b.off]) - return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 -} - -// Uint32 returns a little endian uint32 starting at current offset. -func (b byteReader) Uint32() uint32 { - v3 := uint32(b.b[b.off+3]) - v2 := uint32(b.b[b.off+2]) - v1 := uint32(b.b[b.off+1]) - v0 := uint32(b.b[b.off]) - return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 -} - -// remain will return the number of bytes remaining. -func (b byteReader) remain() int { - return len(b.b) - b.off -} diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go index cdc94856..84aa3d12 100644 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -227,10 +227,10 @@ func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err err } func (s *Scratch) compress1X(src []byte) ([]byte, error) { - return s.compress1xDo(s.Out, src) + return s.compress1xDo(s.Out, src), nil } -func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) { +func (s *Scratch) compress1xDo(dst, src []byte) []byte { var bw = bitWriter{out: dst} // N is length divisible by 4. 
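
// Editorial note on encFourSymbols above: it is only reached when the table
// log is at most 8 bits, so four codes add at most 32 bits, and flush32
// leaves at most 31 pending bits; 31+32 = 63 fits the 64-bit container.
// packFour is a hypothetical standalone version of the same OR-accumulation.
func packFour(vals [4]uint16, widths [4]uint8) (combined uint64, total uint8) {
	for i := range vals {
		combined |= uint64(vals[i]) << (total & 63) // lowest symbol first
		total += widths[i]
	}
	return combined, total
}
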
@@ -248,8 +248,7 @@ func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) { tmp := src[n : n+4] // tmp should be len 4 bw.flush32() - bw.encTwoSymbols(cTable, tmp[3], tmp[2]) - bw.encTwoSymbols(cTable, tmp[1], tmp[0]) + bw.encFourSymbols(cTable[tmp[3]], cTable[tmp[2]], cTable[tmp[1]], cTable[tmp[0]]) } } else { for ; n >= 0; n -= 4 { @@ -261,8 +260,8 @@ func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) { bw.encTwoSymbols(cTable, tmp[1], tmp[0]) } } - err := bw.close() - return bw.out, err + bw.close() + return bw.out } var sixZeros [6]byte @@ -284,12 +283,8 @@ func (s *Scratch) compress4X(src []byte) ([]byte, error) { } src = src[len(toDo):] - var err error idx := len(s.Out) - s.Out, err = s.compress1xDo(s.Out, toDo) - if err != nil { - return nil, err - } + s.Out = s.compress1xDo(s.Out, toDo) if len(s.Out)-idx > math.MaxUint16 { // We cannot store the size in the jump table return nil, ErrIncompressible @@ -316,7 +311,6 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { segmentSize := (len(src) + 3) / 4 var wg sync.WaitGroup - var errs [4]error wg.Add(4) for i := 0; i < 4; i++ { toDo := src @@ -327,15 +321,12 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { // Separate goroutine for each block. go func(i int) { - s.tmpOut[i], errs[i] = s.compress1xDo(s.tmpOut[i][:0], toDo) + s.tmpOut[i] = s.compress1xDo(s.tmpOut[i][:0], toDo) wg.Done() }(i) } wg.Wait() for i := 0; i < 4; i++ { - if errs[i] != nil { - return nil, errs[i] - } o := s.tmpOut[i] if len(o) > math.MaxUint16 { // We cannot store the size in the jump table @@ -359,6 +350,7 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { // Does not update s.clearCount. func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { reuse = true + _ = s.count // Assert that s != nil to speed up the following loop. for _, v := range in { s.count[v]++ } @@ -424,7 +416,7 @@ func (s *Scratch) validateTable(c cTable) bool { // minTableLog provides the minimum logSize to safely represent a distribution. 
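
// Editorial sketch of the bound implemented just below: the table log must
// cover both the source length and the alphabet size, so the minimum is
// min(highBit32(srcLen)+1, highBit32(symbolLen-1)+2). For a 4096-byte input
// with a 60-symbol alphabet that is min(13, 7) = 7 bits. minTableLogFor is a
// hypothetical standalone form using math/bits.
func minTableLogFor(srcLen int, symbolLen uint16) uint8 {
	minBitsSrc := uint8(bits.Len32(uint32(srcLen)))              // highBit32(srcLen) + 1
	minBitsSymbols := uint8(bits.Len32(uint32(symbolLen-1))) + 1 // highBit32(symbolLen-1) + 2
	if minBitsSrc < minBitsSymbols {
		return minBitsSrc
	}
	return minBitsSymbols
}
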
func (s *Scratch) minTableLog() uint8 { - minBitsSrc := highBit32(uint32(s.br.remain())) + 1 + minBitsSrc := highBit32(uint32(s.srcLen)) + 1 minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2 if minBitsSrc < minBitsSymbols { return uint8(minBitsSrc) @@ -436,7 +428,7 @@ func (s *Scratch) minTableLog() uint8 { func (s *Scratch) optimalTableLog() { tableLog := s.TableLog minBits := s.minTableLog() - maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1 + maxBitsSrc := uint8(highBit32(uint32(s.srcLen-1))) - 1 if maxBitsSrc < tableLog { // Accuracy can be reduced tableLog = maxBitsSrc diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go index 42a237ea..54bd08b2 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -61,7 +61,7 @@ func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { b, err := fse.Decompress(in[:iSize], s.fse) s.fse.Out = nil if err != nil { - return s, nil, err + return s, nil, fmt.Errorf("fse decompress returned: %w", err) } if len(b) > 255 { return s, nil, errors.New("corrupt input: output table too large") @@ -253,7 +253,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { switch d.actualTableLog { case 8: - const shift = 8 - 8 + const shift = 0 for br.off >= 4 { br.fillFast() v := dt[uint8(br.value>>(56+shift))] diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go index e8ad17ad..77ecd68e 100644 --- a/vendor/github.com/klauspost/compress/huff0/huff0.go +++ b/vendor/github.com/klauspost/compress/huff0/huff0.go @@ -88,7 +88,7 @@ type Scratch struct { // Decoders will return ErrMaxDecodedSizeExceeded is this limit is exceeded. MaxDecodedSize int - br byteReader + srcLen int // MaxSymbolValue will override the maximum symbol value of the next block. MaxSymbolValue uint8 @@ -170,7 +170,7 @@ func (s *Scratch) prepare(in []byte) (*Scratch, error) { if s.fse == nil { s.fse = &fse.Scratch{} } - s.br.init(in) + s.srcLen = len(in) return s, nil } diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go index 298c4f8e..2aa6a95a 100644 --- a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go @@ -87,22 +87,32 @@ func emitCopy(dst []byte, offset, length int) int { return i + 2 } -// extendMatch returns the largest k such that k <= len(src) and that -// src[i:i+k-j] and src[j:k] have the same contents. -// -// It assumes that: -// -// 0 <= i && i < j && j <= len(src) -func extendMatch(src []byte, i, j int) int { - for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { - } - return j -} - func hash(u, shift uint32) uint32 { return (u * 0x1e35a7bd) >> shift } +// EncodeBlockInto exposes encodeBlock but checks dst size. +func EncodeBlockInto(dst, src []byte) (d int) { + if MaxEncodedLen(len(src)) > len(dst) { + return 0 + } + + // encodeBlock breaks on too big blocks, so split. + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return d +} + // encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. 
It // assumes that the varint-encoded length of the decompressed bytes has already // been written. diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod index 2263853f..5a4412f9 100644 --- a/vendor/github.com/klauspost/compress/s2sx.mod +++ b/vendor/github.com/klauspost/compress/s2sx.mod @@ -1,4 +1,4 @@ module github.com/klauspost/compress -go 1.16 +go 1.19 diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md index 65b38abe..92e2347b 100644 --- a/vendor/github.com/klauspost/compress/zstd/README.md +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -259,7 +259,7 @@ nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68 ## Decompressor -Staus: STABLE - there may still be subtle bugs, but a wide variety of content has been tested. +Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested. This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz), kindly supplied by [fuzzit.dev](https://fuzzit.dev/). @@ -304,7 +304,7 @@ import "github.com/klauspost/compress/zstd" // Create a reader that caches decompressors. // For this operation type we supply a nil Reader. -var decoder, _ = zstd.NewReader(nil, WithDecoderConcurrency(0)) +var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0)) // Decompress a buffer. We don't supply a destination buffer, // so it will be allocated by the decoder. diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go index 97299d49..25ca9839 100644 --- a/vendor/github.com/klauspost/compress/zstd/bitreader.go +++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go @@ -17,7 +17,6 @@ import ( // for aligning the input. type bitReader struct { in []byte - off uint // next byte to read is at in[off - 1] value uint64 // Maybe use [16]byte, but shifting is awkward. bitsRead uint8 } @@ -28,7 +27,6 @@ func (b *bitReader) init(in []byte) error { return errors.New("corrupt stream: too short") } b.in = in - b.off = uint(len(in)) // The highest bit of the last byte indicates where to start v := in[len(in)-1] if v == 0 { @@ -69,21 +67,19 @@ func (b *bitReader) fillFast() { if b.bitsRead < 32 { return } - // 2 bounds checks. - v := b.in[b.off-4:] - v = v[:4] + v := b.in[len(b.in)-4:] + b.in = b.in[:len(b.in)-4] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value = (b.value << 32) | uint64(low) b.bitsRead -= 32 - b.off -= 4 } // fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. func (b *bitReader) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + v := b.in[len(b.in)-8:] + b.in = b.in[:len(b.in)-8] + b.value = binary.LittleEndian.Uint64(v) b.bitsRead = 0 - b.off -= 8 } // fill() will make sure at least 32 bits are available. 
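
// Editorial sketch of the bitReader refactor in this file: dropping the off
// field and shrinking b.in from the tail lets one reslice serve both the load
// and the bookkeeping, instead of two bounds checks per refill. takeLast4 is
// a hypothetical helper showing the shape; assumes encoding/binary.
func takeLast4(in []byte) (rest []byte, v uint32) {
	v = binary.LittleEndian.Uint32(in[len(in)-4:])
	return in[:len(in)-4], v
}
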
@@ -91,25 +87,25 @@ func (b *bitReader) fill() { if b.bitsRead < 32 { return } - if b.off >= 4 { - v := b.in[b.off-4:] - v = v[:4] + if len(b.in) >= 4 { + v := b.in[len(b.in)-4:] + b.in = b.in[:len(b.in)-4] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value = (b.value << 32) | uint64(low) b.bitsRead -= 32 - b.off -= 4 return } - for b.off > 0 { - b.value = (b.value << 8) | uint64(b.in[b.off-1]) - b.bitsRead -= 8 - b.off-- + + b.bitsRead -= uint8(8 * len(b.in)) + for len(b.in) > 0 { + b.value = (b.value << 8) | uint64(b.in[len(b.in)-1]) + b.in = b.in[:len(b.in)-1] } } // finished returns true if all bits have been read from the bit stream. func (b *bitReader) finished() bool { - return b.off == 0 && b.bitsRead >= 64 + return len(b.in) == 0 && b.bitsRead >= 64 } // overread returns true if more bits have been requested than is on the stream. @@ -119,7 +115,7 @@ func (b *bitReader) overread() bool { // remain returns the number of bits remaining. func (b *bitReader) remain() uint { - return b.off*8 + 64 - uint(b.bitsRead) + return 8*uint(len(b.in)) + 64 - uint(b.bitsRead) } // close the bitstream and returns an error if out-of-buffer reads occurred. diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go index 78b3c61b..1952f175 100644 --- a/vendor/github.com/klauspost/compress/zstd/bitwriter.go +++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go @@ -97,12 +97,11 @@ func (b *bitWriter) flushAlign() { // close will write the alignment bit and write the final byte(s) // to the output. -func (b *bitWriter) close() error { +func (b *bitWriter) close() { // End mark b.addBits16Clean(1, 1) // flush until next byte. b.flushAlign() - return nil } // reset and continue writing by appending to out. diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index 2445bb4f..9f17ce60 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -9,6 +9,7 @@ import ( "encoding/binary" "errors" "fmt" + "hash/crc32" "io" "os" "path/filepath" @@ -442,6 +443,9 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err } } var err error + if debugDecoder { + println("huff table input:", len(literals), "CRC:", crc32.ChecksumIEEE(literals)) + } huff, literals, err = huff0.ReadTable(literals, huff) if err != nil { println("reading huffman table:", err) @@ -588,7 +592,7 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { } seq.fse.setRLE(symb) if debugDecoder { - printf("RLE set to %+v, code: %v", symb, v) + printf("RLE set to 0x%x, code: %v", symb, v) } case compModeFSE: println("Reading table for", tableIndex(i)) diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go index 12e8f6f0..2cfe925a 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -361,14 +361,21 @@ func (b *blockEnc) encodeLits(lits []byte, raw bool) error { if len(lits) >= 1024 { // Use 4 Streams. 
out, reUsed, err = huff0.Compress4X(lits, b.litEnc) - } else if len(lits) > 32 { + } else if len(lits) > 16 { // Use 1 stream single = true out, reUsed, err = huff0.Compress1X(lits, b.litEnc) } else { err = huff0.ErrIncompressible } - + if err == nil && len(out)+5 > len(lits) { + // If we are close, we may still be worse or equal to raw. + var lh literalsHeader + lh.setSizes(len(out), len(lits), single) + if len(out)+lh.size() >= len(lits) { + err = huff0.ErrIncompressible + } + } switch err { case huff0.ErrIncompressible: if debugEncoder { @@ -473,7 +480,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { return b.encodeLits(b.literals, rawAllLits) } // We want some difference to at least account for the headers. - saved := b.size - len(b.literals) - (b.size >> 5) + saved := b.size - len(b.literals) - (b.size >> 6) if saved < 16 { if org == nil { return errIncompressible @@ -503,7 +510,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { if len(b.literals) >= 1024 && !raw { // Use 4 Streams. out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc) - } else if len(b.literals) > 32 && !raw { + } else if len(b.literals) > 16 && !raw { // Use 1 stream single = true out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc) @@ -511,6 +518,17 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { err = huff0.ErrIncompressible } + if err == nil && len(out)+5 > len(b.literals) { + // If we are close, we may still be worse or equal to raw. + var lh literalsHeader + lh.setSize(len(b.literals)) + szRaw := lh.size() + lh.setSizes(len(out), len(b.literals), single) + szComp := lh.size() + if len(out)+szComp >= len(b.literals)+szRaw { + err = huff0.ErrIncompressible + } + } switch err { case huff0.ErrIncompressible: lh.setType(literalsBlockRaw) @@ -773,16 +791,16 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { ml.flush(mlEnc.actualTableLog) of.flush(ofEnc.actualTableLog) ll.flush(llEnc.actualTableLog) - err = wr.close() - if err != nil { - return err - } + wr.close() b.output = wr.out + // Maybe even add a bigger margin. if len(b.output)-3-bhOffset >= b.size { - // Maybe even add a bigger margin. + // Discard and encode as raw block. + b.output = b.encodeRawTo(b.output[:bhOffset], org) + b.popOffsets() b.litEnc.Reuse = huff0.ReusePolicyNone - return errIncompressible + return nil } // Size is output minus block header. 
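
// Editorial sketch of the size guard added in this file: compressed literals
// are kept only when payload plus literals header beats the raw payload plus
// its (smaller) header; otherwise the block falls back to raw encoding rather
// than returning errIncompressible. Hypothetical standalone form:
func keepCompressedLiterals(compLen, compHdr, rawLen, rawHdr int) bool {
	return compLen+compHdr < rawLen+rawHdr
}
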
diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go index 176788f2..55a38855 100644 --- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go +++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go @@ -54,7 +54,7 @@ func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { func (b *byteBuf) readByte() (byte, error) { bb := *b if len(bb) < 1 { - return 0, nil + return 0, io.ErrUnexpectedEOF } r := bb[0] *b = bb[1:] @@ -109,7 +109,7 @@ func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { } func (r *readerWrapper) readByte() (byte, error) { - n2, err := r.r.Read(r.tmp[:1]) + n2, err := io.ReadFull(r.r, r.tmp[:1]) if err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go index f6a24097..6a5a2988 100644 --- a/vendor/github.com/klauspost/compress/zstd/decodeheader.go +++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go @@ -95,42 +95,54 @@ type Header struct { // If there isn't enough input, io.ErrUnexpectedEOF is returned. // The FirstBlock.OK will indicate if enough information was available to decode the first block header. func (h *Header) Decode(in []byte) error { + _, err := h.DecodeAndStrip(in) + return err +} + +// DecodeAndStrip will decode the header from the beginning of the stream +// and on success return the remaining bytes. +// This will decode the frame header and the first block header if enough bytes are provided. +// It is recommended to provide at least HeaderMaxSize bytes. +// If the frame header cannot be read an error will be returned. +// If there isn't enough input, io.ErrUnexpectedEOF is returned. +// The FirstBlock.OK will indicate if enough information was available to decode the first block header. 
+func (h *Header) DecodeAndStrip(in []byte) (remain []byte, err error) { *h = Header{} if len(in) < 4 { - return io.ErrUnexpectedEOF + return nil, io.ErrUnexpectedEOF } h.HeaderSize += 4 b, in := in[:4], in[4:] if string(b) != frameMagic { if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 { - return ErrMagicMismatch + return nil, ErrMagicMismatch } if len(in) < 4 { - return io.ErrUnexpectedEOF + return nil, io.ErrUnexpectedEOF } h.HeaderSize += 4 h.Skippable = true h.SkippableID = int(b[0] & 0xf) h.SkippableSize = binary.LittleEndian.Uint32(in) - return nil + return in[4:], nil } // Read Window_Descriptor // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor if len(in) < 1 { - return io.ErrUnexpectedEOF + return nil, io.ErrUnexpectedEOF } fhd, in := in[0], in[1:] h.HeaderSize++ h.SingleSegment = fhd&(1<<5) != 0 h.HasCheckSum = fhd&(1<<2) != 0 if fhd&(1<<3) != 0 { - return errors.New("reserved bit set on frame header") + return nil, errors.New("reserved bit set on frame header") } if !h.SingleSegment { if len(in) < 1 { - return io.ErrUnexpectedEOF + return nil, io.ErrUnexpectedEOF } var wd byte wd, in = in[0], in[1:] @@ -148,7 +160,7 @@ func (h *Header) Decode(in []byte) error { size = 4 } if len(in) < int(size) { - return io.ErrUnexpectedEOF + return nil, io.ErrUnexpectedEOF } b, in = in[:size], in[size:] h.HeaderSize += int(size) @@ -178,7 +190,7 @@ func (h *Header) Decode(in []byte) error { if fcsSize > 0 { h.HasFCS = true if len(in) < fcsSize { - return io.ErrUnexpectedEOF + return nil, io.ErrUnexpectedEOF } b, in = in[:fcsSize], in[fcsSize:] h.HeaderSize += int(fcsSize) @@ -199,7 +211,7 @@ func (h *Header) Decode(in []byte) error { // Frame Header done, we will not fail from now on. if len(in) < 3 { - return nil + return in, nil } tmp := in[:3] bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) @@ -209,7 +221,7 @@ func (h *Header) Decode(in []byte) error { cSize := int(bh >> 3) switch blockType { case blockTypeReserved: - return nil + return in, nil case blockTypeRLE: h.FirstBlock.Compressed = true h.FirstBlock.DecompressedSize = cSize @@ -225,5 +237,25 @@ func (h *Header) Decode(in []byte) error { } h.FirstBlock.OK = true - return nil + return in, nil +} + +// AppendTo will append the encoded header to the dst slice. +// There is no error checking performed on the header values. +func (h *Header) AppendTo(dst []byte) ([]byte, error) { + if h.Skippable { + magic := [4]byte{0x50, 0x2a, 0x4d, 0x18} + magic[0] |= byte(h.SkippableID & 0xf) + dst = append(dst, magic[:]...) 
+ f := h.SkippableSize + return append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)), nil + } + f := frameHeader{ + ContentSize: h.FrameContentSize, + WindowSize: uint32(h.WindowSize), + SingleSegment: h.SingleSegment, + Checksum: h.HasCheckSum, + DictID: h.DictionaryID, + } + return f.appendTo(dst), nil } diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index 7113e69e..f04aaa21 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -455,12 +455,7 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) { } if len(next.b) > 0 { - n, err := d.current.crc.Write(next.b) - if err == nil { - if n != len(next.b) { - d.current.err = io.ErrShortWrite - } - } + d.current.crc.Write(next.b) } if next.err == nil && next.d != nil && next.d.hasCRC { got := uint32(d.current.crc.Sum64()) diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go index 07a90dd7..774c5f00 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -107,7 +107,7 @@ func WithDecoderDicts(dicts ...[]byte) DOption { } } -// WithEncoderDictRaw registers a dictionary that may be used by the decoder. +// WithDecoderDictRaw registers a dictionary that may be used by the decoder. // The slice content can be arbitrary data. func WithDecoderDictRaw(id uint32, content []byte) DOption { return func(o *decoderOptions) error { diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go index 66a95c18..8d5567fe 100644 --- a/vendor/github.com/klauspost/compress/zstd/dict.go +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -1,10 +1,13 @@ package zstd import ( + "bytes" "encoding/binary" "errors" "fmt" "io" + "math" + "sort" "github.com/klauspost/compress/huff0" ) @@ -14,9 +17,8 @@ type dict struct { litEnc *huff0.Scratch llDec, ofDec, mlDec sequenceDec - //llEnc, ofEnc, mlEnc []*fseEncoder - offsets [3]int - content []byte + offsets [3]int + content []byte } const dictMagic = "\x37\xa4\x30\xec" @@ -32,14 +34,38 @@ func (d *dict) ID() uint32 { return d.id } -// DictContentSize returns the dictionary content size or 0 if d is nil. -func (d *dict) DictContentSize() int { +// ContentSize returns the dictionary content size or 0 if d is nil. +func (d *dict) ContentSize() int { if d == nil { return 0 } return len(d.content) } +// Content returns the dictionary content. +func (d *dict) Content() []byte { + if d == nil { + return nil + } + return d.content +} + +// Offsets returns the initial offsets. +func (d *dict) Offsets() [3]int { + if d == nil { + return [3]int{} + } + return d.offsets +} + +// LitEncoder returns the literal encoder. 
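
// Editorial usage sketch for the accessors above, via InspectDictionary
// (added later in this file). Assumes the fmt import; describeDict is a
// hypothetical helper.
func describeDict(raw []byte) error {
	d, err := InspectDictionary(raw)
	if err != nil {
		return err
	}
	fmt.Printf("id=%d content=%d bytes offsets=%v\n", d.ID(), d.ContentSize(), d.Offsets())
	return nil
}
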
+func (d *dict) LitEncoder() *huff0.Scratch { + if d == nil { + return nil + } + return d.litEnc +} + // Load a dictionary as described in // https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format func loadDict(b []byte) (*dict, error) { @@ -64,7 +90,7 @@ func loadDict(b []byte) (*dict, error) { var err error d.litEnc, b, err = huff0.ReadTable(b[8:], nil) if err != nil { - return nil, err + return nil, fmt.Errorf("loading literal table: %w", err) } d.litEnc.Reuse = huff0.ReusePolicyMust @@ -122,3 +148,387 @@ func loadDict(b []byte) (*dict, error) { return &d, nil } + +// InspectDictionary loads a zstd dictionary and provides functions to inspect the content. +func InspectDictionary(b []byte) (interface { + ID() uint32 + ContentSize() int + Content() []byte + Offsets() [3]int + LitEncoder() *huff0.Scratch +}, error) { + initPredefined() + d, err := loadDict(b) + return d, err +} + +type BuildDictOptions struct { + // Dictionary ID. + ID uint32 + + // Content to use to create dictionary tables. + Contents [][]byte + + // History to use for all blocks. + History []byte + + // Offsets to use. + Offsets [3]int + + // CompatV155 will make the dictionary compatible with Zstd v1.5.5 and earlier. + // See https://github.com/facebook/zstd/issues/3724 + CompatV155 bool + + // Use the specified encoder level. + // The dictionary will be built using the specified encoder level, + // which will reflect speed and make the dictionary tailored for that level. + // If not set SpeedBestCompression will be used. + Level EncoderLevel + + // DebugOut will write stats and other details here if set. + DebugOut io.Writer +} + +func BuildDict(o BuildDictOptions) ([]byte, error) { + initPredefined() + hist := o.History + contents := o.Contents + debug := o.DebugOut != nil + println := func(args ...interface{}) { + if o.DebugOut != nil { + fmt.Fprintln(o.DebugOut, args...) + } + } + printf := func(s string, args ...interface{}) { + if o.DebugOut != nil { + fmt.Fprintf(o.DebugOut, s, args...) + } + } + print := func(args ...interface{}) { + if o.DebugOut != nil { + fmt.Fprint(o.DebugOut, args...) 
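// Hedged usage sketch, not from this patch: driving the new BuildDict API.
// The samples are synthetic; the option names are exactly those of
// BuildDictOptions above, and [3]int{1, 4, 8} are zstd's default repeat
// offsets (the training loop may replace them with better ones it finds).
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	samples := make([][]byte, 0, 64)
	for i := 0; i < 64; i++ {
		samples = append(samples, bytes.Repeat([]byte(fmt.Sprintf("record-%03d|", i)), 20))
	}
	dict, err := zstd.BuildDict(zstd.BuildDictOptions{
		ID:       0x1234,
		Contents: samples,         // blocks shorter than 8 bytes are skipped
		History:  samples[0],      // must be 8 bytes or longer
		Offsets:  [3]int{1, 4, 8}, // zstd default repeat offsets
		Level:    zstd.SpeedBestCompression,
	})
	if err != nil {
		panic(err)
	}
	// The result can be passed to WithEncoderDict / WithDecoderDicts.
	fmt.Println("dictionary:", len(dict), "bytes")
}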
+ } + } + + if int64(len(hist)) > dictMaxLength { + return nil, fmt.Errorf("dictionary of size %d > %d", len(hist), int64(dictMaxLength)) + } + if len(hist) < 8 { + return nil, fmt.Errorf("dictionary of size %d < %d", len(hist), 8) + } + if len(contents) == 0 { + return nil, errors.New("no content provided") + } + d := dict{ + id: o.ID, + litEnc: nil, + llDec: sequenceDec{}, + ofDec: sequenceDec{}, + mlDec: sequenceDec{}, + offsets: o.Offsets, + content: hist, + } + block := blockEnc{lowMem: false} + block.init() + enc := encoder(&bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(maxMatchLen), bufferReset: math.MaxInt32 - int32(maxMatchLen*2), lowMem: false}}) + if o.Level != 0 { + eOpts := encoderOptions{ + level: o.Level, + blockSize: maxMatchLen, + windowSize: maxMatchLen, + dict: &d, + lowMem: false, + } + enc = eOpts.encoder() + } else { + o.Level = SpeedBestCompression + } + var ( + remain [256]int + ll [256]int + ml [256]int + of [256]int + ) + addValues := func(dst *[256]int, src []byte) { + for _, v := range src { + dst[v]++ + } + } + addHist := func(dst *[256]int, src *[256]uint32) { + for i, v := range src { + dst[i] += int(v) + } + } + seqs := 0 + nUsed := 0 + litTotal := 0 + newOffsets := make(map[uint32]int, 1000) + for _, b := range contents { + block.reset(nil) + if len(b) < 8 { + continue + } + nUsed++ + enc.Reset(&d, true) + enc.Encode(&block, b) + addValues(&remain, block.literals) + litTotal += len(block.literals) + seqs += len(block.sequences) + block.genCodes() + addHist(&ll, block.coders.llEnc.Histogram()) + addHist(&ml, block.coders.mlEnc.Histogram()) + addHist(&of, block.coders.ofEnc.Histogram()) + for i, seq := range block.sequences { + if i > 3 { + break + } + offset := seq.offset + if offset == 0 { + continue + } + if offset > 3 { + newOffsets[offset-3]++ + } else { + newOffsets[uint32(o.Offsets[offset-1])]++ + } + } + } + // Find most used offsets. + var sortedOffsets []uint32 + for k := range newOffsets { + sortedOffsets = append(sortedOffsets, k) + } + sort.Slice(sortedOffsets, func(i, j int) bool { + a, b := sortedOffsets[i], sortedOffsets[j] + if a == b { + // Prefer the longer offset + return sortedOffsets[i] > sortedOffsets[j] + } + return newOffsets[sortedOffsets[i]] > newOffsets[sortedOffsets[j]] + }) + if len(sortedOffsets) > 3 { + if debug { + print("Offsets:") + for i, v := range sortedOffsets { + if i > 20 { + break + } + printf("[%d: %d],", v, newOffsets[v]) + } + println("") + } + + sortedOffsets = sortedOffsets[:3] + } + for i, v := range sortedOffsets { + o.Offsets[i] = int(v) + } + if debug { + println("New repeat offsets", o.Offsets) + } + + if nUsed == 0 || seqs == 0 { + return nil, fmt.Errorf("%d blocks, %d sequences found", nUsed, seqs) + } + if debug { + println("Sequences:", seqs, "Blocks:", nUsed, "Literals:", litTotal) + } + if seqs/nUsed < 512 { + // Use 512 as minimum. 
+ nUsed = seqs / 512 + } + copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) { + hist := dst.Histogram() + var maxSym uint8 + var maxCount int + var fakeLength int + for i, v := range src { + if v > 0 { + v = v / nUsed + if v == 0 { + v = 1 + } + } + if v > maxCount { + maxCount = v + } + if v != 0 { + maxSym = uint8(i) + } + fakeLength += v + hist[i] = uint32(v) + } + dst.HistogramFinished(maxSym, maxCount) + dst.reUsed = false + dst.useRLE = false + err := dst.normalizeCount(fakeLength) + if err != nil { + return nil, err + } + if debug { + println("RAW:", dst.count[:maxSym+1], "NORM:", dst.norm[:maxSym+1], "LEN:", fakeLength) + } + return dst.writeCount(nil) + } + if debug { + print("Literal lengths: ") + } + llTable, err := copyHist(block.coders.llEnc, &ll) + if err != nil { + return nil, err + } + if debug { + print("Match lengths: ") + } + mlTable, err := copyHist(block.coders.mlEnc, &ml) + if err != nil { + return nil, err + } + if debug { + print("Offsets: ") + } + ofTable, err := copyHist(block.coders.ofEnc, &of) + if err != nil { + return nil, err + } + + // Literal table + avgSize := litTotal + if avgSize > huff0.BlockSizeMax/2 { + avgSize = huff0.BlockSizeMax / 2 + } + huffBuff := make([]byte, 0, avgSize) + // Target size + div := litTotal / avgSize + if div < 1 { + div = 1 + } + if debug { + println("Huffman weights:") + } + for i, n := range remain[:] { + if n > 0 { + n = n / div + // Allow all entries to be represented. + if n == 0 { + n = 1 + } + huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) + if debug { + printf("[%d: %d], ", i, n) + } + } + } + if o.CompatV155 && remain[255]/div == 0 { + huffBuff = append(huffBuff, 255) + } + scratch := &huff0.Scratch{TableLog: 11} + for tries := 0; tries < 255; tries++ { + scratch = &huff0.Scratch{TableLog: 11} + _, _, err = huff0.Compress1X(huffBuff, scratch) + if err == nil { + break + } + if debug { + printf("Try %d: Huffman error: %v\n", tries+1, err) + } + huffBuff = huffBuff[:0] + if tries == 250 { + if debug { + println("Huffman: Bailing out with predefined table") + } + + // Bail out.... Just generate something + huffBuff = append(huffBuff, bytes.Repeat([]byte{255}, 10000)...) + for i := 0; i < 128; i++ { + huffBuff = append(huffBuff, byte(i)) + } + continue + } + if errors.Is(err, huff0.ErrIncompressible) { + // Try truncating least common. + for i, n := range remain[:] { + if n > 0 { + n = n / (div * (i + 1)) + if n > 0 { + huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) + } + } + } + if o.CompatV155 && len(huffBuff) > 0 && huffBuff[len(huffBuff)-1] != 255 { + huffBuff = append(huffBuff, 255) + } + if len(huffBuff) == 0 { + huffBuff = append(huffBuff, 0, 255) + } + } + if errors.Is(err, huff0.ErrUseRLE) { + for i, n := range remain[:] { + n = n / (div * (i + 1)) + // Allow all entries to be represented. + if n == 0 { + n = 1 + } + huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) 
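// Worked example, not from the patch: the count scaling used by the retry
// loops above. Counts are divided down to hit a target buffer size, but
// floored at 1 so every byte value that occurred stays representable in the
// Huffman table that Compress1X builds.
package main

import (
	"bytes"
	"fmt"
)

func main() {
	counts := map[byte]int{'a': 9000, 'b': 120, 'c': 7}
	div := 100 // plays the role of litTotal/avgSize in BuildDict
	var synthetic []byte
	for b, n := range counts {
		n /= div
		if n == 0 {
			n = 1 // floor: keep rare symbols representable
		}
		synthetic = append(synthetic, bytes.Repeat([]byte{b}, n)...)
	}
	fmt.Println(len(synthetic), "bytes feed the table builder") // 90+1+1 = 92
}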
+ } + } + } + + var out bytes.Buffer + out.Write([]byte(dictMagic)) + out.Write(binary.LittleEndian.AppendUint32(nil, o.ID)) + out.Write(scratch.OutTable) + if debug { + println("huff table:", len(scratch.OutTable), "bytes") + println("of table:", len(ofTable), "bytes") + println("ml table:", len(mlTable), "bytes") + println("ll table:", len(llTable), "bytes") + } + out.Write(ofTable) + out.Write(mlTable) + out.Write(llTable) + out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[0]))) + out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[1]))) + out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[2]))) + out.Write(hist) + if debug { + _, err := loadDict(out.Bytes()) + if err != nil { + panic(err) + } + i, err := InspectDictionary(out.Bytes()) + if err != nil { + panic(err) + } + println("ID:", i.ID()) + println("Content size:", i.ContentSize()) + println("Encoder:", i.LitEncoder() != nil) + println("Offsets:", i.Offsets()) + var totalSize int + for _, b := range contents { + totalSize += len(b) + } + + encWith := func(opts ...EOption) int { + enc, err := NewWriter(nil, opts...) + if err != nil { + panic(err) + } + defer enc.Close() + var dst []byte + var totalSize int + for _, b := range contents { + dst = enc.EncodeAll(b, dst[:0]) + totalSize += len(dst) + } + return totalSize + } + plain := encWith(WithEncoderLevel(o.Level)) + withDict := encWith(WithEncoderLevel(o.Level), WithEncoderDict(out.Bytes())) + println("Input size:", totalSize) + println("Plain Compressed:", plain) + println("Dict Compressed:", withDict) + println("Saved:", plain-withDict, (plain-withDict)/len(contents), "bytes per input (rounded down)") + } + return out.Bytes(), nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go index bfb2e146..5ca46038 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_base.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -144,12 +144,13 @@ func (e *fastBase) resetBase(d *dict, singleBlock bool) { } else { e.crc.Reset() } + e.blk.dictLitEnc = nil if d != nil { low := e.lowMem if singleBlock { e.lowMem = true } - e.ensureHist(d.DictContentSize() + maxCompressedBlockSize) + e.ensureHist(d.ContentSize() + maxCompressedBlockSize) e.lowMem = low } diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go index 830f5ba7..c81a1535 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -32,10 +32,9 @@ type match struct { length int32 rep int32 est int32 - _ [12]byte // Aligned size to cache line: 4+4+4+4+4 bytes + 12 bytes padding = 32 bytes } -const highScore = 25000 +const highScore = maxMatchLen * 8 // estBits will estimate output bits from predefined tables. func (m *match) estBits(bitsPerByte int32) { @@ -44,7 +43,7 @@ func (m *match) estBits(bitsPerByte int32) { if m.rep < 0 { ofc = ofCode(uint32(m.s-m.offset) + 3) } else { - ofc = ofCode(uint32(m.rep)) + ofc = ofCode(uint32(m.rep) & 3) } // Cost, excluding ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc] @@ -160,7 +159,6 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { // nextEmit is where in src the next emitLiteral should start from. 
nextEmit := s - cv := load6432(src, s) // Relative offsets offset1 := int32(blk.recentOffsets[0]) @@ -174,7 +172,6 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { blk.literals = append(blk.literals, src[nextEmit:until]...) s.litLen = uint32(until - nextEmit) } - _ = addLiterals if debugEncoder { println("recent offsets:", blk.recentOffsets) @@ -189,58 +186,103 @@ encodeLoop: panic("offset0 was 0") } - bestOf := func(a, b *match) *match { - if a.est-b.est+(a.s-b.s)*bitsPerByte>>10 < 0 { - return a - } - return b - } - const goodEnough = 100 + const goodEnough = 250 + + cv := load6432(src, s) nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) candidateL := e.longTable[nextHashL] candidateS := e.table[nextHashS] - matchAt := func(offset int32, s int32, first uint32, rep int32) match { - if s-offset >= e.maxMatchOff || load3232(src, offset) != first { - return match{s: s, est: highScore} + // Set m to a match at offset if it looks like that will improve compression. + improve := func(m *match, offset int32, s int32, first uint32, rep int32) { + delta := s - offset + if delta >= e.maxMatchOff || delta <= 0 || load3232(src, offset) != first { + return } if debugAsserts { + if offset >= s { + panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff)) + } if !bytes.Equal(src[s:s+4], src[offset:offset+4]) { panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) } } - m := match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep} - m.estBits(bitsPerByte) - return m + // Try to quick reject if we already have a long match. + if m.length > 16 { + left := len(src) - int(m.s+m.length) + // If we are too close to the end, keep as is. + if left <= 0 { + return + } + checkLen := m.length - (s - m.s) - 8 + if left > 2 && checkLen > 4 { + // Check 4 bytes, 4 bytes from the end of the current match. + a := load3232(src, offset+checkLen) + b := load3232(src, s+checkLen) + if a != b { + return + } + } + } + l := 4 + e.matchlen(s+4, offset+4, src) + if true { + // Extend candidate match backwards as far as possible. 
+ tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for offset > tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength { + s-- + offset-- + l++ + } + } + + cand := match{offset: offset, s: s, length: l, rep: rep} + cand.estBits(bitsPerByte) + if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 { + *m = cand + } } - m1 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1) - m2 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1) - m3 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1) - m4 := matchAt(candidateS.prev-e.cur, s, uint32(cv), -1) - best := bestOf(bestOf(&m1, &m2), bestOf(&m3, &m4)) + best := match{s: s, est: highScore} + improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1) + improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1) + improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1) + improve(&best, candidateS.prev-e.cur, s, uint32(cv), -1) if canRepeat && best.length < goodEnough { - cv32 := uint32(cv >> 8) - spp := s + 1 - m1 := matchAt(spp-offset1, spp, cv32, 1) - m2 := matchAt(spp-offset2, spp, cv32, 2) - m3 := matchAt(spp-offset3, spp, cv32, 3) - best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3)) - if best.length > 0 { - cv32 = uint32(cv >> 24) - spp += 2 - m1 := matchAt(spp-offset1, spp, cv32, 1) - m2 := matchAt(spp-offset2, spp, cv32, 2) - m3 := matchAt(spp-offset3, spp, cv32, 3) - best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3)) + if s == nextEmit { + // Check repeats straight after a match. + improve(&best, s-offset2, s, uint32(cv), 1|4) + improve(&best, s-offset3, s, uint32(cv), 2|4) + if offset1 > 1 { + improve(&best, s-(offset1-1), s, uint32(cv), 3|4) + } + } + + // If either no match or a non-repeat match, check at + 1 + if best.rep <= 0 { + cv32 := uint32(cv >> 8) + spp := s + 1 + improve(&best, spp-offset1, spp, cv32, 1) + improve(&best, spp-offset2, spp, cv32, 2) + improve(&best, spp-offset3, spp, cv32, 3) + if best.rep < 0 { + cv32 = uint32(cv >> 24) + spp += 2 + improve(&best, spp-offset1, spp, cv32, 1) + improve(&best, spp-offset2, spp, cv32, 2) + improve(&best, spp-offset3, spp, cv32, 3) + } } } // Load next and check... e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset} e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset} + index0 := s + 1 // Look far ahead, unless we have a really long match already... 
if best.length < goodEnough { @@ -250,47 +292,45 @@ encodeLoop: if s >= sLimit { break encodeLoop } - cv = load6432(src, s) continue } - s++ candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)] - cv = load6432(src, s) - cv2 := load6432(src, s+1) + cv = load6432(src, s+1) + cv2 := load6432(src, s+2) candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)] candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)] // Short at s+1 - m1 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1) + improve(&best, candidateS.offset-e.cur, s+1, uint32(cv), -1) // Long at s+1, s+2 - m2 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1) - m3 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1) - m4 := matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1) - m5 := matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1) - best = bestOf(bestOf(bestOf(best, &m1), &m2), bestOf(bestOf(&m3, &m4), &m5)) + improve(&best, candidateL.offset-e.cur, s+1, uint32(cv), -1) + improve(&best, candidateL.prev-e.cur, s+1, uint32(cv), -1) + improve(&best, candidateL2.offset-e.cur, s+2, uint32(cv2), -1) + improve(&best, candidateL2.prev-e.cur, s+2, uint32(cv2), -1) if false { // Short at s+3. // Too often worse... - m := matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1) - best = bestOf(best, &m) + improve(&best, e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+3, uint32(cv2>>8), -1) } - // See if we can find a better match by checking where the current best ends. - // Use that offset to see if we can find a better full match. - if sAt := best.s + best.length; sAt < sLimit { - nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) - candidateEnd := e.longTable[nextHashL] - // Start check at a fixed offset to allow for a few mismatches. - // For this compression level 2 yields the best results. - const skipBeginning = 2 - if pos := candidateEnd.offset - e.cur - best.length + skipBeginning; pos >= 0 { - m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) - bestEnd := bestOf(best, &m) - if pos := candidateEnd.prev - e.cur - best.length + skipBeginning; pos >= 0 { - m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) - bestEnd = bestOf(bestEnd, &m) + + // Start check at a fixed offset to allow for a few mismatches. + // For this compression level 2 yields the best results. + // We cannot do this if we have already indexed this position. + const skipBeginning = 2 + if best.s > s-skipBeginning { + // See if we can find a better match by checking where the current best ends. + // Use that offset to see if we can find a better full match. + if sAt := best.s + best.length; sAt < sLimit { + nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) + candidateEnd := e.longTable[nextHashL] + + if off := candidateEnd.offset - e.cur - best.length + skipBeginning; off >= 0 { + improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + if off := candidateEnd.prev - e.cur - best.length + skipBeginning; off >= 0 { + improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + } } - best = bestEnd } } } @@ -303,51 +343,31 @@ encodeLoop: // We have a match, we can store the forward value if best.rep > 0 { - s = best.s var seq seq seq.matchLen = uint32(best.length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. 
- start := best.s - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - repIndex := best.offset - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ + if debugAsserts && s < nextEmit { + panic("s < nextEmit") } - addLiterals(&seq, start) + addLiterals(&seq, best.s) - // rep 0 - seq.offset = uint32(best.rep) + // Repeat. If bit 4 is set, this is a non-lit repeat. + seq.offset = uint32(best.rep & 3) if debugSequences { println("repeat sequence", seq, "next s:", s) } blk.sequences = append(blk.sequences, seq) - // Index match start+1 (long) -> s - 1 - index0 := s + // Index old s + 1 -> s - 1 s = best.s + best.length - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, best.length) - } - break encodeLoop - } // Index skipped... + end := s + if s > sLimit+4 { + end = sLimit + 4 + } off := index0 + e.cur - for index0 < s-1 { + for index0 < end { cv0 := load6432(src, index0) h0 := hashLen(cv0, bestLongTableBits, bestLongLen) h1 := hashLen(cv0, bestShortTableBits, bestShortLen) @@ -356,13 +376,21 @@ encodeLoop: off++ index0++ } + switch best.rep { - case 2: + case 2, 4 | 1: offset1, offset2 = offset2, offset1 - case 3: + case 3, 4 | 2: offset1, offset2, offset3 = offset3, offset1, offset2 + case 4 | 3: + offset1, offset2, offset3 = offset1-1, offset1, offset2 + } + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, best.length) + } + break encodeLoop } - cv = load6432(src, s) continue } @@ -380,22 +408,9 @@ encodeLoop: panic("invalid offset") } - // Extend the n-byte match as long as possible. - l := best.length - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - // Write our sequence var seq seq + l := best.length seq.litLen = uint32(s - nextEmit) seq.matchLen = uint32(l - zstdMinMatch) if seq.litLen > 0 { @@ -408,65 +423,25 @@ encodeLoop: } blk.sequences = append(blk.sequences, seq) nextEmit = s - if s >= sLimit { - break encodeLoop + + // Index old s + 1 -> s - 1 or sLimit + end := s + if s > sLimit-4 { + end = sLimit - 4 } - // Index match start+1 (long) -> s - 1 - index0 := s - l + 1 - // every entry - for index0 < s-1 { + off := index0 + e.cur + for index0 < end { cv0 := load6432(src, index0) h0 := hashLen(cv0, bestLongTableBits, bestLongLen) h1 := hashLen(cv0, bestShortTableBits, bestShortLen) - off := index0 + e.cur e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} index0++ + off++ } - - cv = load6432(src, s) - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) - nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) - - // We have at least 4 byte match. - // No need to check backwards. 
We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} - e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: e.table[nextHashS].offset} - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) + if s >= sLimit { + break encodeLoop } } diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index 8582f31a..20d25b0e 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -145,7 +145,7 @@ encodeLoop: var t int32 // We allow the encoder to optionally turn off repeat offsets across blocks canRepeat := len(blk.sequences) > 2 - var matched int32 + var matched, index0 int32 for { if debugAsserts && canRepeat && offset1 == 0 { @@ -162,6 +162,7 @@ encodeLoop: off := s + e.cur e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + index0 = s + 1 if canRepeat { if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { @@ -258,7 +259,6 @@ encodeLoop: } blk.sequences = append(blk.sequences, seq) - index0 := s + repOff2 s += lenght + repOff2 nextEmit = s if s >= sLimit { @@ -498,15 +498,15 @@ encodeLoop: } // Index match start+1 (long) -> s - 1 - index0 := s - l + 1 + off := index0 + e.cur for index0 < s-1 { cv0 := load6432(src, index0) cv1 := cv0 >> 8 h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} index0 += 2 + off += 2 } cv = load6432(src, s) @@ -672,7 +672,7 @@ encodeLoop: var t int32 // We allow the encoder to optionally turn off repeat offsets across blocks canRepeat := len(blk.sequences) > 2 - var matched int32 + var matched, index0 int32 for { if debugAsserts && canRepeat && offset1 == 0 { @@ -691,6 +691,7 @@ encodeLoop: e.markLongShardDirty(nextHashL) e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} e.markShortShardDirty(nextHashS) + index0 = s + 1 if canRepeat { if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { @@ -726,7 +727,6 @@ encodeLoop: blk.sequences = append(blk.sequences, seq) // Index match start+1 (long) -> s - 1 - index0 := s + repOff s += lenght + repOff nextEmit = s @@ -790,7 +790,6 @@ encodeLoop: } blk.sequences = append(blk.sequences, seq) - index0 := s + repOff2 s += lenght + repOff2 nextEmit = s if s >= sLimit { @@ -1024,18 +1023,18 @@ encodeLoop: } // Index match start+1 (long) -> s - 1 - index0 := s - l + 1 + off := index0 + e.cur for index0 < s-1 { cv0 := load6432(src, index0) cv1 := cv0 >> 8 h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} e.markLongShardDirty(h0) h1 := hashLen(cv1, betterShortTableBits, betterShortLen) e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} e.markShortShardDirty(h1) index0 += 2 + off += 2 } cv = 
load6432(src, s) diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index 7d425109..a154c18f 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -1084,7 +1084,7 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { } } e.lastDictID = d.id - e.allDirty = true + allDirty = true } // Reset table to initial state e.cur = e.maxMatchOff diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go index 315b1a8f..f45a3da7 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -133,8 +133,7 @@ encodeLoop: if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { // Consider history as well. var seq seq - var length int32 - length = 4 + e.matchlen(s+6, repIndex+4, src) + length := 4 + e.matchlen(s+6, repIndex+4, src) seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. @@ -645,8 +644,7 @@ encodeLoop: if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { // Consider history as well. var seq seq - var length int32 - length = 4 + e.matchlen(s+6, repIndex+4, src) + length := 4 + e.matchlen(s+6, repIndex+4, src) seq.matchLen = uint32(length - zstdMinMatch) @@ -831,13 +829,12 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { } if true { end := e.maxMatchOff + int32(len(d.content)) - 8 - for i := e.maxMatchOff; i < end; i += 3 { + for i := e.maxMatchOff; i < end; i += 2 { const hashLog = tableBits cv := load6432(d.content, i-e.maxMatchOff) - nextHash := hashLen(cv, hashLog, tableFastHashLen) // 0 -> 5 - nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 6 - nextHash2 := hashLen(cv>>16, hashLog, tableFastHashLen) // 2 -> 7 + nextHash := hashLen(cv, hashLog, tableFastHashLen) // 0 -> 6 + nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 7 e.dictTable[nextHash] = tableEntry{ val: uint32(cv), offset: i, @@ -846,10 +843,6 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { val: uint32(cv >> 8), offset: i + 1, } - e.dictTable[nextHash2] = tableEntry{ - val: uint32(cv >> 16), - offset: i + 2, - } } } e.lastDictID = d.id diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go index 65c6c36d..72af7ef0 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -227,10 +227,7 @@ func (e *Encoder) nextBlock(final bool) error { DictID: e.o.dict.ID(), } - dst, err := fh.appendTo(tmp[:0]) - if err != nil { - return err - } + dst := fh.appendTo(tmp[:0]) s.headerWritten = true s.wWg.Wait() var n2 int @@ -277,23 +274,9 @@ func (e *Encoder) nextBlock(final bool) error { s.eofWritten = true } - err := errIncompressible - // If we got the exact same number of literals as input, - // assume the literals cannot be compressed. - if len(src) != len(blk.literals) || len(src) != e.o.blockSize { - err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - } - switch err { - case errIncompressible: - if debugEncoder { - println("Storing incompressible block as raw") - } - blk.encodeRaw(src) - // In fast mode, we do not transfer offsets, so we don't have to deal with changing the. 
- case nil: - default: - s.err = err - return err + s.err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if s.err != nil { + return s.err } _, s.err = s.w.Write(blk.output) s.nWritten += int64(len(blk.output)) @@ -343,22 +326,8 @@ func (e *Encoder) nextBlock(final bool) error { } s.wWg.Done() }() - err := errIncompressible - // If we got the exact same number of literals as input, - // assume the literals cannot be compressed. - if len(src) != len(blk.literals) || len(src) != e.o.blockSize { - err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - } - switch err { - case errIncompressible: - if debugEncoder { - println("Storing incompressible block as raw") - } - blk.encodeRaw(src) - // In fast mode, we do not transfer offsets, so we don't have to deal with changing the. - case nil: - default: - s.writeErr = err + s.writeErr = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if s.writeErr != nil { return } _, s.writeErr = s.w.Write(blk.output) @@ -511,7 +480,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { Checksum: false, DictID: 0, } - dst, _ = fh.appendTo(dst) + dst = fh.appendTo(dst) // Write raw block as last one only. var blk blockHeader @@ -546,10 +515,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem { dst = make([]byte, 0, len(src)) } - dst, err := fh.appendTo(dst) - if err != nil { - panic(err) - } + dst = fh.appendTo(dst) // If we can do everything in one block, prefer that. if len(src) <= e.o.blockSize { @@ -568,25 +534,15 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { // If we got the exact same number of literals as input, // assume the literals cannot be compressed. - err := errIncompressible oldout := blk.output - if len(blk.literals) != len(src) || len(src) != e.o.blockSize { - // Output directly to dst - blk.output = dst - err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - } + // Output directly to dst + blk.output = dst - switch err { - case errIncompressible: - if debugEncoder { - println("Storing incompressible block as raw") - } - dst = blk.encodeRawTo(dst, src) - case nil: - dst = blk.output - default: + err := blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + if err != nil { panic(err) } + dst = blk.output blk.output = oldout } else { enc.Reset(e.o.dict, false) @@ -605,25 +561,11 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { if len(src) == 0 { blk.last = true } - err := errIncompressible - // If we got the exact same number of literals as input, - // assume the literals cannot be compressed. - if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize { - err = blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy) - } - - switch err { - case errIncompressible: - if debugEncoder { - println("Storing incompressible block as raw") - } - dst = blk.encodeRawTo(dst, todo) - blk.popOffsets() - case nil: - dst = append(dst, blk.output...) - default: + err := blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy) + if err != nil { panic(err) } + dst = append(dst, blk.output...) 
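// Illustrative round trip, not part of the patch: with errIncompressible
// gone, the raw-block fallback moves down into blk.encode, so the call sites
// above only propagate real errors. Random data still encodes and decodes,
// it just comes out slightly larger than the input.
package main

import (
	"bytes"
	"crypto/rand"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	in := make([]byte, 1<<20)
	if _, err := rand.Read(in); err != nil {
		panic(err)
	}

	enc, _ := zstd.NewWriter(nil)
	out := enc.EncodeAll(in, nil)
	enc.Close()

	dec, _ := zstd.NewReader(nil)
	got, err := dec.DecodeAll(out, nil)
	dec.Close()
	if err != nil || !bytes.Equal(got, in) {
		panic("round trip failed")
	}
	fmt.Println("in:", len(in), "out:", len(out)) // out is in plus block/frame overhead
}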
blk.reset(nil) } } @@ -633,6 +575,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { // Add padding with content from crypto/rand.Reader if e.o.pad > 0 { add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad)) + var err error dst, err = skippableFrame(dst, add, rand.Reader) if err != nil { panic(err) diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go index 8e15be2f..20671dcb 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -39,7 +39,7 @@ func (o *encoderOptions) setDefault() { blockSize: maxCompressedBlockSize, windowSize: 8 << 20, level: SpeedDefault, - allLitEntropy: true, + allLitEntropy: false, lowMem: false, } } @@ -94,7 +94,7 @@ func WithEncoderConcurrency(n int) EOption { // The value must be a power of two between MinWindowSize and MaxWindowSize. // A larger value will enable better compression but allocate more memory and, // for above-default values, take considerably longer. -// The default value is determined by the compression level. +// The default value is determined by the compression level and max 8MB. func WithWindowSize(n int) EOption { return func(o *encoderOptions) error { switch { @@ -129,7 +129,7 @@ func WithEncoderPadding(n int) EOption { } // No need to waste our time. if n == 1 { - o.pad = 0 + n = 0 } if n > 1<<30 { return fmt.Errorf("padding must less than 1GB (1<<30 bytes) ") @@ -232,13 +232,13 @@ func WithEncoderLevel(l EncoderLevel) EOption { case SpeedDefault: o.windowSize = 8 << 20 case SpeedBetterCompression: - o.windowSize = 16 << 20 + o.windowSize = 8 << 20 case SpeedBestCompression: - o.windowSize = 32 << 20 + o.windowSize = 8 << 20 } } if !o.customALEntropy { - o.allLitEntropy = l > SpeedFastest + o.allLitEntropy = l > SpeedDefault } return nil diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index d8e8a05b..53e160f7 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -73,20 +73,20 @@ func (d *frameDec) reset(br byteBuffer) error { switch err { case io.EOF, io.ErrUnexpectedEOF: return io.EOF - default: - return err case nil: signature[0] = b[0] + default: + return err } // Read the rest, don't allow io.ErrUnexpectedEOF b, err = br.readSmall(3) switch err { case io.EOF: return io.EOF - default: - return err case nil: copy(signature[1:], b) + default: + return err } if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 { @@ -293,13 +293,9 @@ func (d *frameDec) next(block *blockDec) error { return nil } -// checkCRC will check the checksum if the frame has one. +// checkCRC will check the checksum, assuming the frame has one. // Will return ErrCRCMismatch if crc check failed, otherwise nil. func (d *frameDec) checkCRC() error { - if !d.HasCheckSum { - return nil - } - // We can overwrite upper tmp now buf, err := d.rawInput.readSmall(4) if err != nil { @@ -307,10 +303,6 @@ func (d *frameDec) checkCRC() error { return err } - if d.o.ignoreChecksum { - return nil - } - want := binary.LittleEndian.Uint32(buf[:4]) got := uint32(d.crc.Sum64()) @@ -326,17 +318,13 @@ func (d *frameDec) checkCRC() error { return nil } -// consumeCRC reads the checksum data if the frame has one. +// consumeCRC skips over the checksum, assuming the frame has one. 
func (d *frameDec) consumeCRC() error { - if d.HasCheckSum { - _, err := d.rawInput.readSmall(4) - if err != nil { - println("CRC missing?", err) - return err - } + _, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) } - - return nil + return err } // runDecoder will run the decoder for the remainder of the frame. @@ -415,15 +403,8 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { if d.o.ignoreChecksum { err = d.consumeCRC() } else { - var n int - n, err = d.crc.Write(dst[crcStart:]) - if err == nil { - if n != len(dst)-crcStart { - err = io.ErrShortWrite - } else { - err = d.checkCRC() - } - } + d.crc.Write(dst[crcStart:]) + err = d.checkCRC() } } } diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go index 4ef7f5a3..667ca067 100644 --- a/vendor/github.com/klauspost/compress/zstd/frameenc.go +++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go @@ -22,7 +22,7 @@ type frameHeader struct { const maxHeaderSize = 14 -func (f frameHeader) appendTo(dst []byte) ([]byte, error) { +func (f frameHeader) appendTo(dst []byte) []byte { dst = append(dst, frameMagic...) var fhd uint8 if f.Checksum { @@ -76,7 +76,7 @@ func (f frameHeader) appendTo(dst []byte) ([]byte, error) { if f.SingleSegment { dst = append(dst, uint8(f.ContentSize)) } - // Unless SingleSegment is set, framessizes < 256 are nto stored. + // Unless SingleSegment is set, framessizes < 256 are not stored. case 1: f.ContentSize -= 256 dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8)) @@ -88,7 +88,7 @@ func (f frameHeader) appendTo(dst []byte) ([]byte, error) { default: panic("invalid fcs") } - return dst, nil + return dst } const skippableFrameHeader = 4 + 4 diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go index 332e51fe..8adfebb0 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go @@ -20,10 +20,9 @@ func (s *fseDecoder) buildDtable() error { if v == -1 { s.dt[highThreshold].setAddBits(uint8(i)) highThreshold-- - symbolNext[i] = 1 - } else { - symbolNext[i] = uint16(v) + v = 1 } + symbolNext[i] = uint16(v) } } @@ -35,10 +34,12 @@ func (s *fseDecoder) buildDtable() error { for ss, v := range s.norm[:s.symbolLen] { for i := 0; i < int(v); i++ { s.dt[position].setAddBits(uint8(ss)) - position = (position + step) & tableMask - for position > highThreshold { + for { // lowprob area position = (position + step) & tableMask + if position <= highThreshold { + break + } } } } diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go new file mode 100644 index 00000000..f41932b7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go @@ -0,0 +1,16 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. 
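// For reference, a self-contained copy of the generic matchLen added in the
// new matchlen_generic.go below; on amd64 builds the assembly version in
// matchlen_amd64.s replaces it. It counts the common prefix 8 bytes at a
// time, then byte by byte; callers must pass the shorter slice first.
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

func matchLen(a, b []byte) (n int) {
	for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
		if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
			// First differing byte: trailing zero bits / 8.
			return n + bits.TrailingZeros64(diff)>>3
		}
		n += 8
	}
	for i := range a {
		if a[i] != b[i] {
			break
		}
		n++
	}
	return n
}

func main() {
	fmt.Println(matchLen([]byte("zstandard!"), []byte("zstandard?"))) // 9
}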
+ +package zstd + +// matchLen returns how many bytes match in a and b +// +// It assumes that: +// +// len(a) <= len(b) and len(a) > 0 +// +//go:noescape +func matchLen(a []byte, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s new file mode 100644 index 00000000..9a7655c0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s @@ -0,0 +1,68 @@ +// Copied from S2 implementation. + +//go:build !appengine && !noasm && gc && !noasm + +#include "textflag.h" + +// func matchLen(a []byte, b []byte) int +// Requires: BMI +TEXT ·matchLen(SB), NOSPLIT, $0-56 + MOVQ a_base+0(FP), AX + MOVQ b_base+24(FP), CX + MOVQ a_len+8(FP), DX + + // matchLen + XORL SI, SI + CMPL DX, $0x08 + JB matchlen_match4_standalone + +matchlen_loopback_standalone: + MOVQ (AX)(SI*1), BX + XORQ (CX)(SI*1), BX + TESTQ BX, BX + JZ matchlen_loop_standalone + +#ifdef GOAMD64_v3 + TZCNTQ BX, BX +#else + BSFQ BX, BX +#endif + SARQ $0x03, BX + LEAL (SI)(BX*1), SI + JMP gen_match_len_end + +matchlen_loop_standalone: + LEAL -8(DX), DX + LEAL 8(SI), SI + CMPL DX, $0x08 + JAE matchlen_loopback_standalone + +matchlen_match4_standalone: + CMPL DX, $0x04 + JB matchlen_match2_standalone + MOVL (AX)(SI*1), BX + CMPL (CX)(SI*1), BX + JNE matchlen_match2_standalone + LEAL -4(DX), DX + LEAL 4(SI), SI + +matchlen_match2_standalone: + CMPL DX, $0x02 + JB matchlen_match1_standalone + MOVW (AX)(SI*1), BX + CMPW (CX)(SI*1), BX + JNE matchlen_match1_standalone + LEAL -2(DX), DX + LEAL 2(SI), SI + +matchlen_match1_standalone: + CMPL DX, $0x01 + JB gen_match_len_end + MOVB (AX)(SI*1), BL + CMPB (CX)(SI*1), BL + JNE gen_match_len_end + INCL SI + +gen_match_len_end: + MOVQ SI, ret+48(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go new file mode 100644 index 00000000..57b9c31c --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go @@ -0,0 +1,33 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "encoding/binary" + "math/bits" +) + +// matchLen returns the maximum common prefix length of a and b. +// a must be the shortest of the two. +func matchLen(a, b []byte) (n int) { + for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { + diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) + if diff != 0 { + return n + bits.TrailingZeros64(diff)>>3 + } + n += 8 + } + + for i := range a { + if a[i] != b[i] { + break + } + n++ + } + return n + +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go index f833d154..d7fe6d82 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -236,13 +236,16 @@ func (s *sequenceDecs) decodeSync(hist []byte) error { maxBlockSize = s.windowSize } + if debugDecoder { + println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream") + } for i := seqs - 1; i >= 0; i-- { if br.overread() { - printf("reading sequence %d, exceeded available data\n", seqs-i) + printf("reading sequence %d, exceeded available data. 
Overread by %d\n", seqs-i, -br.remain()) return io.ErrUnexpectedEOF } var ll, mo, ml int - if br.off > 4+((maxOffsetBits+16+16)>>3) { + if len(br.in) > 4+((maxOffsetBits+16+16)>>3) { // inlined function: // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) @@ -314,9 +317,6 @@ func (s *sequenceDecs) decodeSync(hist []byte) error { } size := ll + ml + len(out) if size-startSize > maxBlockSize { - if size-startSize == 424242 { - panic("here") - } return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) } if size > cap(out) { @@ -427,8 +427,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error { } } - // Check if space for literals - if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize { + if size := len(s.literals) + len(out) - startSize; size > maxBlockSize { return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) } @@ -453,18 +452,13 @@ func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) // extra bits are stored in reverse order. br.fill() - if s.maxBits <= 32 { - mo += br.getBits(moB) - ml += br.getBits(mlB) - ll += br.getBits(llB) - } else { - mo += br.getBits(moB) + mo += br.getBits(moB) + if s.maxBits > 32 { br.fill() - // matchlength+literal length, max 32 bits - ml += br.getBits(mlB) - ll += br.getBits(llB) - } + // matchlength+literal length, max 32 bits + ml += br.getBits(mlB) + ll += br.getBits(llB) mo = s.adjustOffset(mo, ll, moB) return } diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go index 191384ad..8adabd82 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -5,6 +5,7 @@ package zstd import ( "fmt" + "io" "github.com/klauspost/compress/internal/cpuinfo" ) @@ -134,6 +135,9 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ctx.ll, ctx.litRemain+ctx.ll) + case errorOverread: + return true, io.ErrUnexpectedEOF + case errorNotEnoughSpace: size := ctx.outPosition + ctx.ll + ctx.ml if debugDecoder { @@ -148,7 +152,6 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { s.seqSize += ctx.litRemain if s.seqSize > maxBlockSize { return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - } err := br.close() if err != nil { @@ -203,6 +206,9 @@ const errorNotEnoughLiterals = 4 // error reported when capacity of `out` is too small const errorNotEnoughSpace = 5 +// error reported when bits are overread. +const errorOverread = 6 + // sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm. // // Please refer to seqdec_generic.go for the reference implementation. 
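// Condensed sketch, not the vendored code: the contract the hunks above and
// below extend. The assembly decoders return a small integer code and the Go
// wrappers translate it; code 6 (errorOverread) is new, raised when the bit
// reader would consume past its 64-bit buffer (the CMPQ $0x40 / JA
// error_overread checks added to every fill loop) and mapped to
// io.ErrUnexpectedEOF.
package main

import (
	"fmt"
	"io"
)

const (
	errorNotEnoughLiterals = 4 // as declared in seqdec_amd64.go above
	errorNotEnoughSpace    = 5
	errorOverread          = 6 // new in this update
)

func mapCode(code int) error {
	switch code {
	case 0:
		return nil
	case errorOverread:
		return io.ErrUnexpectedEOF
	default:
		// The real wrappers return specific messages per code; simplified here.
		return fmt.Errorf("sequence decoder returned code %d", code)
	}
}

func main() {
	fmt.Println(mapCode(errorOverread))
}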
@@ -248,6 +254,10 @@ func (s *sequenceDecs) decode(seqs []seqVals) error { litRemain: len(s.literals), } + if debugDecoder { + println("decode: decoding", len(seqs), "sequences", br.remain(), "bits remain on stream") + } + s.seqSize = 0 lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56 var errCode int @@ -278,6 +288,8 @@ func (s *sequenceDecs) decode(seqs []seqVals) error { case errorNotEnoughLiterals: ll := ctx.seqs[i].ll return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll) + case errorOverread: + return io.ErrUnexpectedEOF } return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode) @@ -292,6 +304,9 @@ func (s *sequenceDecs) decode(seqs []seqVals) error { if s.seqSize > maxBlockSize { return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) } + if debugDecoder { + println("decode: ", br.remain(), "bits remain on stream. code:", errCode) + } err := br.close() if err != nil { printf("Closing sequences: %v, %+v\n", err, *br) diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s index b94993a0..5b06174b 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -5,11 +5,11 @@ // func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // Requires: CMOV TEXT ·sequenceDecs_decode_amd64(SB), $8-32 - MOVQ br+8(FP), AX - MOVQ 32(AX), DX - MOVBQZX 40(AX), BX - MOVQ 24(AX), SI - MOVQ (AX), AX + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 32(CX), BX + MOVQ (CX), AX + MOVQ 8(CX), SI ADDQ SI, AX MOVQ AX, (SP) MOVQ ctx+16(FP), AX @@ -38,7 +38,7 @@ sequenceDecs_decode_amd64_main_loop: sequenceDecs_decode_amd64_fill_byte_by_byte: CMPQ SI, $0x00 - JLE sequenceDecs_decode_amd64_fill_end + JLE sequenceDecs_decode_amd64_fill_check_overread CMPQ BX, $0x07 JLE sequenceDecs_decode_amd64_fill_end SHLQ $0x08, DX @@ -49,6 +49,10 @@ sequenceDecs_decode_amd64_fill_byte_by_byte: ORQ AX, DX JMP sequenceDecs_decode_amd64_fill_byte_by_byte +sequenceDecs_decode_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + sequenceDecs_decode_amd64_fill_end: // Update offset MOVQ R9, AX @@ -105,7 +109,7 @@ sequenceDecs_decode_amd64_ml_update_zero: sequenceDecs_decode_amd64_fill_2_byte_by_byte: CMPQ SI, $0x00 - JLE sequenceDecs_decode_amd64_fill_2_end + JLE sequenceDecs_decode_amd64_fill_2_check_overread CMPQ BX, $0x07 JLE sequenceDecs_decode_amd64_fill_2_end SHLQ $0x08, DX @@ -116,6 +120,10 @@ sequenceDecs_decode_amd64_fill_2_byte_by_byte: ORQ AX, DX JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte +sequenceDecs_decode_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + sequenceDecs_decode_amd64_fill_2_end: // Update literal length MOVQ DI, AX @@ -149,8 +157,7 @@ sequenceDecs_decode_amd64_ll_update_zero: // Update Literal Length State MOVBQZX DI, R14 - SHRQ $0x10, DI - MOVWQZX DI, DI + SHRL $0x10, DI LEAQ (BX)(R14*1), CX MOVQ DX, R15 MOVQ CX, BX @@ -169,8 +176,7 @@ sequenceDecs_decode_amd64_ll_update_zero: // Update Match Length State MOVBQZX R8, R14 - SHRQ $0x10, R8 - MOVWQZX R8, R8 + SHRL $0x10, R8 LEAQ (BX)(R14*1), CX MOVQ DX, R15 MOVQ CX, BX @@ -189,8 +195,7 @@ sequenceDecs_decode_amd64_ll_update_zero: // Update Offset State MOVBQZX R9, R14 - SHRQ $0x10, R9 - MOVWQZX R9, R9 + SHRL $0x10, R9 LEAQ (BX)(R14*1), CX MOVQ DX, R15 MOVQ CX, BX @@ -293,9 
+298,9 @@ sequenceDecs_decode_amd64_match_len_ofs_ok: MOVQ R12, 152(AX) MOVQ R13, 160(AX) MOVQ br+8(FP), AX - MOVQ DX, 32(AX) - MOVB BL, 40(AX) - MOVQ SI, 24(AX) + MOVQ DX, 24(AX) + MOVB BL, 32(AX) + MOVQ SI, 8(AX) // Return success MOVQ $0x00000000, ret+24(FP) @@ -320,14 +325,19 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + // func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // Requires: CMOV TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 - MOVQ br+8(FP), AX - MOVQ 32(AX), DX - MOVBQZX 40(AX), BX - MOVQ 24(AX), SI - MOVQ (AX), AX + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 32(CX), BX + MOVQ (CX), AX + MOVQ 8(CX), SI ADDQ SI, AX MOVQ AX, (SP) MOVQ ctx+16(FP), AX @@ -356,7 +366,7 @@ sequenceDecs_decode_56_amd64_main_loop: sequenceDecs_decode_56_amd64_fill_byte_by_byte: CMPQ SI, $0x00 - JLE sequenceDecs_decode_56_amd64_fill_end + JLE sequenceDecs_decode_56_amd64_fill_check_overread CMPQ BX, $0x07 JLE sequenceDecs_decode_56_amd64_fill_end SHLQ $0x08, DX @@ -367,6 +377,10 @@ sequenceDecs_decode_56_amd64_fill_byte_by_byte: ORQ AX, DX JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte +sequenceDecs_decode_56_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + sequenceDecs_decode_56_amd64_fill_end: // Update offset MOVQ R9, AX @@ -442,8 +456,7 @@ sequenceDecs_decode_56_amd64_ll_update_zero: // Update Literal Length State MOVBQZX DI, R14 - SHRQ $0x10, DI - MOVWQZX DI, DI + SHRL $0x10, DI LEAQ (BX)(R14*1), CX MOVQ DX, R15 MOVQ CX, BX @@ -462,8 +475,7 @@ sequenceDecs_decode_56_amd64_ll_update_zero: // Update Match Length State MOVBQZX R8, R14 - SHRQ $0x10, R8 - MOVWQZX R8, R8 + SHRL $0x10, R8 LEAQ (BX)(R14*1), CX MOVQ DX, R15 MOVQ CX, BX @@ -482,8 +494,7 @@ sequenceDecs_decode_56_amd64_ll_update_zero: // Update Offset State MOVBQZX R9, R14 - SHRQ $0x10, R9 - MOVWQZX R9, R9 + SHRL $0x10, R9 LEAQ (BX)(R14*1), CX MOVQ DX, R15 MOVQ CX, BX @@ -586,9 +597,9 @@ sequenceDecs_decode_56_amd64_match_len_ofs_ok: MOVQ R12, 152(AX) MOVQ R13, 160(AX) MOVQ br+8(FP), AX - MOVQ DX, 32(AX) - MOVB BL, 40(AX) - MOVQ SI, 24(AX) + MOVQ DX, 24(AX) + MOVB BL, 32(AX) + MOVQ SI, 8(AX) // Return success MOVQ $0x00000000, ret+24(FP) @@ -613,14 +624,19 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + // func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // Requires: BMI, BMI2, CMOV TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 - MOVQ br+8(FP), CX - MOVQ 32(CX), AX - MOVBQZX 40(CX), DX - MOVQ 24(CX), BX - MOVQ (CX), CX + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 32(BX), DX + MOVQ (BX), CX + MOVQ 8(BX), BX ADDQ BX, CX MOVQ CX, (SP) MOVQ ctx+16(FP), CX @@ -649,7 +665,7 @@ sequenceDecs_decode_bmi2_main_loop: sequenceDecs_decode_bmi2_fill_byte_by_byte: CMPQ BX, $0x00 - JLE sequenceDecs_decode_bmi2_fill_end + JLE sequenceDecs_decode_bmi2_fill_check_overread CMPQ DX, $0x07 JLE sequenceDecs_decode_bmi2_fill_end SHLQ $0x08, AX @@ -660,6 +676,10 @@ sequenceDecs_decode_bmi2_fill_byte_by_byte: ORQ CX, AX JMP sequenceDecs_decode_bmi2_fill_byte_by_byte +sequenceDecs_decode_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + sequenceDecs_decode_bmi2_fill_end: // Update offset MOVQ $0x00000808, CX @@ -700,7 +720,7 @@ sequenceDecs_decode_bmi2_fill_end: sequenceDecs_decode_bmi2_fill_2_byte_by_byte: CMPQ BX, $0x00 - JLE 
sequenceDecs_decode_bmi2_fill_2_end + JLE sequenceDecs_decode_bmi2_fill_2_check_overread CMPQ DX, $0x07 JLE sequenceDecs_decode_bmi2_fill_2_end SHLQ $0x08, AX @@ -711,6 +731,10 @@ sequenceDecs_decode_bmi2_fill_2_byte_by_byte: ORQ CX, AX JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte +sequenceDecs_decode_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + sequenceDecs_decode_bmi2_fill_2_end: // Update literal length MOVQ $0x00000808, CX @@ -742,11 +766,10 @@ sequenceDecs_decode_bmi2_fill_2_end: BZHIQ R14, R15, R15 // Update Offset State - BZHIQ R8, R15, CX - SHRXQ R8, R15, R15 - MOVQ $0x00001010, R14 - BEXTRQ R14, R8, R8 - ADDQ CX, R8 + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + SHRL $0x10, R8 + ADDQ CX, R8 // Load ctx.ofTable MOVQ ctx+16(FP), CX @@ -754,11 +777,10 @@ sequenceDecs_decode_bmi2_fill_2_end: MOVQ (CX)(R8*8), R8 // Update Match Length State - BZHIQ DI, R15, CX - SHRXQ DI, R15, R15 - MOVQ $0x00001010, R14 - BEXTRQ R14, DI, DI - ADDQ CX, DI + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + SHRL $0x10, DI + ADDQ CX, DI // Load ctx.mlTable MOVQ ctx+16(FP), CX @@ -766,10 +788,9 @@ sequenceDecs_decode_bmi2_fill_2_end: MOVQ (CX)(DI*8), DI // Update Literal Length State - BZHIQ SI, R15, CX - MOVQ $0x00001010, R14 - BEXTRQ R14, SI, SI - ADDQ CX, SI + BZHIQ SI, R15, CX + SHRL $0x10, SI + ADDQ CX, SI // Load ctx.llTable MOVQ ctx+16(FP), CX @@ -862,9 +883,9 @@ sequenceDecs_decode_bmi2_match_len_ofs_ok: MOVQ R11, 152(CX) MOVQ R12, 160(CX) MOVQ br+8(FP), CX - MOVQ AX, 32(CX) - MOVB DL, 40(CX) - MOVQ BX, 24(CX) + MOVQ AX, 24(CX) + MOVB DL, 32(CX) + MOVQ BX, 8(CX) // Return success MOVQ $0x00000000, ret+24(FP) @@ -889,14 +910,19 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + // func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // Requires: BMI, BMI2, CMOV TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 - MOVQ br+8(FP), CX - MOVQ 32(CX), AX - MOVBQZX 40(CX), DX - MOVQ 24(CX), BX - MOVQ (CX), CX + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 32(BX), DX + MOVQ (BX), CX + MOVQ 8(BX), BX ADDQ BX, CX MOVQ CX, (SP) MOVQ ctx+16(FP), CX @@ -925,7 +951,7 @@ sequenceDecs_decode_56_bmi2_main_loop: sequenceDecs_decode_56_bmi2_fill_byte_by_byte: CMPQ BX, $0x00 - JLE sequenceDecs_decode_56_bmi2_fill_end + JLE sequenceDecs_decode_56_bmi2_fill_check_overread CMPQ DX, $0x07 JLE sequenceDecs_decode_56_bmi2_fill_end SHLQ $0x08, AX @@ -936,6 +962,10 @@ sequenceDecs_decode_56_bmi2_fill_byte_by_byte: ORQ CX, AX JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte +sequenceDecs_decode_56_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + sequenceDecs_decode_56_bmi2_fill_end: // Update offset MOVQ $0x00000808, CX @@ -993,11 +1023,10 @@ sequenceDecs_decode_56_bmi2_fill_end: BZHIQ R14, R15, R15 // Update Offset State - BZHIQ R8, R15, CX - SHRXQ R8, R15, R15 - MOVQ $0x00001010, R14 - BEXTRQ R14, R8, R8 - ADDQ CX, R8 + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + SHRL $0x10, R8 + ADDQ CX, R8 // Load ctx.ofTable MOVQ ctx+16(FP), CX @@ -1005,11 +1034,10 @@ sequenceDecs_decode_56_bmi2_fill_end: MOVQ (CX)(R8*8), R8 // Update Match Length State - BZHIQ DI, R15, CX - SHRXQ DI, R15, R15 - MOVQ $0x00001010, R14 - BEXTRQ R14, DI, DI - ADDQ CX, DI + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + SHRL $0x10, DI + ADDQ CX, DI // Load ctx.mlTable MOVQ ctx+16(FP), CX @@ -1017,10 +1045,9 @@ sequenceDecs_decode_56_bmi2_fill_end: MOVQ (CX)(DI*8), DI // Update Literal Length State - BZHIQ 
SI, R15, CX - MOVQ $0x00001010, R14 - BEXTRQ R14, SI, SI - ADDQ CX, SI + BZHIQ SI, R15, CX + SHRL $0x10, SI + ADDQ CX, SI // Load ctx.llTable MOVQ ctx+16(FP), CX @@ -1113,9 +1140,9 @@ sequenceDecs_decode_56_bmi2_match_len_ofs_ok: MOVQ R11, 152(CX) MOVQ R12, 160(CX) MOVQ br+8(FP), CX - MOVQ AX, 32(CX) - MOVB DL, 40(CX) - MOVQ BX, 24(CX) + MOVQ AX, 24(CX) + MOVB DL, 32(CX) + MOVQ BX, 8(CX) // Return success MOVQ $0x00000000, ret+24(FP) @@ -1140,6 +1167,11 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + // func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool // Requires: SSE TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9 @@ -1753,11 +1785,11 @@ empty_seqs: // func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int // Requires: CMOV, SSE TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 - MOVQ br+8(FP), AX - MOVQ 32(AX), DX - MOVBQZX 40(AX), BX - MOVQ 24(AX), SI - MOVQ (AX), AX + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 32(CX), BX + MOVQ (CX), AX + MOVQ 8(CX), SI ADDQ SI, AX MOVQ AX, (SP) MOVQ ctx+16(FP), AX @@ -1804,7 +1836,7 @@ sequenceDecs_decodeSync_amd64_main_loop: sequenceDecs_decodeSync_amd64_fill_byte_by_byte: CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_amd64_fill_end + JLE sequenceDecs_decodeSync_amd64_fill_check_overread CMPQ BX, $0x07 JLE sequenceDecs_decodeSync_amd64_fill_end SHLQ $0x08, DX @@ -1815,6 +1847,10 @@ sequenceDecs_decodeSync_amd64_fill_byte_by_byte: ORQ AX, DX JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte +sequenceDecs_decodeSync_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + sequenceDecs_decodeSync_amd64_fill_end: // Update offset MOVQ R9, AX @@ -1871,7 +1907,7 @@ sequenceDecs_decodeSync_amd64_ml_update_zero: sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte: CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_amd64_fill_2_end + JLE sequenceDecs_decodeSync_amd64_fill_2_check_overread CMPQ BX, $0x07 JLE sequenceDecs_decodeSync_amd64_fill_2_end SHLQ $0x08, DX @@ -1882,6 +1918,10 @@ sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte: ORQ AX, DX JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte +sequenceDecs_decodeSync_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + sequenceDecs_decodeSync_amd64_fill_2_end: // Update literal length MOVQ DI, AX @@ -1915,8 +1955,7 @@ sequenceDecs_decodeSync_amd64_ll_update_zero: // Update Literal Length State MOVBQZX DI, R13 - SHRQ $0x10, DI - MOVWQZX DI, DI + SHRL $0x10, DI LEAQ (BX)(R13*1), CX MOVQ DX, R14 MOVQ CX, BX @@ -1935,8 +1974,7 @@ sequenceDecs_decodeSync_amd64_ll_update_zero: // Update Match Length State MOVBQZX R8, R13 - SHRQ $0x10, R8 - MOVWQZX R8, R8 + SHRL $0x10, R8 LEAQ (BX)(R13*1), CX MOVQ DX, R14 MOVQ CX, BX @@ -1955,8 +1993,7 @@ sequenceDecs_decodeSync_amd64_ll_update_zero: // Update Offset State MOVBQZX R9, R13 - SHRQ $0x10, R9 - MOVWQZX R9, R9 + SHRL $0x10, R9 LEAQ (BX)(R13*1), CX MOVQ DX, R14 MOVQ CX, BX @@ -2243,9 +2280,9 @@ handle_loop: loop_finished: MOVQ br+8(FP), AX - MOVQ DX, 32(AX) - MOVB BL, 40(AX) - MOVQ SI, 24(AX) + MOVQ DX, 24(AX) + MOVB BL, 32(AX) + MOVQ SI, 8(AX) // Update the context MOVQ ctx+16(FP), AX @@ -2291,6 +2328,11 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + // Return with not enough output space error error_not_enough_space: MOVQ ctx+16(FP), AX @@ -2305,11 +2347,11 @@ 
error_not_enough_space: // func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int // Requires: BMI, BMI2, CMOV, SSE TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 - MOVQ br+8(FP), CX - MOVQ 32(CX), AX - MOVBQZX 40(CX), DX - MOVQ 24(CX), BX - MOVQ (CX), CX + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 32(BX), DX + MOVQ (BX), CX + MOVQ 8(BX), BX ADDQ BX, CX MOVQ CX, (SP) MOVQ ctx+16(FP), CX @@ -2356,7 +2398,7 @@ sequenceDecs_decodeSync_bmi2_main_loop: sequenceDecs_decodeSync_bmi2_fill_byte_by_byte: CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_bmi2_fill_end + JLE sequenceDecs_decodeSync_bmi2_fill_check_overread CMPQ DX, $0x07 JLE sequenceDecs_decodeSync_bmi2_fill_end SHLQ $0x08, AX @@ -2367,6 +2409,10 @@ sequenceDecs_decodeSync_bmi2_fill_byte_by_byte: ORQ CX, AX JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte +sequenceDecs_decodeSync_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + sequenceDecs_decodeSync_bmi2_fill_end: // Update offset MOVQ $0x00000808, CX @@ -2407,7 +2453,7 @@ sequenceDecs_decodeSync_bmi2_fill_end: sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte: CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_bmi2_fill_2_end + JLE sequenceDecs_decodeSync_bmi2_fill_2_check_overread CMPQ DX, $0x07 JLE sequenceDecs_decodeSync_bmi2_fill_2_end SHLQ $0x08, AX @@ -2418,6 +2464,10 @@ sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte: ORQ CX, AX JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte +sequenceDecs_decodeSync_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + sequenceDecs_decodeSync_bmi2_fill_2_end: // Update literal length MOVQ $0x00000808, CX @@ -2449,11 +2499,10 @@ sequenceDecs_decodeSync_bmi2_fill_2_end: BZHIQ R13, R14, R14 // Update Offset State - BZHIQ R8, R14, CX - SHRXQ R8, R14, R14 - MOVQ $0x00001010, R13 - BEXTRQ R13, R8, R8 - ADDQ CX, R8 + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + SHRL $0x10, R8 + ADDQ CX, R8 // Load ctx.ofTable MOVQ ctx+16(FP), CX @@ -2461,11 +2510,10 @@ sequenceDecs_decodeSync_bmi2_fill_2_end: MOVQ (CX)(R8*8), R8 // Update Match Length State - BZHIQ DI, R14, CX - SHRXQ DI, R14, R14 - MOVQ $0x00001010, R13 - BEXTRQ R13, DI, DI - ADDQ CX, DI + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + SHRL $0x10, DI + ADDQ CX, DI // Load ctx.mlTable MOVQ ctx+16(FP), CX @@ -2473,10 +2521,9 @@ sequenceDecs_decodeSync_bmi2_fill_2_end: MOVQ (CX)(DI*8), DI // Update Literal Length State - BZHIQ SI, R14, CX - MOVQ $0x00001010, R13 - BEXTRQ R13, SI, SI - ADDQ CX, SI + BZHIQ SI, R14, CX + SHRL $0x10, SI + ADDQ CX, SI // Load ctx.llTable MOVQ ctx+16(FP), CX @@ -2753,9 +2800,9 @@ handle_loop: loop_finished: MOVQ br+8(FP), CX - MOVQ AX, 32(CX) - MOVB DL, 40(CX) - MOVQ BX, 24(CX) + MOVQ AX, 24(CX) + MOVB DL, 32(CX) + MOVQ BX, 8(CX) // Update the context MOVQ ctx+16(FP), AX @@ -2801,6 +2848,11 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + // Return with not enough output space error error_not_enough_space: MOVQ ctx+16(FP), AX @@ -2815,11 +2867,11 @@ error_not_enough_space: // func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int // Requires: CMOV, SSE TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 - MOVQ br+8(FP), AX - MOVQ 32(AX), DX - MOVBQZX 40(AX), BX - MOVQ 24(AX), SI - MOVQ (AX), AX + MOVQ br+8(FP), CX + MOVQ 24(CX), DX + MOVBQZX 32(CX), BX + MOVQ (CX), AX + MOVQ 8(CX), SI ADDQ SI, AX MOVQ AX, (SP) MOVQ ctx+16(FP), AX @@ -2866,7 +2918,7 @@ 
sequenceDecs_decodeSync_safe_amd64_main_loop: sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte: CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_safe_amd64_fill_end + JLE sequenceDecs_decodeSync_safe_amd64_fill_check_overread CMPQ BX, $0x07 JLE sequenceDecs_decodeSync_safe_amd64_fill_end SHLQ $0x08, DX @@ -2877,6 +2929,10 @@ sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte: ORQ AX, DX JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte +sequenceDecs_decodeSync_safe_amd64_fill_check_overread: + CMPQ BX, $0x40 + JA error_overread + sequenceDecs_decodeSync_safe_amd64_fill_end: // Update offset MOVQ R9, AX @@ -2933,7 +2989,7 @@ sequenceDecs_decodeSync_safe_amd64_ml_update_zero: sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte: CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread CMPQ BX, $0x07 JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end SHLQ $0x08, DX @@ -2944,6 +3000,10 @@ sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte: ORQ AX, DX JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte +sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread: + CMPQ BX, $0x40 + JA error_overread + sequenceDecs_decodeSync_safe_amd64_fill_2_end: // Update literal length MOVQ DI, AX @@ -2977,8 +3037,7 @@ sequenceDecs_decodeSync_safe_amd64_ll_update_zero: // Update Literal Length State MOVBQZX DI, R13 - SHRQ $0x10, DI - MOVWQZX DI, DI + SHRL $0x10, DI LEAQ (BX)(R13*1), CX MOVQ DX, R14 MOVQ CX, BX @@ -2997,8 +3056,7 @@ sequenceDecs_decodeSync_safe_amd64_ll_update_zero: // Update Match Length State MOVBQZX R8, R13 - SHRQ $0x10, R8 - MOVWQZX R8, R8 + SHRL $0x10, R8 LEAQ (BX)(R13*1), CX MOVQ DX, R14 MOVQ CX, BX @@ -3017,8 +3075,7 @@ sequenceDecs_decodeSync_safe_amd64_ll_update_zero: // Update Offset State MOVBQZX R9, R13 - SHRQ $0x10, R9 - MOVWQZX R9, R9 + SHRL $0x10, R9 LEAQ (BX)(R13*1), CX MOVQ DX, R14 MOVQ CX, BX @@ -3407,9 +3464,9 @@ handle_loop: loop_finished: MOVQ br+8(FP), AX - MOVQ DX, 32(AX) - MOVB BL, 40(AX) - MOVQ SI, 24(AX) + MOVQ DX, 24(AX) + MOVB BL, 32(AX) + MOVQ SI, 8(AX) // Update the context MOVQ ctx+16(FP), AX @@ -3455,6 +3512,11 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + // Return with not enough output space error error_not_enough_space: MOVQ ctx+16(FP), AX @@ -3469,11 +3531,11 @@ error_not_enough_space: // func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int // Requires: BMI, BMI2, CMOV, SSE TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 - MOVQ br+8(FP), CX - MOVQ 32(CX), AX - MOVBQZX 40(CX), DX - MOVQ 24(CX), BX - MOVQ (CX), CX + MOVQ br+8(FP), BX + MOVQ 24(BX), AX + MOVBQZX 32(BX), DX + MOVQ (BX), CX + MOVQ 8(BX), BX ADDQ BX, CX MOVQ CX, (SP) MOVQ ctx+16(FP), CX @@ -3520,7 +3582,7 @@ sequenceDecs_decodeSync_safe_bmi2_main_loop: sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte: CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_safe_bmi2_fill_end + JLE sequenceDecs_decodeSync_safe_bmi2_fill_check_overread CMPQ DX, $0x07 JLE sequenceDecs_decodeSync_safe_bmi2_fill_end SHLQ $0x08, AX @@ -3531,6 +3593,10 @@ sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte: ORQ CX, AX JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte +sequenceDecs_decodeSync_safe_bmi2_fill_check_overread: + CMPQ DX, $0x40 + JA error_overread + sequenceDecs_decodeSync_safe_bmi2_fill_end: // Update offset MOVQ $0x00000808, CX @@ -3571,7 +3637,7 @@ 
sequenceDecs_decodeSync_safe_bmi2_fill_end: sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte: CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread CMPQ DX, $0x07 JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end SHLQ $0x08, AX @@ -3582,6 +3648,10 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte: ORQ CX, AX JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte +sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread: + CMPQ DX, $0x40 + JA error_overread + sequenceDecs_decodeSync_safe_bmi2_fill_2_end: // Update literal length MOVQ $0x00000808, CX @@ -3613,11 +3683,10 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_end: BZHIQ R13, R14, R14 // Update Offset State - BZHIQ R8, R14, CX - SHRXQ R8, R14, R14 - MOVQ $0x00001010, R13 - BEXTRQ R13, R8, R8 - ADDQ CX, R8 + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + SHRL $0x10, R8 + ADDQ CX, R8 // Load ctx.ofTable MOVQ ctx+16(FP), CX @@ -3625,11 +3694,10 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_end: MOVQ (CX)(R8*8), R8 // Update Match Length State - BZHIQ DI, R14, CX - SHRXQ DI, R14, R14 - MOVQ $0x00001010, R13 - BEXTRQ R13, DI, DI - ADDQ CX, DI + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + SHRL $0x10, DI + ADDQ CX, DI // Load ctx.mlTable MOVQ ctx+16(FP), CX @@ -3637,10 +3705,9 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_end: MOVQ (CX)(DI*8), DI // Update Literal Length State - BZHIQ SI, R14, CX - MOVQ $0x00001010, R13 - BEXTRQ R13, SI, SI - ADDQ CX, SI + BZHIQ SI, R14, CX + SHRL $0x10, SI + ADDQ CX, SI // Load ctx.llTable MOVQ ctx+16(FP), CX @@ -4019,9 +4086,9 @@ handle_loop: loop_finished: MOVQ br+8(FP), CX - MOVQ AX, 32(CX) - MOVB DL, 40(CX) - MOVQ BX, 24(CX) + MOVQ AX, 24(CX) + MOVB DL, 32(CX) + MOVQ BX, 8(CX) // Update the context MOVQ ctx+16(FP), AX @@ -4067,6 +4134,11 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET + // Return with overread error +error_overread: + MOVQ $0x00000006, ret+24(FP) + RET + // Return with not enough output space error error_not_enough_space: MOVQ ctx+16(FP), AX diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go index ac2a80d2..2fb35b78 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go @@ -29,7 +29,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error { } for i := range seqs { var ll, mo, ml int - if br.off > 4+((maxOffsetBits+16+16)>>3) { + if len(br.in) > 4+((maxOffsetBits+16+16)>>3) { // inlined function: // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go index 9e1baad7..ec13594e 100644 --- a/vendor/github.com/klauspost/compress/zstd/snappy.go +++ b/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -95,10 +95,9 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { var written int64 var readHeader bool { - var header []byte - var n int - header, r.err = frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0]) + header := frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0]) + var n int n, r.err = w.Write(header) if r.err != nil { return written, r.err diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index 5ffa82f5..4be7cc73 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ 
b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -9,7 +9,6 @@ import ( "errors" "log" "math" - "math/bits" ) // enable debug printing @@ -106,33 +105,12 @@ func printf(format string, a ...interface{}) { } } -// matchLen returns the maximum common prefix length of a and b. -// a must be the shortest of the two. -func matchLen(a, b []byte) (n int) { - for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { - diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) - if diff != 0 { - return n + bits.TrailingZeros64(diff)>>3 - } - n += 8 - } - - for i := range a { - if a[i] != b[i] { - break - } - n++ - } - return n - -} - func load3232(b []byte, i int32) uint32 { - return binary.LittleEndian.Uint32(b[i:]) + return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:]) } func load6432(b []byte, i int32) uint64 { - return binary.LittleEndian.Uint64(b[i:]) + return binary.LittleEndian.Uint64(b[:len(b):len(b)][i:]) } type byter interface { diff --git a/vendor/github.com/klauspost/cpuid/v2/README.md b/vendor/github.com/klauspost/cpuid/v2/README.md index 857a93e5..30f8d296 100644 --- a/vendor/github.com/klauspost/cpuid/v2/README.md +++ b/vendor/github.com/klauspost/cpuid/v2/README.md @@ -9,16 +9,19 @@ You can access the CPU information by accessing the shared CPU variable of the c Package home: https://github.com/klauspost/cpuid [![PkgGoDev](https://pkg.go.dev/badge/github.com/klauspost/cpuid)](https://pkg.go.dev/github.com/klauspost/cpuid/v2) -[![Build Status][3]][4] - -[3]: https://travis-ci.org/klauspost/cpuid.svg?branch=master -[4]: https://travis-ci.org/klauspost/cpuid +[![Go](https://github.com/klauspost/cpuid/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/cpuid/actions/workflows/go.yml) ## installing `go get -u github.com/klauspost/cpuid/v2` using modules. Drop `v2` for others. +Installing binary: + +`go install github.com/klauspost/cpuid/v2/cmd/cpuid@latest` + +Or download binaries from release page: https://github.com/klauspost/cpuid/releases + ### Homebrew For macOS/Linux users, you can install via [brew](https://brew.sh/) @@ -279,7 +282,12 @@ Exit Code 1 | AMXINT8 | Tile computational operations on 8-bit integers | | AMXFP16 | Tile computational operations on FP16 numbers | | AMXTILE | Tile architecture | +| APX_F | Intel APX | | AVX | AVX functions | +| AVX10 | If set the Intel AVX10 Converged Vector ISA is supported | +| AVX10_128 | If set indicates that AVX10 128-bit vector support is present | +| AVX10_256 | If set indicates that AVX10 256-bit vector support is present | +| AVX10_512 | If set indicates that AVX10 512-bit vector support is present | | AVX2 | AVX2 functions | | AVX512BF16 | AVX-512 BFLOAT16 Instructions | | AVX512BITALG | AVX-512 Bit Algorithms | @@ -302,6 +310,7 @@ Exit Code 1 | AVXSLOW | Indicates the CPU performs 2 128 bit operations instead of one | | AVXVNNI | AVX (VEX encoded) VNNI neural network instructions | | AVXVNNIINT8 | AVX-VNNI-INT8 instructions | +| BHI_CTRL | Branch History Injection and Intra-mode Branch Target Injection / CVE-2022-0001, CVE-2022-0002 / INTEL-SA-00598 | | BMI1 | Bit Manipulation Instruction Set 1 | | BMI2 | Bit Manipulation Instruction Set 2 | | CETIBT | Intel CET Indirect Branch Tracking | @@ -355,8 +364,11 @@ Exit Code 1 | IBS_OPFUSE | AMD: Indicates support for IbsOpFuse | | IBS_PREVENTHOST | Disallowing IBS use by the host supported | | IBS_ZEN4 | Fetch and Op IBS support IBS extensions added with Zen4 | +| IDPRED_CTRL | IPRED_DIS | | INT_WBINVD | WBINVD/WBNOINVD are interruptible. 
| | INVLPGB | NVLPGB and TLBSYNC instruction supported | +| KEYLOCKER | Key locker | +| KEYLOCKERW | Key locker wide | | LAHF | LAHF/SAHF in long mode | | LAM | If set, CPU supports Linear Address Masking | | LBRVIRT | LBR virtualization | @@ -374,6 +386,7 @@ Exit Code 1 | MPX | Intel MPX (Memory Protection Extensions) | | MOVU | MOVU SSE instructions are more efficient and should be preferred to SSE MOVL/MOVH. MOVUPS is more efficient than MOVLPS/MOVHPS. MOVUPD is more efficient than MOVLPD/MOVHPD | | MSRIRC | Instruction Retired Counter MSR available | +| MSRLIST | Read/Write List of Model Specific Registers | | MSR_PAGEFLUSH | Page Flush MSR available | | NRIPS | Indicates support for NRIP save on VMEXIT | | NX | NX (No-Execute) bit | @@ -381,12 +394,13 @@ Exit Code 1 | PCONFIG | PCONFIG for Intel Multi-Key Total Memory Encryption | | POPCNT | POPCNT instruction | | PPIN | AMD: Protected Processor Inventory Number support. Indicates that Protected Processor Inventory Number (PPIN) capability can be enabled | -| PREFETCHI | PREFETCHIT0/1 instructions | -| PSFD | AMD: Predictive Store Forward Disable | +| PREFETCHI | PREFETCHIT0/1 instructions | +| PSFD | Predictive Store Forward Disable | | RDPRU | RDPRU instruction supported | | RDRAND | RDRAND instruction is available | | RDSEED | RDSEED instruction is available | | RDTSCP | RDTSCP Instruction | +| RRSBA_CTRL | Restricted RSB Alternate | | RTM | Restricted Transactional Memory | | RTM_ALWAYS_ABORT | Indicates that the loaded microcode is forcing RTM abort. | | SERIALIZE | Serialize Instruction Execution | @@ -425,6 +439,7 @@ Exit Code 1 | SYSCALL | System-Call Extension (SCE): SYSCALL and SYSRET instructions. | | SYSEE | SYSENTER and SYSEXIT instructions | | TBM | AMD Trailing Bit Manipulation | +| TDX_GUEST | Intel Trust Domain Extensions Guest | | TLB_FLUSH_NESTED | AMD: Flushing includes all the nested translations for guest translations | | TME | Intel Total Memory Encryption. The following MSRs are supported: IA32_TME_CAPABILITY, IA32_TME_ACTIVATE, IA32_TME_EXCLUDE_MASK, and IA32_TME_EXCLUDE_BASE. | | TOPEXT | TopologyExtensions: topology extensions support. Indicates support for CPUID Fn8000_001D_EAX_x[N:0]-CPUID Fn8000_001E_EDX. 
| @@ -439,6 +454,7 @@ Exit Code 1 | VTE | AMD Virtual Transparent Encryption supported | | WAITPKG | TPAUSE, UMONITOR, UMWAIT | | WBNOINVD | Write Back and Do Not Invalidate Cache | +| WRMSRNS | Non-Serializing Write to Model Specific Register | | X87 | FPU | | XGETBV1 | Supports XGETBV with ECX = 1 | | XOP | Bulldozer XOP functions | diff --git a/vendor/github.com/klauspost/cpuid/v2/cpuid.go b/vendor/github.com/klauspost/cpuid/v2/cpuid.go index cf2ae9c5..15b76033 100644 --- a/vendor/github.com/klauspost/cpuid/v2/cpuid.go +++ b/vendor/github.com/klauspost/cpuid/v2/cpuid.go @@ -76,7 +76,12 @@ const ( AMXFP16 // Tile computational operations on FP16 numbers AMXINT8 // Tile computational operations on 8-bit integers AMXTILE // Tile architecture + APX_F // Intel APX AVX // AVX functions + AVX10 // If set the Intel AVX10 Converged Vector ISA is supported + AVX10_128 // If set indicates that AVX10 128-bit vector support is present + AVX10_256 // If set indicates that AVX10 256-bit vector support is present + AVX10_512 // If set indicates that AVX10 512-bit vector support is present AVX2 // AVX2 functions AVX512BF16 // AVX-512 BFLOAT16 Instructions AVX512BITALG // AVX-512 Bit Algorithms @@ -99,6 +104,7 @@ const ( AVXSLOW // Indicates the CPU performs 2 128 bit operations instead of one AVXVNNI // AVX (VEX encoded) VNNI neural network instructions AVXVNNIINT8 // AVX-VNNI-INT8 instructions + BHI_CTRL // Branch History Injection and Intra-mode Branch Target Injection / CVE-2022-0001, CVE-2022-0002 / INTEL-SA-00598 BMI1 // Bit Manipulation Instruction Set 1 BMI2 // Bit Manipulation Instruction Set 2 CETIBT // Intel CET Indirect Branch Tracking @@ -152,8 +158,11 @@ const ( IBS_OPFUSE // AMD: Indicates support for IbsOpFuse IBS_PREVENTHOST // Disallowing IBS use by the host supported IBS_ZEN4 // AMD: Fetch and Op IBS support IBS extensions added with Zen4 + IDPRED_CTRL // IPRED_DIS INT_WBINVD // WBINVD/WBNOINVD are interruptible. INVLPGB // NVLPGB and TLBSYNC instruction supported + KEYLOCKER // Key locker + KEYLOCKERW // Key locker wide LAHF // LAHF/SAHF in long mode LAM // If set, CPU supports Linear Address Masking LBRVIRT // LBR virtualization @@ -171,6 +180,7 @@ const ( MOVU // AMD: MOVU SSE instructions are more efficient and should be preferred to SSE MOVL/MOVH. MOVUPS is more efficient than MOVLPS/MOVHPS. MOVUPD is more efficient than MOVLPD/MOVHPD MPX // Intel MPX (Memory Protection Extensions) MSRIRC // Instruction Retired Counter MSR available + MSRLIST // Read/Write List of Model Specific Registers MSR_PAGEFLUSH // Page Flush MSR available NRIPS // Indicates support for NRIP save on VMEXIT NX // NX (No-Execute) bit @@ -179,11 +189,12 @@ const ( POPCNT // POPCNT instruction PPIN // AMD: Protected Processor Inventory Number support. Indicates that Protected Processor Inventory Number (PPIN) capability can be enabled PREFETCHI // PREFETCHIT0/1 instructions - PSFD // AMD: Predictive Store Forward Disable + PSFD // Predictive Store Forward Disable RDPRU // RDPRU instruction supported RDRAND // RDRAND instruction is available RDSEED // RDSEED instruction is available RDTSCP // RDTSCP Instruction + RRSBA_CTRL // Restricted RSB Alternate RTM // Restricted Transactional Memory RTM_ALWAYS_ABORT // Indicates that the loaded microcode is forcing RTM abort. SERIALIZE // Serialize Instruction Execution @@ -222,6 +233,7 @@ const ( SYSCALL // System-Call Extension (SCE): SYSCALL and SYSRET instructions. 
SYSEE // SYSENTER and SYSEXIT instructions TBM // AMD Trailing Bit Manipulation + TDX_GUEST // Intel Trust Domain Extensions Guest TLB_FLUSH_NESTED // AMD: Flushing includes all the nested translations for guest translations TME // Intel Total Memory Encryption. The following MSRs are supported: IA32_TME_CAPABILITY, IA32_TME_ACTIVATE, IA32_TME_EXCLUDE_MASK, and IA32_TME_EXCLUDE_BASE. TOPEXT // TopologyExtensions: topology extensions support. Indicates support for CPUID Fn8000_001D_EAX_x[N:0]-CPUID Fn8000_001E_EDX. @@ -236,6 +248,7 @@ const ( VTE // AMD Virtual Transparent Encryption supported WAITPKG // TPAUSE, UMONITOR, UMWAIT WBNOINVD // Write Back and Do Not Invalidate Cache + WRMSRNS // Non-Serializing Write to Model Specific Register X87 // FPU XGETBV1 // Supports XGETBV with ECX = 1 XOP // Bulldozer XOP functions @@ -296,9 +309,10 @@ type CPUInfo struct { L2 int // L2 Cache (per core or shared). Will be -1 if undetected L3 int // L3 Cache (per core, per ccx or shared). Will be -1 if undetected } - SGX SGXSupport - maxFunc uint32 - maxExFunc uint32 + SGX SGXSupport + AVX10Level uint8 + maxFunc uint32 + maxExFunc uint32 } var cpuid func(op uint32) (eax, ebx, ecx, edx uint32) @@ -1159,6 +1173,7 @@ func support() flagSet { fs.setIf(ecx&(1<<10) != 0, VPCLMULQDQ) fs.setIf(ecx&(1<<13) != 0, TME) fs.setIf(ecx&(1<<25) != 0, CLDEMOTE) + fs.setIf(ecx&(1<<23) != 0, KEYLOCKER) fs.setIf(ecx&(1<<27) != 0, MOVDIRI) fs.setIf(ecx&(1<<28) != 0, MOVDIR64B) fs.setIf(ecx&(1<<29) != 0, ENQCMD) @@ -1181,13 +1196,8 @@ func support() flagSet { fs.setIf(edx&(1<<30) != 0, IA32_CORE_CAP) fs.setIf(edx&(1<<31) != 0, SPEC_CTRL_SSBD) - // CPUID.(EAX=7, ECX=1).EDX - fs.setIf(edx&(1<<4) != 0, AVXVNNIINT8) - fs.setIf(edx&(1<<5) != 0, AVXNECONVERT) - fs.setIf(edx&(1<<14) != 0, PREFETCHI) - // CPUID.(EAX=7, ECX=1).EAX - eax1, _, _, _ := cpuidex(7, 1) + eax1, _, _, edx1 := cpuidex(7, 1) fs.setIf(fs.inSet(AVX) && eax1&(1<<4) != 0, AVXVNNI) fs.setIf(eax1&(1<<7) != 0, CMPCCXADD) fs.setIf(eax1&(1<<10) != 0, MOVSB_ZL) @@ -1197,6 +1207,13 @@ func support() flagSet { fs.setIf(eax1&(1<<23) != 0, AVXIFMA) fs.setIf(eax1&(1<<26) != 0, LAM) + // CPUID.(EAX=7, ECX=1).EDX + fs.setIf(edx1&(1<<4) != 0, AVXVNNIINT8) + fs.setIf(edx1&(1<<5) != 0, AVXNECONVERT) + fs.setIf(edx1&(1<<14) != 0, PREFETCHI) + fs.setIf(edx1&(1<<19) != 0, AVX10) + fs.setIf(edx1&(1<<21) != 0, APX_F) + // Only detect AVX-512 features if XGETBV is supported if c&((1<<26)|(1<<27)) == (1<<26)|(1<<27) { // Check for OS support @@ -1232,13 +1249,33 @@ func support() flagSet { fs.setIf(edx&(1<<25) != 0, AMXINT8) // eax1 = CPUID.(EAX=7, ECX=1).EAX fs.setIf(eax1&(1<<5) != 0, AVX512BF16) + fs.setIf(eax1&(1<<19) != 0, WRMSRNS) fs.setIf(eax1&(1<<21) != 0, AMXFP16) + fs.setIf(eax1&(1<<27) != 0, MSRLIST) } } // CPUID.(EAX=7, ECX=2) _, _, _, edx = cpuidex(7, 2) + fs.setIf(edx&(1<<0) != 0, PSFD) + fs.setIf(edx&(1<<1) != 0, IDPRED_CTRL) + fs.setIf(edx&(1<<2) != 0, RRSBA_CTRL) + fs.setIf(edx&(1<<4) != 0, BHI_CTRL) fs.setIf(edx&(1<<5) != 0, MCDT_NO) + + // Add keylocker features. + if fs.inSet(KEYLOCKER) && mfi >= 0x19 { + _, ebx, _, _ := cpuidex(0x19, 0) + fs.setIf(ebx&5 == 5, KEYLOCKERW) // Bit 0 and 2 (1+4) + } + + // Add AVX10 features. 
+ if fs.inSet(AVX10) && mfi >= 0x24 { + _, ebx, _, _ := cpuidex(0x24, 0) + fs.setIf(ebx&(1<<16) != 0, AVX10_128) + fs.setIf(ebx&(1<<17) != 0, AVX10_256) + fs.setIf(ebx&(1<<18) != 0, AVX10_512) + } } // Processor Extended State Enumeration Sub-leaf (EAX = 0DH, ECX = 1) @@ -1381,9 +1418,38 @@ func support() flagSet { fs.setIf((a>>24)&1 == 1, VMSA_REGPROT) } + if mfi >= 0x20 { + // Microsoft has decided to purposefully hide the information + // of the guest TEE when VMs are being created using Hyper-V. + // + // This leads us to check for the Hyper-V cpuid features + // (0x4000000C), and then for the `ebx` value set. + // + // For Intel TDX, `ebx` is set to `0xbe3`; the 3 is the part + // we're mostly interested in, according to: + // https://github.com/torvalds/linux/blob/d2f51b3516dade79269ff45eae2a7668ae711b25/arch/x86/include/asm/hyperv-tlfs.h#L169-L174 + _, ebx, _, _ := cpuid(0x4000000C) + fs.setIf(ebx == 0xbe3, TDX_GUEST) + } + + if mfi >= 0x21 { + // Intel Trust Domain Extensions guests have their own cpuid leaf (0x21). + _, ebx, ecx, edx := cpuid(0x21) + identity := string(valAsString(ebx, edx, ecx)) + fs.setIf(identity == "IntelTDX ", TDX_GUEST) + } + return fs } +func (c *CPUInfo) supportAVX10() uint8 { + if c.maxFunc >= 0x24 && c.featureSet.inSet(AVX10) { + _, ebx, _, _ := cpuidex(0x24, 0) + return uint8(ebx) + } + return 0 +} + func valAsString(values ...uint32) []byte { r := make([]byte, 4*len(values)) for i, v := range values { diff --git a/vendor/github.com/klauspost/cpuid/v2/detect_x86.go b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go index c946824e..c7dfa125 100644 --- a/vendor/github.com/klauspost/cpuid/v2/detect_x86.go +++ b/vendor/github.com/klauspost/cpuid/v2/detect_x86.go @@ -31,6 +31,7 @@ func addInfo(c *CPUInfo, safe bool) { c.LogicalCores = logicalCores() c.PhysicalCores = physicalCores() c.VendorID, c.VendorString = vendorID() + c.AVX10Level = c.supportAVX10() c.cacheSize() c.frequencies() } diff --git a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go index 8b6cd2b7..43bd05f5 100644 --- a/vendor/github.com/klauspost/cpuid/v2/featureid_string.go +++ b/vendor/github.com/klauspost/cpuid/v2/featureid_string.go @@ -16,204 +16,217 @@ func _() { _ = x[AMXFP16-6] _ = x[AMXINT8-7] _ = x[AMXTILE-8] - _ = x[AVX-9] - _ = x[AVX2-10] - _ = x[AVX512BF16-11] - _ = x[AVX512BITALG-12] - _ = x[AVX512BW-13] - _ = x[AVX512CD-14] - _ = x[AVX512DQ-15] - _ = x[AVX512ER-16] - _ = x[AVX512F-17] - _ = x[AVX512FP16-18] - _ = x[AVX512IFMA-19] - _ = x[AVX512PF-20] - _ = x[AVX512VBMI-21] - _ = x[AVX512VBMI2-22] - _ = x[AVX512VL-23] - _ = x[AVX512VNNI-24] - _ = x[AVX512VP2INTERSECT-25] - _ = x[AVX512VPOPCNTDQ-26] - _ = x[AVXIFMA-27] - _ = x[AVXNECONVERT-28] - _ = x[AVXSLOW-29] - _ = x[AVXVNNI-30] - _ = x[AVXVNNIINT8-31] - _ = x[BMI1-32] - _ = x[BMI2-33] - _ = x[CETIBT-34] - _ = x[CETSS-35] - _ = x[CLDEMOTE-36] - _ = x[CLMUL-37] - _ = x[CLZERO-38] - _ = x[CMOV-39] - _ = x[CMPCCXADD-40] - _ = x[CMPSB_SCADBS_SHORT-41] - _ = x[CMPXCHG8-42] - _ = x[CPBOOST-43] - _ = x[CPPC-44] - _ = x[CX16-45] - _ = x[EFER_LMSLE_UNS-46] - _ = x[ENQCMD-47] - _ = x[ERMS-48] - _ = x[F16C-49] - _ = x[FLUSH_L1D-50] - _ = x[FMA3-51] - _ = x[FMA4-52] - _ = x[FP128-53] - _ = x[FP256-54] - _ = x[FSRM-55] - _ = x[FXSR-56] - _ = x[FXSROPT-57] - _ = x[GFNI-58] - _ = x[HLE-59] - _ = x[HRESET-60] - _ = x[HTT-61] - _ = x[HWA-62] - _ = x[HYBRID_CPU-63] - _ = x[HYPERVISOR-64] - _ = x[IA32_ARCH_CAP-65] - _ = x[IA32_CORE_CAP-66] - _ = x[IBPB-67] - _ = x[IBRS-68]
- _ = x[IBRS_PREFERRED-69] - _ = x[IBRS_PROVIDES_SMP-70] - _ = x[IBS-71] - _ = x[IBSBRNTRGT-72] - _ = x[IBSFETCHSAM-73] - _ = x[IBSFFV-74] - _ = x[IBSOPCNT-75] - _ = x[IBSOPCNTEXT-76] - _ = x[IBSOPSAM-77] - _ = x[IBSRDWROPCNT-78] - _ = x[IBSRIPINVALIDCHK-79] - _ = x[IBS_FETCH_CTLX-80] - _ = x[IBS_OPDATA4-81] - _ = x[IBS_OPFUSE-82] - _ = x[IBS_PREVENTHOST-83] - _ = x[IBS_ZEN4-84] - _ = x[INT_WBINVD-85] - _ = x[INVLPGB-86] - _ = x[LAHF-87] - _ = x[LAM-88] - _ = x[LBRVIRT-89] - _ = x[LZCNT-90] - _ = x[MCAOVERFLOW-91] - _ = x[MCDT_NO-92] - _ = x[MCOMMIT-93] - _ = x[MD_CLEAR-94] - _ = x[MMX-95] - _ = x[MMXEXT-96] - _ = x[MOVBE-97] - _ = x[MOVDIR64B-98] - _ = x[MOVDIRI-99] - _ = x[MOVSB_ZL-100] - _ = x[MOVU-101] - _ = x[MPX-102] - _ = x[MSRIRC-103] - _ = x[MSR_PAGEFLUSH-104] - _ = x[NRIPS-105] - _ = x[NX-106] - _ = x[OSXSAVE-107] - _ = x[PCONFIG-108] - _ = x[POPCNT-109] - _ = x[PPIN-110] - _ = x[PREFETCHI-111] - _ = x[PSFD-112] - _ = x[RDPRU-113] - _ = x[RDRAND-114] - _ = x[RDSEED-115] - _ = x[RDTSCP-116] - _ = x[RTM-117] - _ = x[RTM_ALWAYS_ABORT-118] - _ = x[SERIALIZE-119] - _ = x[SEV-120] - _ = x[SEV_64BIT-121] - _ = x[SEV_ALTERNATIVE-122] - _ = x[SEV_DEBUGSWAP-123] - _ = x[SEV_ES-124] - _ = x[SEV_RESTRICTED-125] - _ = x[SEV_SNP-126] - _ = x[SGX-127] - _ = x[SGXLC-128] - _ = x[SHA-129] - _ = x[SME-130] - _ = x[SME_COHERENT-131] - _ = x[SPEC_CTRL_SSBD-132] - _ = x[SRBDS_CTRL-133] - _ = x[SSE-134] - _ = x[SSE2-135] - _ = x[SSE3-136] - _ = x[SSE4-137] - _ = x[SSE42-138] - _ = x[SSE4A-139] - _ = x[SSSE3-140] - _ = x[STIBP-141] - _ = x[STIBP_ALWAYSON-142] - _ = x[STOSB_SHORT-143] - _ = x[SUCCOR-144] - _ = x[SVM-145] - _ = x[SVMDA-146] - _ = x[SVMFBASID-147] - _ = x[SVML-148] - _ = x[SVMNP-149] - _ = x[SVMPF-150] - _ = x[SVMPFT-151] - _ = x[SYSCALL-152] - _ = x[SYSEE-153] - _ = x[TBM-154] - _ = x[TLB_FLUSH_NESTED-155] - _ = x[TME-156] - _ = x[TOPEXT-157] - _ = x[TSCRATEMSR-158] - _ = x[TSXLDTRK-159] - _ = x[VAES-160] - _ = x[VMCBCLEAN-161] - _ = x[VMPL-162] - _ = x[VMSA_REGPROT-163] - _ = x[VMX-164] - _ = x[VPCLMULQDQ-165] - _ = x[VTE-166] - _ = x[WAITPKG-167] - _ = x[WBNOINVD-168] - _ = x[X87-169] - _ = x[XGETBV1-170] - _ = x[XOP-171] - _ = x[XSAVE-172] - _ = x[XSAVEC-173] - _ = x[XSAVEOPT-174] - _ = x[XSAVES-175] - _ = x[AESARM-176] - _ = x[ARMCPUID-177] - _ = x[ASIMD-178] - _ = x[ASIMDDP-179] - _ = x[ASIMDHP-180] - _ = x[ASIMDRDM-181] - _ = x[ATOMICS-182] - _ = x[CRC32-183] - _ = x[DCPOP-184] - _ = x[EVTSTRM-185] - _ = x[FCMA-186] - _ = x[FP-187] - _ = x[FPHP-188] - _ = x[GPA-189] - _ = x[JSCVT-190] - _ = x[LRCPC-191] - _ = x[PMULL-192] - _ = x[SHA1-193] - _ = x[SHA2-194] - _ = x[SHA3-195] - _ = x[SHA512-196] - _ = x[SM3-197] - _ = x[SM4-198] - _ = x[SVE-199] - _ = x[lastID-200] + _ = x[APX_F-9] + _ = x[AVX-10] + _ = x[AVX10-11] + _ = x[AVX10_128-12] + _ = x[AVX10_256-13] + _ = x[AVX10_512-14] + _ = x[AVX2-15] + _ = x[AVX512BF16-16] + _ = x[AVX512BITALG-17] + _ = x[AVX512BW-18] + _ = x[AVX512CD-19] + _ = x[AVX512DQ-20] + _ = x[AVX512ER-21] + _ = x[AVX512F-22] + _ = x[AVX512FP16-23] + _ = x[AVX512IFMA-24] + _ = x[AVX512PF-25] + _ = x[AVX512VBMI-26] + _ = x[AVX512VBMI2-27] + _ = x[AVX512VL-28] + _ = x[AVX512VNNI-29] + _ = x[AVX512VP2INTERSECT-30] + _ = x[AVX512VPOPCNTDQ-31] + _ = x[AVXIFMA-32] + _ = x[AVXNECONVERT-33] + _ = x[AVXSLOW-34] + _ = x[AVXVNNI-35] + _ = x[AVXVNNIINT8-36] + _ = x[BHI_CTRL-37] + _ = x[BMI1-38] + _ = x[BMI2-39] + _ = x[CETIBT-40] + _ = x[CETSS-41] + _ = x[CLDEMOTE-42] + _ = x[CLMUL-43] + _ = x[CLZERO-44] + _ = x[CMOV-45] + _ = x[CMPCCXADD-46] + _ = x[CMPSB_SCADBS_SHORT-47] + 
_ = x[CMPXCHG8-48] + _ = x[CPBOOST-49] + _ = x[CPPC-50] + _ = x[CX16-51] + _ = x[EFER_LMSLE_UNS-52] + _ = x[ENQCMD-53] + _ = x[ERMS-54] + _ = x[F16C-55] + _ = x[FLUSH_L1D-56] + _ = x[FMA3-57] + _ = x[FMA4-58] + _ = x[FP128-59] + _ = x[FP256-60] + _ = x[FSRM-61] + _ = x[FXSR-62] + _ = x[FXSROPT-63] + _ = x[GFNI-64] + _ = x[HLE-65] + _ = x[HRESET-66] + _ = x[HTT-67] + _ = x[HWA-68] + _ = x[HYBRID_CPU-69] + _ = x[HYPERVISOR-70] + _ = x[IA32_ARCH_CAP-71] + _ = x[IA32_CORE_CAP-72] + _ = x[IBPB-73] + _ = x[IBRS-74] + _ = x[IBRS_PREFERRED-75] + _ = x[IBRS_PROVIDES_SMP-76] + _ = x[IBS-77] + _ = x[IBSBRNTRGT-78] + _ = x[IBSFETCHSAM-79] + _ = x[IBSFFV-80] + _ = x[IBSOPCNT-81] + _ = x[IBSOPCNTEXT-82] + _ = x[IBSOPSAM-83] + _ = x[IBSRDWROPCNT-84] + _ = x[IBSRIPINVALIDCHK-85] + _ = x[IBS_FETCH_CTLX-86] + _ = x[IBS_OPDATA4-87] + _ = x[IBS_OPFUSE-88] + _ = x[IBS_PREVENTHOST-89] + _ = x[IBS_ZEN4-90] + _ = x[IDPRED_CTRL-91] + _ = x[INT_WBINVD-92] + _ = x[INVLPGB-93] + _ = x[KEYLOCKER-94] + _ = x[KEYLOCKERW-95] + _ = x[LAHF-96] + _ = x[LAM-97] + _ = x[LBRVIRT-98] + _ = x[LZCNT-99] + _ = x[MCAOVERFLOW-100] + _ = x[MCDT_NO-101] + _ = x[MCOMMIT-102] + _ = x[MD_CLEAR-103] + _ = x[MMX-104] + _ = x[MMXEXT-105] + _ = x[MOVBE-106] + _ = x[MOVDIR64B-107] + _ = x[MOVDIRI-108] + _ = x[MOVSB_ZL-109] + _ = x[MOVU-110] + _ = x[MPX-111] + _ = x[MSRIRC-112] + _ = x[MSRLIST-113] + _ = x[MSR_PAGEFLUSH-114] + _ = x[NRIPS-115] + _ = x[NX-116] + _ = x[OSXSAVE-117] + _ = x[PCONFIG-118] + _ = x[POPCNT-119] + _ = x[PPIN-120] + _ = x[PREFETCHI-121] + _ = x[PSFD-122] + _ = x[RDPRU-123] + _ = x[RDRAND-124] + _ = x[RDSEED-125] + _ = x[RDTSCP-126] + _ = x[RRSBA_CTRL-127] + _ = x[RTM-128] + _ = x[RTM_ALWAYS_ABORT-129] + _ = x[SERIALIZE-130] + _ = x[SEV-131] + _ = x[SEV_64BIT-132] + _ = x[SEV_ALTERNATIVE-133] + _ = x[SEV_DEBUGSWAP-134] + _ = x[SEV_ES-135] + _ = x[SEV_RESTRICTED-136] + _ = x[SEV_SNP-137] + _ = x[SGX-138] + _ = x[SGXLC-139] + _ = x[SHA-140] + _ = x[SME-141] + _ = x[SME_COHERENT-142] + _ = x[SPEC_CTRL_SSBD-143] + _ = x[SRBDS_CTRL-144] + _ = x[SSE-145] + _ = x[SSE2-146] + _ = x[SSE3-147] + _ = x[SSE4-148] + _ = x[SSE42-149] + _ = x[SSE4A-150] + _ = x[SSSE3-151] + _ = x[STIBP-152] + _ = x[STIBP_ALWAYSON-153] + _ = x[STOSB_SHORT-154] + _ = x[SUCCOR-155] + _ = x[SVM-156] + _ = x[SVMDA-157] + _ = x[SVMFBASID-158] + _ = x[SVML-159] + _ = x[SVMNP-160] + _ = x[SVMPF-161] + _ = x[SVMPFT-162] + _ = x[SYSCALL-163] + _ = x[SYSEE-164] + _ = x[TBM-165] + _ = x[TDX_GUEST-166] + _ = x[TLB_FLUSH_NESTED-167] + _ = x[TME-168] + _ = x[TOPEXT-169] + _ = x[TSCRATEMSR-170] + _ = x[TSXLDTRK-171] + _ = x[VAES-172] + _ = x[VMCBCLEAN-173] + _ = x[VMPL-174] + _ = x[VMSA_REGPROT-175] + _ = x[VMX-176] + _ = x[VPCLMULQDQ-177] + _ = x[VTE-178] + _ = x[WAITPKG-179] + _ = x[WBNOINVD-180] + _ = x[WRMSRNS-181] + _ = x[X87-182] + _ = x[XGETBV1-183] + _ = x[XOP-184] + _ = x[XSAVE-185] + _ = x[XSAVEC-186] + _ = x[XSAVEOPT-187] + _ = x[XSAVES-188] + _ = x[AESARM-189] + _ = x[ARMCPUID-190] + _ = x[ASIMD-191] + _ = x[ASIMDDP-192] + _ = x[ASIMDHP-193] + _ = x[ASIMDRDM-194] + _ = x[ATOMICS-195] + _ = x[CRC32-196] + _ = x[DCPOP-197] + _ = x[EVTSTRM-198] + _ = x[FCMA-199] + _ = x[FP-200] + _ = x[FPHP-201] + _ = x[GPA-202] + _ = x[JSCVT-203] + _ = x[LRCPC-204] + _ = x[PMULL-205] + _ = x[SHA1-206] + _ = x[SHA2-207] + _ = x[SHA3-208] + _ = x[SHA512-209] + _ = x[SM3-210] + _ = x[SM4-211] + _ = x[SVE-212] + _ = x[lastID-213] _ = x[firstID-0] } -const _FeatureID_name = 
"firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXTILEAVXAVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8BMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4INT_WBINVDINVLPGBLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRTMRTM_ALWAYS_ABORTSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID" +const _FeatureID_name = "firstIDADXAESNIAMD3DNOWAMD3DNOWEXTAMXBF16AMXFP16AMXINT8AMXTILEAPX_FAVXAVX10AVX10_128AVX10_256AVX10_512AVX2AVX512BF16AVX512BITALGAVX512BWAVX512CDAVX512DQAVX512ERAVX512FAVX512FP16AVX512IFMAAVX512PFAVX512VBMIAVX512VBMI2AVX512VLAVX512VNNIAVX512VP2INTERSECTAVX512VPOPCNTDQAVXIFMAAVXNECONVERTAVXSLOWAVXVNNIAVXVNNIINT8BHI_CTRLBMI1BMI2CETIBTCETSSCLDEMOTECLMULCLZEROCMOVCMPCCXADDCMPSB_SCADBS_SHORTCMPXCHG8CPBOOSTCPPCCX16EFER_LMSLE_UNSENQCMDERMSF16CFLUSH_L1DFMA3FMA4FP128FP256FSRMFXSRFXSROPTGFNIHLEHRESETHTTHWAHYBRID_CPUHYPERVISORIA32_ARCH_CAPIA32_CORE_CAPIBPBIBRSIBRS_PREFERREDIBRS_PROVIDES_SMPIBSIBSBRNTRGTIBSFETCHSAMIBSFFVIBSOPCNTIBSOPCNTEXTIBSOPSAMIBSRDWROPCNTIBSRIPINVALIDCHKIBS_FETCH_CTLXIBS_OPDATA4IBS_OPFUSEIBS_PREVENTHOSTIBS_ZEN4IDPRED_CTRLINT_WBINVDINVLPGBKEYLOCKERKEYLOCKERWLAHFLAMLBRVIRTLZCNTMCAOVERFLOWMCDT_NOMCOMMITMD_CLEARMMXMMXEXTMOVBEMOVDIR64BMOVDIRIMOVSB_ZLMOVUMPXMSRIRCMSRLISTMSR_PAGEFLUSHNRIPSNXOSXSAVEPCONFIGPOPCNTPPINPREFETCHIPSFDRDPRURDRANDRDSEEDRDTSCPRRSBA_CTRLRTMRTM_ALWAYS_ABORTSERIALIZESEVSEV_64BITSEV_ALTERNATIVESEV_DEBUGSWAPSEV_ESSEV_RESTRICTEDSEV_SNPSGXSGXLCSHASMESME_COHERENTSPEC_CTRL_SSBDSRBDS_CTRLSSESSE2SSE3SSE4SSE42SSE4ASSSE3STIBPSTIBP_ALWAYSONSTOSB_SHORTSUCCORSVMSVMDASVMFBASIDSVMLSVMNPSVMPFSVMPFTSYSCALLSYSEETBMTDX_GUESTTLB_FLUSH_NESTEDTMETOPEXTTSCRATEMSRTSXLDTRKVAESVMCBCLEANVMPLVMSA_REGPROTVMXVPCLMULQDQVTEWAITPKGWBNOINVDWRMSRNSX87XGETBV1XOPXSAVEXSAVECXSAVEOPTXSAVESAESARMARMCPUIDASIMDASIMDDPASIMDHPASIMDRDMATOMICSCRC32DCPOPEVTSTRMFCMAFPFPHPGPAJSCVTLRCPCPMULLSHA1SHA2SHA3SHA512SM3SM4SVElastID" -var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 62, 65, 69, 79, 91, 99, 107, 115, 123, 130, 140, 150, 158, 168, 179, 187, 197, 215, 230, 237, 249, 256, 263, 274, 278, 282, 288, 293, 301, 306, 312, 316, 325, 343, 351, 358, 362, 366, 380, 386, 390, 394, 403, 407, 411, 416, 421, 425, 429, 436, 440, 443, 449, 452, 455, 465, 475, 488, 501, 505, 509, 523, 540, 543, 553, 564, 570, 578, 589, 597, 609, 625, 639, 650, 660, 675, 683, 693, 700, 704, 707, 714, 719, 730, 737, 744, 752, 755, 761, 766, 775, 782, 790, 794, 797, 803, 816, 821, 823, 830, 837, 843, 847, 856, 860, 
865, 871, 877, 883, 886, 902, 911, 914, 923, 938, 951, 957, 971, 978, 981, 986, 989, 992, 1004, 1018, 1028, 1031, 1035, 1039, 1043, 1048, 1053, 1058, 1063, 1077, 1088, 1094, 1097, 1102, 1111, 1115, 1120, 1125, 1131, 1138, 1143, 1146, 1162, 1165, 1171, 1181, 1189, 1193, 1202, 1206, 1218, 1221, 1231, 1234, 1241, 1249, 1252, 1259, 1262, 1267, 1273, 1281, 1287, 1293, 1301, 1306, 1313, 1320, 1328, 1335, 1340, 1345, 1352, 1356, 1358, 1362, 1365, 1370, 1375, 1380, 1384, 1388, 1392, 1398, 1401, 1404, 1407, 1413} +var _FeatureID_index = [...]uint16{0, 7, 10, 15, 23, 34, 41, 48, 55, 62, 67, 70, 75, 84, 93, 102, 106, 116, 128, 136, 144, 152, 160, 167, 177, 187, 195, 205, 216, 224, 234, 252, 267, 274, 286, 293, 300, 311, 319, 323, 327, 333, 338, 346, 351, 357, 361, 370, 388, 396, 403, 407, 411, 425, 431, 435, 439, 448, 452, 456, 461, 466, 470, 474, 481, 485, 488, 494, 497, 500, 510, 520, 533, 546, 550, 554, 568, 585, 588, 598, 609, 615, 623, 634, 642, 654, 670, 684, 695, 705, 720, 728, 739, 749, 756, 765, 775, 779, 782, 789, 794, 805, 812, 819, 827, 830, 836, 841, 850, 857, 865, 869, 872, 878, 885, 898, 903, 905, 912, 919, 925, 929, 938, 942, 947, 953, 959, 965, 975, 978, 994, 1003, 1006, 1015, 1030, 1043, 1049, 1063, 1070, 1073, 1078, 1081, 1084, 1096, 1110, 1120, 1123, 1127, 1131, 1135, 1140, 1145, 1150, 1155, 1169, 1180, 1186, 1189, 1194, 1203, 1207, 1212, 1217, 1223, 1230, 1235, 1238, 1247, 1263, 1266, 1272, 1282, 1290, 1294, 1303, 1307, 1319, 1322, 1332, 1335, 1342, 1350, 1357, 1360, 1367, 1370, 1375, 1381, 1389, 1395, 1401, 1409, 1414, 1421, 1428, 1436, 1443, 1448, 1453, 1460, 1464, 1466, 1470, 1473, 1478, 1483, 1488, 1492, 1496, 1500, 1506, 1509, 1512, 1515, 1521} func (i FeatureID) String() string { if i < 0 || i >= FeatureID(len(_FeatureID_index)-1) { diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE b/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE deleted file mode 100644 index 5d8cb5b7..00000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE +++ /dev/null @@ -1 +0,0 @@ -Copyright 2012 Matt T. Proud (matt.proud@gmail.com) diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore deleted file mode 100644 index e16fb946..00000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore +++ /dev/null @@ -1 +0,0 @@ -cover.dat diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile deleted file mode 100644 index 81be2143..00000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -all: - -cover: - go test -cover -v -coverprofile=cover.dat ./... - go tool cover -func cover.dat - -.PHONY: cover diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go deleted file mode 100644 index 258c0636..00000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2013 Matt T. Proud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pbutil - -import ( - "encoding/binary" - "errors" - "io" - - "github.com/golang/protobuf/proto" -) - -var errInvalidVarint = errors.New("invalid varint32 encountered") - -// ReadDelimited decodes a message from the provided length-delimited stream, -// where the length is encoded as 32-bit varint prefix to the message body. -// It returns the total number of bytes read and any applicable error. This is -// roughly equivalent to the companion Java API's -// MessageLite#parseDelimitedFrom. As per the reader contract, this function -// calls r.Read repeatedly as required until exactly one message including its -// prefix is read and decoded (or an error has occurred). The function never -// reads more bytes from the stream than required. The function never returns -// an error if a message has been read and decoded correctly, even if the end -// of the stream has been reached in doing so. In that case, any subsequent -// calls return (0, io.EOF). -func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) { - // Per AbstractParser#parsePartialDelimitedFrom with - // CodedInputStream#readRawVarint32. - var headerBuf [binary.MaxVarintLen32]byte - var bytesRead, varIntBytes int - var messageLength uint64 - for varIntBytes == 0 { // i.e. no varint has been decoded yet. - if bytesRead >= len(headerBuf) { - return bytesRead, errInvalidVarint - } - // We have to read byte by byte here to avoid reading more bytes - // than required. Each read byte is appended to what we have - // read before. - newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1]) - if newBytesRead == 0 { - if err != nil { - return bytesRead, err - } - // A Reader should not return (0, nil), but if it does, - // it should be treated as no-op (according to the - // Reader contract). So let's go on... - continue - } - bytesRead += newBytesRead - // Now present everything read so far to the varint decoder and - // see if a varint can be decoded already. - messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead]) - } - - messageBuf := make([]byte, messageLength) - newBytesRead, err := io.ReadFull(r, messageBuf) - bytesRead += newBytesRead - if err != nil { - return bytesRead, err - } - - return bytesRead, proto.Unmarshal(messageBuf, m) -} diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go deleted file mode 100644 index 8fb59ad2..00000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2013 Matt T. Proud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package pbutil - -import ( - "encoding/binary" - "io" - - "github.com/golang/protobuf/proto" -) - -// WriteDelimited encodes and dumps a message to the provided writer prefixed -// with a 32-bit varint indicating the length of the encoded message, producing -// a length-delimited record stream, which can be used to chain together -// encoded messages of the same type together in a file. It returns the total -// number of bytes written and any applicable error. This is roughly -// equivalent to the companion Java API's MessageLite#writeDelimitedTo. -func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) { - buffer, err := proto.Marshal(m) - if err != nil { - return 0, err - } - - var buf [binary.MaxVarintLen32]byte - encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer))) - - sync, err := w.Write(buf[:encodedLength]) - if err != nil { - return sync, err - } - - n, err = w.Write(buffer) - return n + sync, err -} diff --git a/vendor/github.com/minio/asm2plan9s/.gitignore b/vendor/github.com/minio/asm2plan9s/.gitignore deleted file mode 100644 index cdec7197..00000000 --- a/vendor/github.com/minio/asm2plan9s/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -.idea -asm2plan9s diff --git a/vendor/github.com/minio/asm2plan9s/README.md b/vendor/github.com/minio/asm2plan9s/README.md deleted file mode 100644 index c6740600..00000000 --- a/vendor/github.com/minio/asm2plan9s/README.md +++ /dev/null @@ -1,91 +0,0 @@ - -asm2plan9s -========== - -Tool to generate BYTE sequences for Go assembly as generated by YASM/GAS (for Intel) or GAS (for ARM). - -Installation ------------- - -Make sure either YASM or GAS is installed on your platform. Note that YASM takes precedence over GAS if both are installed. - -##### GAS (Intel/AMD64 or ARM): - -Typically `as` or `gas` (GNU Assembler) will already be installed as it is part of binutils, but if need be, you can eg. do as follows: -``` -sudo apt-get install build-essential -``` - -##### YASM (Intel/AMD64): - -``` -$ yasm --version -yasm 1.2.0 -Compiled on Sep 30 2013. -Copyright (c) 2001-2011 Peter Johnson and other Yasm developers. -Run yasm --license for licensing overview and summary. -``` - -### asm2plan9s - - `go get -u github.com/minio/asm2plan9s` - -### AVX512 support - -Note that AVX512 support is currently only available for GAS. - -Example -------- - -``` -$ more example.s - // VPADDQ XMM0,XMM1,XMM8 -$ asm2plan9s example.s -$ echo example.s - LONG $0xd471c1c4; BYTE $0xc0 // VPADDQ XMM0,XMM1,XMM8 -``` - -The instruction to be assembled needs to start with a `//` preceded by either a single space or a tab character. -The preceding characters will be overwitten by the correct sequence (irrespective of its contents) so when changing the instruction, rerunning `asm2plan9s` will update the BYTE sequence generated. - -Starting position of instruction --------------------------------- - -The starting position of the `//` comment needs to follow the (imaginary) sequence with either a single space or a space followed by a back slash plus another space (see support for defines below). -Upon first entering an instruction you can also type eg `LONG $0x00000000; BYTE $0x00 // VZEROUPPER` to trigger the assembler. - -Support for defines -------------------- - -If you are using #define for 'macros' with the back-slash delimiter to continue on the next line, this will be preserved. 
- -For instance: -``` - \ // VPADDQ XMM0,XMM1,XMM8 -``` - -will be assembled into - -``` - LONG $0xd471c1c4; BYTE $0xc0 \ // VPADDQ XMM0,XMM1,XMM8 -``` - -asmfmt ------- - -asm2plan9s works nicely together with [asmfmt](https://github.com/klauspost/asmfmt) in order to format the assembly code (in a similar style to `go fmt`). - -Extensive example ------------------ - -For a more extensive example see (for Intel) [compressAvx_amd64.s](https://github.com/minio/blake2b-simd/blob/master/compressAvx_amd64.s) or (for ARM) [highwayhash_arm64.s](https://github.com/minio/highwayhash/blob/master/highwayhash_arm64.s). - -License -------- - -Released under the Apache License v2.0. You can find the complete text in the file LICENSE. - -Contributing ------------- - -Contributions are welcome, please send PRs for any enhancements. \ No newline at end of file diff --git a/vendor/github.com/minio/asm2plan9s/asm2plan9s.go b/vendor/github.com/minio/asm2plan9s/asm2plan9s.go deleted file mode 100644 index 9adddb58..00000000 --- a/vendor/github.com/minio/asm2plan9s/asm2plan9s.go +++ /dev/null @@ -1,252 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2016-2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package main - -import ( - "bufio" - "fmt" - "io" - "log" - "os" - "strings" -) - -type Instruction struct { - instruction string - lineno int - commentPos int - inDefine bool - assembled string - opcodes []byte -} - -type Assembler struct { - Prescan bool - Instructions []Instruction - Compact bool -} - -// assemble assembles an array of lines into their -// resulting plan9 equivalents -func (a *Assembler) assemble(lines []string) ([]string, error) { - - result := make([]string, 0) - - for lineno, line := range lines { - startsWithTab := strings.HasPrefix(line, "\t") - line := strings.Replace(line, "\t", " ", -1) - fields := strings.Split(line, "//") - if len(fields) == 2 && (startsAfterLongWordByteSequence(fields[0]) || len(fields[0]) == 65) { - - // test whether string before instruction is terminated with a backslash (so used in a #define) - trimmed := strings.TrimSpace(fields[0]) - inDefine := len(trimmed) > 0 && string(trimmed[len(trimmed)-1]) == `\` - - // While prescanning collect the instructions - if a.Prescan { - ins := Instruction{instruction: fields[1], lineno: lineno, commentPos: len(fields[0]), inDefine: inDefine} - a.Instructions = append(a.Instructions, ins) - continue - } - - var ins *Instruction - for i := range a.Instructions { - if lineno == a.Instructions[i].lineno { - ins = &a.Instructions[i] - } - } - if ins == nil { - if a.Compact { - continue - } - panic("failed to find entry with correct line number") - } - if startsWithTab { - ins.assembled = strings.Replace(ins.assembled, " ", "\t", 1) - } - result = append(result, ins.assembled) - } else if !a.Prescan { - if startsWithTab { - line = strings.Replace(line, " ", "\t", 1) - } - result = append(result, line) - } - } - - return result, nil -} - -// startsAfterLongWordByteSequence determines if an assembly 
instruction -// starts on a position after a combination of LONG, WORD, BYTE sequences -func startsAfterLongWordByteSequence(prefix string) bool { - - if len(strings.TrimSpace(prefix)) != 0 && !strings.HasPrefix(prefix, " LONG $0x") && - !strings.HasPrefix(prefix, " WORD $0x") && !strings.HasPrefix(prefix, " BYTE $0x") { - return false - } - - length := 4 + len(prefix) + 1 - - for objcodes := 3; objcodes <= 8; objcodes++ { - - ls, ws, bs := 0, 0, 0 - - oc := objcodes - - for ; oc >= 4; oc -= 4 { - ls++ - } - if oc >= 2 { - ws++ - oc -= 2 - } - if oc == 1 { - bs++ - } - size := 4 + ls*(len("LONG $0x")+8) + ws*(len("WORD $0x")+4) + bs*(len("BYTE $0x")+2) + (ls+ws+bs-1)*len("; ") - - if length == size+6 { // comment starts after a space - return true - } - } - return false -} - -// combineLines shortens the output by combining consecutive lines into a larger list of opcodes -func (a *Assembler) combineLines() { - startIndex, startLine, opcodes := -1, -1, make([]byte, 0, 1024) - combined := make([]Instruction, 0, 100) - for i, ins := range a.Instructions { - if startIndex == -1 { - startIndex, startLine = i, ins.lineno - } - if ins.lineno != startLine+(i-startIndex) { // we have found a non-consecutive line - combiAssem, _ := toPlan9s(opcodes, "", 0, false) - combiIns := Instruction{assembled: combiAssem, lineno: startLine, inDefine: false} - - combined = append(combined, combiIns) - opcodes = opcodes[:0] - startIndex, startLine = i, ins.lineno - } - opcodes = append(opcodes, ins.opcodes...) - } - if len(opcodes) > 0 { - combiAssem, _ := toPlan9s(opcodes, "", 0, false) - ins := Instruction{assembled: combiAssem, lineno: startLine, inDefine: false} - - combined = append(combined, ins) - } - - a.Instructions = combined -} - -// readLines reads a whole file into memory -// and returns a slice of its lines. -func readLines(path string, in io.Reader) ([]string, error) { - if in == nil { - file, err := os.Open(path) - if err != nil { - return nil, err - } - defer file.Close() - in = file - } - - var lines []string - scanner := bufio.NewScanner(in) - for scanner.Scan() { - lines = append(lines, scanner.Text()) - } - return lines, scanner.Err() -} - -// writeLines writes the lines to the given file. 
-func writeLines(lines []string, path string, out io.Writer) error {
-	if path != "" {
-		file, err := os.Create(path)
-		if err != nil {
-			return err
-		}
-		defer file.Close()
-		out = file
-	}
-
-	w := bufio.NewWriter(out)
-	for _, line := range lines {
-		fmt.Fprintln(w, line)
-	}
-	return w.Flush()
-}
-
-func assemble(lines []string, compact bool) (result []string, err error) {
-
-	// TODO: Make compaction configurable
-	a := Assembler{Prescan: true, Compact: compact}
-
-	_, err = a.assemble(lines)
-	if err != nil {
-		return result, err
-	}
-
-	err = as(a.Instructions)
-	if err != nil {
-		return result, err
-	}
-
-	if a.Compact {
-		a.combineLines()
-	}
-
-	a.Prescan = false
-	result, err = a.assemble(lines)
-	if err != nil {
-		return result, err
-	}
-
-	return result, nil
-}
-
-func main() {
-
-	file := ""
-	if len(os.Args) >= 2 {
-		file = os.Args[1]
-	}
-
-	var lines []string
-	var err error
-	if len(file) > 0 {
-		fmt.Println("Processing file", file)
-		lines, err = readLines(file, nil)
-	} else {
-		lines, err = readLines("", os.Stdin)
-	}
-	if err != nil {
-		log.Fatalf("readLines: %s", err)
-	}
-
-	result, err := assemble(lines, false)
-	if err != nil {
-		fmt.Print(err)
-		os.Exit(-1)
-	}
-
-	err = writeLines(result, file, os.Stdout)
-	if err != nil {
-		log.Fatalf("writeLines: %s", err)
-	}
-}
diff --git a/vendor/github.com/minio/asm2plan9s/asm2plan9s_amd64.go b/vendor/github.com/minio/asm2plan9s/asm2plan9s_amd64.go
deleted file mode 100644
index e23b5310..00000000
--- a/vendor/github.com/minio/asm2plan9s/asm2plan9s_amd64.go
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
- * Minio Cloud Storage, (C) 2016-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"encoding/hex"
-	"errors"
-	"fmt"
-	"io/ioutil"
-	"os"
-	"os/exec"
-	"regexp"
-	"strconv"
-	"strings"
-)
-
-// as: assemble instructions by invoking either yasm or gas
-func as(instructions []Instruction) error {
-
-	// First try yasm (will return an error when not installed)
-	e := yasm(instructions)
-	if e == nil {
-		return e
-	}
-	// Try gas if yasm is not installed
-	return gas(instructions)
-}
-
-// See below for YASM support (older, no AVX512)
-
-///////////////////////////////////////////////////////////////////////////////
-//
-//                        G A S   S U P P O R T
-//
-///////////////////////////////////////////////////////////////////////////////
-
-//
-// frank@hemelmeer: asm2plan9s$ more example.s
-// .intel_syntax noprefix
-//
-//     VPANDQ   ZMM0, ZMM1, ZMM2
-//
-// frank@hemelmeer: asm2plan9s$ as -o example.o -al=example.lis example.s
-// frank@hemelmeer: asm2plan9s$ more example.lis
-// GAS LISTING example.s            page 1
-//    1        .intel_syntax noprefix
-//    2
-//    3 0000 62F1F548     VPANDQ   ZMM0, ZMM1, ZMM2
-//    3      DBC2
-//
-
-func gas(instructions []Instruction) error {
-
-	tmpfile, err := ioutil.TempFile("", "asm2plan9s")
-	if err != nil {
-		return err
-	}
-	if _, err := tmpfile.Write([]byte(".intel_syntax noprefix\n")); err != nil {
-		return err
-	}
-
-	for _, instr := range instructions {
-		instrFields := strings.Split(instr.instruction, "/*")
-		if len(instrFields) == 1 {
-			instrFields = strings.Split(instr.instruction, ";") // try again with ; separator
-		}
-		content := []byte(instrFields[0] + "\n")
-
-		if _, err := tmpfile.Write(content); err != nil {
-			return err
-		}
-	}
-
-	if err := tmpfile.Close(); err != nil {
-		return err
-	}
-
-	asmFile := tmpfile.Name() + ".asm"
-	lisFile := tmpfile.Name() + ".lis"
-	objFile := tmpfile.Name() + ".obj"
-	os.Rename(tmpfile.Name(), asmFile)
-
-	defer os.Remove(asmFile) // clean up
-	defer os.Remove(lisFile) // clean up
-	defer os.Remove(objFile) // clean up
-
-	// as -o example.o -al=example.lis example.s
-	app := "as"
-
-	arg0 := "-o"
-	arg1 := objFile
-	arg2 := fmt.Sprintf("-aln=%s", lisFile)
-	arg3 := asmFile
-
-	cmd := exec.Command(app, arg0, arg1, arg2, arg3)
-	cmb, err := cmd.CombinedOutput()
-	if err != nil {
-		asmErrs := strings.Split(string(cmb)[len(asmFile)+1:], ":")
-		asmErr := strings.Join(asmErrs[1:], ":")
-		// TODO: Fix proper error reporting
-		lineno := -1
-		instr := "TODO: fix"
-		return errors.New(fmt.Sprintf("GAS error (line %d for '%s'):", lineno+1, strings.TrimSpace(instr)) + asmErr)
-	}
-
-	opcodes, err := toPlan9sGas(lisFile)
-	if err != nil {
-		return err
-	}
-
-	if len(instructions) != len(opcodes) {
-		panic("Unequal length between instructions to be assembled and opcodes returned")
-	}
-
-	for i, opcode := range opcodes {
-		assembled, err := toPlan9s(opcode, instructions[i].instruction, instructions[i].commentPos, instructions[i].inDefine)
-		if err != nil {
-			return err
-		}
-		instructions[i].assembled = assembled
-		instructions[i].opcodes = make([]byte, len(opcode))
-		copy(instructions[i].opcodes, opcode)
-	}
-
-	return nil
-}
-
-func toPlan9sGas(listFile string) ([][]byte, error) {
-
-	opcodes := make([][]byte, 0, 10)
-
-	outputLines, err := readLines(listFile, nil)
-	if err != nil {
-		return opcodes, err
-	}
-
-	var regexpHeader = regexp.MustCompile(`^\s+(\d+)\s+[0-9a-fA-F]+\s+([0-9a-fA-F]+)`)
-	var regexpSequel = regexp.MustCompile(`^\s+(\d+)\s+([0-9a-fA-F]+)`)
-
-	lineno, opcode := -1, make([]byte, 0, 10)
-
-	for _, line := range outputLines {
-
-		if match := regexpHeader.FindStringSubmatch(line); len(match) > 2 {
-			l, e := strconv.Atoi(match[1])
-			if e != nil {
-				panic(e)
-			}
-			if lineno != -1 {
-				opcodes = append(opcodes, opcode)
-			}
-			lineno = l
-			opcode = make([]byte, 0, 10)
-			b, e := hex.DecodeString(match[2])
-			if e != nil {
-				panic(e)
-			}
-			opcode = append(opcode, b...)
-		} else if match := regexpSequel.FindStringSubmatch(line); len(match) > 2 {
-			l, e := strconv.Atoi(match[1])
-			if e != nil {
-				panic(e)
-			}
-			if l != lineno {
-				panic("bad line number")
-			}
-			b, e := hex.DecodeString(match[2])
-			if e != nil {
-				panic(e)
-			}
-			opcode = append(opcode, b...)
-		}
-	}
-
-	opcodes = append(opcodes, opcode)
-
-	return opcodes, nil
-}
diff --git a/vendor/github.com/minio/asm2plan9s/asm2plan9s_arm64.go b/vendor/github.com/minio/asm2plan9s/asm2plan9s_arm64.go
deleted file mode 100644
index 42bd4523..00000000
--- a/vendor/github.com/minio/asm2plan9s/asm2plan9s_arm64.go
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Minio Cloud Storage, (C) 2016-2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"errors"
-	"fmt"
-	"io/ioutil"
-	"os"
-	"os/exec"
-	"regexp"
-	"strings"
-)
-
-func as(instructions []Instruction) error {
-
-	// First try yasm (will return an error when not installed)
-	e := yasm(instructions)
-	if e == nil {
-		return e
-	}
-	// Try gas if yasm is not installed
-	return gas(instructions)
-}
-
-func gas(instructions []Instruction) error {
-	for i, ins := range instructions {
-		assembled, opcodes, err := asSingle(ins.instruction, ins.lineno, ins.commentPos, ins.inDefine)
-		if err != nil {
-			return err
-		}
-		instructions[i].assembled = assembled
-		instructions[i].opcodes = make([]byte, len(opcodes))
-		copy(instructions[i].opcodes[:], opcodes)
-	}
-	return nil
-}
-
-func asSingle(instr string, lineno, commentPos int, inDefine bool) (string, []byte, error) {
-
-	instrFields := strings.Split(instr, "/*")
-	content := []byte(instrFields[0] + "\n")
-	tmpfile, err := ioutil.TempFile("", "asm2plan9s")
-	if err != nil {
-		return "", nil, err
-	}
-
-	if _, err := tmpfile.Write(content); err != nil {
-		return "", nil, err
-	}
-	if err := tmpfile.Close(); err != nil {
-		return "", nil, err
-	}
-
-	asmFile := tmpfile.Name() + ".asm"
-	lisFile := tmpfile.Name() + ".lis"
-	objFile := tmpfile.Name() + ".obj"
-	os.Rename(tmpfile.Name(), asmFile)
-
-	defer os.Remove(asmFile) // clean up
-	defer os.Remove(lisFile) // clean up
-	defer os.Remove(objFile) // clean up
-
-	// as -march=armv8-a+crypto -o first.out -al=first.lis first.s
-	app := "as"
-
-	arg0 := "-march=armv8-a+crypto" // See https://gcc.gnu.org/onlinedocs/gcc-4.9.1/gcc/ARM-Options.html
-	arg1 := "-o"
-	arg2 := objFile
-	arg3 := fmt.Sprintf("-al=%s", lisFile)
-	arg4 := asmFile
-
-	cmd := exec.Command(app, arg0, arg1, arg2, arg3, arg4)
-	cmb, err := cmd.CombinedOutput()
-	if err != nil {
-		asmErrs := strings.Split(string(cmb)[len(asmFile)+1:], ":")
-		asmErr := strings.Join(asmErrs[1:], ":")
-		return "", nil, errors.New(fmt.Sprintf("GAS error (line %d for '%s'):", lineno+1, strings.TrimSpace(instr)) + asmErr)
-	}
-
-	return toPlan9sArm(lisFile, instr)
-}
-
-func toPlan9sArm(listFile, instr string) (string, []byte, error) {
-
-	var r = regexp.MustCompile(`^\s+\d+\s+\d+\s+([0-9a-fA-F]+)`)
-
-	outputLines, err := readLines(listFile, nil)
-	if err != nil {
-		return "", nil, err
-	}
-
-	lastLine := outputLines[len(outputLines)-1]
-
-	sline := "    "
-
-	if match := r.FindStringSubmatch(lastLine); len(match) > 1 {
-		sline += fmt.Sprintf("WORD $0x%s%s%s%s", strings.ToLower(match[1][6:8]), strings.ToLower(match[1][4:6]), strings.ToLower(match[1][2:4]), strings.ToLower(match[1][0:2]))
-	} else {
-		return "", nil, errors.New("regexp failed")
-	}
-
-	sline += " //" + instr
-
-	// fmt.Println(sline)
-
-	return sline, nil, nil
-}
diff --git a/vendor/github.com/minio/asm2plan9s/example.s b/vendor/github.com/minio/asm2plan9s/example.s
deleted file mode 100644
index 7ce23c3b..00000000
--- a/vendor/github.com/minio/asm2plan9s/example.s
+++ /dev/null
@@ -1 +0,0 @@
-    // VPADDQ XMM0,XMM1,XMM8
diff --git a/vendor/github.com/minio/asm2plan9s/neon.asm b/vendor/github.com/minio/asm2plan9s/neon.asm
deleted file mode 100644
index d766f197..00000000
--- a/vendor/github.com/minio/asm2plan9s/neon.asm
+++ /dev/null
@@ -1 +0,0 @@
-    WORD $0x00000000 // ld1 {v16.4s-v19.4s}, [x3], #64
diff --git a/vendor/github.com/minio/asm2plan9s/yasm.go b/vendor/github.com/minio/asm2plan9s/yasm.go
deleted file mode 100644
index 7d978992..00000000
--- a/vendor/github.com/minio/asm2plan9s/yasm.go
+++ /dev/null
@@ -1,176 +0,0 @@
-package main
-
-import (
-	"errors"
-	"fmt"
-	"io/ioutil"
-	"os"
-	"os/exec"
-	"strings"
-	"unicode"
-)
-
-///////////////////////////////////////////////////////////////////////////////
-//
-//                       Y A S M   S U P P O R T
-//
-///////////////////////////////////////////////////////////////////////////////
-
-//
-// yasm-assemble-disassemble-roundtrip-sse.txt
-//
-// franks-mbp:sse frankw$ more assembly.asm
-// [bits 64]
-//
-//     VPXOR   YMM4, YMM2, YMM3    ; X4: Result
-// franks-mbp:sse frankw$ yasm assembly.asm
-// franks-mbp:sse frankw$ hexdump -C assembly
-// 00000000  c5 ed ef e3    |....|
-// 00000004
-// franks-mbp:sse frankw$ echo 'lbl: db 0xc5, 0xed, 0xef, 0xe3' | yasm -f elf - -o assembly.o
-// franks-mbp:sse frankw$ gobjdump -d -M intel assembly.o
-//
-// assembly.o:     file format elf32-i386
-//
-//
-// Disassembly of section .text:
-//
-// 00000000 <.text>:
-//    0:   c5 ed ef e3    vpxor  ymm4,ymm2,ymm3
-
-func yasm(instructions []Instruction) error {
-	for i, ins := range instructions {
-		assembled, opcodes, err := yasmSingle(ins.instruction, ins.lineno, ins.commentPos, ins.inDefine)
-		if err != nil {
-			return err
-		}
-		instructions[i].assembled = assembled
-		instructions[i].opcodes = make([]byte, len(opcodes))
-		copy(instructions[i].opcodes[:], opcodes)
-	}
-	return nil
-}
-
-func yasmSingle(instr string, lineno, commentPos int, inDefine bool) (string, []byte, error) {
-
-	instrFields := strings.Split(instr, "/*")
-	content := []byte("[bits 64]\n" + instrFields[0])
-	tmpfile, err := ioutil.TempFile("", "asm2plan9s")
-	if err != nil {
-		return "", nil, err
-	}
-
-	if _, err := tmpfile.Write(content); err != nil {
-		return "", nil, err
-	}
-	if err := tmpfile.Close(); err != nil {
-		return "", nil, err
-	}
-
-	asmFile := tmpfile.Name() + ".asm"
-	objFile := tmpfile.Name() + ".obj"
-	os.Rename(tmpfile.Name(), asmFile)
-
-	defer os.Remove(asmFile) // clean up
-	defer os.Remove(objFile) // clean up
-
-	app := "yasm"
-
-	arg0 := "-o"
-	arg1 := objFile
-	arg2 := asmFile
-
-	cmd := exec.Command(app, arg0, arg1, arg2)
-	cmb, err := cmd.CombinedOutput()
-	if err != nil {
-		if len(string(cmb)) == 0 { // command invocation failed
-			return "", nil, errors.New("exec error: YASM not installed?")
-		}
-		yasmErrs := strings.Split(string(cmb)[len(asmFile)+1:], ":")
-		yasmErr := strings.Join(yasmErrs[1:], ":")
-		return "", nil, errors.New(fmt.Sprintf("YASM error (line %d for '%s'):", lineno+1, strings.TrimSpace(instr)) + yasmErr)
-	}
-
-	return toPlan9sYasm(objFile, instr, commentPos, inDefine)
-}
-
-func toPlan9sYasm(objFile, instr string, commentPos int, inDefine bool) (string, []byte, error) {
-	opcodes, err := ioutil.ReadFile(objFile)
-	if err != nil {
-		return "", nil, err
-	}
-
-	s, err := toPlan9s(opcodes, instr, commentPos, inDefine)
-	return s, opcodes, err
-}
-
-func toPlan9s(opcodes []byte, instr string, commentPos int, inDefine bool) (string, error) {
-	sline := "    "
-	i := 0
-	// First do QUADs (as many as needed)
-	for ; len(opcodes) >= 8; i++ {
-		if i != 0 {
-			sline += "; "
-		}
-		sline += fmt.Sprintf("QUAD $0x%02x%02x%02x%02x%02x%02x%02x%02x", opcodes[7], opcodes[6], opcodes[5], opcodes[4], opcodes[3], opcodes[2], opcodes[1], opcodes[0])
-
-		opcodes = opcodes[8:]
-	}
-	// Then do LONGs (as many as needed)
-	for ; len(opcodes) >= 4; i++ {
-		if i != 0 {
-			sline += "; "
-		}
-		sline += fmt.Sprintf("LONG $0x%02x%02x%02x%02x", opcodes[3], opcodes[2], opcodes[1], opcodes[0])
-
-		opcodes = opcodes[4:]
-	}
-
-	// Then do a WORD (if needed)
-	if len(opcodes) >= 2 {
-
-		if i != 0 {
-			sline += "; "
-		}
-		sline += fmt.Sprintf("WORD $0x%02x%02x", opcodes[1], opcodes[0])
-
-		i++
-		opcodes = opcodes[2:]
-	}
-
-	// And close with a BYTE (if needed)
-	if len(opcodes) == 1 {
-		if i != 0 {
-			sline += "; "
-		}
-		sline += fmt.Sprintf("BYTE $0x%02x", opcodes[0])
-
-		i++
-		opcodes = opcodes[1:]
-	}
-
-	if inDefine {
-		if commentPos > commentPos-2-len(sline) {
-			if commentPos-2-len(sline) > 0 {
-				sline += strings.Repeat(" ", commentPos-2-len(sline))
-			}
-		} else {
-			sline += " "
-		}
-		sline += `\ `
-	} else {
-		if commentPos > len(sline) {
-			if commentPos-len(sline) > 0 {
-				sline += strings.Repeat(" ", commentPos-len(sline))
-			}
-		} else {
-			sline += " "
-		}
-	}
-
-	if instr != "" {
-		sline += "//" + instr
-	}
-
-	return strings.TrimRightFunc(sline, unicode.IsSpace), nil
-}
diff --git a/vendor/github.com/minio/c2goasm/README.md b/vendor/github.com/minio/c2goasm/README.md
deleted file mode 100644
index f6ab1300..00000000
--- a/vendor/github.com/minio/c2goasm/README.md
+++ /dev/null
@@ -1,203 +0,0 @@
-# c2goasm: C to Go Assembly
-
-## Introduction
-
-This is a tool to convert assembly as generated by a C/C++ compiler into Golang assembly. It is meant to be used in combination with [asm2plan9s](https://github.com/minio/asm2plan9s) in order to automatically generate pure Go wrappers for C/C++ code (that may for instance take advantage of compiler SIMD intrinsics or `template<>` code).
-
-Mode of operation:
-```
-$ c2goasm -a /path/to/some/great/c-code.s /path/to/now/great/golang-code_amd64.s
-```
-
-You can optionally nicely format the code using [asmfmt](https://github.com/klauspost/asmfmt) by passing in an `-f` flag.
-
-This project has been developed as part of developing a Go wrapper around [Simd](https://github.com/fwessels/go-cv-simd). However, it should also work with other projects and libraries.
-
-Keep in mind, though, that it is not intended to 'port' a complete C/C++ project in a single action, but rather to do it on a case-by-case basis per function/source file (and to create accompanying high-level Go code to call into the assembly code).
-
-## Command line options
-
-```
-$ c2goasm --help
-Usage of c2goasm:
-  -a  Immediately invoke asm2plan9s
-  -c  Compact byte codes
-  -f  Format using asmfmt
-  -s  Strip comments
-```
-
-## A simple example
-
-Here is a simple C function doing an AVX2 intrinsics computation:
-```
-void MultiplyAndAdd(float* arg1, float* arg2, float* arg3, float* result) {
-    __m256 vec1 = _mm256_load_ps(arg1);
-    __m256 vec2 = _mm256_load_ps(arg2);
-    __m256 vec3 = _mm256_load_ps(arg3);
-    __m256 res  = _mm256_fmadd_ps(vec1, vec2, vec3);
-    _mm256_storeu_ps(result, res);
-}
-```
-
-Compiling into assembly gives the following:
-```
-__ZN14MultiplyAndAddEPfS1_S1_S1_:       ## @_ZN14MultiplyAndAddEPfS1_S1_S1_
-## BB#0:
-    push    rbp
-    mov     rbp, rsp
-    vmovups ymm0, ymmword ptr [rdi]
-    vmovups ymm1, ymmword ptr [rsi]
-    vfmadd213ps ymm1, ymm0, ymmword ptr [rdx]
-    vmovups ymmword ptr [rcx], ymm1
-    pop     rbp
-    vzeroupper
-    ret
-```
-
-Running `c2goasm` will generate the following Go assembly (e.g. saved in `MultiplyAndAdd_amd64.s`):
-```
-//+build !noasm !appengine
-// AUTO-GENERATED BY C2GOASM -- DO NOT EDIT
-
-TEXT ·_MultiplyAndAdd(SB), $0-32
-
-    MOVQ vec1+0(FP), DI
-    MOVQ vec2+8(FP), SI
-    MOVQ vec3+16(FP), DX
-    MOVQ result+24(FP), CX
-
-    LONG $0x0710fcc5             // vmovups ymm0, yword [rdi]
-    LONG $0x0e10fcc5             // vmovups ymm1, yword [rsi]
-    LONG $0xa87de2c4; BYTE $0x0a // vfmadd213ps ymm1, ymm0, yword [rdx]
-    LONG $0x0911fcc5             // vmovups yword [rcx], ymm1
-
-    VZEROUPPER
-    RET
-```
-
-This needs to be accompanied by the following Go code (in `MultiplyAndAdd_amd64.go`):
-```
-//go:noescape
-func _MultiplyAndAdd(vec1, vec2, vec3, result unsafe.Pointer)
-
-func MultiplyAndAdd(someObj Object) {
-
-    _MultiplyAndAdd(someObj.GetVec1(), someObj.GetVec2(), someObj.GetVec3(), someObj.GetResult())
-}
-```
-
-As you may have gathered, the `_amd64.go` file needs to be in place in order for the argument names to be derived (and to allow `go vet` to succeed).
-
-## Benchmark against cgo
-
-We have run benchmarks of `c2goasm` versus `cgo` for both Go version 1.7.5 and 1.8.1. You can find the `c2goasm` benchmark test in `test/` and the `cgo` test in `cgocmp/` respectively. Here are the results for both versions:
-```
-$ benchcmp ../cgocmp/cgo-1.7.5.out c2goasm.out
-benchmark                    old ns/op     new ns/op     delta
-BenchmarkMultiplyAndAdd-12   382           10.9          -97.15%
-```
-```
-$ benchcmp ../cgocmp/cgo-1.8.1.out c2goasm.out
-benchmark                    old ns/op     new ns/op     delta
-BenchmarkMultiplyAndAdd-12   236           10.9          -95.38%
-```
-
-As you can see, Go 1.8 made a significant improvement (38.2%) over 1.7.5, but it is still about 20x slower than directly calling into the assembly code wrapped by `c2goasm`.
-
-## Converted projects
-
-- [go-cv-simd (WIP)](https://github.com/fwessels/go-cv-simd)
-
-## Internals
-
-The basic process is to (in the prologue) set up the stack and registers as the C code expects them, and upon exiting the subroutine (in the epilogue) to revert back to the golang world and pass a return value back if required. In more detail:
-- Define the assembly subroutine with proper golang decoration in terms of needed stack space and the overall size of the arguments plus return value.
-- Function arguments are loaded from the golang stack into registers, and prior to starting the C code any arguments beyond 6 are stored in C stack space.
-- Stack space is reserved and set up for the C code. Depending on the C code, the stack pointer may be aligned on a certain boundary (especially needed for code that takes advantage of SIMD instructions such as AVX etc.).
-- A constants table is generated (if needed) and any `rip`-based references are replaced with proper offsets to where Go will put the table.
-
-## Limitations
-
-- Arguments need (for now) to be 64-bit size, meaning either a value or a pointer (this requirement will be lifted)
-- Maximum number of 14 arguments (hard limit -- if you hit this maybe you should rethink your api anyway...)
-- Generally no `call` statements (thus inline your C code) with a couple of exceptions for functions such as `memset` and `memcpy` (see `clib_amd64.s`)
-
-## Generate assembly from C/C++
-
-For projects using cmake, for example, here is how to see a list of assembly targets:
-```
-$ make help | grep "\.s"
-```
-
-To see the actual command to generate the assembly:
-```
-$ make -n SimdAvx2BgraToGray.s
-```
-
-## Supported golang architectures
-
-For now just the AMD64 architecture is supported. ARM64 should work just fine in a similar fashion, but support is lacking at the moment.
-
-## Compatible compilers
-
-The following compilers have been tested:
-- `clang` (Apple LLVM version) on OSX/darwin
-- `clang` on linux
-
-Compiler flags:
-```
--masm=intel -mno-red-zone -mstackrealign -mllvm -inline-threshold=1000 -fno-asynchronous-unwind-tables -fno-exceptions -fno-rtti
-```
-
-| Flag                              | Explanation                                        |
-|:----------------------------------|:---------------------------------------------------|
-| `-masm=intel`                     | Output Intel syntax for assembly                   |
-| `-mno-red-zone`                   | Do not write below stack pointer (avoid [red zone](https://en.wikipedia.org/wiki/Red_zone_(computing))) |
-| `-mstackrealign`                  | Use explicit stack initialization                  |
-| `-mllvm -inline-threshold=1000`   | Higher limit for inlining heuristic (default=255)  |
-| `-fno-asynchronous-unwind-tables` | Do not generate unwind tables (for debug purposes) |
-| `-fno-exceptions`                 | Disable exception handling                         |
-| `-fno-rtti`                       | Disable run-time type information                  |
-
-The following flags are only available in `clang -cc1` frontend mode (see the `clang` vs `clang -cc1` section below):
-
-| Flag               | Explanation                                                        |
-|:-------------------|:-------------------------------------------------------------------|
-| `-fno-jump-tables` | Do not use jump tables as may be generated for `select` statements |
-
-#### `clang` vs `clang -cc1`
-
-As per the clang [FAQ](https://clang.llvm.org/docs/FAQ.html#driver), `clang -cc1` is the frontend, and `clang` is a (mostly GCC compatible) driver for the frontend. To see all options that the driver passes on to the frontend, use `-###` like this:
-
-```
-$ clang -### -c hello.c
-"/usr/lib/llvm/bin/clang" "-cc1" "-triple" "x86_64-pc-linux-gnu" etc. etc. etc.
-```
-
-#### Command line flags for clang
-
-To see all command line flags, use either `clang --help` or `clang --help-hidden` for the clang driver, or `clang -cc1 -help` for the frontend.
-
-#### Further optimization and fine tuning
-
-Using the LLVM optimizer ([opt](http://llvm.org/docs/CommandGuide/opt.html)) you can further optimize the code generation. Use `opt -help` or `opt -help-hidden` for all available options.
-
-An option can be passed in via `clang` using the `-mllvm` option, such as `-mllvm -inline-threshold=1000` as discussed above.
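-
-For instance, a complete (hypothetical) invocation combining the flags listed above with a raised inlining threshold could look as follows (the `-O3` optimization level and the file names are merely examples, not prescribed by c2goasm):
-
-```
-$ clang -O3 -masm=intel -mno-red-zone -mstackrealign -mllvm -inline-threshold=1000 \
-    -fno-asynchronous-unwind-tables -fno-exceptions -fno-rtti -S SomeGreatCode.cpp -o SomeGreatCode.cpp.s
-```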
-
-Also, LLVM allows you to tune specific functions via [function attributes](http://llvm.org/docs/LangRef.html#function-attributes) like `define void @f() alwaysinline norecurse { ... }`.
-
-#### What about GCC support?
-
-For now GCC code will not work out of the box. However, there is no fundamental reason why GCC should not work (PRs are welcome).
-
-## Resources
-
-- [A Primer on Go Assembly](https://github.com/teh-cmc/go-internals/blob/master/chapter1_assembly_primer/README.md)
-- [Go Functions in Assembly](https://github.com/golang/go/files/447163/GoFunctionsInAssembly.pdf)
-- [Stack frame layout on x86-64](http://eli.thegreenplace.net/2011/09/06/stack-frame-layout-on-x86-64)
-- [Compiler Explorer (interactive)](https://go.godbolt.org/)
-
-## License
-
-c2goasm is released under the Apache License v2.0. You can find the complete text in the file LICENSE.
-
-## Contributing
-
-Contributions are welcome, please send PRs for any enhancements.
diff --git a/vendor/github.com/minio/c2goasm/arguments.go b/vendor/github.com/minio/c2goasm/arguments.go
deleted file mode 100644
index ea310eae..00000000
--- a/vendor/github.com/minio/c2goasm/arguments.go
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Minio Cloud Storage, (C) 2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"errors"
-	"fmt"
-	"regexp"
-	"strconv"
-	"strings"
-)
-
-type StackArgs struct {
-	Number        int
-	OffsetToFirst int
-}
-
-func argumentsOnStack(lines []string) StackArgs {
-
-	offsets := make(map[uint]bool)
-
-	for _, l := range lines {
-		l, _ = stripComments(l)
-		if match := regexpRbpLoadHigher.FindStringSubmatch(l); len(match) > 1 {
-			offset, _ := strconv.Atoi(match[1])
-			if _, found := offsets[uint(offset)]; !found {
-				offsets[uint(offset)] = true
-			}
-		}
-	}
-
-	offset := uint(0)
-	for o := range offsets {
-		if o > offset {
-			offset = o
-		}
-	}
-	if offset >= 16 {
-		return StackArgs{OffsetToFirst: 16, Number: 1 + int((offset-16)/8)}
-	}
-	return StackArgs{OffsetToFirst: 0, Number: 0}
-}
-
-func parseCompanionFile(goCompanion, protoName string) ([]string, []string) {
-
-	gocode, err := readLines(goCompanion)
-	if err != nil {
-		panic(fmt.Sprintf("Failed to read companion go code: %v", err))
-	}
-
-	for _, goline := range gocode {
-
-		ok, args, rets, err := getGolangArgs(protoName, goline)
-		if err != nil {
-			panic(fmt.Sprintf("Error: %v", err))
-		} else if ok {
-			return args, rets
-		}
-	}
-
-	panic(fmt.Sprintf("Failed to find function prototype for %s", protoName))
-}
-
-var regexpFuncAndArgs = regexp.MustCompile(`^\s*func\s+([^\(]*)\(([^\)]*)\)(.*)`)
-var regexpReturnVals = regexp.MustCompile(`^\((.*)\)`)
-
-func getGolangArgs(protoName, goline string) (isFunc bool, args, rets []string, err error) {
-
-	// Search for the name of the function and its arguments,
-	// e.g. a companion line like `func _MultiplyAndAdd(vec1, vec2, vec3, result unsafe.Pointer)`
-	if match := regexpFuncAndArgs.FindStringSubmatch(goline); len(match) > 2 {
-		if match[1] == "_"+protoName {
-
-			args, rets = []string{}, []string{}
-			if match[2] != "" {
-				for _, arg := range strings.Split(match[2], ",") {
-					args = append(args, strings.Fields(arg)[0])
-				}
-			}
-
-			trailer := strings.TrimSpace(match[3])
-			if len(trailer) > 0 {
-				// Trailing string found, search for return values
-				if rmatch := regexpReturnVals.FindStringSubmatch(trailer); len(rmatch) > 1 {
-					for _, ret := range strings.Split(rmatch[1], ",") {
-						rets = append(rets, strings.Fields(ret)[0])
-					}
-				} else {
-					return false, args, rets, errors.New(fmt.Sprintf("Badly formatted return argument (please use parentheses and proper argument naming): %s", trailer))
-				}
-
-			}
-
-			return true, args, rets, nil
-		}
-	}
-
-	return false, []string{}, []string{}, nil
-}
-
-func getTotalSizeOfArguments(argStart, argEnd int) uint {
-	// TODO: Test if correct for non 64-bit arguments
-	return uint((argEnd - argStart + 1) * 8)
-}
-
-func getTotalSizeOfArgumentsAndReturnValues(argStart, argEnd int, returnValues []string) uint {
-	// TODO: Test if correct for non 64-bit return values
-	return getTotalSizeOfArguments(argStart, argEnd) + uint(len(returnValues)*8)
-}
diff --git a/vendor/github.com/minio/c2goasm/assembly.go b/vendor/github.com/minio/c2goasm/assembly.go
deleted file mode 100644
index e14ad925..00000000
--- a/vendor/github.com/minio/c2goasm/assembly.go
+++ /dev/null
@@ -1,412 +0,0 @@
-/*
- * Minio Cloud Storage, (C) 2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"fmt"
-	"regexp"
-	"strconv"
-	"strings"
-	"unicode"
-)
-
-const originalStackPointer = 8
-
-var registers = [...]string{"DI", "SI", "DX", "CX", "R8", "R9"}
-var registersAdditional = [...]string{"R10", "R11", "R12", "R13", "R14", "R15", "AX", "BX"}
-var regexpCall = regexp.MustCompile(`^\s*call\s*`)
-var regexpPushInstr = regexp.MustCompile(`^\s*push\s*`)
-var regexpPopInstr = regexp.MustCompile(`^\s*pop\s*`)
-var regexpLabel = regexp.MustCompile(`^(\.?LBB.*?:)`)
-var regexpJumpTableRef = regexp.MustCompile(`\[rip \+ (\.?LJTI[_0-9]*)\]\s*$`)
-var regexpJumpWithLabel = regexp.MustCompile(`^(\s*j\w*)\s*(\.?LBB.*)`)
-var regexpRbpLoadHigher = regexp.MustCompile(`\[rbp \+ ([0-9]+)\]`)
-var regexpRbpLoadLower = regexp.MustCompile(`\[rbp - ([0-9]+)\]`)
-var regexpStripComments = regexp.MustCompile(`\s*#?#\s.*$`)
-
-// Write the prologue for the subroutine
-func writeGoasmPrologue(sub Subroutine, stack Stack, arguments, returnValues []string) []string {
-
-	//if sub.name == "SimdSse2MedianFilterRhomb3x3" {
-	//	fmt.Println("sub.name", sub.name)
-	//	fmt.Println("sub.epilogue", sub.epilogue)
-	//	fmt.Println("arguments", arguments)
-	//	fmt.Println("returnValues", returnValues)
-	//	fmt.Println("table.Name", sub.table.Name)
-	//}
-
-	var result []string
-
-	// Output definition of subroutine
-	result = append(result, fmt.Sprintf("TEXT ·_%s(SB), $%d-%d", sub.name, stack.GolangLocalStackFrameSize(),
-		getTotalSizeOfArgumentsAndReturnValues(0, len(arguments)-1, returnValues)), "")
-
-	// Load Golang arguments into registers
-	for iarg, arg := range arguments {
-
-		if iarg < len(registers) {
-			// Load initial arguments (up to 6) in corresponding registers
-			result = append(result, fmt.Sprintf("    MOVQ %s+%d(FP), %s", arg, iarg*8, registers[iarg]))
-		} else if iarg-len(registers) < len(registersAdditional) {
-			// Load following arguments into additional registers
-			result = append(result, fmt.Sprintf("    MOVQ %s+%d(FP), %s", arg, iarg*8, registersAdditional[iarg-len(registers)]))
-		} else {
-			panic("Trying to pass in too many arguments")
-		}
-	}
-
-	// Setup the stack pointer for the C code
-	if sub.epilogue.AlignedStack {
-		// Align stack pointer to next multiple of alignment space
-		result = append(result, fmt.Sprintf("    MOVQ SP, BP"))
-		result = append(result, fmt.Sprintf("    ADDQ $%d, SP", stack.StackPointerOffsetForC() /*sub.epilogue.AlignValue*/))
-		result = append(result, fmt.Sprintf("    ANDQ $-%d, SP", sub.epilogue.AlignValue))
-
-		// Save original stack pointer right below newly aligned stack pointer
-		result = append(result, fmt.Sprintf("    MOVQ BP, %d(SP)", stack.OffsetForSavedSP())) // Save original SP
-
-	} else if stack.StackPointerOffsetForC() != 0 { // sub.epilogue.getStackSpace(len(arguments)) != 0 {
-		// Create stack space as needed
-		result = append(result, fmt.Sprintf("    ADDQ $%d, SP", stack.StackPointerOffsetForC() /*sub.epilogue.getFreeSpaceAtBottom())*/))
-	}
-
-	// Save Golang arguments beyond 6 onto stack
-	for iarg := len(arguments) - 1; iarg-len(registers) >= 0; iarg-- {
-		result = append(result, fmt.Sprintf("    MOVQ %s, %d(SP)", registersAdditional[iarg-len(registers)], stack.OffsetForGoArg(iarg)))
-	}
-
-	// Setup base pointer for loading constants
-	if sub.table.isPresent() {
-		result = append(result, fmt.Sprintf("    LEAQ %s<>(SB), BP", sub.table.Name))
-	}
-
-	return append(result, ``)
-}
-
-func writeGoasmBody(sub Subroutine, stack Stack, stackArgs StackArgs, arguments, returnValues []string) ([]string, error) {
-
-	var result []string
-
-	for iline, line := range sub.body {
-
-		// If part of epilogue
-		if iline >= sub.epilogue.Start && iline < sub.epilogue.End {
-
-			// Instead of last line, output go assembly epilogue
-			if iline == sub.epilogue.End-1 {
-				result = append(result, writeGoasmEpilogue(sub, stack, arguments, returnValues)...)
-			}
-			continue
-		}
-
-		// Remove ## comments
-		var skipLine bool
-		line, skipLine = stripComments(line)
-		if skipLine {
-			continue
-		}
-
-		// Skip lines with aligns
-		if strings.Contains(line, ".align") || strings.Contains(line, ".p2align") {
-			continue
-		}
-
-		line, _ = fixLabels(line)
-		line, _, _ = upperCaseJumps(line)
-		line, _ = upperCaseCalls(line)
-
-		fields := strings.Fields(line)
-		// Test for any non-jmp instruction (lower case mnemonic)
-		if len(fields) > 0 && !strings.Contains(fields[0], ":") && isLower(fields[0]) {
-			// prepend line with comment for subsequent asm2plan9s assembly
-			line = "    // " + strings.TrimSpace(line)
-		}
-
-		line = removeUndefined(line, "ptr")
-		line = removeUndefined(line, "# NOREX")
-
-		// https://github.com/vertis/objconv/blob/master/src/disasm2.cpp
-		line = replaceUndefined(line, "xmmword", "oword")
-		line = replaceUndefined(line, "ymmword", "yword")
-
-		line = fixShiftInstructions(line)
-		line = fixMovabsInstructions(line)
-		if sub.table.isPresent() {
-			line = fixPicLabels(line, sub.table)
-		}
-
-		line = fixRbpPlusLoad(line, stackArgs, stack)
-
-		detectRbpMinusMemoryAccess(line)
-		detectJumpTable(line)
-		detectPushInstruction(line)
-		detectPopInstruction(line)
-
-		result = append(result, line)
-	}
-
-	return result, nil
-}
-
-// Write the epilogue for the subroutine
-func writeGoasmEpilogue(sub Subroutine, stack Stack, arguments, returnValues []string) []string {
-
-	var result []string
-
-	// Restore the stack pointer
-	if sub.epilogue.AlignedStack {
-		// For an aligned stack, restore the stack pointer from the stack itself
-		result = append(result, fmt.Sprintf("    MOVQ %d(SP), SP", stack.OffsetForSavedSP()))
-	} else if stack.StackPointerOffsetForC() != 0 {
-		// For an unaligned stack, reverse the earlier addition in order to restore the stack pointer
-		result = append(result, fmt.Sprintf("    SUBQ $%d, SP", stack.StackPointerOffsetForC()))
-	}
-
-	// Clear upper half of YMM registers, if so done in the original code
-	if sub.epilogue.VZeroUpper {
-		result = append(result, "    VZEROUPPER")
-	}
-
-	if len(returnValues) == 1 {
-		// Store return value of subroutine
-		result = append(result, fmt.Sprintf("    MOVQ AX, %s+%d(FP)", returnValues[0],
-			getTotalSizeOfArgumentsAndReturnValues(0, len(arguments)-1, returnValues)-8))
-	} else if len(returnValues) > 1 {
-		panic(fmt.Sprintf("Fix multiple return values: %s", returnValues))
-	}
-
-	// Finally, return out of the subroutine
-	result = append(result, "    RET")
-
-	return result
-}
-
-func scanBodyForCalls(sub Subroutine) uint {
-
-	stackSize := uint(0)
-
-	for _, line := range sub.body {
-
-		_, size := upperCaseCalls(line)
-
-		if stackSize < size {
-			stackSize = size
-		}
-	}
-
-	return stackSize
-}
-
-// Strip comments from assembly lines
-func stripComments(line string) (result string, skipLine bool) {
-
-	if match := regexpStripComments.FindStringSubmatch(line); len(match) > 0 {
-		line = line[:len(line)-len(match[0])]
-		if line == "" {
-			return "", true
-		}
-	}
-	return line, false
-}
-
-// Remove leading `.` from labels
-func fixLabels(line string) (string, string) {
-
-	label := ""
-
-	if match := regexpLabel.FindStringSubmatch(line); len(match) > 0 {
-		label = strings.Replace(match[1], ".", "", 1)
-		line = label
-		label = strings.Replace(label, ":", "", 1)
-	}
-
-	return line, label
-}
-
-// Make jmps uppercase
-func upperCaseJumps(line string) (string, string, string) {
-
-	instruction, label := "", ""
-
-	if match := regexpJumpWithLabel.FindStringSubmatch(line); len(match) > 1 {
-		// make jmp statement uppercase
-		instruction = strings.ToUpper(match[1])
-		label = strings.Replace(match[2], ".", "", 1)
-		line = instruction + " " + label
-
-	}
-
-	return line, strings.TrimSpace(instruction), label
-}
-
-// Make calls uppercase
-func upperCaseCalls(line string) (string, uint) {
-
-	// TODO: Make determination of required stack size more sophisticated
-	stackSize := uint(0)
-
-	// Make 'call' instructions uppercase
-	if match := regexpCall.FindStringSubmatch(line); len(match) > 0 {
-		parts := strings.SplitN(line, `call`, 2)
-		fname := strings.TrimSpace(parts[1])
-
-		// replace c stdlib functions with equivalents
-		if fname == "_memcpy" || fname == "memcpy@PLT" { // (Procedure Linkage Table)
-			parts[1] = "clib·_memcpy(SB)"
-			stackSize = 64
-		} else if fname == "_memset" || fname == "memset@PLT" { // (Procedure Linkage Table)
-			parts[1] = "clib·_memset(SB)"
-			stackSize = 64
-		} else if fname == "_floor" || fname == "floor@PLT" { // (Procedure Linkage Table)
-			parts[1] = "clib·_floor(SB)"
-			stackSize = 64
-		} else if fname == "___bzero" {
-			parts[1] = "clib·_bzero(SB)"
-			stackSize = 64
-		}
-		line = parts[0] + "CALL " + strings.TrimSpace(parts[1])
-	}
-
-	return line, stackSize
-}
-
-func isLower(str string) bool {
-
-	for _, r := range str {
-		return unicode.IsLower(r)
-	}
-	return false
-}
-
-func removeUndefined(line, undef string) string {
-
-	if parts := strings.SplitN(line, undef, 2); len(parts) > 1 {
-		line = parts[0] + strings.TrimSpace(parts[1])
-	}
-	return line
-}
-
-func replaceUndefined(line, undef, repl string) string {
-
-	if parts := strings.SplitN(line, undef, 2); len(parts) > 1 {
-		line = parts[0] + repl + parts[1]
-	}
-	return line
-}
-
-// fix Position Independent Labels
-func fixPicLabels(line string, table Table) string {
-
-	if strings.Contains(line, "[rip + ") {
-		parts := strings.SplitN(line, "[rip + ", 2)
-		label := parts[1][:len(parts[1])-1]
-
-		// Track whether the label was found; the previous index-based check
-		// could never fire after a completed range loop
-		found := false
-		for _, l := range table.Labels {
-			if l.Name == label {
-				line = parts[0] + fmt.Sprintf("%d[rbp] /* [rip + %s */", l.Offset, parts[1])
-				found = true
-				break
-			}
-		}
-		if !found {
-			panic(fmt.Sprintf("Failed to find label to replace of position independent code: %s", label))
-		}
-	}
-
-	return line
-}
-
-func fixShiftNoArgument(line, ins string) string {
-
-	if strings.Contains(line, ins) {
-		parts := strings.SplitN(line, ins, 2)
-		args := strings.SplitN(parts[1], ",", 2)
-		if len(args) == 1 {
-			line += ", 1"
-		}
-	}
-
-	return line
-}
-
-func fixShiftInstructions(line string) string {
-
-	line = fixShiftNoArgument(line, "shr")
-	line = fixShiftNoArgument(line, "sar")
-
-	return line
-}
-
-func fixMovabsInstructions(line string) string {
-
-	if strings.Contains(line, "movabs") {
-		parts := strings.SplitN(line, "movabs", 2)
-		line = parts[0] + "mov" + parts[1]
-	}
-
-	return line
-}
-
-// Fix loads in the form of '[rbp + constant]'
-// These are load instructions for stack-based arguments that occur after the first 6 arguments
-// Remap to stack pointer
-func fixRbpPlusLoad(line string, stackArgs StackArgs, stack Stack) string {
-
-	if match := regexpRbpLoadHigher.FindStringSubmatch(line); len(match) > 1 {
-		offset, _ := strconv.Atoi(match[1])
-		// TODO: Get proper offset for non 64-bit arguments
-		iarg := len(registers) + (offset-stackArgs.OffsetToFirst)/8
-		parts := strings.SplitN(line, match[0], 2)
-		line = parts[0] + fmt.Sprintf("%d[rsp]%s /* %s */", stack.OffsetForGoArg(iarg), parts[1], match[0])
-	}
-
-	return line
-}
-
-// Detect memory accesses in the form of '[rbp - constant]'
-func detectRbpMinusMemoryAccess(line string) {
-
-	if match := regexpRbpLoadLower.FindStringSubmatch(line); len(match) > 1 {
-
-		panic(fmt.Sprintf("Not expected to find [rbp -] based loads: %s\n\nDid you specify `-mno-red-zone`?\n\n", line))
-	}
-}
-
-// Detect jump tables
-func detectJumpTable(line string) {
-
-	if match := regexpJumpTableRef.FindStringSubmatch(line); len(match) > 0 {
-		panic(fmt.Sprintf("Jump table detected: %s\n\nCircumvent using '-fno-jump-tables', see 'clang -cc1 -help' (version 3.9+)\n\n", match[1]))
-	}
-}
-
-// Detect push instructions
-func detectPushInstruction(line string) {
-
-	// regexpPushInstr has no capture group, so report the full line
-	// (indexing match[1] would itself panic with an index error)
-	if match := regexpPushInstr.FindStringSubmatch(line); len(match) > 0 {
-		panic(fmt.Sprintf("push instruction detected: %s\n\nCannot modify `rsp` in body of assembly\n\n", line))
-	}
-}
-
-// Detect pop instructions
-func detectPopInstruction(line string) {
-
-	// regexpPopInstr has no capture group, so report the full line
-	if match := regexpPopInstr.FindStringSubmatch(line); len(match) > 0 {
-		panic(fmt.Sprintf("pop instruction detected: %s\n\nCannot modify `rsp` in body of assembly\n\n", line))
-	}
-}
diff --git a/vendor/github.com/minio/c2goasm/c2goasm.go b/vendor/github.com/minio/c2goasm/c2goasm.go
deleted file mode 100644
index 84ba554c..00000000
--- a/vendor/github.com/minio/c2goasm/c2goasm.go
+++ /dev/null
@@ -1,300 +0,0 @@
-/*
- * Minio Cloud Storage, (C) 2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"bufio"
-	"encoding/hex"
-	"flag"
-	"fmt"
-	"log"
-	"os"
-	"os/exec"
-	"regexp"
-	"strings"
-)
-
-var (
-	assembleFlag = flag.Bool("a", false, "Immediately invoke asm2plan9s")
-	stripFlag    = flag.Bool("s", false, "Strip comments")
-	compactFlag  = flag.Bool("c", false, "Compact byte codes")
-	formatFlag   = flag.Bool("f", false, "Format using asmfmt")
-	targetFlag   = flag.String("t", "x86", "Target machine of input code")
-)
-
-// readLines reads a whole file into memory
-// and returns a slice of its lines.
-func readLines(path string) ([]string, error) {
-	file, err := os.Open(path)
-	if err != nil {
-		return nil, err
-	}
-	defer file.Close()
-
-	var lines []string
-	scanner := bufio.NewScanner(file)
-	for scanner.Scan() {
-		lines = append(lines, scanner.Text())
-	}
-	return lines, scanner.Err()
-}
-
-// writeLines writes the lines to the given file.
-func writeLines(lines []string, path string, header bool) error {
-	file, err := os.Create(path)
-	if err != nil {
-		return err
-	}
-	defer file.Close()
-
-	w := bufio.NewWriter(file)
-	if header {
-		fmt.Fprintln(w, "//+build !noasm !appengine")
-		fmt.Fprintln(w, "// AUTO-GENERATED BY C2GOASM -- DO NOT EDIT")
-		fmt.Fprintln(w, "")
-	}
-	for _, line := range lines {
-		fmt.Fprintln(w, line)
-	}
-	return w.Flush()
-}
-
-func process(assembly []string, goCompanionFile string) ([]string, error) {
-
-	// Split out the assembly source into subroutines
-	subroutines := segmentSource(assembly)
-	tables := segmentConstTables(assembly)
-
-	var result []string
-
-	// Iterate over all subroutines
-	for isubroutine, sub := range subroutines {
-
-		golangArgs, golangReturns := parseCompanionFile(goCompanionFile, sub.name)
-		stackArgs := argumentsOnStack(sub.body)
-		if len(golangArgs) > 6 && len(golangArgs)-6 < stackArgs.Number {
-			panic(fmt.Sprintf("Found too few arguments on stack (%d) but needed %d", len(golangArgs)-6, stackArgs.Number))
-		}
-
-		// Check for constants table
-		if table := getCorrespondingTable(sub.body, tables); table.isPresent() {
-
-			// Output constants table
-			result = append(result, strings.Split(table.Constants, "\n")...)
-			result = append(result, "") // append empty line
-
-			sub.table = table
-		}
-
-		// Create object to get offsets for stack pointer
-		stack := NewStack(sub.epilogue, len(golangArgs), scanBodyForCalls(sub))
-
-		// Write header for subroutine in go assembly
-		result = append(result, writeGoasmPrologue(sub, stack, golangArgs, golangReturns)...)
-
-		// Write body of code
-		assembly, err := writeGoasmBody(sub, stack, stackArgs, golangArgs, golangReturns)
-		if err != nil {
-			panic(fmt.Sprintf("writeGoasmBody: %v", err))
-		}
-		result = append(result, assembly...)
-
-		if isubroutine < len(subroutines)-1 {
-			// Empty lines before next subroutine
-			result = append(result, "\n", "\n")
-		}
-	}
-
-	return result, nil
-}
-
-func stripGoasmComments(file string) {
-
-	lines, err := readLines(file)
-	if err != nil {
-		log.Fatalf("readLines: %s", err)
-	}
-
-	for i, l := range lines {
-		if strings.Contains(l, "LONG") || strings.Contains(l, "WORD") || strings.Contains(l, "BYTE") {
-			opcode := strings.TrimSpace(strings.SplitN(l, "//", 2)[0])
-			lines[i] = strings.SplitN(l, opcode, 2)[0] + opcode
-		}
-	}
-
-	err = writeLines(lines, file, false)
-	if err != nil {
-		log.Fatalf("writeLines: %s", err)
-	}
-}
-
-func reverseBytes(hex string) string {
-
-	result := ""
-	for i := len(hex) - 2; i >= 0; i -= 2 {
-		result = result + hex[i:i+2]
-	}
-	return result
-}
-
-func compactArray(opcodes []byte) []string {
-
-	var result []string
-
-	dst := make([]byte, hex.EncodedLen(len(opcodes)))
-	hex.Encode(dst, opcodes)
-
-	q := 0
-	for ; q+31 < len(dst); q += 32 {
-		result = append(result, fmt.Sprintf("    QUAD $0x%s; QUAD $0x%s", reverseBytes(string(dst[q:q+16])), reverseBytes(string(dst[q+16:q+32]))))
-	}
-	for ; q+15 < len(dst); q += 16 {
-		result = append(result, fmt.Sprintf("    QUAD $0x%s", reverseBytes(string(dst[q:q+16]))))
-	}
-	if q < len(dst) {
-		last := ""
-		l := 0
-		if q+7 < len(dst) {
-			last += fmt.Sprintf("LONG $0x%s", reverseBytes(string(dst[q:q+8])))
-			l = 8
-		}
-		w := 0
-		if q+l+3 < len(dst) {
-			if len(last) > 0 {
-				last = last + "; "
-			}
-			last += fmt.Sprintf("WORD $0x%s", reverseBytes(string(dst[q+l:q+l+4])))
-			w = 4
-		}
-		if q+l+w+1 < len(dst) {
-			if len(last) > 0 {
-				last = last + "; "
-			}
-			last += fmt.Sprintf("BYTE $0x%s", dst[q+l+w:q+l+w+2])
-		}
-		result = append(result, "    "+last)
-	}
-
-	return result
-}
-
-func compactOpcodes(file string) {
-
-	lines, err := readLines(file)
-	if err != nil {
-		log.Fatalf("readLines: %s", err)
-	}
-
-	var result []string
-
-	opcodes := make([]byte, 0, 1000)
-
-	hexMatch := regexp.MustCompile(`(\$0x[0-9a-f]+)`)
-
-	for _, l := range lines {
-		if strings.Contains(l, "LONG") || strings.Contains(l, "WORD") || strings.Contains(l, "BYTE") {
-			match := hexMatch.FindAllStringSubmatch(l, -1)
-			for _, m := range match {
-				dst := make([]byte, hex.DecodedLen(len(m[0][3:])))
-				_, err := hex.Decode(dst, []byte(m[0][3:]))
-				if err != nil {
-					log.Fatal(err)
-				}
-				for i := len(dst) - 1; i >= 0; i-- { // append starting with lowest byte first
-					opcodes = append(opcodes, dst[i:i+1]...)
-				}
-			}
-		} else {
-
-			if len(opcodes) != 0 {
-				result = append(result, compactArray(opcodes)...)
-				opcodes = opcodes[:0]
-			}
-
-			result = append(result, l)
-		}
-	}
-
-	err = writeLines(result, file, false)
-	if err != nil {
-		log.Fatalf("writeLines: %s", err)
-	}
-}
-
-func main() {
-
-	flag.Parse()
-
-	if flag.NArg() < 2 {
-		fmt.Printf("error: not enough input files specified\n\n")
-		fmt.Println("usage: c2goasm /path/to/c-project/build/SomeGreatCode.cpp.s SomeGreatCode_amd64.s")
-		return
-	}
-	assemblyFile := flag.Arg(1)
-	if !strings.HasSuffix(assemblyFile, ".s") {
-		fmt.Printf("error: second parameter must have '.s' extension\n")
-		return
-	}
-
-	goCompanion := assemblyFile[:len(assemblyFile)-2] + ".go"
-	if _, err := os.Stat(goCompanion); os.IsNotExist(err) {
-		fmt.Printf("error: companion '.go' file is missing for %s\n", flag.Arg(1))
-		return
-	}
-
-	fmt.Println("Processing", flag.Arg(0))
-	lines, err := readLines(flag.Arg(0))
-	if err != nil {
-		log.Fatalf("readLines: %s", err)
-	}
-
-	result, err := process(lines, goCompanion)
-	if err != nil {
-		fmt.Print(err)
-		os.Exit(-1)
-	}
-
-	err = writeLines(result, assemblyFile, true)
-	if err != nil {
-		log.Fatalf("writeLines: %s", err)
-	}
-
-	if *assembleFlag {
-		fmt.Println("Invoking asm2plan9s on", assemblyFile)
-		cmd := exec.Command("asm2plan9s", assemblyFile)
-		_, err := cmd.CombinedOutput()
-		if err != nil {
-			log.Fatalf("asm2plan9s: %v", err)
-		}
-	}
-
-	if *stripFlag {
-		stripGoasmComments(assemblyFile)
-	}
-
-	if *compactFlag {
-		compactOpcodes(assemblyFile)
-	}
-
-	if *formatFlag {
-		cmd := exec.Command("asmfmt", "-w", assemblyFile)
-		_, err := cmd.CombinedOutput()
-		if err != nil {
-			log.Fatalf("asmfmt: %v", err)
-		}
-	}
-}
diff --git a/vendor/github.com/minio/c2goasm/constants.go b/vendor/github.com/minio/c2goasm/constants.go
deleted file mode 100644
index 28084d51..00000000
--- a/vendor/github.com/minio/c2goasm/constants.go
+++ /dev/null
@@ -1,255 +0,0 @@
-/*
- * Minio Cloud Storage, (C) 2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"fmt"
-	"regexp"
-	"strconv"
-	"strings"
-)
-
-type Table struct {
-	Name      string
-	Constants string
-	Labels    []Label
-}
-
-func (t *Table) isPresent() bool {
-	return len(t.Name) > 0
-}
-
-type Label struct {
-	Name   string
-	Offset uint
-}
-
-func getSingleNumber(line string) int64 {
-
-	if len(strings.Fields(line)) > 2 {
-		panic(fmt.Sprintf("Too many fields found: %d", len(strings.Fields(line))))
-	}
-	field := strings.Fields(line)[1]
-	if len(strings.Split(field, ",")) > 1 {
-		panic(fmt.Sprintf("Unexpected comma found in field: %s", field))
-	}
-	v, err := strconv.ParseInt(field, 10, 64)
-	if err != nil {
-		panic(fmt.Sprintf("Number parsing error: %v", err))
-	}
-	return v
-}
-
-func getDualNumbers(line string) (int64, int64) {
-
-	if len(strings.Fields(line)) > 2 {
-		panic(fmt.Sprintf("Too many fields found: %d", len(strings.Fields(line))))
-	}
-	field := strings.Fields(line)[1]
-	args := strings.Split(field, ",")
-	if len(args) > 2 {
-		panic(fmt.Sprintf("Too many commas found in field: %s", field))
-	}
-	r1, err := strconv.ParseInt(args[0], 10, 64)
-	if err != nil {
-		panic(fmt.Sprintf("Number parsing error: %v", err))
-	}
-	r2 := int64(0)
-	if len(args) > 1 {
-		r2, err = strconv.ParseInt(args[1], 10, 64)
-		if err != nil {
-			panic(fmt.Sprintf("Number parsing error: %v", err))
-		}
-	}
-
-	return r1, r2
-}
-
-// Sanity check to detect labels with identical offsets
-func sanityCheckLabels(labels []Label) {
-
-	for i := 0; i < len(labels)-1; i++ {
-		if labels[i].Offset == labels[i+1].Offset {
-			panic(fmt.Sprintf("Detected two labels with identical offsets: %v and %v", labels[i], labels[i+1]))
-		}
-	}
-}
-
-func defineTable(constants []string, tableName string) Table {
-
-	labels := []Label{}
-	bytes := make([]byte, 0, 1000)
-
-	for _, line := range constants {
-
-		line, _ = stripComments(line)
-
-		if strings.HasSuffix(line, ":") {
-			labels = append(labels, Label{Name: line[:len(line)-1], Offset: uint(len(bytes))})
-		} else if strings.Contains(line, ".byte") {
-			v := getSingleNumber(line)
-			bytes = append(bytes, byte(v))
-		} else if strings.Contains(line, ".short") {
-			v := getSingleNumber(line)
-			bytes = append(bytes, byte(v))
-			bytes = append(bytes, byte(v>>8))
-		} else if strings.Contains(line, ".long") {
-			v := getSingleNumber(line)
-			bytes = append(bytes, byte(v))
-			bytes = append(bytes, byte(v>>8))
-			bytes = append(bytes, byte(v>>16))
-			bytes = append(bytes, byte(v>>24))
-		} else if strings.Contains(line, ".quad") {
-			v, err := strconv.ParseInt(strings.Fields(line)[1], 10, 64)
-			if err != nil {
-				panic(fmt.Sprintf("Atoi error for .quad: %v", err))
-			}
-			bytes = append(bytes, byte(v))
-			bytes = append(bytes, byte(v>>8))
-			bytes = append(bytes, byte(v>>16))
-			bytes = append(bytes, byte(v>>24))
-			bytes = append(bytes, byte(v>>32))
-			bytes = append(bytes, byte(v>>40))
-			bytes = append(bytes, byte(v>>48))
-			bytes = append(bytes, byte(v>>56))
-		} else if strings.Contains(line, ".align") || strings.Contains(line, ".p2align") {
-			fields := strings.FieldsFunc(line, func(c rune) bool { return c == ',' || c == ' ' || c == '\t' })
-			if len(fields) <= 1 || 4 <= len(fields) {
-				panic(fmt.Sprintf(".p2align must have 2 or 3 arguments; got %v", fields))
-			}
-			bits, err := strconv.ParseInt(fields[1], 10, 64)
-			if err != nil {
-				panic(err)
-			}
-			padVal := int64(0)
-			if len(fields) > 2 {
-				padVal, err = strconv.ParseInt(fields[2], 0, 64)
-				if err != nil {
-					panic(err)
-				}
-			}
-			align := 1 << uint(bits)
-			if strings.Contains(line, ".align") &&
-				(strings.Contains(strings.ToLower(*targetFlag), "x86") ||
-					strings.Contains(strings.ToLower(*targetFlag), "amd64")) {
-				// For historic reasons, the behavior of .align differs between
-				// architectures. In llvm for x86 the alignment is in bytes.
-				// https://reviews.llvm.org/D16549
-				// http://lists.llvm.org/pipermail/llvm-dev/2009-June/022771.html
-				// https://users.elis.ugent.be/~jvcleemp/LLVM-2.4-doxygen/TargetAsmInfo_8h_source.html#l00261
-				align = int(bits)
-			}
-			for len(bytes)%align != 0 {
-				bytes = append(bytes, byte(padVal))
-			}
-		} else if strings.Contains(line, ".space") || strings.Contains(line, ".zero") {
-			length, b := getDualNumbers(line)
-			for i := int64(0); i < length; i++ {
-				bytes = append(bytes, byte(b))
-			}
-		} else if strings.Contains(line, ".section") {
-			// ignore
-		} else if strings.Contains(line, ".text") {
-			// ignore
-		} else {
-			panic(fmt.Sprintf("Unknown line for table: %s", line))
-		}
-	}
-
-	// Pad onto a multiple of 8 bytes for aligned outputting
-	for len(bytes)%8 != 0 {
-		bytes = append(bytes, 0)
-	}
-
-	table := []string{}
-
-	for i := 0; i < len(bytes); i += 8 {
-		offset := fmt.Sprintf("%03x", i)
-		hex := ""
-		for j := i; j < i+8 && j < len(bytes); j++ {
-			hex = fmt.Sprintf("%02x", bytes[j]) + hex
-		}
-		table = append(table, fmt.Sprintf("DATA %s<>+0x%s(SB)/8, $0x%s", tableName, offset, hex))
-	}
-	table = append(table, fmt.Sprintf("GLOBL %s<>(SB), 8, $%d", tableName, len(bytes)))
-
-	sanityCheckLabels(labels)
-
-	return Table{Name: tableName, Constants: strings.Join(table, "\n"), Labels: labels}
-}
-
-var regexpLabelConstant = regexp.MustCompile(`^\.?LCPI[0-9]+_0:`)
-
-func getFirstLabelConstants(lines []string) int {
-
-	for iline, line := range lines {
-		if match := regexpLabelConstant.FindStringSubmatch(line); len(match) > 0 {
-			return iline
-		}
-	}
-
-	return -1
-}
-
-type Const struct {
-	name       string
-	start, end int
-}
-
-func segmentConstTables(lines []string) []Table {
-
-	consts := []Const{}
-
-	globals := splitOnGlobals(lines)
-
-	if len(globals) == 0 {
-		return []Table{}
-	}
-
-	splitBegin := 0
-	for _, global := range globals {
-		start := getFirstLabelConstants(lines[splitBegin:global.dotGlobalLine])
-		if start != -1 {
-			// Add set of lines when a constant table has been found
-			consts = append(consts, Const{name: fmt.Sprintf("LCDATA%d", len(consts)+1), start: splitBegin + start, end: global.dotGlobalLine})
-		}
-		splitBegin = global.dotGlobalLine + 1
-	}
-
-	tables := []Table{}
-
-	for _, c := range consts {
-
-		tables = append(tables, defineTable(lines[c.start:c.end], c.name))
-	}
-
-	return tables
-}
-
-func getCorrespondingTable(lines []string, tables []Table) Table {
-
-	concat := strings.Join(lines, "\n")
-
-	for _, t := range tables {
-		// Easy test -- we assume that if we find one label, we would find the others as well...
-		if strings.Contains(concat, t.Labels[0].Name) {
-			return t
-		}
-	}
-
-	return Table{}
-}
diff --git a/vendor/github.com/minio/c2goasm/epilogue.go b/vendor/github.com/minio/c2goasm/epilogue.go
deleted file mode 100644
index 63eb887b..00000000
--- a/vendor/github.com/minio/c2goasm/epilogue.go
+++ /dev/null
@@ -1,219 +0,0 @@
-/*
- * Minio Cloud Storage, (C) 2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"fmt"
-	"regexp"
-	"strconv"
-)
-
-type Epilogue struct {
-	Pops             []string // list of registers that are popped off the stack
-	SetRbpInstr      bool     // is there an instruction to set Rbp in epilogue?
-	StackSize        uint     // the size of the C stack
-	AlignedStack     bool     // is this an aligned stack?
-	AlignValue       uint     // alignment value in case of an aligned stack
-	VZeroUpper       bool     // is there a vzeroupper instruction in the epilogue?
-	Start, End       int      // start and ending lines of epilogue
-	_missingPops     int      // internal variable to identify first push without a corresponding pop
-	_stackGrowthSign int      // direction to grow stack in case of detecting a push without a corresponding pop
-}
-
-var regexpAddRsp = regexp.MustCompile(`^\s*add\s*rsp, ([0-9]+)$`)
-var regexpAndRsp = regexp.MustCompile(`^\s*and\s*rsp, \-([0-9]+)$`)
-var regexpSubRsp = regexp.MustCompile(`^\s*sub\s*rsp, ([0-9]+)$`)
-var regexpLeaRsp = regexp.MustCompile(`^\s*lea\s*rsp, `)
-var regexpPop = regexp.MustCompile(`^\s*pop\s*([a-z0-9]+)$`)
-var regexpPush = regexp.MustCompile(`^\s*push\s*([a-z0-9]+)$`)
-var regexpMov = regexp.MustCompile(`^\s*mov\s*([a-z0-9]+), ([a-z0-9]+)$`)
-var regexpVZeroUpper = regexp.MustCompile(`^\s*vzeroupper\s*$`)
-var regexpReturn = regexp.MustCompile(`^\s*ret\s*$`)
-
-type Stack struct {
-	alignedStack bool // is this an aligned stack?
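-
-	// A rough sketch of the stack frame these fields describe, as implied
-	// by the offset helpers below (highest address first, stack growing
-	// down; a simplified illustration only):
-	//
-	//   goSavedSP       <- original Go SP (aligned stacks only)
-	//   goArgCopies     <- copies of Go arguments 7 and higher
-	//   localSpace      <- C local stack space (the C stack pointer points at its base)
-	//   freeSpace       <- scratch space for CALLs
-	//   untouchedSpace  <- guards the return address / guarantees alignment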
-
-	goSavedSP      uint // space to save a copy of the original Stack Pointer as passed in by Go (for aligned stacks)
-	goArgCopies    uint // space used to store copies of golang args not passed in registers (arguments 7 and higher)
-	localSpace     uint // stack space used by C code
-	freeSpace      uint // free stack space used for CALLs
-	untouchedSpace uint // untouched space to prevent overwriting return address for final RET statement
-}
-
-func NewStack(epilogue Epilogue, arguments int, stackSpaceForCalls uint) Stack {
-
-	s := Stack{localSpace: epilogue.StackSize, alignedStack: epilogue.AlignedStack, freeSpace: stackSpaceForCalls}
-
-	if arguments-len(registers) > 0 {
-		s.goArgCopies = uint(8 * (arguments - len(registers)))
-	}
-
-	if s.alignedStack {
-		// For an aligned stack we need to save the original Stack Pointer as passed in by Go
-		s.goSavedSP = originalStackPointer
-
-		// We are rounding freeSpace to a multiple of the alignment value
-		s.freeSpace = (s.freeSpace + epilogue.AlignValue - 1) & ^(epilogue.AlignValue - 1)
-
-		// Create unused space at the bottom of the stack to guarantee alignment
-		s.untouchedSpace = epilogue.AlignValue
-	} else {
-		// Only when we are using no stack whatsoever do we not need to reserve space to save the return address
-		if s.freeSpace+s.localSpace+s.goArgCopies+s.goSavedSP > 0 {
-			s.untouchedSpace = 8
-		}
-	}
-
-	return s
-}
-
-// Get total local stack frame size (for Go) used in TEXT definition
-func (s Stack) GolangLocalStackFrameSize() uint {
-	return s.untouchedSpace + s.freeSpace + s.localSpace + s.goArgCopies + s.goSavedSP
-}
-
-// Get offset to adjust Stack Pointer appropriately for C code
-func (s Stack) StackPointerOffsetForC() uint {
-	return s.untouchedSpace + s.freeSpace
-}
-
-// Get offset (from C Stack Pointer) for saving original Golang Stack Pointer
-func (s Stack) OffsetForSavedSP() uint {
-	if s.goSavedSP == 0 {
-		panic("There should be space reserved for OffsetForSavedSP")
-	}
-	return s.localSpace + s.goArgCopies
-}
-
-// Get offset (from C Stack Pointer) for copy of Golang arguments 7 and higher
-func (s Stack) OffsetForGoArg(iarg int) uint {
-
-	offset := uint((iarg - len(registers)) * 8)
-	if offset > s.goArgCopies {
-		panic("Offset for higher number argument asked for than reserved")
-	}
-	return s.localSpace + offset
-}
-
-func extractEpilogueInfo(src []string, sliceStart, sliceEnd int) Epilogue {
-
-	epilogue := Epilogue{Start: sliceStart, End: sliceEnd}
-
-	// Iterate over epilogue, starting from last instruction
-	for ipost := sliceEnd - 1; ipost >= sliceStart; ipost-- {
-		line := src[ipost]
-
-		if !epilogue.extractEpilogue(line) {
-			panic(fmt.Sprintf("Unknown line for epilogue: %s", line))
-		}
-	}
-
-	return epilogue
-}
-
-func (e *Epilogue) extractEpilogue(line string) bool {
-
-	if match := regexpPop.FindStringSubmatch(line); len(match) > 1 {
-		register := match[1]
-
-		e.Pops = append(e.Pops, register)
-		if register == "rbp" {
-			e.SetRbpInstr = true
-		}
-	} else if match := regexpAddRsp.FindStringSubmatch(line); len(match) > 1 {
-		size, _ := strconv.Atoi(match[1])
-		e.StackSize = uint(size)
-	} else if match := regexpLeaRsp.FindStringSubmatch(line); len(match) > 0 {
-		e.AlignedStack = true
-	} else if match := regexpVZeroUpper.FindStringSubmatch(line); len(match) > 0 {
-		e.VZeroUpper = true
-	} else if match := regexpMov.FindStringSubmatch(line); len(match) > 2 && match[1] == "rsp" && match[2] == "rbp" {
-		// no action to take
-	} else if match := regexpReturn.FindStringSubmatch(line); len(match) > 0 {
-		// no action to take
-	} else {
-		return false
-	}
-
-	return true
-}
-
-func isEpilogueInstruction(line string) bool {
-
-	return (&Epilogue{}).extractEpilogue(line)
-}
-
-func (e *Epilogue) isPrologueInstruction(line string) bool {
-
-	if match := regexpPush.FindStringSubmatch(line); len(match) > 1 {
-		hasCorrespondingPop := listContains(match[1], e.Pops)
-		if !hasCorrespondingPop {
-			e._missingPops++
-			if e._missingPops == 1 { // only for first missing pop, set initial direction of growth to adapt check
-				if e.StackSize > 0 {
-					// Missing corresponding `pop` but rsp was modified directly in epilogue (see test-case pro/epilogue6)
-					e._stackGrowthSign = -1
-				} else {
-					// Missing corresponding `pop` meaning rsp is grown indirectly in prologue (see test-case pro/epilogue7)
-					e._stackGrowthSign = 1
-				}
-			}
-			e.StackSize += uint(8 * e._stackGrowthSign)
-			if e.StackSize == 0 && e._stackGrowthSign == -1 {
-				e._stackGrowthSign = 1 // flip direction once stack has shrunk to zero
-			}
-		}
-		return true
-	} else if match := regexpMov.FindStringSubmatch(line); len(match) > 2 && match[1] == "rbp" && match[2] == "rsp" {
-		if e.SetRbpInstr {
-			return true
-		} else {
-			panic(fmt.Sprintf("mov found but not expected to be set: %s", line))
-		}
-	} else if match := regexpAndRsp.FindStringSubmatch(line); len(match) > 1 {
-		align, _ := strconv.Atoi(match[1])
-		if e.AlignedStack && align == 8 {
-			// golang stack is already 8 byte aligned so we can effectively disable the aligned stack
-			e.AlignedStack = false
-		} else {
-			e.AlignValue = uint(align)
-		}
-
-		return true
-	} else if match := regexpSubRsp.FindStringSubmatch(line); len(match) > 1 {
-		space, _ := strconv.Atoi(match[1])
-		if !e.AlignedStack && e.StackSize == uint(space) {
-			return true
-		} else if e.StackSize == 0 || e.StackSize == uint(space) {
-			e.StackSize = uint(space) // Update stack size when found in header (and missing in footer due to `lea` instruction)
-			return true
-		} else {
-			panic(fmt.Sprintf("'sub rsp' found but in unexpected scenario: %s", line))
-		}
-	}
-
-	return false
-}
-
-func listContains(value string, list []string) bool {
-	for _, v := range list {
-		if v == value {
-			return true
-		}
-	}
-	return false
-}
diff --git a/vendor/github.com/minio/c2goasm/subroutine.go b/vendor/github.com/minio/c2goasm/subroutine.go
deleted file mode 100644
index 91d580d2..00000000
--- a/vendor/github.com/minio/c2goasm/subroutine.go
+++ /dev/null
@@ -1,294 +0,0 @@
-/*
- * Minio Cloud Storage, (C) 2017 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"fmt"
-	"regexp"
-	"strconv"
-	"strings"
-	"unicode"
-)
-
-var regexpRet = regexp.MustCompile(`^\s*ret`)
-
-type Subroutine struct {
-	name     string
-	body     []string
-	epilogue Epilogue
-	table    Table
-}
-
-type Global struct {
-	dotGlobalLine   int
-	globalName      string
-	globalLabelLine int
-}
-
-var globlRe = regexp.MustCompile(`^\s*\.globl\s+([^\s]+)\s*.*`)
-
-func splitOnGlobals(lines []string) []Global {
-
-	var result []Global
-
-	for index, line := range lines {
-		if globlRe.MatchString(line) {
-			match := globlRe.FindStringSubmatch(line)
-			scrambled := match[1]
-			name := extractName(scrambled)
-
-			labelLine := findLabel(lines, scrambled)
-
-			result = append(result, Global{dotGlobalLine: index, globalName: name, globalLabelLine: labelLine})
-		}
-	}
-
-	return result
-}
-
-// Segment the source into multiple routines
-func segmentSource(src []string) []Subroutine {
-
-	globals := splitOnGlobals(src)
-
-	if len(globals) == 0 {
-		return []Subroutine{}
-	}
-
-	subroutines := []Subroutine{}
-
-	splitBegin := globals[0].dotGlobalLine
-	for iglobal, global := range globals {
-		splitEnd := len(src)
-		if iglobal < len(globals)-1 {
-			splitEnd = globals[iglobal+1].dotGlobalLine
-		}
-
-		// Search for `ret` statement
-		for lineRet := splitBegin; lineRet < splitEnd; lineRet++ {
-			if match := regexpRet.FindStringSubmatch(src[lineRet]); len(match) > 0 {
-
-				newsub := extractSubroutine(lineRet, src, global)
-
-				subroutines = append(subroutines, newsub)
-
-				break
-			}
-		}
-
-		splitBegin = splitEnd
-	}
-
-	return subroutines
-}
-
-var disabledForTesting = false
-
-func extractSubroutine(lineRet int, src []string, global Global) Subroutine {
-
-	bodyStart := global.globalLabelLine + 1
-	bodyEnd := lineRet + 1
-
-	// loop until all missing labels are found
-	for !disabledForTesting {
-		missingLabels := getMissingLabels(src[bodyStart:bodyEnd])
-
-		if len(missingLabels) == 0 {
-			break
-		}
-
-		// add the missing lines in order to find the missing labels
-		postEpilogueLines := getMissingLines(src, bodyEnd-1, missingLabels)
-
-		bodyEnd += postEpilogueLines
-	}
-
-	subroutine := Subroutine{
-		name:     global.globalName,
-		body:     src[bodyStart:bodyEnd],
-		epilogue: extractEpilogue(src[bodyStart:bodyEnd]),
-	}
-
-	// Remove prologue lines from subroutine
-	subroutine.removePrologueLines(src, bodyStart, bodyEnd)
-
-	return subroutine
-}
-
-func (s *Subroutine) removePrologueLines(src []string, bodyStart int, bodyEnd int) {
-
-	prologueLines := getPrologueLines(src[bodyStart:bodyEnd], &s.epilogue)
-
-	// Remove prologue lines from body
-	s.body = s.body[prologueLines:]
-
-	// Adjust range of epilogue accordingly
-	s.epilogue.Start -= prologueLines
-	s.epilogue.End -= prologueLines
-}
-
-func extractEpilogue(src []string) Epilogue {
-
-	for iline, line := range src {
-
-		if match := regexpRet.FindStringSubmatch(line); len(match) > 0 {
-
-			// Found closing ret statement, start searching back to first non-epilogue instruction
-			epilogueStart := iline
-			for ; epilogueStart >= 0; epilogueStart-- {
-				if !isEpilogueInstruction(src[epilogueStart]) {
-					epilogueStart++
-					break
-				}
-			}
-
-			epilogue := extractEpilogueInfo(src, epilogueStart, iline+1)
-
-			return epilogue
-		}
-	}
-
-	panic("Failed to find 'ret' instruction")
-}
-
-func getMissingLabels(src []string) map[string]bool {
-
-	labelMap := make(map[string]bool)
-	jumpMap := make(map[string]bool)
-
-	for _, line := range src {
-
-		line, _ := stripComments(line)
-		if _, label := fixLabels(line); label != "" {
-			labelMap[label] = true
-		}
-		if _, _,
label := upperCaseJumps(line); label != "" { - jumpMap[label] = true - } - - } - - for label, _ := range labelMap { - if _, ok := jumpMap[label]; ok { - delete(jumpMap, label) - } - } - - return jumpMap -} - -func getMissingLines(src []string, lineRet int, missingLabels map[string]bool) int { - - var iline int - // first scan until we've found the missing labels - for iline = lineRet; len(missingLabels) > 0 && iline < len(src); iline++ { - line, _ := stripComments(src[iline]) - _, label := fixLabels(line) - if label != "" { - if _, ok := missingLabels[label]; ok { - delete(missingLabels, label) - } - } - } - // then scan until we find an (unconditional) JMP - for ; iline < len(src); iline++ { - line, _ := stripComments(src[iline]) - _, jump, _ := upperCaseJumps(line) - if jump == "JMP" { - break - } - } - - return iline - lineRet -} - -func getPrologueLines(lines []string, epilogue *Epilogue) int { - - index, line := 0, "" - - for index, line = range lines { - - var skip bool - line, skip = stripComments(line) // Remove ## comments - if skip { - continue - } - - if !epilogue.isPrologueInstruction(line) { - break - } - } - - return index -} - -func findLabel(lines []string, label string) int { - - labelDef := label + ":" - - for index, line := range lines { - if strings.HasPrefix(line, labelDef) { - return index - } - } - - panic(fmt.Sprintf("Failed to find label: %s", labelDef)) -} - -func extractNamePart(part string) (int, string) { - - digits := 0 - for _, d := range part { - if unicode.IsDigit(d) { - digits += 1 - } else { - break - } - } - length, _ := strconv.Atoi(part[:digits]) - return digits + length, part[digits:(digits + length)] -} - -func extractName(name string) string { - - // Only proceed for C++ mangled names - if !(strings.HasPrefix(name, "_ZN") || strings.HasPrefix(name, "__Z")) { - return name - } - - var parts []string - - // Parse C++ mangled name in the form of '_ZN4Simd4Avx213Yuv444pToBgraEPKhmS2_mS2_mmmPhmh' - for index, ch := range name { - if unicode.IsDigit(ch) { - - for index < len(name) { - size, part := extractNamePart(name[index:]) - if size == 0 { - break - } - - parts = append(parts, part) - index += size - } - - break - } - } - - return strings.Join(parts, "") -} diff --git a/vendor/github.com/paulmach/orb/CHANGELOG.md b/vendor/github.com/paulmach/orb/CHANGELOG.md index 70db8e87..1783940b 100644 --- a/vendor/github.com/paulmach/orb/CHANGELOG.md +++ b/vendor/github.com/paulmach/orb/CHANGELOG.md @@ -2,6 +2,44 @@ All notable changes to this project will be documented in this file. 
+## [v0.11.1](https://github.com/paulmach/orb/compare/v0.11.0...v0.11.1) - 2024-01-29 + +### Fixed + +- geojson: `null` json into non-pointer Feature/FeatureCollection will set them to empty by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/145 + +## [v0.11.0](https://github.com/paulmach/orb/compare/v0.10.0...v0.11.0) - 2024-01-11 + +### Fixed + +- quadtree: InBoundMatching does not properly accept passed-in buffer by [@nirmal-vuppuluri](https://github.com/nirmal-vuppuluri) in https://github.com/paulmach/orb/pull/139 +- mvt: Do not swallow error cause by [@m-pavel](https://github.com/m-pavel) in https://github.com/paulmach/orb/pull/137 + +### Changed + +- simplify: Visvalingam, by default, keeps 3 points for "areas" by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/140 +- encoding/mvt: skip encoding of features with nil geometry by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/141 +- encoding/wkt: improve unmarshalling performance by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/142 + +## [v0.10.0](https://github.com/paulmach/orb/compare/v0.9.2...v0.10.0) - 2023-07-16 + +### Added + +- add ChildrenInZoomRange method to maptile.Tile by [@peitili](https://github.com/peitili) in https://github.com/paulmach/orb/pull/133 + +## [v0.9.2](https://github.com/paulmach/orb/compare/v0.9.1...v0.9.2) - 2023-05-04 + +### Fixed + +- encoding/wkt: better handling/validation of missing parens by [@paulmach](https://github.com/paulmach) in https://github.com/paulmach/orb/pull/131 + +## [v0.9.1](https://github.com/paulmach/orb/compare/v0.9.0...v0.9.1) - 2023-04-26 + +### Fixed + +- Bump up mongo driver to 1.11.4 by [@m-pavel](https://github.com/m-pavel) in https://github.com/paulmach/orb/pull/129 +- encoding/wkt: split strings with regexp by [@m-pavel](https://github.com/m-pavel) in https://github.com/paulmach/orb/pull/128 + ## [v0.9.0](https://github.com/paulmach/orb/compare/v0.8.0...v0.9.0) - 2023-02-19 ### Added diff --git a/vendor/github.com/pierrec/lz4/v4/README.md b/vendor/github.com/pierrec/lz4/v4/README.md index 4629c9d0..dee77545 100644 --- a/vendor/github.com/pierrec/lz4/v4/README.md +++ b/vendor/github.com/pierrec/lz4/v4/README.md @@ -21,7 +21,7 @@ go get github.com/pierrec/lz4/v4 There is a command line interface tool to compress and decompress LZ4 files. ``` -go install github.com/pierrec/lz4/v4/cmd/lz4c +go install github.com/pierrec/lz4/v4/cmd/lz4c@latest ``` Usage diff --git a/vendor/github.com/pierrec/lz4/v4/compressing_reader.go b/vendor/github.com/pierrec/lz4/v4/compressing_reader.go new file mode 100644 index 00000000..8df0dc76 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/v4/compressing_reader.go @@ -0,0 +1,222 @@ +package lz4 + +import ( + "errors" + "io" + + "github.com/pierrec/lz4/v4/internal/lz4block" + "github.com/pierrec/lz4/v4/internal/lz4errors" + "github.com/pierrec/lz4/v4/internal/lz4stream" +) + +type crState int + +const ( + crStateInitial crState = iota + crStateReading + crStateFlushing + crStateDone +) + +type CompressingReader struct { + state crState + src io.ReadCloser // source reader + level lz4block.CompressionLevel // how hard to try + frame *lz4stream.Frame // frame being built + in []byte + out ovWriter + handler func(int) +} + +// NewCompressingReader creates a reader which reads compressed data from +// a raw stream. This makes it a logical opposite of a normal lz4.Reader.
+// We require an io.ReadCloser as an underlying source for compatibility +// with Go's http.Request. +func NewCompressingReader(src io.ReadCloser) *CompressingReader { + zrd := &CompressingReader { + frame: lz4stream.NewFrame(), + } + + _ = zrd.Apply(DefaultBlockSizeOption, DefaultChecksumOption, defaultOnBlockDone) + zrd.Reset(src) + + return zrd +} + +// Source exposes the underlying source stream for introspection and control. +func (zrd *CompressingReader) Source() io.ReadCloser { + return zrd.src +} + +// Close simply invokes the underlying stream Close method. This method is +// provided for the benefit of Go http client/server, which relies on Close +// for goroutine termination. +func (zrd *CompressingReader) Close() error { + return zrd.src.Close() +} + +// Apply applies useful options to the lz4 encoder. +func (zrd *CompressingReader) Apply(options ...Option) (err error) { + if zrd.state != crStateInitial { + return lz4errors.ErrOptionClosedOrError + } + + zrd.Reset(zrd.src) + + for _, o := range options { + if err = o(zrd); err != nil { + return + } + } + return +} + +func (*CompressingReader) private() {} + +func (zrd *CompressingReader) init() error { + zrd.frame.InitW(&zrd.out, 1, false) + size := zrd.frame.Descriptor.Flags.BlockSizeIndex() + zrd.in = size.Get() + return zrd.frame.Descriptor.Write(zrd.frame, &zrd.out) +} + +// Read allows reading of lz4 compressed data +func (zrd *CompressingReader) Read(p []byte) (n int, err error) { + defer func() { + if err != nil { + zrd.state = crStateDone + } + }() + + if !zrd.out.reset(p) { + return len(p), nil + } + + switch zrd.state { + case crStateInitial: + err = zrd.init() + if err != nil { + return + } + zrd.state = crStateReading + case crStateDone: + return 0, errors.New("This reader is done") + case crStateFlushing: + if zrd.out.dataPos > 0 { + n = zrd.out.dataPos + zrd.out.data = nil + zrd.out.dataPos = 0 + return + } else { + zrd.state = crStateDone + return 0, io.EOF + } + } + + for zrd.state == crStateReading { + block := zrd.frame.Blocks.Block + + var rCount int + rCount, err = io.ReadFull(zrd.src, zrd.in) + switch err { + case nil: + err = block.Compress( + zrd.frame, zrd.in[ : rCount], zrd.level, + ).Write(zrd.frame, &zrd.out) + zrd.handler(len(block.Data)) + if err != nil { + return + } + + if zrd.out.dataPos == len(zrd.out.data) { + n = zrd.out.dataPos + zrd.out.dataPos = 0 + zrd.out.data = nil + return + } + case io.EOF, io.ErrUnexpectedEOF: // read may be partial + if rCount > 0 { + err = block.Compress( + zrd.frame, zrd.in[ : rCount], zrd.level, + ).Write(zrd.frame, &zrd.out) + zrd.handler(len(block.Data)) + if err != nil { + return + } + } + + err = zrd.frame.CloseW(&zrd.out, 1) + if err != nil { + return + } + zrd.state = crStateFlushing + + n = zrd.out.dataPos + zrd.out.dataPos = 0 + zrd.out.data = nil + return + default: + return + } + } + + err = lz4errors.ErrInternalUnhandledState + return +} + +// Reset makes the stream usable again; mostly handy to reuse lz4 encoder +// instances. +func (zrd *CompressingReader) Reset(src io.ReadCloser) { + zrd.frame.Reset(1) + zrd.state = crStateInitial + zrd.src = src + zrd.out.clear() +} + +type ovWriter struct { + data []byte + ov []byte + dataPos int + ovPos int +} + +func (wr *ovWriter) Write(p []byte) (n int, err error) { + count := copy(wr.data[wr.dataPos : ], p) + wr.dataPos += count + + if count < len(p) { + wr.ov = append(wr.ov, p[count : ]...) 
+ } + + return len(p), nil +} + +func (wr *ovWriter) reset(out []byte) bool { + ovRem := len(wr.ov) - wr.ovPos + + if ovRem >= len(out) { + wr.ovPos += copy(out, wr.ov[wr.ovPos : ]) + return false + } + + if ovRem > 0 { + copy(out, wr.ov[wr.ovPos : ]) + wr.ov = wr.ov[ : 0] + wr.ovPos = 0 + wr.dataPos = ovRem + } else if wr.ovPos > 0 { + wr.ov = wr.ov[ : 0] + wr.ovPos = 0 + wr.dataPos = 0 + } + + wr.data = out + return true +} + +func (wr *ovWriter) clear() { + wr.data = nil + wr.dataPos = 0 + wr.ov = wr.ov[ : 0] + wr.ovPos = 0 +} diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go index a1bfa99e..138083d9 100644 --- a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go @@ -8,12 +8,9 @@ const ( Block256Kb Block1Mb Block4Mb + Block8Mb = 2 * Block4Mb ) -// In legacy mode all blocks are compressed regardless -// of the compressed size: use the bound size. -var Block8Mb = uint32(CompressBlockBound(8 << 20)) - var ( BlockPool64K = sync.Pool{New: func() interface{} { return make([]byte, Block64Kb) }} BlockPool256K = sync.Pool{New: func() interface{} { return make([]byte, Block256Kb) }} diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s index c43e8a8d..d2fe11b8 100644 --- a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s @@ -185,7 +185,7 @@ copyMatchTry8: // A 16-at-a-time loop doesn't provide a further speedup. CMP $8, len CCMP HS, offset, $8, $0 - BLO copyMatchLoop1 + BLO copyMatchTry4 AND $7, len, lenRem SUB $8, len @@ -201,8 +201,19 @@ copyMatchLoop8: MOVD tmp2, -8(dst) B copyMatchDone +copyMatchTry4: + // Copy words if both len and offset are at least four. + CMP $4, len + CCMP HS, offset, $4, $0 + BLO copyMatchLoop1 + + MOVWU.P 4(match), tmp2 + MOVWU.P tmp2, 4(dst) + SUBS $4, len + BEQ copyMatchDone + copyMatchLoop1: - // Byte-at-a-time copy for small offsets. + // Byte-at-a-time copy for small offsets <= 3. MOVBU.P 1(match), tmp2 MOVB.P tmp2, 1(dst) SUBS $1, len diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go index 459086f0..e9646546 100644 --- a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go @@ -224,9 +224,7 @@ func (b *FrameDataBlock) Close(f *Frame) { func (b *FrameDataBlock) Compress(f *Frame, src []byte, level lz4block.CompressionLevel) *FrameDataBlock { data := b.data if f.isLegacy() { - // In legacy mode, the buffer is sized according to CompressBlockBound, - // but only 8Mb is buffered for compression. 
- src = src[:8<<20] + data = data[:cap(data)] } else { data = data[:len(src)] // trigger the incompressible flag in CompressBlock } diff --git a/vendor/github.com/pierrec/lz4/v4/options.go b/vendor/github.com/pierrec/lz4/v4/options.go index 46a87380..57a44e76 100644 --- a/vendor/github.com/pierrec/lz4/v4/options.go +++ b/vendor/github.com/pierrec/lz4/v4/options.go @@ -57,6 +57,13 @@ func BlockSizeOption(size BlockSize) Option { } w.frame.Descriptor.Flags.BlockSizeIndexSet(lz4block.Index(size)) return nil + case *CompressingReader: + size := uint32(size) + if !lz4block.IsValid(size) { + return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidBlockSize, size) + } + w.frame.Descriptor.Flags.BlockSizeIndexSet(lz4block.Index(size)) + return nil } return lz4errors.ErrOptionNotApplicable } @@ -72,6 +79,9 @@ func BlockChecksumOption(flag bool) Option { case *Writer: w.frame.Descriptor.Flags.BlockChecksumSet(flag) return nil + case *CompressingReader: + w.frame.Descriptor.Flags.BlockChecksumSet(flag) + return nil } return lz4errors.ErrOptionNotApplicable } @@ -87,6 +97,9 @@ func ChecksumOption(flag bool) Option { case *Writer: w.frame.Descriptor.Flags.ContentChecksumSet(flag) return nil + case *CompressingReader: + w.frame.Descriptor.Flags.ContentChecksumSet(flag) + return nil } return lz4errors.ErrOptionNotApplicable } @@ -104,6 +117,10 @@ func SizeOption(size uint64) Option { w.frame.Descriptor.Flags.SizeSet(size > 0) w.frame.Descriptor.ContentSize = size return nil + case *CompressingReader: + w.frame.Descriptor.Flags.SizeSet(size > 0) + w.frame.Descriptor.ContentSize = size + return nil } return lz4errors.ErrOptionNotApplicable } @@ -162,6 +179,14 @@ func CompressionLevelOption(level CompressionLevel) Option { } w.level = lz4block.CompressionLevel(level) return nil + case *CompressingReader: + switch level { + case Fast, Level1, Level2, Level3, Level4, Level5, Level6, Level7, Level8, Level9: + default: + return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidCompressionLevel, level) + } + w.level = lz4block.CompressionLevel(level) + return nil } return lz4errors.ErrOptionNotApplicable } @@ -186,6 +211,9 @@ func OnBlockDoneOption(handler func(size int)) Option { case *Reader: rw.handler = handler return nil + case *CompressingReader: + rw.handler = handler + return nil } return lz4errors.ErrOptionNotApplicable } diff --git a/vendor/github.com/pierrec/lz4/v4/writer.go b/vendor/github.com/pierrec/lz4/v4/writer.go index 77699f2b..4358adee 100644 --- a/vendor/github.com/pierrec/lz4/v4/writer.go +++ b/vendor/github.com/pierrec/lz4/v4/writer.go @@ -150,6 +150,10 @@ func (w *Writer) Flush() (err error) { case writeState: case errorState: return w.state.err + case newState: + if err = w.init(); w.state.next(err) { + return + } default: return nil } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index 1feba62c..b5c8bcb3 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -475,6 +475,9 @@ type HistogramOpts struct { // now is for testing purposes, by default it's time.Now. now func() time.Time + + // afterFunc is for testing purposes, by default it's time.AfterFunc. + afterFunc func(time.Duration, func()) *time.Timer } // HistogramVecOpts bundles the options to create a HistogramVec metric. 
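The HistogramOpts hunk above introduces an injectable afterFunc, and the hunks that follow pair it with a resetScheduled flag so a native-histogram reset is armed at most once and fires only after nativeHistogramMinResetDuration has elapsed. Stripped of prometheus internals, the underlying pattern is a mutex-guarded one-shot time.AfterFunc; a minimal standalone sketch (all names here are illustrative, not part of the patch):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// resetter mirrors the resetScheduled/afterFunc pattern from the histogram
// hunks: schedule at most one deferred reset, via an injectable timer
// function so tests can substitute a fake clock.
type resetter struct {
	mu        sync.Mutex
	scheduled bool
	lastReset time.Time
	minPeriod time.Duration
	afterFunc func(time.Duration, func()) *time.Timer // time.AfterFunc in production
}

// maybeSchedule arms a single deferred reset if none is pending yet.
func (r *resetter) maybeSchedule() {
	r.mu.Lock()
	defer r.mu.Unlock()
	if r.scheduled {
		return // a reset is already pending; never arm a second timer
	}
	r.scheduled = true
	r.afterFunc(r.minPeriod-time.Since(r.lastReset), r.reset)
}

// reset takes the lock itself, so it must be called without holding it.
func (r *resetter) reset() {
	r.mu.Lock()
	defer r.mu.Unlock()
	fmt.Println("state reset")
	r.lastReset = time.Now()
	r.scheduled = false
}

func main() {
	r := &resetter{minPeriod: time.Second, lastReset: time.Now(), afterFunc: time.AfterFunc}
	r.maybeSchedule()
	r.maybeSchedule()                   // no-op: a reset is already scheduled
	time.Sleep(1500 * time.Millisecond) // wait for the timer to fire
}
```

Keeping afterFunc as a field rather than calling time.AfterFunc directly is what lets the tests mentioned in the comments substitute a fake timer.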
@@ -526,7 +529,9 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr if opts.now == nil { opts.now = time.Now } - + if opts.afterFunc == nil { + opts.afterFunc = time.AfterFunc + } h := &histogram{ desc: desc, upperBounds: opts.Buckets, @@ -536,6 +541,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration, lastResetTime: opts.now(), now: opts.now, + afterFunc: opts.afterFunc, } if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 { h.upperBounds = DefBuckets @@ -716,9 +722,16 @@ type histogram struct { nativeHistogramMinResetDuration time.Duration // lastResetTime is protected by mtx. It is also used as created timestamp. lastResetTime time.Time + // resetScheduled is protected by mtx. It is true if a reset is + // scheduled for a later time (when nativeHistogramMinResetDuration has + // passed). + resetScheduled bool // now is for testing purposes, by default it's time.Now. now func() time.Time + + // afterFunc is for testing purposes, by default it's time.AfterFunc. + afterFunc func(time.Duration, func()) *time.Timer } func (h *histogram) Desc() *Desc { @@ -874,21 +887,31 @@ func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket if h.maybeReset(hotCounts, coldCounts, coldIdx, value, bucket) { return } + // One of the other strategies will happen. To undo what they will do as + // soon as enough time has passed to satisfy + // h.nativeHistogramMinResetDuration, schedule a reset at the right time + // if we haven't done so already. + if h.nativeHistogramMinResetDuration > 0 && !h.resetScheduled { + h.resetScheduled = true + h.afterFunc(h.nativeHistogramMinResetDuration-h.now().Sub(h.lastResetTime), h.reset) + } + if h.maybeWidenZeroBucket(hotCounts, coldCounts) { return } h.doubleBucketWidth(hotCounts, coldCounts) } -// maybeReset resets the whole histogram if at least h.nativeHistogramMinResetDuration -// has been passed. It returns true if the histogram has been reset. The caller -// must have locked h.mtx. +// maybeReset resets the whole histogram if at least +// h.nativeHistogramMinResetDuration has been passed. It returns true if the +// histogram has been reset. The caller must have locked h.mtx. func (h *histogram) maybeReset( hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int, ) bool { // We are using the possibly mocked h.now() rather than // time.Since(h.lastResetTime) to enable testing. - if h.nativeHistogramMinResetDuration == 0 || + if h.nativeHistogramMinResetDuration == 0 || // No reset configured. + h.resetScheduled || // Do not interfere if a reset is already scheduled. h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration { return false } @@ -906,6 +929,29 @@ func (h *histogram) maybeReset( return true } +// reset resets the whole histogram. It locks h.mtx itself, i.e. it has to be +// called without having locked h.mtx. +func (h *histogram) reset() { + h.mtx.Lock() + defer h.mtx.Unlock() + + n := atomic.LoadUint64(&h.countAndHotIdx) + hotIdx := n >> 63 + coldIdx := (^n) >> 63 + hot := h.counts[hotIdx] + cold := h.counts[coldIdx] + // Completely reset coldCounts. + h.resetCounts(cold) + // Make coldCounts the new hot counts while resetting countAndHotIdx. + n = atomic.SwapUint64(&h.countAndHotIdx, coldIdx<<63) + count := n & ((1 << 63) - 1) + waitForCooldown(count, hot) + // Finally, reset the formerly hot counts, too.
+ h.resetCounts(hot) + h.lastResetTime = h.now() + h.resetScheduled = false +} + // maybeWidenZeroBucket widens the zero bucket until it includes the existing // buckets closest to the zero bucket (which could be two, if an equidistant // negative and a positive bucket exists, but usually it's only one bucket to be diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go index b3c4eca2..c21911f2 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -165,6 +165,8 @@ func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { func validateLabelValues(vals []string, expectedNumberOfValues int) error { if len(vals) != expectedNumberOfValues { + // The call below makes vals escape, copy them to avoid that. + vals := append([]string(nil), vals...) return fmt.Errorf( "%w: expected %d label values but got %d in %#v", errInconsistentCardinality, expectedNumberOfValues, diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go index c0152cdb..8c1136ce 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build !windows && !js -// +build !windows,!js +//go:build !windows && !js && !wasip1 +// +build !windows,!js,!wasip1 package prometheus diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go new file mode 100644 index 00000000..d8d9a6d7 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go @@ -0,0 +1,26 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build wasip1 +// +build wasip1 + +package prometheus + +func canCollectProcess() bool { + return false +} + +func (*processCollector) processCollect(chan<- Metric) { + // noop on this platform + return +} diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go index 84946b27..cee360db 100644 --- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go +++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -474,6 +474,9 @@ type Histogram struct { NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket). 
NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"` // Absolute count of each bucket. // Positive buckets for the native histogram. + // Use a no-op span (offset 0, length 0) for a native histogram without any + // observations yet and with a zero_threshold of 0. Otherwise, it would be + // indistinguishable from a classic histogram. PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan" json:"positive_span,omitempty"` // Use either "positive_delta" or "positive_count", the former for // regular histograms with integer counts, the latter for float diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index 90639781..a909b171 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -14,6 +14,7 @@ package expfmt import ( + "bufio" "fmt" "io" "math" @@ -21,8 +22,8 @@ import ( "net/http" dto "github.com/prometheus/client_model/go" + "google.golang.org/protobuf/encoding/protodelim" - "github.com/matttproud/golang_protobuf_extensions/pbutil" "github.com/prometheus/common/model" ) @@ -86,8 +87,10 @@ type protoDecoder struct { // Decode implements the Decoder interface. func (d *protoDecoder) Decode(v *dto.MetricFamily) error { - _, err := pbutil.ReadDelimited(d.r, v) - if err != nil { + opts := protodelim.UnmarshalOptions{ + MaxSize: -1, + } + if err := opts.UnmarshalFrom(bufio.NewReader(d.r), v); err != nil { return err } if !model.IsValidMetricName(model.LabelValue(v.GetName())) { diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index 7f611ffa..02b7a5e8 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -18,10 +18,11 @@ import ( "io" "net/http" - "github.com/matttproud/golang_protobuf_extensions/pbutil" - "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" + "google.golang.org/protobuf/encoding/protodelim" "google.golang.org/protobuf/encoding/prototext" + "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" + dto "github.com/prometheus/client_model/go" ) @@ -120,7 +121,7 @@ func NewEncoder(w io.Writer, format Format) Encoder { case FmtProtoDelim: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := pbutil.WriteDelimited(w, v) + _, err := protodelim.MarshalTo(w, v) return err }, close: func() error { return nil }, diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index 35db1cc9..26490211 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -16,6 +16,7 @@ package expfmt import ( "bufio" "bytes" + "errors" "fmt" "io" "math" @@ -24,8 +25,9 @@ import ( dto "github.com/prometheus/client_model/go" - "github.com/prometheus/common/model" "google.golang.org/protobuf/proto" + + "github.com/prometheus/common/model" ) // A stateFn is a function that represents a state in a state machine. By @@ -112,7 +114,7 @@ func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricF // stream. Turn this error into something nicer and more // meaningful. (io.EOF is often used as a signal for the legitimate end // of an input stream.) 
- if p.err == io.EOF { + if p.err != nil && errors.Is(p.err, io.EOF) { p.parseError("unexpected end of input stream") } return p.metricFamiliesByName, p.err @@ -146,7 +148,7 @@ func (p *TextParser) startOfLine() stateFn { // which is not an error but the signal that we are done. // Any other error that happens to align with the start of // a line is still an error. - if p.err == io.EOF { + if errors.Is(p.err, io.EOF) { p.err = nil } return nil diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go index 35e739c7..178fdbaf 100644 --- a/vendor/github.com/prometheus/common/model/alert.go +++ b/vendor/github.com/prometheus/common/model/alert.go @@ -90,13 +90,13 @@ func (a *Alert) Validate() error { return fmt.Errorf("start time must be before end time") } if err := a.Labels.Validate(); err != nil { - return fmt.Errorf("invalid label set: %s", err) + return fmt.Errorf("invalid label set: %w", err) } if len(a.Labels) == 0 { return fmt.Errorf("at least one label pair required") } if err := a.Annotations.Validate(); err != nil { - return fmt.Errorf("invalid annotations: %s", err) + return fmt.Errorf("invalid annotations: %w", err) } return nil } diff --git a/vendor/github.com/prometheus/common/model/metadata.go b/vendor/github.com/prometheus/common/model/metadata.go new file mode 100644 index 00000000..447ab8ad --- /dev/null +++ b/vendor/github.com/prometheus/common/model/metadata.go @@ -0,0 +1,28 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +// MetricType represents metric type values. +type MetricType string + +const ( + MetricTypeCounter = MetricType("counter") + MetricTypeGauge = MetricType("gauge") + MetricTypeHistogram = MetricType("histogram") + MetricTypeGaugeHistogram = MetricType("gaugehistogram") + MetricTypeSummary = MetricType("summary") + MetricTypeInfo = MetricType("info") + MetricTypeStateset = MetricType("stateset") + MetricTypeUnknown = MetricType("unknown") +) diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go index 00804b7f..f8c5eaba 100644 --- a/vendor/github.com/prometheus/common/model/metric.go +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -20,12 +20,10 @@ import ( "strings" ) -var ( - // MetricNameRE is a regular expression matching valid metric - // names. Note that the IsValidMetricName function performs the same - // check but faster than a match with this regular expression. - MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`) -) +// MetricNameRE is a regular expression matching valid metric +// names. Note that the IsValidMetricName function performs the same +// check but faster than a match with this regular expression. +var MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`) // A Metric is similar to a LabelSet, but the key difference is that a Metric is // a singleton and refers to one and only one stream of samples. 
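The expfmt and model hunks above move from direct comparisons such as p.err == io.EOF to errors.Is, and from %s to %w when wrapping validation errors. The two changes belong together: %w keeps the cause on the unwrap chain, and errors.Is is the comparison that walks that chain. A minimal standalone sketch of the difference (illustrative only, not vendored code):

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

func main() {
	// %w keeps the cause on the error chain, so errors.Is can find it.
	wrapped := fmt.Errorf("reading metric stream: %w", io.EOF)
	fmt.Println(wrapped == io.EOF)          // false: direct comparison misses wrapped errors
	fmt.Println(errors.Is(wrapped, io.EOF)) // true: errors.Is unwraps the chain

	// %s flattens the cause into text; the chain is lost.
	flattened := fmt.Errorf("reading metric stream: %s", io.EOF)
	fmt.Println(errors.Is(flattened, io.EOF)) // false
}
```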
diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go index 8762b13c..dc8a0026 100644 --- a/vendor/github.com/prometheus/common/model/signature.go +++ b/vendor/github.com/prometheus/common/model/signature.go @@ -22,10 +22,8 @@ import ( // when calculating their combined hash value (aka signature aka fingerprint). const SeparatorByte byte = 255 -var ( - // cache the signature of an empty label set. - emptyLabelSignature = hashNew() -) +// cache the signature of an empty label set. +var emptyLabelSignature = hashNew() // LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a // given label set. (Collisions are possible but unlikely if the number of label diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go index bb99889d..910b0b71 100644 --- a/vendor/github.com/prometheus/common/model/silence.go +++ b/vendor/github.com/prometheus/common/model/silence.go @@ -81,7 +81,7 @@ func (s *Silence) Validate() error { } for _, m := range s.Matchers { if err := m.Validate(); err != nil { - return fmt.Errorf("invalid matcher: %s", err) + return fmt.Errorf("invalid matcher: %w", err) } } if s.StartsAt.IsZero() { diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go index 9eb44041..8050637d 100644 --- a/vendor/github.com/prometheus/common/model/value.go +++ b/vendor/github.com/prometheus/common/model/value.go @@ -21,14 +21,12 @@ import ( "strings" ) -var ( - // ZeroSample is the pseudo zero-value of Sample used to signal a - // non-existing sample. It is a Sample with timestamp Earliest, value 0.0, - // and metric nil. Note that the natural zero value of Sample has a timestamp - // of 0, which is possible to appear in a real Sample and thus not suitable - // to signal a non-existing Sample. - ZeroSample = Sample{Timestamp: Earliest} -) +// ZeroSample is the pseudo zero-value of Sample used to signal a +// non-existing sample. It is a Sample with timestamp Earliest, value 0.0, +// and metric nil. Note that the natural zero value of Sample has a timestamp +// of 0, which is possible to appear in a real Sample and thus not suitable +// to signal a non-existing Sample. +var ZeroSample = Sample{Timestamp: Earliest} // Sample is a sample pair associated with a metric. A single sample must either // define Value or Histogram but not both. Histogram == nil implies the Value @@ -274,7 +272,7 @@ func (s *Scalar) UnmarshalJSON(b []byte) error { value, err := strconv.ParseFloat(f, 64) if err != nil { - return fmt.Errorf("error parsing sample value: %s", err) + return fmt.Errorf("error parsing sample value: %w", err) } s.Value = SampleValue(value) return nil diff --git a/vendor/github.com/prometheus/common/model/value_float.go b/vendor/github.com/prometheus/common/model/value_float.go index 0f615a70..ae35cc2a 100644 --- a/vendor/github.com/prometheus/common/model/value_float.go +++ b/vendor/github.com/prometheus/common/model/value_float.go @@ -20,14 +20,12 @@ import ( "strconv" ) -var ( - // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a - // non-existing sample pair. It is a SamplePair with timestamp Earliest and - // value 0.0. Note that the natural zero value of SamplePair has a timestamp - // of 0, which is possible to appear in a real SamplePair and thus not - // suitable to signal a non-existing SamplePair. 
- ZeroSamplePair = SamplePair{Timestamp: Earliest} -) +// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a +// non-existing sample pair. It is a SamplePair with timestamp Earliest and +// value 0.0. Note that the natural zero value of SamplePair has a timestamp +// of 0, which is possible to appear in a real SamplePair and thus not +// suitable to signal a non-existing SamplePair. +var ZeroSamplePair = SamplePair{Timestamp: Earliest} // A SampleValue is a representation of a value for a given sample at a given // time. diff --git a/vendor/github.com/prometheus/common/version/info.go b/vendor/github.com/prometheus/common/version/info.go index 00caa0ba..28884dbc 100644 --- a/vendor/github.com/prometheus/common/version/info.go +++ b/vendor/github.com/prometheus/common/version/info.go @@ -48,12 +48,12 @@ func NewCollector(program string) prometheus.Collector { ), ConstLabels: prometheus.Labels{ "version": Version, - "revision": getRevision(), + "revision": GetRevision(), "branch": Branch, "goversion": GoVersion, "goos": GoOS, "goarch": GoArch, - "tags": getTags(), + "tags": GetTags(), }, }, func() float64 { return 1 }, @@ -75,13 +75,13 @@ func Print(program string) string { m := map[string]string{ "program": program, "version": Version, - "revision": getRevision(), + "revision": GetRevision(), "branch": Branch, "buildUser": BuildUser, "buildDate": BuildDate, "goVersion": GoVersion, "platform": GoOS + "/" + GoArch, - "tags": getTags(), + "tags": GetTags(), } t := template.Must(template.New("version").Parse(versionInfoTmpl)) @@ -94,10 +94,10 @@ func Print(program string) string { // Info returns version, branch and revision information. func Info() string { - return fmt.Sprintf("(version=%s, branch=%s, revision=%s)", Version, Branch, getRevision()) + return fmt.Sprintf("(version=%s, branch=%s, revision=%s)", Version, Branch, GetRevision()) } // BuildContext returns goVersion, platform, buildUser and buildDate information. 
func BuildContext() string { - return fmt.Sprintf("(go=%s, platform=%s, user=%s, date=%s, tags=%s)", GoVersion, GoOS+"/"+GoArch, BuildUser, BuildDate, getTags()) + return fmt.Sprintf("(go=%s, platform=%s, user=%s, date=%s, tags=%s)", GoVersion, GoOS+"/"+GoArch, BuildUser, BuildDate, GetTags()) } diff --git a/vendor/github.com/prometheus/common/version/info_default.go b/vendor/github.com/prometheus/common/version/info_default.go index 8eb3a0bf..684996f1 100644 --- a/vendor/github.com/prometheus/common/version/info_default.go +++ b/vendor/github.com/prometheus/common/version/info_default.go @@ -16,7 +16,7 @@ package version -func getRevision() string { +func GetRevision() string { return Revision } diff --git a/vendor/github.com/prometheus/common/version/info_go118.go b/vendor/github.com/prometheus/common/version/info_go118.go index bfc7d410..992623c6 100644 --- a/vendor/github.com/prometheus/common/version/info_go118.go +++ b/vendor/github.com/prometheus/common/version/info_go118.go @@ -18,17 +18,19 @@ package version import "runtime/debug" -var computedRevision string -var computedTags string +var ( + computedRevision string + computedTags string +) -func getRevision() string { +func GetRevision() string { if Revision != "" { return Revision } return computedRevision } -func getTags() string { +func GetTags() string { return computedTags } diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common index 0ce7ea46..062a2818 100644 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ b/vendor/github.com/prometheus/procfs/Makefile.common @@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_ SKIP_GOLANGCI_LINT := GOLANGCI_LINT := GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.53.3 +GOLANGCI_LINT_VERSION ?= v1.54.2 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64. # windows isn't included here because of the path separator being different. ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go index 13d74e39..134767d6 100644 --- a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go +++ b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build netbsd || openbsd || solaris || windows || nostatfs -// +build netbsd openbsd solaris windows nostatfs +//go:build !freebsd && !linux +// +build !freebsd,!linux package procfs diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_type.go b/vendor/github.com/prometheus/procfs/fs_statfs_type.go index bee15144..80df79c3 100644 --- a/vendor/github.com/prometheus/procfs/fs_statfs_type.go +++ b/vendor/github.com/prometheus/procfs/fs_statfs_type.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-//go:build !netbsd && !openbsd && !solaris && !windows && !nostatfs -// +build !netbsd,!openbsd,!solaris,!windows,!nostatfs +//go:build freebsd || linux +// +build freebsd linux package procfs diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go index 852c8c4a..9d8af6db 100644 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -44,6 +44,14 @@ const ( fieldTransport11TCPLen = 13 fieldTransport11UDPLen = 10 + + // kernel version >= 4.14 MaxLen + // See: https://elixir.bootlin.com/linux/v6.4.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L393 + fieldTransport11RDMAMaxLen = 28 + + // kernel version <= 4.2 MinLen + // See: https://elixir.bootlin.com/linux/v4.2.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L331 + fieldTransport11RDMAMinLen = 20 ) // A Mount is a device mount parsed from /proc/[pid]/mountstats. @@ -233,6 +241,33 @@ type NFSTransportStats struct { // A running counter, incremented on each request as the current size of the // pending queue. CumulativePendingQueue uint64 + + // Stats below only available with stat version 1.1. + // Transport over RDMA + + // accessed when sending a call + ReadChunkCount uint64 + WriteChunkCount uint64 + ReplyChunkCount uint64 + TotalRdmaRequest uint64 + + // rarely accessed error counters + PullupCopyCount uint64 + HardwayRegisterCount uint64 + FailedMarshalCount uint64 + BadReplyCount uint64 + MrsRecovered uint64 + MrsOrphaned uint64 + MrsAllocated uint64 + EmptySendctxQ uint64 + + // accessed when receiving a reply + TotalRdmaReply uint64 + FixupCopyCount uint64 + ReplyWaitsForSend uint64 + LocalInvNeeded uint64 + NomsgCallCount uint64 + BcallCount uint64 } // parseMountStats parses a /proc/[pid]/mountstats file and returns a slice @@ -587,14 +622,17 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats expectedLength = fieldTransport11TCPLen } else if protocol == "udp" { expectedLength = fieldTransport11UDPLen + } else if protocol == "rdma" { + expectedLength = fieldTransport11RDMAMinLen } else { return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss) } - if len(ss) != expectedLength { - return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v", ErrFileParse, ss) + if (len(ss) != expectedLength && (protocol == "tcp" || protocol == "udp")) || + (protocol == "rdma" && len(ss) < expectedLength) { + return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v, protocol: %v", ErrFileParse, ss, protocol) } default: - return nil, fmt.Errorf("%s: Unrecognized NFS transport stats version: %q", ErrFileParse, statVersion) + return nil, fmt.Errorf("%s: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol) } // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay @@ -604,7 +642,9 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats // Note: slice length must be set to length of v1.1 stats to avoid a panic when // only v1.0 stats are present. // See: https://github.com/prometheus/node_exporter/issues/571. 
- ns := make([]uint64, fieldTransport11TCPLen) + // + // Note: NFS Over RDMA slice length is fieldTransport11RDMAMaxLen + ns := make([]uint64, fieldTransport11RDMAMaxLen+3) for i, s := range ss { n, err := strconv.ParseUint(s, 10, 64) if err != nil { @@ -622,9 +662,14 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats // we set them to 0 here. if protocol == "udp" { ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...) + } else if protocol == "tcp" { + ns = append(ns[:fieldTransport11TCPLen], make([]uint64, fieldTransport11RDMAMaxLen-fieldTransport11TCPLen+3)...) + } else if protocol == "rdma" { + ns = append(ns[:fieldTransport10TCPLen], append(make([]uint64, 3), ns[fieldTransport10TCPLen:]...)...) } return &NFSTransportStats{ + // NFS xprt over tcp or udp Protocol: protocol, Port: ns[0], Bind: ns[1], @@ -636,8 +681,32 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats BadTransactionIDs: ns[7], CumulativeActiveRequests: ns[8], CumulativeBacklog: ns[9], - MaximumRPCSlotsUsed: ns[10], - CumulativeSendingQueue: ns[11], - CumulativePendingQueue: ns[12], + + // NFS xprt over tcp or udp + // And statVersion 1.1 + MaximumRPCSlotsUsed: ns[10], + CumulativeSendingQueue: ns[11], + CumulativePendingQueue: ns[12], + + // NFS xprt over rdma + // And stat Version 1.1 + ReadChunkCount: ns[13], + WriteChunkCount: ns[14], + ReplyChunkCount: ns[15], + TotalRdmaRequest: ns[16], + PullupCopyCount: ns[17], + HardwayRegisterCount: ns[18], + FailedMarshalCount: ns[19], + BadReplyCount: ns[20], + MrsRecovered: ns[21], + MrsOrphaned: ns[22], + MrsAllocated: ns[23], + EmptySendctxQ: ns[24], + TotalRdmaReply: ns[25], + FixupCopyCount: ns[26], + ReplyWaitsForSend: ns[27], + LocalInvNeeded: ns[28], + NomsgCallCount: ns[29], + BcallCount: ns[30], }, nil } diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go index 4b7933e4..fa761b35 100644 --- a/vendor/github.com/prometheus/procfs/proc_fdinfo.go +++ b/vendor/github.com/prometheus/procfs/proc_fdinfo.go @@ -26,6 +26,7 @@ var ( rPos = regexp.MustCompile(`^pos:\s+(\d+)$`) rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`) rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`) + rIno = regexp.MustCompile(`^ino:\s+(\d+)$`) rInotify = regexp.MustCompile(`^inotify`) rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`) ) @@ -40,6 +41,8 @@ type ProcFDInfo struct { Flags string // Mount point ID MntID string + // Inode number + Ino string // List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only) InotifyInfos []InotifyInfo } @@ -51,7 +54,7 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { return nil, err } - var text, pos, flags, mntid string + var text, pos, flags, mntid, ino string var inotify []InotifyInfo scanner := bufio.NewScanner(bytes.NewReader(data)) @@ -63,6 +66,8 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { flags = rFlags.FindStringSubmatch(text)[1] } else if rMntID.MatchString(text) { mntid = rMntID.FindStringSubmatch(text)[1] + } else if rIno.MatchString(text) { + ino = rIno.FindStringSubmatch(text)[1] } else if rInotify.MatchString(text) { newInotify, err := parseInotifyInfo(text) if err != nil { @@ -77,6 +82,7 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { Pos: pos, Flags: flags, MntID: mntid, + Ino: ino, InotifyInfos: inotify, } diff --git a/vendor/github.com/prometheus/procfs/proc_maps.go 
b/vendor/github.com/prometheus/procfs/proc_maps.go index 727549a1..7e75c286 100644 --- a/vendor/github.com/prometheus/procfs/proc_maps.go +++ b/vendor/github.com/prometheus/procfs/proc_maps.go @@ -63,17 +63,17 @@ type ProcMap struct { // parseDevice parses the device token of a line and converts it to a dev_t // (mkdev) like structure. func parseDevice(s string) (uint64, error) { - toks := strings.Split(s, ":") - if len(toks) < 2 { - return 0, fmt.Errorf("%w: unexpected number of fields, expected: 2, got: %q", ErrFileParse, len(toks)) + i := strings.Index(s, ":") + if i == -1 { + return 0, fmt.Errorf("%w: expected separator `:` in %s", ErrFileParse, s) } - major, err := strconv.ParseUint(toks[0], 16, 0) + major, err := strconv.ParseUint(s[0:i], 16, 0) if err != nil { return 0, err } - minor, err := strconv.ParseUint(toks[1], 16, 0) + minor, err := strconv.ParseUint(s[i+1:], 16, 0) if err != nil { return 0, err } @@ -93,17 +93,17 @@ func parseAddress(s string) (uintptr, error) { // parseAddresses parses the start-end address. func parseAddresses(s string) (uintptr, uintptr, error) { - toks := strings.Split(s, "-") - if len(toks) < 2 { - return 0, 0, fmt.Errorf("%w: invalid address", ErrFileParse) + idx := strings.Index(s, "-") + if idx == -1 { + return 0, 0, fmt.Errorf("%w: expected separator `-` in %s", ErrFileParse, s) } - saddr, err := parseAddress(toks[0]) + saddr, err := parseAddress(s[0:idx]) if err != nil { return 0, 0, err } - eaddr, err := parseAddress(toks[1]) + eaddr, err := parseAddress(s[idx+1:]) if err != nil { return 0, 0, err } diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go index c055d075..46307f57 100644 --- a/vendor/github.com/prometheus/procfs/proc_status.go +++ b/vendor/github.com/prometheus/procfs/proc_status.go @@ -23,7 +23,7 @@ import ( ) // ProcStatus provides status information about the process, -// read from /proc/[pid]/stat. +// read from /proc/[pid]/status. type ProcStatus struct { // The process ID. PID int @@ -32,6 +32,8 @@ type ProcStatus struct { // Thread group ID. TGID int + // List of PID namespaces. + NSpids []uint64 // Peak virtual memory size. VmPeak uint64 // nolint:revive @@ -127,6 +129,8 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt copy(s.UIDs[:], strings.Split(vString, "\t")) case "Gid": copy(s.GIDs[:], strings.Split(vString, "\t")) + case "NSpid": + s.NSpids = calcNSPidsList(vString) case "VmPeak": s.VmPeak = vUintBytes case "VmSize": @@ -200,3 +204,18 @@ func calcCpusAllowedList(cpuString string) []uint64 { sort.Slice(g, func(i, j int) bool { return g[i] < g[j] }) return g } + +func calcNSPidsList(nspidsString string) []uint64 { + s := strings.Split(nspidsString, " ") + var nspids []uint64 + + for _, nspid := range s { + nspid, _ := strconv.ParseUint(nspid, 10, 64) + if nspid == 0 { + continue + } + nspids = append(nspids, nspid) + } + + return nspids +} diff --git a/vendor/github.com/segmentio/go-athena/README.md b/vendor/github.com/segmentio/go-athena/README.md index dbd71c83..23ec64ca 100644 --- a/vendor/github.com/segmentio/go-athena/README.md +++ b/vendor/github.com/segmentio/go-athena/README.md @@ -1,6 +1,9 @@ [![](https://godoc.org/github.com/segmentio/go-athena?status.svg)](https://godoc.org/github.com/segmentio/go-athena) # go-athena +> **Note** +> Segment has paused maintenance on this project, but may return it to an active status in the future.
Issues and pull requests from external contributors are not being considered, although internal contributions may appear from time to time. The project remains available under its open source license for anyone to use. + go-athena is a simple Golang [database/sql] driver for [Amazon Athena](https://aws.amazon.com/athena/). ```go diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md index b042c896..d1d4a85f 100644 --- a/vendor/github.com/sirupsen/logrus/README.md +++ b/vendor/github.com/sirupsen/logrus/README.md @@ -9,7 +9,7 @@ the last thing you want from your Logging library (again...). This does not mean Logrus is dead. Logrus will continue to be maintained for security, (backwards compatible) bug fixes, and performance (where we are -limited by the interface). +limited by the interface). I believe Logrus' biggest contribution is to have played a part in today's widespread use of structured logging in Golang. There doesn't seem to be a @@ -43,7 +43,7 @@ plain text): With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash or Splunk: -```json +```text {"animal":"walrus","level":"info","msg":"A group of walrus emerges from the ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} @@ -99,7 +99,7 @@ time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcr ``` Note that this does add measurable overhead - the cost will depend on the version of Go, but is between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your -environment via benchmarks: +environment via benchmarks: ``` go test -bench=.*CallerTracing ``` @@ -317,6 +317,8 @@ log.SetLevel(log.InfoLevel) It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose environment if your application has that. +Note: If you want different log levels for global (`log.SetLevel(...)`) and syslog logging, please check the [syslog hook README](hooks/syslog/README.md#different-log-levels-for-local-and-remote-logging). + #### Entries Besides the fields added with `WithField` or `WithFields` some fields are diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go index 72e8e3a1..074fd4b8 100644 --- a/vendor/github.com/sirupsen/logrus/writer.go +++ b/vendor/github.com/sirupsen/logrus/writer.go @@ -4,6 +4,7 @@ import ( "bufio" "io" "runtime" + "strings" ) // Writer at INFO level. See WriterLevel for details. @@ -20,15 +21,18 @@ func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { return NewEntry(logger).WriterLevel(level) } +// Writer returns an io.Writer that writes to the logger at the info log level func (entry *Entry) Writer() *io.PipeWriter { return entry.WriterLevel(InfoLevel) } +// WriterLevel returns an io.Writer that writes to the logger at the given log level func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { reader, writer := io.Pipe() var printFunc func(args ...interface{}) + // Determine which log function to use based on the specified log level switch level { case TraceLevel: printFunc = entry.Trace @@ -48,23 +52,51 @@ func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { printFunc = entry.Print } + // Start a new goroutine to scan the input and write it to the logger using the specified print function. + // It splits the input into chunks of up to 64KB to avoid buffer overflows. 
go entry.writerScanner(reader, printFunc) + + // Set a finalizer function to close the writer when it is garbage collected runtime.SetFinalizer(writer, writerFinalizer) return writer } +// writerScanner scans the input from the reader and writes it to the logger func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { scanner := bufio.NewScanner(reader) + + // Set the buffer size to the maximum token size to avoid buffer overflows + scanner.Buffer(make([]byte, bufio.MaxScanTokenSize), bufio.MaxScanTokenSize) + + // Define a split function to split the input into chunks of up to 64KB + chunkSize := bufio.MaxScanTokenSize // 64KB + splitFunc := func(data []byte, atEOF bool) (int, []byte, error) { + if len(data) >= chunkSize { + return chunkSize, data[:chunkSize], nil + } + + return bufio.ScanLines(data, atEOF) + } + + // Use the custom split function to split the input + scanner.Split(splitFunc) + + // Scan the input and write it to the logger using the specified print function for scanner.Scan() { - printFunc(scanner.Text()) + printFunc(strings.TrimRight(scanner.Text(), "\r\n")) } + + // If there was an error while scanning the input, log an error if err := scanner.Err(); err != nil { entry.Errorf("Error while reading from Writer: %s", err) } + + // Close the reader when we are done reader.Close() } +// writerFinalizer is a finalizer function that closes the given writer when it is garbage collected func writerFinalizer(writer *io.PipeWriter) { writer.Close() } diff --git a/vendor/github.com/snowflakedb/gosnowflake/README.md b/vendor/github.com/snowflakedb/gosnowflake/README.md index 41cf742c..5056d80d 100644 --- a/vendor/github.com/snowflakedb/gosnowflake/README.md +++ b/vendor/github.com/snowflakedb/gosnowflake/README.md @@ -1,5 +1,8 @@ # Go Snowflake Driver + + Coverage + @@ -23,6 +26,12 @@ The latest driver requires the [Go language](https://golang.org/) 1.19 or higher # Installation +If you don't have a project initialized, set it up. + +```sh +go mod init example.com/snowflake +``` + Get Gosnowflake source code, if not installed.
```sh diff --git a/vendor/github.com/snowflakedb/gosnowflake/arrow_chunk.go b/vendor/github.com/snowflakedb/gosnowflake/arrow_chunk.go index 344774af..1532c3ee 100644 --- a/vendor/github.com/snowflakedb/gosnowflake/arrow_chunk.go +++ b/vendor/github.com/snowflakedb/gosnowflake/arrow_chunk.go @@ -7,9 +7,9 @@ import ( "encoding/base64" "time" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/ipc" - "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/ipc" + "github.com/apache/arrow/go/v14/arrow/memory" ) type arrowResultChunk struct { @@ -57,7 +57,7 @@ func (arc *arrowResultChunk) decodeArrowBatch(scd *snowflakeChunkDownloader) (*[ for arc.reader.Next() { rawRecord := arc.reader.Record() - record, err := arrowToRecord(rawRecord, arc.allocator, scd.RowSet.RowType, arc.loc) + record, err := arrowToRecord(scd.ctx, rawRecord, arc.allocator, scd.RowSet.RowType, arc.loc) if err != nil { return nil, err } @@ -68,15 +68,15 @@ func (arc *arrowResultChunk) decodeArrowBatch(scd *snowflakeChunkDownloader) (*[ } // Build arrow chunk based on RowSet of base64 -func buildFirstArrowChunk(rowsetBase64 string, loc *time.Location, alloc memory.Allocator) arrowResultChunk { +func buildFirstArrowChunk(rowsetBase64 string, loc *time.Location, alloc memory.Allocator) (arrowResultChunk, error) { rowSetBytes, err := base64.StdEncoding.DecodeString(rowsetBase64) if err != nil { - return arrowResultChunk{} + return arrowResultChunk{}, err } rr, err := ipc.NewReader(bytes.NewReader(rowSetBytes), ipc.WithAllocator(alloc)) if err != nil { - return arrowResultChunk{} + return arrowResultChunk{}, err } - return arrowResultChunk{rr, 0, loc, alloc} + return arrowResultChunk{rr, 0, loc, alloc}, nil } diff --git a/vendor/github.com/snowflakedb/gosnowflake/async.go b/vendor/github.com/snowflakedb/gosnowflake/async.go index 5c477dc1..e5d7e523 100644 --- a/vendor/github.com/snowflakedb/gosnowflake/async.go +++ b/vendor/github.com/snowflakedb/gosnowflake/async.go @@ -63,28 +63,49 @@ func (sr *snowflakeRestful) getAsync( defer close(errChannel) token, _, _ := sr.TokenAccessor.GetTokens() headers[headerAuthorizationKey] = fmt.Sprintf(headerSnowflakeToken, token) - resp, err := sr.FuncGet(ctx, sr, URL, headers, timeout) - if err != nil { - logger.WithContext(ctx).Errorf("failed to get response. err: %v", err) - sfError.Message = err.Error() - errChannel <- sfError - return err - } - if resp.Body != nil { + + var err error + var respd execResponse + retry := 0 + retryPattern := []int32{1, 1, 2, 3, 4, 8, 10} + + for { + resp, err := sr.FuncGet(ctx, sr, URL, headers, timeout) + if err != nil { + logger.WithContext(ctx).Errorf("failed to get response. err: %v", err) + sfError.Message = err.Error() + errChannel <- sfError + return err + } defer resp.Body.Close() - } - respd := execResponse{} - err = json.NewDecoder(resp.Body).Decode(&respd) - resp.Body.Close() - if err != nil { - logger.WithContext(ctx).Errorf("failed to decode JSON. err: %v", err) - sfError.Message = err.Error() - errChannel <- sfError - return err + respd = execResponse{} // reset the response + err = json.NewDecoder(resp.Body).Decode(&respd) + if err != nil { + logger.WithContext(ctx).Errorf("failed to decode JSON. err: %v", err) + sfError.Message = err.Error() + errChannel <- sfError + return err + } + if respd.Code != queryInProgressAsyncCode { + // If the query takes longer than 45 seconds to complete the results are not returned. 
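Aside (not part of the vendored patch): the backoff schedule this retry loop applies, extracted into a runnable sketch. The `retryPattern` and the 500ms base come from the hunk itself; the loop bound here is illustrative, and the sketch only prints the delays instead of sleeping.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Multipliers from the hunk: applied to a 500ms base, the delay grows
	// to 5s and then stays there for every further attempt.
	retryPattern := []int32{1, 1, 2, 3, 4, 8, 10}
	retry := 0
	for attempt := 1; attempt <= 9; attempt++ {
		sleepTime := time.Millisecond * time.Duration(500*retryPattern[retry])
		fmt.Printf("attempt %d: sleep %v\n", attempt, sleepTime)
		if retry < len(retryPattern)-1 {
			retry++
		}
	}
}
```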
+ // If the query is still in progress after 45 seconds, retry the request to the /results endpoint. + // For all other scenarios, continue processing the results response. + break + } else { + // Sleep before retrying the get result request. Exponential backoff up to 5 seconds. + // Once the 5-second backoff is reached, it keeps retrying with this sleep time. + sleepTime := time.Millisecond * time.Duration(500*retryPattern[retry]) + logger.WithContext(ctx).Infof("Query execution still in progress. Sleep for %v", sleepTime) + time.Sleep(sleepTime) + } + if retry < len(retryPattern)-1 { + retry++ + } + } - sc := &snowflakeConn{rest: sr, cfg: cfg} + sc := &snowflakeConn{rest: sr, cfg: cfg, queryContextCache: (&queryContextCache{}).init(), currentTimeProvider: defaultTimeProvider} if respd.Success { if resType == execResultType { res.insertID = -1 @@ -113,12 +134,17 @@ func (sr *snowflakeRestful) getAsync( if isMultiStmt(&respd.Data) { if err = sc.handleMultiQuery(ctx, respd.Data, rows); err != nil { rows.errChannel <- err + close(rows.errChannel) return err } } else { rows.addDownloader(populateChunkDownloader(ctx, sc, respd.Data)) } - rows.ChunkDownloader.start() + if err = rows.ChunkDownloader.start(); err != nil { + rows.errChannel <- err + close(rows.errChannel) + return err + } rows.errChannel <- nil // mark query status complete } } else { diff --git a/vendor/github.com/snowflakedb/gosnowflake/auth.go b/vendor/github.com/snowflakedb/gosnowflake/auth.go index e58a2b39..9493459f 100644 --- a/vendor/github.com/snowflakedb/gosnowflake/auth.go +++ b/vendor/github.com/snowflakedb/gosnowflake/auth.go @@ -70,6 +70,9 @@ func determineAuthenticatorType(cfg *Config, value string) error { } else if upperCaseValue == AuthTypeUsernamePasswordMFA.String() { cfg.Authenticator = AuthTypeUsernamePasswordMFA return nil + } else if upperCaseValue == AuthTypeTokenAccessor.String() { + cfg.Authenticator = AuthTypeTokenAccessor + return nil } else { // possibly Okta case oktaURLString, err := url.QueryUnescape(lowerCaseValue) @@ -212,9 +215,10 @@ type authResponse struct { func postAuth( ctx context.Context, sr *snowflakeRestful, + client *http.Client, params *url.Values, headers map[string]string, - body []byte, + bodyCreator bodyCreatorType, timeout time.Duration) ( data *authResponse, err error) { params.Add(requestIDKey, getOrGenerateRequestIDFromContext(ctx).String()) @@ -222,7 +226,7 @@ func postAuth( fullURL := sr.getFullURL(loginRequestPath, params) logger.Infof("full URL: %v", fullURL) - resp, err := sr.FuncPost(ctx, sr, fullURL, headers, body, timeout, true) + resp, err := sr.FuncAuthPost(ctx, client, fullURL, headers, bodyCreator, timeout, sr.MaxRetryCount) if err != nil { return nil, err } @@ -231,7 +235,7 @@ func postAuth( var respd authResponse err = json.NewDecoder(resp.Body).Decode(&respd) if err != nil { - logger.Error("failed to decode JSON. err: %v", err) + logger.Errorf("failed to decode JSON.
err: %v", err) return nil, err } return &respd, nil @@ -275,6 +279,8 @@ func getHeaders() map[string]string { headers := make(map[string]string) headers[httpHeaderContentType] = headerContentTypeApplicationJSON headers[httpHeaderAccept] = headerAcceptTypeApplicationSnowflake + headers[httpClientAppID] = clientType + headers[httpClientAppVersion] = SnowflakeGoDriverVersion headers[httpHeaderUserAgent] = userAgent return headers } @@ -286,6 +292,23 @@ func authenticate( samlResponse []byte, proofKey []byte, ) (resp *authResponseMain, err error) { + if sc.cfg.Authenticator == AuthTypeTokenAccessor { + logger.Info("Bypass authentication using existing token from token accessor") + sessionInfo := authResponseSessionInfo{ + DatabaseName: sc.cfg.Database, + SchemaName: sc.cfg.Schema, + WarehouseName: sc.cfg.Warehouse, + RoleName: sc.cfg.Role, + } + token, masterToken, sessionID := sc.cfg.TokenAccessor.GetTokens() + return &authResponseMain{ + Token: token, + MasterToken: masterToken, + SessionID: sessionID, + SessionInfo: sessionInfo, + }, nil + } + headers := getHeaders() clientEnvironment := authRequestClientEnvironment{ Application: sc.cfg.Application, @@ -309,6 +332,67 @@ func authenticate( if sc.cfg.ClientStoreTemporaryCredential == ConfigBoolTrue { sessionParameters[clientStoreTemporaryCredential] = true } + bodyCreator := func() ([]byte, error) { + return createRequestBody(sc, sessionParameters, clientEnvironment, proofKey, samlResponse) + } + + params := &url.Values{} + if sc.cfg.Database != "" { + params.Add("databaseName", sc.cfg.Database) + } + if sc.cfg.Schema != "" { + params.Add("schemaName", sc.cfg.Schema) + } + if sc.cfg.Warehouse != "" { + params.Add("warehouse", sc.cfg.Warehouse) + } + if sc.cfg.Role != "" { + params.Add("roleName", sc.cfg.Role) + } + + logger.WithContext(sc.ctx).Infof("PARAMS for Auth: %v, %v, %v, %v, %v, %v", + params, sc.rest.Protocol, sc.rest.Host, sc.rest.Port, sc.rest.LoginTimeout, sc.cfg.Authenticator.String()) + + respd, err := sc.rest.FuncPostAuth(ctx, sc.rest, sc.rest.getClientFor(sc.cfg.Authenticator), params, headers, bodyCreator, sc.rest.LoginTimeout) + if err != nil { + return nil, err + } + if !respd.Success { + logger.Errorln("Authentication FAILED") + sc.rest.TokenAccessor.SetTokens("", "", -1) + if sessionParameters[clientRequestMfaToken] == true { + deleteCredential(sc, mfaToken) + } + if sessionParameters[clientStoreTemporaryCredential] == true { + deleteCredential(sc, idToken) + } + code, err := strconv.Atoi(respd.Code) + if err != nil { + code = -1 + return nil, err + } + return nil, (&SnowflakeError{ + Number: code, + SQLState: SQLStateConnectionRejected, + Message: respd.Message, + }).exceptionTelemetry(sc) + } + logger.Info("Authentication SUCCESS") + sc.rest.TokenAccessor.SetTokens(respd.Data.Token, respd.Data.MasterToken, respd.Data.SessionID) + if sessionParameters[clientRequestMfaToken] == true { + token := respd.Data.MfaToken + setCredential(sc, mfaToken, token) + } + if sessionParameters[clientStoreTemporaryCredential] == true { + token := respd.Data.IDToken + setCredential(sc, idToken, token) + } + return &respd.Data, nil +} + +func createRequestBody(sc *snowflakeConn, sessionParameters map[string]interface{}, + clientEnvironment authRequestClientEnvironment, proofKey []byte, samlResponse []byte, +) ([]byte, error) { requestMain := authRequestData{ ClientAppID: clientType, ClientAppVersion: SnowflakeGoDriverVersion, @@ -361,83 +445,16 @@ func authenticate( if sc.cfg.MfaToken != "" { requestMain.Token = sc.cfg.MfaToken } - case 
AuthTypeTokenAccessor: - logger.Info("Bypass authentication using existing token from token accessor") - sessionInfo := authResponseSessionInfo{ - DatabaseName: sc.cfg.Database, - SchemaName: sc.cfg.Schema, - WarehouseName: sc.cfg.Warehouse, - RoleName: sc.cfg.Role, - } - token, masterToken, sessionID := sc.cfg.TokenAccessor.GetTokens() - return &authResponseMain{ - Token: token, - MasterToken: masterToken, - SessionID: sessionID, - SessionInfo: sessionInfo, - }, nil } authRequest := authRequest{ Data: requestMain, } - params := &url.Values{} - if sc.cfg.Database != "" { - params.Add("databaseName", sc.cfg.Database) - } - if sc.cfg.Schema != "" { - params.Add("schemaName", sc.cfg.Schema) - } - if sc.cfg.Warehouse != "" { - params.Add("warehouse", sc.cfg.Warehouse) - } - if sc.cfg.Role != "" { - params.Add("roleName", sc.cfg.Role) - } - jsonBody, err := json.Marshal(authRequest) - if err != nil { - return - } - - logger.WithContext(sc.ctx).Infof("PARAMS for Auth: %v, %v, %v, %v, %v, %v", - params, sc.rest.Protocol, sc.rest.Host, sc.rest.Port, sc.rest.LoginTimeout, sc.cfg.Authenticator.String()) - - respd, err := sc.rest.FuncPostAuth(ctx, sc.rest, params, headers, jsonBody, sc.rest.LoginTimeout) if err != nil { return nil, err } - if !respd.Success { - logger.Errorln("Authentication FAILED") - sc.rest.TokenAccessor.SetTokens("", "", -1) - if sessionParameters[clientRequestMfaToken] == true { - deleteCredential(sc, mfaToken) - } - if sessionParameters[clientStoreTemporaryCredential] == true { - deleteCredential(sc, idToken) - } - code, err := strconv.Atoi(respd.Code) - if err != nil { - code = -1 - return nil, err - } - return nil, (&SnowflakeError{ - Number: code, - SQLState: SQLStateConnectionRejected, - Message: respd.Message, - }).exceptionTelemetry(sc) - } - logger.Info("Authentication SUCCESS") - sc.rest.TokenAccessor.SetTokens(respd.Data.Token, respd.Data.MasterToken, respd.Data.SessionID) - if sessionParameters[clientRequestMfaToken] == true { - token := respd.Data.MfaToken - setCredential(sc, mfaToken, token) - } - if sessionParameters[clientStoreTemporaryCredential] == true { - token := respd.Data.IDToken - setCredential(sc, idToken, token) - } - return &respd.Data, nil + return jsonBody, nil } // Generate a JWT token in string given the configuration @@ -506,7 +523,8 @@ func authenticateWithConfig(sc *snowflakeConn) error { sc.cfg.Application, sc.cfg.Account, sc.cfg.User, - sc.cfg.Password) + sc.cfg.Password, + sc.cfg.ExternalBrowserTimeout) if err != nil { sc.cleanup() return err diff --git a/vendor/github.com/snowflakedb/gosnowflake/authexternalbrowser.go b/vendor/github.com/snowflakedb/gosnowflake/authexternalbrowser.go index b6dbc38c..373173f5 100644 --- a/vendor/github.com/snowflakedb/gosnowflake/authexternalbrowser.go +++ b/vendor/github.com/snowflakedb/gosnowflake/authexternalbrowser.go @@ -6,6 +6,7 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" "io" "log" @@ -14,6 +15,7 @@ import ( "net/url" "strconv" "strings" + "time" "github.com/pkg/browser" ) @@ -54,13 +56,18 @@ func buildResponse(application string) bytes.Buffer { // This opens a socket that listens on all available unicast // and any anycast IP addresses locally. By specifying "0", we are // able to bind to a free port. 
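Aside (not part of the vendored patch): the pattern being refactored in the next hunk, binding to port "0" so the OS assigns a free port and then recovering the concrete listener, shown in isolation. A minimal sketch using only the stdlib net package:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// Port "0" asks the kernel to pick any free port on the loopback interface.
	l, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		panic(err)
	}
	defer l.Close()

	// The concrete *net.TCPListener (and the port actually chosen) are
	// recovered from the generic net.Listener, as the patched helper does.
	tcpListener, ok := l.(*net.TCPListener)
	if !ok {
		panic("not a TCP listener")
	}
	fmt.Println("listening on port", tcpListener.Addr().(*net.TCPAddr).Port)
}
```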
-func bindToPort() (net.Listener, error) { +func createLocalTCPListener() (*net.TCPListener, error) { l, err := net.Listen("tcp", "localhost:0") if err != nil { - logger.Infof("unable to bind to a port on localhost, err: %v", err) return nil, err } - return l, nil + + tcpListener, ok := l.(*net.TCPListener) + if !ok { + return nil, fmt.Errorf("failed to assert type as *net.TCPListener") + } + + return tcpListener, nil } // Opens a browser window (or new tab) with the configured IDP Url. @@ -165,6 +172,34 @@ func getTokenFromResponse(response string) (string, error) { return token, nil } +type authenticateByExternalBrowserResult struct { + escapedSamlResponse []byte + proofKey []byte + err error +} + +func authenticateByExternalBrowser( + ctx context.Context, + sr *snowflakeRestful, + authenticator string, + application string, + account string, + user string, + password string, + externalBrowserTimeout time.Duration, +) ([]byte, []byte, error) { + resultChan := make(chan authenticateByExternalBrowserResult, 1) + go func() { + resultChan <- doAuthenticateByExternalBrowser(ctx, sr, authenticator, application, account, user, password) + }() + select { + case <-time.After(externalBrowserTimeout): + return nil, nil, errors.New("authentication timed out") + case result := <-resultChan: + return result.escapedSamlResponse, result.proofKey, result.err + } +} + // Authentication by an external browser takes place via the following: // - the golang snowflake driver communicates to Snowflake that the user wishes to // authenticate via external browser @@ -174,7 +209,7 @@ func getTokenFromResponse(response string) (string, error) { // - user authenticates at the IDP, and is redirected to Snowflake // - Snowflake directs the user back to the driver // - authenticate is complete! -func authenticateByExternalBrowser( +func doAuthenticateByExternalBrowser( ctx context.Context, sr *snowflakeRestful, authenticator string, @@ -182,10 +217,10 @@ func authenticateByExternalBrowser( account string, user string, password string, -) ([]byte, []byte, error) { - l, err := bindToPort() +) authenticateByExternalBrowserResult { + l, err := createLocalTCPListener() if err != nil { - return nil, nil, err + return authenticateByExternalBrowserResult{nil, nil, err} } defer l.Close() @@ -193,11 +228,11 @@ func authenticateByExternalBrowser( idpURL, proofKey, err := getIdpURLProofKey( ctx, sr, authenticator, application, account, callbackPort) if err != nil { - return nil, nil, err + return authenticateByExternalBrowserResult{nil, nil, err} } if err = openBrowser(idpURL); err != nil { - return nil, nil, err + return authenticateByExternalBrowserResult{nil, nil, err} } encodedSamlResponseChan := make(chan string) @@ -253,13 +288,13 @@ func authenticateByExternalBrowser( errFromGoroutine = <-errChan if errFromGoroutine != nil { - return nil, nil, errFromGoroutine + return authenticateByExternalBrowserResult{nil, nil, errFromGoroutine} } escapedSamlResponse, err := url.QueryUnescape(encodedSamlResponse) if err != nil { logger.WithContext(ctx).Errorf("unable to unescape saml response. 
err: %v", err) - return nil, nil, err + return authenticateByExternalBrowserResult{nil, nil, err} } - return []byte(escapedSamlResponse), []byte(proofKey), nil + return authenticateByExternalBrowserResult{[]byte(escapedSamlResponse), []byte(proofKey), nil} } diff --git a/vendor/github.com/snowflakedb/gosnowflake/authokta.go b/vendor/github.com/snowflakedb/gosnowflake/authokta.go index 1706f2d6..818753af 100644 --- a/vendor/github.com/snowflakedb/gosnowflake/authokta.go +++ b/vendor/github.com/snowflakedb/gosnowflake/authokta.go @@ -111,8 +111,8 @@ func authenticateBySAML( if tokenURL, err = url.Parse(respd.Data.TokenURL); err != nil { return nil, fmt.Errorf("failed to parse token URL. %v", respd.Data.TokenURL) } - if ssoURL, err = url.Parse(respd.Data.TokenURL); err != nil { - return nil, fmt.Errorf("failed to parse ssoURL URL. %v", respd.Data.SSOURL) + if ssoURL, err = url.Parse(respd.Data.SSOURL); err != nil { + return nil, fmt.Errorf("failed to parse SSO URL. %v", respd.Data.SSOURL) } if !isPrefixEqual(oktaURL, ssoURL) || !isPrefixEqual(oktaURL, tokenURL) { return nil, &SnowflakeError{ @@ -216,7 +216,7 @@ func postAuthSAML( fullURL := sr.getFullURL(authenticatorRequestPath, params) logger.Infof("fullURL: %v", fullURL) - resp, err := sr.FuncPost(ctx, sr, fullURL, headers, body, timeout, true) + resp, err := sr.FuncPost(ctx, sr, fullURL, headers, body, timeout, defaultTimeProvider, nil) if err != nil { return nil, err } @@ -274,7 +274,7 @@ func postAuthOKTA( if err != nil { return nil, err } - resp, err := sr.FuncPost(ctx, sr, targetURL, headers, body, timeout, false) + resp, err := sr.FuncPost(ctx, sr, targetURL, headers, body, timeout, defaultTimeProvider, nil) if err != nil { return nil, err } diff --git a/vendor/github.com/snowflakedb/gosnowflake/azure_storage_client.go b/vendor/github.com/snowflakedb/gosnowflake/azure_storage_client.go index 9a1fc3e4..dce7f8d3 100644 --- a/vendor/github.com/snowflakedb/gosnowflake/azure_storage_client.go +++ b/vendor/github.com/snowflakedb/gosnowflake/azure_storage_client.go @@ -49,6 +49,9 @@ func (util *snowflakeAzureClient) createClient(info *execResponseStageInfo, _ bo MaxRetries: 60, RetryDelay: 2 * time.Second, }, + Transport: &http.Client{ + Transport: SnowflakeTransport, + }, }, }) if err != nil { @@ -204,7 +207,7 @@ func (util *snowflakeAzureClient) uploadFile( }) } else { var f *os.File - f, err = os.OpenFile(dataFile, os.O_RDONLY, os.ModePerm) + f, err = os.Open(dataFile) if err != nil { return err } @@ -273,7 +276,7 @@ func (util *snowflakeAzureClient) nativeDownloadFile( if meta.mockAzureClient != nil { blobClient = meta.mockAzureClient } - f, err := os.OpenFile(fullDstFileName, os.O_CREATE|os.O_WRONLY, os.ModePerm) + f, err := os.OpenFile(fullDstFileName, os.O_CREATE|os.O_WRONLY, readWriteFileMode) if err != nil { return err } diff --git a/vendor/github.com/snowflakedb/gosnowflake/bind_uploader.go b/vendor/github.com/snowflakedb/gosnowflake/bind_uploader.go index a8407203..74029095 100644 --- a/vendor/github.com/snowflakedb/gosnowflake/bind_uploader.go +++ b/vendor/github.com/snowflakedb/gosnowflake/bind_uploader.go @@ -5,6 +5,7 @@ package gosnowflake import ( "bytes" "context" + "database/sql" "database/sql/driver" "fmt" "reflect" @@ -195,7 +196,10 @@ func (sc *snowflakeConn) processBindings( ctx: ctx, stagePath: "@" + bindStageName + "/" + requestID.String(), } - uploader.upload(bindings) + _, err := uploader.upload(bindings) + if err != nil { + return err + } req.Bindings = nil req.BindStage = uploader.stagePath } else { @@ -215,6 
+219,10 @@ func getBindValues(bindings []driver.NamedValue) (map[string]execBindParameter, var err error bindValues := make(map[string]execBindParameter, len(bindings)) for _, binding := range bindings { + if tnt, ok := binding.Value.(TypedNullTime); ok { + tsmode = convertTzTypeToSnowflakeType(tnt.TzType) + binding.Value = tnt.Time + } t := goTypeToSnowflake(binding.Value, tsmode) if t == changeType { tsmode, err = dataTypeMode(binding.Value) @@ -235,7 +243,7 @@ func getBindValues(bindings []driver.NamedValue) (map[string]execBindParameter, if t == nullType || t == unSupportedType { t = textType // if null or not supported, pass to GS as text } - bindValues[strconv.Itoa(idx)] = execBindParameter{ + bindValues[bindingName(binding, idx)] = execBindParameter{ Type: t.String(), Value: val, } @@ -245,6 +253,13 @@ func getBindValues(bindings []driver.NamedValue) (map[string]execBindParameter, return bindValues, nil } +func bindingName(nv driver.NamedValue, idx int) string { + if nv.Name != "" { + return nv.Name + } + return strconv.Itoa(idx) +} + func arrayBindValueCount(bindValues []driver.NamedValue) int { if !isArrayBind(bindValues) { return 0 @@ -298,3 +313,12 @@ func supportedArrayBind(nv *driver.NamedValue) bool { return false } } + +func supportedNullBind(nv *driver.NamedValue) bool { + switch reflect.TypeOf(nv.Value) { + case reflect.TypeOf(sql.NullString{}), reflect.TypeOf(sql.NullInt64{}), + reflect.TypeOf(sql.NullBool{}), reflect.TypeOf(sql.NullFloat64{}), reflect.TypeOf(TypedNullTime{}): + return true + } + return false +} diff --git a/vendor/github.com/snowflakedb/gosnowflake/cacert.go b/vendor/github.com/snowflakedb/gosnowflake/cacert.go index 35ceea29..04b356eb 100644 --- a/vendor/github.com/snowflakedb/gosnowflake/cacert.go +++ b/vendor/github.com/snowflakedb/gosnowflake/cacert.go @@ -7,7 +7,7 @@ const caRootPEM = ` ## ## Bundle of CA Root Certificates ## -## Certificate data from Mozilla as of: Wed Jul 22 03:12:14 2020 GMT +## Certificate data from Mozilla as of: Tue Aug 22 03:12:04 2023 GMT ## ## This is a bundle of X.509 certificates of public Certificate Authorities ## (CA). These were automatically extracted from Mozilla's root certificates @@ -19,8 +19,8 @@ const caRootPEM = ` ## an Apache+mod_ssl webserver for SSL client authentication. ## Just configure this file as the SSLCACertificateFile. ## -## Conversion done with mk-ca-bundle.pl version 1.28. -## SHA256: cc6408bd4be7fbfb8699bdb40ccb7f6de5780d681d87785ea362646e4dad5e8e +## Conversion done with mk-ca-bundle.pl version 1.29. 
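Aside (not part of the vendored patch): the bundle being refreshed here is embedded as the caRootPEM constant. A minimal sketch of how such a PEM bundle is typically loaded into a root pool; the bundle contents are elided and the constant name below is a stand-in:

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
)

// pemBundle stands in for a full caRootPEM-style bundle; contents elided here.
const pemBundle = `-----BEGIN CERTIFICATE-----
(elided)
-----END CERTIFICATE-----`

func main() {
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM([]byte(pemBundle)) {
		fmt.Println("no certificates parsed from the bundle")
		return
	}
	// A pool built this way becomes the trusted root set for TLS dials.
	cfg := &tls.Config{RootCAs: pool}
	_ = cfg
}
```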
+## SHA256: 0ff137babc6a5561a9cfbe9f29558972e5b528202681b7d3803d03a3e82922bd ## @@ -45,28 +45,6 @@ hm4qxFYxldBniYUr+WymXUadDKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveC X4XSQRjbgbMEHMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A== -----END CERTIFICATE----- -GlobalSign Root CA - R2 -======================= ------BEGIN CERTIFICATE----- -MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4GA1UECxMXR2xv -YmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2Jh -bFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxT -aWduIFJvb3QgQ0EgLSBSMjETMBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2ln -bjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6 -ErPLv4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8eoLrvozp -s6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklqtTleiDTsvHgMCJiEbKjN -S7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzdC9XZzPnqJworc5HGnRusyMvo4KD0L5CL -TfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pazq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6C -ygPCm48CAwEAAaOBnDCBmTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E -FgQUm+IHV2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5nbG9i -YWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG3lm0mi3f3BmGLjAN -BgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4GsJ0/WwbgcQ3izDJr86iw8bmEbTUsp -9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu -01yiPqFbQfXf5WRDLenVOavSot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG7 -9G+dwfCMNYxdAfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7 -TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg== ------END CERTIFICATE----- - Entrust.net Premium 2048 Secure Server CA ========================================= -----BEGIN CERTIFICATE----- @@ -138,87 +116,6 @@ W3iDVuycNsMm4hH2Z0kdkquM++v/eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0 tHuu2guQOHXvgR1m0vdXcDazv/wor3ElhVsT/h5/WrQ8 -----END CERTIFICATE----- -GeoTrust Global CA -================== ------BEGIN CERTIFICATE----- -MIIDVDCCAjygAwIBAgIDAjRWMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNVBAYTAlVTMRYwFAYDVQQK -Ew1HZW9UcnVzdCBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9iYWwgQ0EwHhcNMDIwNTIxMDQw -MDAwWhcNMjIwNTIxMDQwMDAwWjBCMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5j -LjEbMBkGA1UEAxMSR2VvVHJ1c3QgR2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB -CgKCAQEA2swYYzD99BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9mOSm9BXiLnTjo -BbdqfnGk5sRgprDvgOSJKA+eJdbtg/OtppHHmMlCGDUUna2YRpIuT8rxh0PBFpVXLVDviS2Aelet -8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6cJmTM386DGXHKTubU1XupGc1V3sjs0l44U+Vc -T4wt/lAjNvxm5suOpDkZALeVAjmRCw7+OC7RHQWa9k0+bw8HHa8sHo9gOeL6NlMTOdReJivbPagU -vTLrGAMoUgRx5aszPeE4uwc2hGKceeoWMPRfwCvocWvk+QIDAQABo1MwUTAPBgNVHRMBAf8EBTAD -AQH/MB0GA1UdDgQWBBTAephojYn7qwVkDBF9qn1luMrMTjAfBgNVHSMEGDAWgBTAephojYn7qwVk -DBF9qn1luMrMTjANBgkqhkiG9w0BAQUFAAOCAQEANeMpauUvXVSOKVCUn5kaFOSPeCpilKInZ57Q -zxpeR+nBsqTP3UEaBU6bS+5Kb1VSsyShNwrrZHYqLizz/Tt1kL/6cdjHPTfStQWVYrmm3ok9Nns4 -d0iXrKYgjy6myQzCsplFAMfOEVEiIuCl6rYVSAlk6l5PdPcFPseKUgzbFbS9bZvlxrFUaKnjaZC2 -mqUPuLk/IH2uSrW4nOQdtqvmlKXBx4Ot2/Unhw4EbNX/3aBd7YdStysVAq45pmp06drE57xNNB6p -XE0zX5IJL4hmXXeXxx12E6nV5fEWCRE11azbJHFwLJhWC9kXtNHjUStedejV0NxPNO3CBWaAocvm -Mw== ------END CERTIFICATE----- - -GeoTrust Universal CA -===================== ------BEGIN CERTIFICATE----- -MIIFaDCCA1CgAwIBAgIBATANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJVUzEWMBQGA1UEChMN -R2VvVHJ1c3QgSW5jLjEeMBwGA1UEAxMVR2VvVHJ1c3QgVW5pdmVyc2FsIENBMB4XDTA0MDMwNDA1 -MDAwMFoXDTI5MDMwNDA1MDAwMFowRTELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IElu 
-Yy4xHjAcBgNVBAMTFUdlb1RydXN0IFVuaXZlcnNhbCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIP -ADCCAgoCggIBAKYVVaCjxuAfjJ0hUNfBvitbtaSeodlyWL0AG0y/YckUHUWCq8YdgNY96xCcOq9t -JPi8cQGeBvV8Xx7BDlXKg5pZMK4ZyzBIle0iN430SppyZj6tlcDgFgDgEB8rMQ7XlFTTQjOgNB0e -RXbdT8oYN+yFFXoZCPzVx5zw8qkuEKmS5j1YPakWaDwvdSEYfyh3peFhF7em6fgemdtzbvQKoiFs -7tqqhZJmr/Z6a4LauiIINQ/PQvE1+mrufislzDoR5G2vc7J2Ha3QsnhnGqQ5HFELZ1aD/ThdDc7d -8Lsrlh/eezJS/R27tQahsiFepdaVaH/wmZ7cRQg+59IJDTWU3YBOU5fXtQlEIGQWFwMCTFMNaN7V -qnJNk22CDtucvc+081xdVHppCZbW2xHBjXWotM85yM48vCR85mLK4b19p71XZQvk/iXttmkQ3Cga -Rr0BHdCXteGYO8A3ZNY9lO4L4fUorgtWv3GLIylBjobFS1J72HGrH4oVpjuDWtdYAVHGTEHZf9hB -Z3KiKN9gg6meyHv8U3NyWfWTehd2Ds735VzZC1U0oqpbtWpU5xPKV+yXbfReBi9Fi1jUIxaS5BZu -KGNZMN9QAZxjiRqf2xeUgnA3wySemkfWWspOqGmJch+RbNt+nhutxx9z3SxPGWX9f5NAEC7S8O08 -ni4oPmkmM8V7AgMBAAGjYzBhMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNq7LqqwDLiIJlF0 -XG0D08DYj3rWMB8GA1UdIwQYMBaAFNq7LqqwDLiIJlF0XG0D08DYj3rWMA4GA1UdDwEB/wQEAwIB -hjANBgkqhkiG9w0BAQUFAAOCAgEAMXjmx7XfuJRAyXHEqDXsRh3ChfMoWIawC/yOsjmPRFWrZIRc -aanQmjg8+uUfNeVE44B5lGiku8SfPeE0zTBGi1QrlaXv9z+ZhP015s8xxtxqv6fXIwjhmF7DWgh2 -qaavdy+3YL1ERmrvl/9zlcGO6JP7/TG37FcREUWbMPEaiDnBTzynANXH/KttgCJwpQzgXQQpAvvL -oJHRfNbDflDVnVi+QTjruXU8FdmbyUqDWcDaU/0zuzYYm4UPFd3uLax2k7nZAY1IEKj79TiG8dsK -xr2EoyNB3tZ3b4XUhRxQ4K5RirqNPnbiucon8l+f725ZDQbYKxek0nxru18UGkiPGkzns0ccjkxF -KyDuSN/n3QmOGKjaQI2SJhFTYXNd673nxE0pN2HrrDktZy4W1vUAg4WhzH92xH3kt0tm7wNFYGm2 -DFKWkoRepqO1pD4r2czYG0eq8kTaT/kD6PAUyz/zg97QwVTjt+gKN02LIFkDMBmhLMi9ER/frslK -xfMnZmaGrGiR/9nmUxwPi1xpZQomyB40w11Re9epnAahNt3ViZS82eQtDF4JbAiXfKM9fJP/P6EU -p8+1Xevb2xzEdt+Iub1FBZUbrvxGakyvSOPOrg/SfuvmbJxPgWp6ZKy7PtXny3YuxadIwVyQD8vI -P/rmMuGNG2+k5o7Y+SlIis5z/iw= ------END CERTIFICATE----- - -GeoTrust Universal CA 2 -======================= ------BEGIN CERTIFICATE----- -MIIFbDCCA1SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBHMQswCQYDVQQGEwJVUzEWMBQGA1UEChMN -R2VvVHJ1c3QgSW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVyc2FsIENBIDIwHhcNMDQwMzA0 -MDUwMDAwWhcNMjkwMzA0MDUwMDAwWjBHMQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3Qg -SW5jLjEgMB4GA1UEAxMXR2VvVHJ1c3QgVW5pdmVyc2FsIENBIDIwggIiMA0GCSqGSIb3DQEBAQUA -A4ICDwAwggIKAoICAQCzVFLByT7y2dyxUxpZKeexw0Uo5dfR7cXFS6GqdHtXr0om/Nj1XqduGdt0 -DE81WzILAePb63p3NeqqWuDW6KFXlPCQo3RWlEQwAx5cTiuFJnSCegx2oG9NzkEtoBUGFF+3Qs17 -j1hhNNwqCPkuwwGmIkQcTAeC5lvO0Ep8BNMZcyfwqph/Lq9O64ceJHdqXbboW0W63MOhBW9Wjo8Q -JqVJwy7XQYci4E+GymC16qFjwAGXEHm9ADwSbSsVsaxLse4YuU6W3Nx2/zu+z18DwPw76L5GG//a -QMJS9/7jOvdqdzXQ2o3rXhhqMcceujwbKNZrVMaqW9eiLBsZzKIC9ptZvTdrhrVtgrrY6slWvKk2 -WP0+GfPtDCapkzj4T8FdIgbQl+rhrcZV4IErKIM6+vR7IVEAvlI4zs1meaj0gVbi0IMJR1FbUGrP -20gaXT73y/Zl92zxlfgCOzJWgjl6W70viRu/obTo/3+NjN8D8WBOWBFM66M/ECuDmgFz2ZRthAAn -ZqzwcEAJQpKtT5MNYQlRJNiS1QuUYbKHsu3/mjX/hVTK7URDrBs8FmtISgocQIgfksILAAX/8sgC -SqSqqcyZlpwvWOB94b67B9xfBHJcMTTD7F8t4D1kkCLm0ey4Lt1ZrtmhN79UNdxzMk+MBB4zsslG -8dhcyFVQyWi9qLo2CQIDAQABo2MwYTAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBR281Xh+qQ2 -+/CfXGJx7Tz0RzgQKzAfBgNVHSMEGDAWgBR281Xh+qQ2+/CfXGJx7Tz0RzgQKzAOBgNVHQ8BAf8E -BAMCAYYwDQYJKoZIhvcNAQEFBQADggIBAGbBxiPz2eAubl/oz66wsCVNK/g7WJtAJDday6sWSf+z -dXkzoS9tcBc0kf5nfo/sm+VegqlVHy/c1FEHEv6sFj4sNcZj/NwQ6w2jqtB8zNHQL1EuxBRa3ugZ -4T7GzKQp5y6EqgYweHZUcyiYWTjgAA1i00J9IZ+uPTqM1fp3DRgrFg5fNuH8KrUwJM/gYwx7WBr+ -mbpCErGR9Hxo4sjoryzqyX6uuyo9DRXcNJW2GHSoag/HtPQTxORb7QrSpJdMKu0vbBKJPfEncKpq -A1Ihn0CoZ1Dy81of398j9tx4TuaYT1U6U+Pv8vSfx3zYWK8pIpe44L2RLrB27FcRz+8pRPPphXpg -Y+RdM4kX2TGq2tbzGDVyz4crL2MjhF2EjD9XoIj8mZEoJmmZ1I+XRL6O1UixpCgp8RW04eWe3fiP -pm8m1wk8OhwRDqZsN/etRIcsKMfYdIKz0G9KV7s1KSegi+ghp4dkNl3M2Basx7InQJJVOCiNUW7d 
-FGdTbHFcJoRNdVq2fmBWqU2t+5sel/MN2dKXVHfaPRK34B7vCAas+YWH6aLcr34YEoP9VhdBLtUp -gn2Z9DH2canPLAEnpQW5qrJITirvn5NSUZU8UnOOVkwXQMAJKOSLakhT2+zNVVXxxvjpoixMptEm -X36vWkzaH6byHCx+rgIW0lbQL1dTR+iS ------END CERTIFICATE----- - Comodo AAA Services root ======================== -----BEGIN CERTIFICATE----- @@ -243,38 +140,6 @@ Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2G9w84FoVxp7Z 12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg== -----END CERTIFICATE----- -QuoVadis Root CA -================ ------BEGIN CERTIFICATE----- -MIIF0DCCBLigAwIBAgIEOrZQizANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJCTTEZMBcGA1UE -ChMQUXVvVmFkaXMgTGltaXRlZDElMCMGA1UECxMcUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0 -eTEuMCwGA1UEAxMlUXVvVmFkaXMgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wMTAz -MTkxODMzMzNaFw0yMTAzMTcxODMzMzNaMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRp -cyBMaW1pdGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYDVQQD -EyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEF -AAOCAQ8AMIIBCgKCAQEAv2G1lVO6V/z68mcLOhrfEYBklbTRvM16z/Ypli4kVEAkOPcahdxYTMuk -J0KX0J+DisPkBgNbAKVRHnAEdOLB1Dqr1607BxgFjv2DrOpm2RgbaIr1VxqYuvXtdj182d6UajtL -F8HVj71lODqV0D1VNk7feVcxKh7YWWVJWCCYfqtffp/p1k3sg3Spx2zY7ilKhSoGFPlU5tPaZQeL -YzcS19Dsw3sgQUSj7cugF+FxZc4dZjH3dgEZyH0DWLaVSR2mEiboxgx24ONmy+pdpibu5cxfvWen -AScOospUxbF6lR1xHkopigPcakXBpBlebzbNw6Kwt/5cOOJSvPhEQ+aQuwIDAQABo4ICUjCCAk4w -PQYIKwYBBQUHAQEEMTAvMC0GCCsGAQUFBzABhiFodHRwczovL29jc3AucXVvdmFkaXNvZmZzaG9y -ZS5jb20wDwYDVR0TAQH/BAUwAwEB/zCCARoGA1UdIASCAREwggENMIIBCQYJKwYBBAG+WAABMIH7 -MIHUBggrBgEFBQcCAjCBxxqBxFJlbGlhbmNlIG9uIHRoZSBRdW9WYWRpcyBSb290IENlcnRpZmlj -YXRlIGJ5IGFueSBwYXJ0eSBhc3N1bWVzIGFjY2VwdGFuY2Ugb2YgdGhlIHRoZW4gYXBwbGljYWJs -ZSBzdGFuZGFyZCB0ZXJtcyBhbmQgY29uZGl0aW9ucyBvZiB1c2UsIGNlcnRpZmljYXRpb24gcHJh -Y3RpY2VzLCBhbmQgdGhlIFF1b1ZhZGlzIENlcnRpZmljYXRlIFBvbGljeS4wIgYIKwYBBQUHAgEW -Fmh0dHA6Ly93d3cucXVvdmFkaXMuYm0wHQYDVR0OBBYEFItLbe3TKbkGGew5Oanwl4Rqy+/fMIGu -BgNVHSMEgaYwgaOAFItLbe3TKbkGGew5Oanwl4Rqy+/foYGEpIGBMH8xCzAJBgNVBAYTAkJNMRkw -FwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0 -aG9yaXR5MS4wLAYDVQQDEyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggQ6 -tlCLMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAitQUtf70mpKnGdSkfnIYj9lo -fFIk3WdvOXrEql494liwTXCYhGHoG+NpGA7O+0dQoE7/8CQfvbLO9Sf87C9TqnN7Az10buYWnuul -LsS/VidQK2K6vkscPFVcQR0kvoIgR13VRH56FmjffU1RcHhXHTMe/QKZnAzNCgVPx7uOpHX6Sm2x -gI4JVrmcGmD+XcHXetwReNDWXcG31a0ymQM6isxUJTkxgXsTIlG6Rmyhu576BGxJJnSP0nPrzDCi -5upZIof4l/UO/erMkqQWxFIY6iHOsfHmhIHluqmGKPJDWl0Snawe2ajlCmqnf6CHKc/yiU3U7MXi -5nrQNiOKSnQ2+Q== ------END CERTIFICATE----- - QuoVadis Root CA 2 ================== -----BEGIN CERTIFICATE----- @@ -362,26 +227,6 @@ s58+OmJYxUmtYg5xpTKqL8aJdkNAExNnPaJUJRDL8Try2frbSVa7pv6nQTXD4IhhyYjH3zYQIphZ FL39vmwLAw== -----END CERTIFICATE----- -Sonera Class 2 Root CA -====================== ------BEGIN CERTIFICATE----- -MIIDIDCCAgigAwIBAgIBHTANBgkqhkiG9w0BAQUFADA5MQswCQYDVQQGEwJGSTEPMA0GA1UEChMG -U29uZXJhMRkwFwYDVQQDExBTb25lcmEgQ2xhc3MyIENBMB4XDTAxMDQwNjA3Mjk0MFoXDTIxMDQw -NjA3Mjk0MFowOTELMAkGA1UEBhMCRkkxDzANBgNVBAoTBlNvbmVyYTEZMBcGA1UEAxMQU29uZXJh -IENsYXNzMiBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJAXSjWdyvANlsdE+hY3 -/Ei9vX+ALTU74W+oZ6m/AxxNjG8yR9VBaKQTBME1DJqEQ/xcHf+Js+gXGM2RX/uJ4+q/Tl18GybT -dXnt5oTjV+WtKcT0OijnpXuENmmz/V52vaMtmdOQTiMofRhj8VQ7Jp12W5dCsv+u8E7s3TmVToMG -f+dJQMjFAbJUWmYdPfz56TwKnoG4cPABi+QjVHzIrviQHgCWctRUz2EjvOr7nQKV0ba5cTppCD8P -tOFCx4j1P5iop7oc4HFx71hXgVB6XGt0Rg6DA5jDjqhu8nYybieDwnPz3BjotJPqdURrBGAgcVeH 
-nfO+oJAjPYok4doh28MCAwEAAaMzMDEwDwYDVR0TAQH/BAUwAwEB/zARBgNVHQ4ECgQISqCqWITT -XjwwCwYDVR0PBAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQBazof5FnIVV0sd2ZvnoiYw7JNn39Yt -0jSv9zilzqsWuasvfDXLrNAPtEwr/IDva4yRXzZ299uzGxnq9LIR/WFxRL8oszodv7ND6J+/3DEI -cbCdjdY0RzKQxmUk96BKfARzjzlvF4xytb1LyHr4e4PDKE6cCepnP7JnBBvDFNr450kkkdAdavph -Oe9r5yF1BgfYErQhIHBCcYHaPJo2vqZbDWpsmh+Re/n570K6Tk6ezAyNlNzZRZxe7EJQY670XcSx -EtzKO6gunRRaBXW37Ndj4ro1tgQIkejanZz2ZrUYrAqmVCY0M9IbwdR/GjqOC6oybtv8TyWf2TLH -llpwrN9M ------END CERTIFICATE----- - XRamp Global CA Root ==================== -----BEGIN CERTIFICATE----- @@ -454,36 +299,6 @@ KVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEYWQPJIrSPnNVeKtelttQKbfi3 QBFGmh95DmK/D5fs4C8fF5Q= -----END CERTIFICATE----- -Taiwan GRCA -=========== ------BEGIN CERTIFICATE----- -MIIFcjCCA1qgAwIBAgIQH51ZWtcvwgZEpYAIaeNe9jANBgkqhkiG9w0BAQUFADA/MQswCQYDVQQG -EwJUVzEwMC4GA1UECgwnR292ZXJubWVudCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4X -DTAyMTIwNTEzMjMzM1oXDTMyMTIwNTEzMjMzM1owPzELMAkGA1UEBhMCVFcxMDAuBgNVBAoMJ0dv -dmVybm1lbnQgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQAD -ggIPADCCAgoCggIBAJoluOzMonWoe/fOW1mKydGGEghU7Jzy50b2iPN86aXfTEc2pBsBHH8eV4qN -w8XRIePaJD9IK/ufLqGU5ywck9G/GwGHU5nOp/UKIXZ3/6m3xnOUT0b3EEk3+qhZSV1qgQdW8or5 -BtD3cCJNtLdBuTK4sfCxw5w/cP1T3YGq2GN49thTbqGsaoQkclSGxtKyyhwOeYHWtXBiCAEuTk8O -1RGvqa/lmr/czIdtJuTJV6L7lvnM4T9TjGxMfptTCAtsF/tnyMKtsc2AtJfcdgEWFelq16TheEfO -htX7MfP6Mb40qij7cEwdScevLJ1tZqa2jWR+tSBqnTuBto9AAGdLiYa4zGX+FVPpBMHWXx1E1wov -J5pGfaENda1UhhXcSTvxls4Pm6Dso3pdvtUqdULle96ltqqvKKyskKw4t9VoNSZ63Pc78/1Fm9G7 -Q3hub/FCVGqY8A2tl+lSXunVanLeavcbYBT0peS2cWeqH+riTcFCQP5nRhc4L0c/cZyu5SHKYS1t -B6iEfC3uUSXxY5Ce/eFXiGvviiNtsea9P63RPZYLhY3Naye7twWb7LuRqQoHEgKXTiCQ8P8NHuJB -O9NAOueNXdpm5AKwB1KYXA6OM5zCppX7VRluTI6uSw+9wThNXo+EHWbNxWCWtFJaBYmOlXqYwZE8 -lSOyDvR5tMl8wUohAgMBAAGjajBoMB0GA1UdDgQWBBTMzO/MKWCkO7GStjz6MmKPrCUVOzAMBgNV -HRMEBTADAQH/MDkGBGcqBwAEMTAvMC0CAQAwCQYFKw4DAhoFADAHBgVnKgMAAAQUA5vwIhP/lSg2 -09yewDL7MTqKUWUwDQYJKoZIhvcNAQEFBQADggIBAECASvomyc5eMN1PhnR2WPWus4MzeKR6dBcZ -TulStbngCnRiqmjKeKBMmo4sIy7VahIkv9Ro04rQ2JyftB8M3jh+Vzj8jeJPXgyfqzvS/3WXy6Tj -Zwj/5cAWtUgBfen5Cv8b5Wppv3ghqMKnI6mGq3ZW6A4M9hPdKmaKZEk9GhiHkASfQlK3T8v+R0F2 -Ne//AHY2RTKbxkaFXeIksB7jSJaYV0eUVXoPQbFEJPPB/hprv4j9wabak2BegUqZIJxIZhm1AHlU -D7gsL0u8qV1bYH+Mh6XgUmMqvtg7hUAV/h62ZT/FS9p+tXo1KaMuephgIqP0fSdOLeq0dDzpD6Qz -DxARvBMB1uUO07+1EqLhRSPAzAhuYbeJq4PjJB7mXQfnHyA+z2fI56wwbSdLaG5LKlwCCDTb+Hbk -Z6MmnD+iMsJKxYEYMRBWqoTvLQr/uB930r+lWKBi5NdLkXWNiYCYfm3LU05er/ayl4WXudpVBrkk -7tfGOB5jGxI7leFYrPLfhNVfmS8NVVvmONsuP3LpSIXLuykTjx44VbnzssQwmSNOXfJIoRIM3BKQ -CZBUkQM8R+XVyWXgt0t97EfTsws+rZ7QdAAO671RrcDeLMDDav7v3Aun+kbfYNucpllQdSNpc5Oy -+fwC00fmcc4QAu4njIT/rEUNE1yDMuAlpYYsfPQS ------END CERTIFICATE----- - DigiCert Assured ID Root CA =========================== -----BEGIN CERTIFICATE----- @@ -550,26 +365,6 @@ mNEVX58Svnw2Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep+OkuE6N36B9K -----END CERTIFICATE----- -DST Root CA X3 -============== ------BEGIN CERTIFICATE----- -MIIDSjCCAjKgAwIBAgIQRK+wgNajJ7qJMDmGLvhAazANBgkqhkiG9w0BAQUFADA/MSQwIgYDVQQK -ExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMTDkRTVCBSb290IENBIFgzMB4X -DTAwMDkzMDIxMTIxOVoXDTIxMDkzMDE0MDExNVowPzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1 -cmUgVHJ1c3QgQ28uMRcwFQYDVQQDEw5EU1QgUm9vdCBDQSBYMzCCASIwDQYJKoZIhvcNAQEBBQAD -ggEPADCCAQoCggEBAN+v6ZdQCINXtMxiZfaQguzH0yxrMMpb7NnDfcdAwRgUi+DoM3ZJKuM/IUmT -rE4Orz5Iy2Xu/NMhD2XSKtkyj4zl93ewEnu1lcCJo6m67XMuegwGMoOifooUMM0RoOEqOLl5CjH9 
-UL2AZd+3UWODyOKIYepLYYHsUmu5ouJLGiifSKOeDNoJjj4XLh7dIN9bxiqKqy69cK3FCxolkHRy -xXtqqzTWMIn/5WgTe1QLyNau7Fqckh49ZLOMxt+/yUFw7BZy1SbsOFU5Q9D8/RhcQPGX69Wam40d -utolucbY38EVAjqr2m7xPi71XAicPNaDaeQQmxkqtilX4+U9m5/wAl0CAwEAAaNCMEAwDwYDVR0T -AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMSnsaR7LHH62+FLkHX/xBVghYkQ -MA0GCSqGSIb3DQEBBQUAA4IBAQCjGiybFwBcqR7uKGY3Or+Dxz9LwwmglSBd49lZRNI+DT69ikug -dB/OEIKcdBodfpga3csTS7MgROSR6cz8faXbauX+5v3gTt23ADq1cEmv8uXrAvHRAosZy5Q6XkjE -GB5YGV8eAlrwDPGxrancWYaLbumR9YbK+rlmM6pZW87ipxZzR8srzJmwN0jP41ZL9c8PDHIyh8bw -RLtTcm1D9SZImlJnt1ir/md2cXjbDaJWFBM5JDGFoqgCWjBH4d1QB7wCCZAA62RjYJsWvIjJEubS -fZGL+T0yjWW06XyxV3bqxbYoOb8VZRzI9neWagqNdwvYkQsEjgfbKbYK7p2CNTUQ ------END CERTIFICATE----- - SwissSign Gold CA - G2 ====================== -----BEGIN CERTIFICATE----- @@ -632,78 +427,6 @@ DIm6uNO5wJOKMPqN5ZprFQFOZ6raYlY+hAhm0sQ2fac+EPyI4NSA5QC9qvNOBqN6avlicuMJT+ub DgEj8Z+7fNzcbBGXJbLytGMU0gYqZ4yD9c7qB9iaah7s5Aq7KkzrCWA5zspi2C5u -----END CERTIFICATE----- -GeoTrust Primary Certification Authority -======================================== ------BEGIN CERTIFICATE----- -MIIDfDCCAmSgAwIBAgIQGKy1av1pthU6Y2yv2vrEoTANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQG -EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjExMC8GA1UEAxMoR2VvVHJ1c3QgUHJpbWFyeSBD -ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjExMjcwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMFgx -CzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTEwLwYDVQQDEyhHZW9UcnVzdCBQ -cmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB -CgKCAQEAvrgVe//UfH1nrYNke8hCUy3f9oQIIGHWAVlqnEQRr+92/ZV+zmEwu3qDXwK9AWbK7hWN -b6EwnL2hhZ6UOvNWiAAxz9juapYC2e0DjPt1befquFUWBRaa9OBesYjAZIVcFU2Ix7e64HXprQU9 -nceJSOC7KMgD4TCTZF5SwFlwIjVXiIrxlQqD17wxcwE07e9GceBrAqg1cmuXm2bgyxx5X9gaBGge -RwLmnWDiNpcB3841kt++Z8dtd1k7j53WkBWUvEI0EME5+bEnPn7WinXFsq+W06Lem+SYvn3h6YGt -tm/81w7a4DSwDRp35+MImO9Y+pyEtzavwt+s0vQQBnBxNQIDAQABo0IwQDAPBgNVHRMBAf8EBTAD -AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQULNVQQZcVi/CPNmFbSvtr2ZnJM5IwDQYJKoZI -hvcNAQEFBQADggEBAFpwfyzdtzRP9YZRqSa+S7iq8XEN3GHHoOo0Hnp3DwQ16CePbJC/kRYkRj5K -Ts4rFtULUh38H2eiAkUxT87z+gOneZ1TatnaYzr4gNfTmeGl4b7UVXGYNTq+k+qurUKykG/g/CFN -NWMziUnWm07Kx+dOCQD32sfvmWKZd7aVIl6KoKv0uHiYyjgZmclynnjNS6yvGaBzEi38wkG6gZHa -Floxt/m0cYASSJlyc1pZU8FjUjPtp8nSOQJw+uCxQmYpqptR7TBUIhRf2asdweSU8Pj1K/fqynhG -1riR/aYNKxoUAT6A8EKglQdebc3MS6RFjasS6LPeWuWgfOgPIh1a6Vk= ------END CERTIFICATE----- - -thawte Primary Root CA -====================== ------BEGIN CERTIFICATE----- -MIIEIDCCAwigAwIBAgIQNE7VVyDV7exJ9C/ON9srbTANBgkqhkiG9w0BAQUFADCBqTELMAkGA1UE -BhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2 -aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhv -cml6ZWQgdXNlIG9ubHkxHzAdBgNVBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwHhcNMDYxMTE3 -MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCBqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwg -SW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMv -KGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxHzAdBgNVBAMT -FnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCs -oPD7gFnUnMekz52hWXMJEEUMDSxuaPFsW0hoSVk3/AszGcJ3f8wQLZU0HObrTQmnHNK4yZc2AreJ -1CRfBsDMRJSUjQJib+ta3RGNKJpchJAQeg29dGYvajig4tVUROsdB58Hum/u6f1OCyn1PoSgAfGc -q/gcfomk6KHYcWUNo1F77rzSImANuVud37r8UVsLr5iy6S7pBOhih94ryNdOwUxkHt3Ph1i6Sk/K -aAcdHJ1KxtUvkcx8cXIcxcBn6zL9yZJclNqFwJu/U30rCfSMnZEfl2pSy94JNqR32HuHUETVPm4p -afs5SSYeCaWAe0At6+gnhcn+Yf1+5nyXHdWdAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYD -VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBR7W0XPr87Lev0xkhpqtvNG61dIUDANBgkqhkiG9w0BAQUF 
-AAOCAQEAeRHAS7ORtvzw6WfUDW5FvlXok9LOAz/t2iWwHVfLHjp2oEzsUHboZHIMpKnxuIvW1oeE -uzLlQRHAd9mzYJ3rG9XRbkREqaYB7FViHXe4XI5ISXycO1cRrK1zN44veFyQaEfZYGDm/Ac9IiAX -xPcW6cTYcvnIc3zfFi8VqT79aie2oetaupgf1eNNZAqdE8hhuvU5HIe6uL17In/2/qxAeeWsEG89 -jxt5dovEN7MhGITlNgDrYyCZuen+MwS7QcjBAvlEYyCegc5C09Y/LHbTY5xZ3Y+m4Q6gLkH3LpVH -z7z9M/P2C2F+fpErgUfCJzDupxBdN49cOSvkBPB7jVaMaA== ------END CERTIFICATE----- - -VeriSign Class 3 Public Primary Certification Authority - G5 -============================================================ ------BEGIN CERTIFICATE----- -MIIE0zCCA7ugAwIBAgIQGNrRniZ96LtKIVjNzGs7SjANBgkqhkiG9w0BAQUFADCByjELMAkGA1UE -BhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBO -ZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVk -IHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRp -ZmljYXRpb24gQXV0aG9yaXR5IC0gRzUwHhcNMDYxMTA4MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCB -yjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2ln -biBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2lnbiwgSW5jLiAtIEZvciBh -dXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmlt -YXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw -ggEKAoIBAQCvJAgIKXo1nmAMqudLO07cfLw8RRy7K+D+KQL5VwijZIUVJ/XxrcgxiV0i6CqqpkKz -j/i5Vbext0uz/o9+B1fs70PbZmIVYc9gDaTY3vjgw2IIPVQT60nKWVSFJuUrjxuf6/WhkcIzSdhD -Y2pSS9KP6HBRTdGJaXvHcPaz3BJ023tdS1bTlr8Vd6Gw9KIl8q8ckmcY5fQGBO+QueQA5N06tRn/ -Arr0PO7gi+s3i+z016zy9vA9r911kTMZHRxAy3QkGSGT2RT+rCpSx4/VBEnkjWNHiDxpg8v+R70r -fk/Fla4OndTRQ8Bnc+MUCH7lP59zuDMKz10/NIeWiu5T6CUVAgMBAAGjgbIwga8wDwYDVR0TAQH/ -BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2Uv -Z2lmMCEwHzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVy -aXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFH/TZafC3ey78DAJ80M5+gKvMzEzMA0GCSqG -SIb3DQEBBQUAA4IBAQCTJEowX2LP2BqYLz3q3JktvXf2pXkiOOzEp6B4Eq1iDkVwZMXnl2YtmAl+ -X6/WzChl8gGqCBpH3vn5fJJaCGkgDdk+bW48DW7Y5gaRQBi5+MHt39tBquCWIMnNZBU4gcmU7qKE -KQsTb47bDN0lAtukixlE0kF6BWlKWE9gyn6CagsCqiUXObXbf+eEZSqVir2G3l6BFoMtEMze/aiC -Km0oHw0LxOXnGiYZ4fQRbxC1lfznQgUy286dUV4otp6F01vvpX1FQHKOtw5rDgb7MzVIcbidJ4vE -ZV8NhnacRHr2lVz2XTIIM6RUthg/aFzyQkqFOFSDX9HoLPKsEdao7WNq ------END CERTIFICATE----- - SecureTrust CA ============== -----BEGIN CERTIFICATE----- @@ -772,29 +495,6 @@ IGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5ddBA6+C4OmF4O5MBKgxTMVBbkN +8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IBZQ== -----END CERTIFICATE----- -Network Solutions Certificate Authority -======================================= ------BEGIN CERTIFICATE----- -MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBiMQswCQYDVQQG -EwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydOZXR3b3Jr -IFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMx -MjM1OTU5WjBiMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu -MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0G -CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwzc7MEL7xx -jOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPPOCwGJgl6cvf6UDL4wpPT -aaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rlmGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXT -crA/vGp97Eh/jcOrqnErU2lBUzS1sLnFBgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc -/Qzpf14Dl847ABSHJ3A4qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMB -AAGjgZcwgZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIBBjAP -BgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwubmV0c29sc3NsLmNv 
-bS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3JpdHkuY3JsMA0GCSqGSIb3DQEBBQUA -A4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc86fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q -4LqILPxFzBiwmZVRDuwduIj/h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/ -GGUsyfJj4akH/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv -wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHNpGxlaKFJdlxD -ydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey ------END CERTIFICATE----- - COMODO ECC Certification Authority ================================== -----BEGIN CERTIFICATE----- @@ -812,29 +512,6 @@ FAkK+qDmfQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdvGDeA U/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY= -----END CERTIFICATE----- -OISTE WISeKey Global Root GA CA -=============================== ------BEGIN CERTIFICATE----- -MIID8TCCAtmgAwIBAgIQQT1yx/RrH4FDffHSKFTfmjANBgkqhkiG9w0BAQUFADCBijELMAkGA1UE -BhMCQ0gxEDAOBgNVBAoTB1dJU2VLZXkxGzAZBgNVBAsTEkNvcHlyaWdodCAoYykgMjAwNTEiMCAG -A1UECxMZT0lTVEUgRm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBH -bG9iYWwgUm9vdCBHQSBDQTAeFw0wNTEyMTExNjAzNDRaFw0zNzEyMTExNjA5NTFaMIGKMQswCQYD -VQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEbMBkGA1UECxMSQ29weXJpZ2h0IChjKSAyMDA1MSIw -IAYDVQQLExlPSVNURSBGb3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5 -IEdsb2JhbCBSb290IEdBIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAy0+zAJs9 -Nt350UlqaxBJH+zYK7LG+DKBKUOVTJoZIyEVRd7jyBxRVVuuk+g3/ytr6dTqvirdqFEr12bDYVxg -Asj1znJ7O7jyTmUIms2kahnBAbtzptf2w93NvKSLtZlhuAGio9RN1AU9ka34tAhxZK9w8RxrfvbD -d50kc3vkDIzh2TbhmYsFmQvtRTEJysIA2/dyoJaqlYfQjse2YXMNdmaM3Bu0Y6Kff5MTMPGhJ9vZ -/yxViJGg4E8HsChWjBgbl0SOid3gF27nKu+POQoxhILYQBRJLnpB5Kf+42TMwVlxSywhp1t94B3R -LoGbw9ho972WG6xwsRYUC9tguSYBBQIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw -AwEB/zAdBgNVHQ4EFgQUswN+rja8sHnR3JQmthG+IbJphpQwEAYJKwYBBAGCNxUBBAMCAQAwDQYJ -KoZIhvcNAQEFBQADggEBAEuh/wuHbrP5wUOxSPMowB0uyQlB+pQAHKSkq0lPjz0e701vvbyk9vIm -MMkQyh2I+3QZH4VFvbBsUfk2ftv1TDI6QU9bR8/oCy22xBmddMVHxjtqD6wU2zz0c5ypBd8A3HR4 -+vg1YFkCExh8vPtNsCBtQ7tgMHpnM1zFmdH4LTlSc/uMqpclXHLZCB6rTjzjgTGfA6b7wP4piFXa -hNVQA7bihKOmNqoROgHhGEvWRGizPflTdISzRpFGlgC3gCy24eMQ4tui5yiPAZZiFj4A4xylNoEY -okxSdsARo27mHbrjWr42U8U+dY+GaSlYU7Wcu2+fXMUY7N0v4ZjJ/L7fCg0= ------END CERTIFICATE----- - Certigna ======== -----BEGIN CERTIFICATE----- @@ -857,28 +534,6 @@ PBS1xp81HlDQwY9qcEQCYsuuHWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg== -----END CERTIFICATE----- -Cybertrust Global Root -====================== ------BEGIN CERTIFICATE----- -MIIDoTCCAomgAwIBAgILBAAAAAABD4WqLUgwDQYJKoZIhvcNAQEFBQAwOzEYMBYGA1UEChMPQ3li -ZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2JhbCBSb290MB4XDTA2MTIxNTA4 -MDAwMFoXDTIxMTIxNTA4MDAwMFowOzEYMBYGA1UEChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQD -ExZDeWJlcnRydXN0IEdsb2JhbCBSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA -+Mi8vRRQZhP/8NN57CPytxrHjoXxEnOmGaoQ25yiZXRadz5RfVb23CO21O1fWLE3TdVJDm71aofW -0ozSJ8bi/zafmGWgE07GKmSb1ZASzxQG9Dvj1Ci+6A74q05IlG2OlTEQXO2iLb3VOm2yHLtgwEZL -AfVJrn5GitB0jaEMAs7u/OePuGtm839EAL9mJRQr3RAwHQeWP032a7iPt3sMpTjr3kfb1V05/Iin -89cqdPHoWqI7n1C6poxFNcJQZZXcY4Lv3b93TZxiyWNzFtApD0mpSPCzqrdsxacwOUBdrsTiXSZT -8M4cIwhhqJQZugRiQOwfOHB3EgZxpzAYXSUnpQIDAQABo4GlMIGiMA4GA1UdDwEB/wQEAwIBBjAP -BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBS2CHsNesysIEyGVjJez6tuhS1wVzA/BgNVHR8EODA2 -MDSgMqAwhi5odHRwOi8vd3d3Mi5wdWJsaWMtdHJ1c3QuY29tL2NybC9jdC9jdHJvb3QuY3JsMB8G -A1UdIwQYMBaAFLYIew16zKwgTIZWMl7Pq26FLXBXMA0GCSqGSIb3DQEBBQUAA4IBAQBW7wojoFRO -lZfJ+InaRcHUowAl9B8Tq7ejhVhpwjCt2BWKLePJzYFa+HMjWqd8BfP9IjsO0QbE2zZMcwSO5bAi 
-5MXzLqXZI+O4Tkogp24CJJ8iYGd7ix1yCcUxXOl5n4BHPa2hCwcUPUf/A2kaDAtE52Mlp3+yybh2 -hO0j9n0Hq0V+09+zv+mKts2oomcrUtW3ZfA5TGOgkXmTUg9U3YO7n9GPp1Nzw8v/MOx8BLjYRB+T -X3EJIrduPuocA06dGiBh+4E37F78CkWr1+cXVdCg6mCbpvbjjFspwgZgFJ0tl0ypkxWdYcQBX0jW -WL1WMRJOEcgh4LMRkWXbtKaIOM5V ------END CERTIFICATE----- - ePKI Root Certification Authority ================================= -----BEGIN CERTIFICATE----- @@ -930,136 +585,6 @@ vBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNwi/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7Nz TogVZ96edhBiIL5VaZVDADlN9u6wWk5JRFRYX0KD -----END CERTIFICATE----- -GeoTrust Primary Certification Authority - G3 -============================================= ------BEGIN CERTIFICATE----- -MIID/jCCAuagAwIBAgIQFaxulBmyeUtB9iepwxgPHzANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UE -BhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChjKSAyMDA4IEdlb1RydXN0 -IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFy -eSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEczMB4XDTA4MDQwMjAwMDAwMFoXDTM3MTIwMTIz -NTk1OVowgZgxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAo -YykgMjAwOCBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNVBAMT -LUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZI -hvcNAQEBBQADggEPADCCAQoCggEBANziXmJYHTNXOTIz+uvLh4yn1ErdBojqZI4xmKU4kB6Yzy5j -K/BGvESyiaHAKAxJcCGVn2TAppMSAmUmhsalifD614SgcK9PGpc/BkTVyetyEH3kMSj7HGHmKAdE -c5IiaacDiGydY8hS2pgn5whMcD60yRLBxWeDXTPzAxHsatBT4tG6NmCUgLthY2xbF37fQJQeqw3C -IShwiP/WJmxsYAQlTlV+fe+/lEjetx3dcI0FX4ilm/LC7urRQEFtYjgdVgbFA0dRIBn8exALDmKu -dlW/X3e+PkkBUz2YJQN2JFodtNuJ6nnltrM7P7pMKEF/BqxqjsHQ9gUdfeZChuOl1UcCAwEAAaNC -MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMR5yo6hTgMdHNxr -2zFblD4/MH8tMA0GCSqGSIb3DQEBCwUAA4IBAQAtxRPPVoB7eni9n64smefv2t+UXglpp+duaIy9 -cr5HqQ6XErhK8WTTOd8lNNTBzU6B8A8ExCSzNJbGpqow32hhc9f5joWJ7w5elShKKiePEI4ufIbE -Ap7aDHdlDkQNkv39sxY2+hENHYwOB4lqKVb3cvTdFZx3NWZXqxNT2I7BQMXXExZacse3aQHEerGD -AWh9jUGhlBjBJVz88P6DAod8DQ3PLghcSkANPuyBYeYk28rgDi0Hsj5W3I31QYUHSJsMC8tJP33s -t/3LjWeJGqvtux6jAAgIFyqCXDFdRootD4abdNlF+9RAsXqqaC2Gspki4cErx5z481+oghLrGREt ------END CERTIFICATE----- - -thawte Primary Root CA - G2 -=========================== ------BEGIN CERTIFICATE----- -MIICiDCCAg2gAwIBAgIQNfwmXNmET8k9Jj1Xm67XVjAKBggqhkjOPQQDAzCBhDELMAkGA1UEBhMC -VVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjE4MDYGA1UECxMvKGMpIDIwMDcgdGhhd3RlLCBJbmMu -IC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAiBgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3Qg -Q0EgLSBHMjAeFw0wNzExMDUwMDAwMDBaFw0zODAxMTgyMzU5NTlaMIGEMQswCQYDVQQGEwJVUzEV -MBMGA1UEChMMdGhhd3RlLCBJbmMuMTgwNgYDVQQLEy8oYykgMjAwNyB0aGF3dGUsIEluYy4gLSBG -b3IgYXV0aG9yaXplZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAt -IEcyMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEotWcgnuVnfFSeIf+iha/BebfowJPDQfGAFG6DAJS -LSKkQjnE/o/qycG+1E3/n3qe4rF8mq2nhglzh9HnmuN6papu+7qzcMBniKI11KOasf2twu8x+qi5 -8/sIxpHR+ymVo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQU -mtgAMADna3+FGO6Lts6KDPgR4bswCgYIKoZIzj0EAwMDaQAwZgIxAN344FdHW6fmCsO99YCKlzUN -G4k8VIZ3KMqh9HneteY4sPBlcIx/AlTCv//YoT7ZzwIxAMSNlPzcU9LcnXgWHxUzI1NS41oxXZ3K -rr0TKUQNJ1uo52icEvdYPy5yAlejj6EULg== ------END CERTIFICATE----- - -thawte Primary Root CA - G3 -=========================== ------BEGIN CERTIFICATE----- -MIIEKjCCAxKgAwIBAgIQYAGXt0an6rS0mtZLL/eQ+zANBgkqhkiG9w0BAQsFADCBrjELMAkGA1UE -BhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2 -aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIwMDggdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhv -cml6ZWQgdXNlIG9ubHkxJDAiBgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMzAeFw0w 
-ODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIGuMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhh -d3RlLCBJbmMuMSgwJgYDVQQLEx9DZXJ0aWZpY2F0aW9uIFNlcnZpY2VzIERpdmlzaW9uMTgwNgYD -VQQLEy8oYykgMjAwOCB0aGF3dGUsIEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTEkMCIG -A1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAtIEczMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A -MIIBCgKCAQEAsr8nLPvb2FvdeHsbnndmgcs+vHyu86YnmjSjaDFxODNi5PNxZnmxqWWjpYvVj2At -P0LMqmsywCPLLEHd5N/8YZzic7IilRFDGF/Eth9XbAoFWCLINkw6fKXRz4aviKdEAhN0cXMKQlkC -+BsUa0Lfb1+6a4KinVvnSr0eAXLbS3ToO39/fR8EtCab4LRarEc9VbjXsCZSKAExQGbY2SS99irY -7CFJXJv2eul/VTV+lmuNk5Mny5K76qxAwJ/C+IDPXfRa3M50hqY+bAtTyr2SzhkGcuYMXDhpxwTW -vGzOW/b3aJzcJRVIiKHpqfiYnODz1TEoYRFsZ5aNOZnLwkUkOQIDAQABo0IwQDAPBgNVHRMBAf8E -BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUrWyqlGCc7eT/+j4KdCtjA/e2Wb8wDQYJ -KoZIhvcNAQELBQADggEBABpA2JVlrAmSicY59BDlqQ5mU1143vokkbvnRFHfxhY0Cu9qRFHqKweK -A3rD6z8KLFIWoCtDuSWQP3CpMyVtRRooOyfPqsMpQhvfO0zAMzRbQYi/aytlryjvsvXDqmbOe1bu -t8jLZ8HJnBoYuMTDSQPxYA5QzUbF83d597YV4Djbxy8ooAw/dyZ02SUS2jHaGh7cKUGRIjxpp7sC -8rZcJwOJ9Abqm+RyguOhCcHpABnTPtRwa7pxpqpYrvS76Wy274fMm7v/OeZWYdMKp8RcTGB7BXcm -er/YB1IsYvdwY9k5vG8cwnncdimvzsUsZAReiDZuMdRAGmI0Nj81Aa6sY6A= ------END CERTIFICATE----- - -GeoTrust Primary Certification Authority - G2 -============================================= ------BEGIN CERTIFICATE----- -MIICrjCCAjWgAwIBAgIQPLL0SAoA4v7rJDteYD7DazAKBggqhkjOPQQDAzCBmDELMAkGA1UEBhMC -VVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChjKSAyMDA3IEdlb1RydXN0IElu -Yy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBD -ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMB4XDTA3MTEwNTAwMDAwMFoXDTM4MDExODIzNTk1 -OVowgZgxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykg -MjAwNyBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNVBAMTLUdl -b1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMjB2MBAGByqGSM49AgEG -BSuBBAAiA2IABBWx6P0DFUPlrOuHNxFi79KDNlJ9RVcLSo17VDs6bl8VAsBQps8lL33KSLjHUGMc -KiEIfJo22Av+0SbFWDEwKCXzXV2juLaltJLtbCyf691DiaI8S0iRHVDsJt/WYC69IaNCMEAwDwYD -VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBVfNVdRVfslsq0DafwBo/q+ -EVXVMAoGCCqGSM49BAMDA2cAMGQCMGSWWaboCd6LuvpaiIjwH5HTRqjySkwCY/tsXzjbLkGTqQ7m -ndwxHLKgpxgceeHHNgIwOlavmnRs9vuD4DPTCF+hnMJbn0bWtsuRBmOiBuczrD6ogRLQy7rQkgu2 -npaqBA+K ------END CERTIFICATE----- - -VeriSign Universal Root Certification Authority -=============================================== ------BEGIN CERTIFICATE----- -MIIEuTCCA6GgAwIBAgIQQBrEZCGzEyEDDrvkEhrFHTANBgkqhkiG9w0BAQsFADCBvTELMAkGA1UE -BhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBO -ZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwOCBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVk -IHVzZSBvbmx5MTgwNgYDVQQDEy9WZXJpU2lnbiBVbml2ZXJzYWwgUm9vdCBDZXJ0aWZpY2F0aW9u -IEF1dGhvcml0eTAeFw0wODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIG9MQswCQYDVQQGEwJV -UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv -cmsxOjA4BgNVBAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl -IG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNhbCBSb290IENlcnRpZmljYXRpb24gQXV0 -aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx2E3XrEBNNti1xWb/1hajCMj -1mCOkdeQmIN65lgZOIzF9uVkhbSicfvtvbnazU0AtMgtc6XHaXGVHzk8skQHnOgO+k1KxCHfKWGP -MiJhgsWHH26MfF8WIFFE0XBPV+rjHOPMee5Y2A7Cs0WTwCznmhcrewA3ekEzeOEz4vMQGn+HLL72 -9fdC4uW/h2KJXwBL38Xd5HVEMkE6HnFuacsLdUYI0crSK5XQz/u5QGtkjFdN/BMReYTtXlT2NJ8I -AfMQJQYXStrxHXpma5hgZqTZ79IugvHw7wnqRMkVauIDbjPTrJ9VAMf2CGqUuV/c4DPxhGD5WycR -tPwW8rtWaoAljQIDAQABo4GyMIGvMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMG0G 
-CCsGAQUFBwEMBGEwX6FdoFswWTBXMFUWCWltYWdlL2dpZjAhMB8wBwYFKw4DAhoEFI/l0xqGrI2O -a8PPgGrUSBgsexkuMCUWI2h0dHA6Ly9sb2dvLnZlcmlzaWduLmNvbS92c2xvZ28uZ2lmMB0GA1Ud -DgQWBBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG9w0BAQsFAAOCAQEASvj4sAPmLGd75JR3 -Y8xuTPl9Dg3cyLk1uXBPY/ok+myDjEedO2Pzmvl2MpWRsXe8rJq+seQxIcaBlVZaDrHC1LGmWazx -Y8u4TB1ZkErvkBYoH1quEPuBUDgMbMzxPcP1Y+Oz4yHJJDnp/RVmRvQbEdBNc6N9Rvk97ahfYtTx -P/jgdFcrGJ2BtMQo2pSXpXDrrB2+BxHw1dvd5Yzw1TKwg+ZX4o+/vqGqvz0dtdQ46tewXDpPaj+P -wGZsY6rp2aQW9IHRlRQOfc2VNNnSj3BzgXucfr2YYdhFh5iQxeuGMMY1v/D/w1WIg0vvBZIGcfK4 -mJO37M2CYfE45k+XmCpajQ== ------END CERTIFICATE----- - -VeriSign Class 3 Public Primary Certification Authority - G4 -============================================================ ------BEGIN CERTIFICATE----- -MIIDhDCCAwqgAwIBAgIQL4D+I4wOIg9IZxIokYesszAKBggqhkjOPQQDAzCByjELMAkGA1UEBhMC -VVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBOZXR3 -b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVz -ZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmlj -YXRpb24gQXV0aG9yaXR5IC0gRzQwHhcNMDcxMTA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCByjEL -MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBU -cnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRo -b3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5 -IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASnVnp8 -Utpkmw4tXNherJI9/gHmGUo9FANL+mAnINmDiWn6VMaaGF5VKmTeBvaNSjutEDxlPZCIBIngMGGz -rl0Bp3vefLK+ymVhAIau2o970ImtTR1ZmkGxvEeA3J5iw/mjgbIwga8wDwYDVR0TAQH/BAUwAwEB -/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2UvZ2lmMCEw -HzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVyaXNpZ24u -Y29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFLMWkf3upm7ktS5Jj4d4gYDs5bG1MAoGCCqGSM49BAMD -A2gAMGUCMGYhDBgmYFo4e1ZC4Kf8NoRRkSAsdk1DPcQdhCPQrNZ8NQbOzWm9kA3bbEhCHQ6qQgIx -AJw9SDkjOVgaFRJZap7v1VmyHVIsmXHNxynfGyphe3HR3vPA5Q06Sqotp9iGKt0uEA== ------END CERTIFICATE----- - NetLock Arany (Class Gold) FÅ‘tanúsítvány ======================================== -----BEGIN CERTIFICATE----- @@ -1084,26 +609,6 @@ NwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2XjG4Kvte9nHfRCaexOYNkbQu dZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E= -----END CERTIFICATE----- -Hongkong Post Root CA 1 -======================= ------BEGIN CERTIFICATE----- -MIIDMDCCAhigAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCSEsxFjAUBgNVBAoT -DUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3QgUm9vdCBDQSAxMB4XDTAzMDUx -NTA1MTMxNFoXDTIzMDUxNTA0NTIyOVowRzELMAkGA1UEBhMCSEsxFjAUBgNVBAoTDUhvbmdrb25n -IFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3QgUm9vdCBDQSAxMIIBIjANBgkqhkiG9w0BAQEF -AAOCAQ8AMIIBCgKCAQEArP84tulmAknjorThkPlAj3n54r15/gK97iSSHSL22oVyaf7XPwnU3ZG1 -ApzQjVrhVcNQhrkpJsLj2aDxaQMoIIBFIi1WpztUlVYiWR8o3x8gPW2iNr4joLFutbEnPzlTCeqr -auh0ssJlXI6/fMN4hM2eFvz1Lk8gKgifd/PFHsSaUmYeSF7jEAaPIpjhZY4bXSNmO7ilMlHIhqqh -qZ5/dpTCpmy3QfDVyAY45tQM4vM7TG1QjMSDJ8EThFk9nnV0ttgCXjqQesBCNnLsak3c78QA3xMY -V18meMjWCnl3v/evt3a5pQuEF10Q6m/hq5URX208o1xNg1vysxmKgIsLhwIDAQABoyYwJDASBgNV -HRMBAf8ECDAGAQH/AgEDMA4GA1UdDwEB/wQEAwIBxjANBgkqhkiG9w0BAQUFAAOCAQEADkbVPK7i -h9legYsCmEEIjEy82tvuJxuC52pF7BaLT4Wg87JwvVqWuspube5Gi27nKi6Wsxkz67SfqLI37pio -l7Yutmcn1KZJ/RyTZXaeQi/cImyaT/JaFTmxcdcrUehtHJjA2Sr0oYJ71clBoiMBdDhViw+5Lmei -IAQ32pwL0xch4I+XeTRvhEgCIDMb5jREn5Fw9IBehEPCKdJsEhTkYY2sEJCehFC78JZvRZ+K88ps -T/oROhUVRsPNH4NbLUES7VBnQRM9IauUiqpOfMGx+6fWtScvl6tu4B3i0RwsH0Ti/L6RoZz71ilT -c4afU9hDDl3WY4JxHYB0yvbiAmvZWg== ------END CERTIFICATE----- - SecureSign RootCA11 
=================== -----BEGIN CERTIFICATE----- @@ -1235,82 +740,6 @@ Q0iy2+tzJOeRf1SktoA+naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1Z WrOZyGlsQyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw== -----END CERTIFICATE----- -Chambers of Commerce Root - 2008 -================================ ------BEGIN CERTIFICATE----- -MIIHTzCCBTegAwIBAgIJAKPaQn6ksa7aMA0GCSqGSIb3DQEBBQUAMIGuMQswCQYDVQQGEwJFVTFD -MEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNv -bS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMu -QS4xKTAnBgNVBAMTIENoYW1iZXJzIG9mIENvbW1lcmNlIFJvb3QgLSAyMDA4MB4XDTA4MDgwMTEy -Mjk1MFoXDTM4MDczMTEyMjk1MFowga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNl -ZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29tL2FkZHJlc3MpMRIwEAYDVQQF -EwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVyZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJl -cnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDgwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC -AQCvAMtwNyuAWko6bHiUfaN/Gh/2NdW928sNRHI+JrKQUrpjOyhYb6WzbZSm891kDFX29ufyIiKA -XuFixrYp4YFs8r/lfTJqVKAyGVn+H4vXPWCGhSRv4xGzdz4gljUha7MI2XAuZPeEklPWDrCQiorj -h40G072QDuKZoRuGDtqaCrsLYVAGUvGef3bsyw/QHg3PmTA9HMRFEFis1tPo1+XqxQEHd9ZR5gN/ -ikilTWh1uem8nk4ZcfUyS5xtYBkL+8ydddy/Js2Pk3g5eXNeJQ7KXOt3EgfLZEFHcpOrUMPrCXZk -NNI5t3YRCQ12RcSprj1qr7V9ZS+UWBDsXHyvfuK2GNnQm05aSd+pZgvMPMZ4fKecHePOjlO+Bd5g -D2vlGts/4+EhySnB8esHnFIbAURRPHsl18TlUlRdJQfKFiC4reRB7noI/plvg6aRArBsNlVq5331 -lubKgdaX8ZSD6e2wsWsSaR6s+12pxZjptFtYer49okQ6Y1nUCyXeG0+95QGezdIp1Z8XGQpvvwyQ -0wlf2eOKNcx5Wk0ZN5K3xMGtr/R5JJqyAQuxr1yW84Ay+1w9mPGgP0revq+ULtlVmhduYJ1jbLhj -ya6BXBg14JC7vjxPNyK5fuvPnnchpj04gftI2jE9K+OJ9dC1vX7gUMQSibMjmhAxhduub+84Mxh2 -EQIDAQABo4IBbDCCAWgwEgYDVR0TAQH/BAgwBgEB/wIBDDAdBgNVHQ4EFgQU+SSsD7K1+HnA+mCI -G8TZTQKeFxkwgeMGA1UdIwSB2zCB2IAU+SSsD7K1+HnA+mCIG8TZTQKeFxmhgbSkgbEwga4xCzAJ -BgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNlZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNh -bWVyZmlybWEuY29tL2FkZHJlc3MpMRIwEAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENh -bWVyZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDiC -CQCj2kJ+pLGu2jAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCowKAYIKwYBBQUH -AgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZIhvcNAQEFBQADggIBAJASryI1 -wqM58C7e6bXpeHxIvj99RZJe6dqxGfwWPJ+0W2aeaufDuV2I6A+tzyMP3iU6XsxPpcG1Lawk0lgH -3qLPaYRgM+gQDROpI9CF5Y57pp49chNyM/WqfcZjHwj0/gF/JM8rLFQJ3uIrbZLGOU8W6jx+ekbU -RWpGqOt1glanq6B8aBMz9p0w8G8nOSQjKpD9kCk18pPfNKXG9/jvjA9iSnyu0/VU+I22mlaHFoI6 -M6taIgj3grrqLuBHmrS1RaMFO9ncLkVAO+rcf+g769HsJtg1pDDFOqxXnrN2pSB7+R5KBWIBpih1 -YJeSDW4+TTdDDZIVnBgizVGZoCkaPF+KMjNbMMeJL0eYD6MDxvbxrN8y8NmBGuScvfaAFPDRLLmF -9dijscilIeUcE5fuDr3fKanvNFNb0+RqE4QGtjICxFKuItLcsiFCGtpA8CnJ7AoMXOLQusxI0zcK -zBIKinmwPQN/aUv0NCB9szTqjktk9T79syNnFQ0EuPAtwQlRPLJsFfClI9eDdOTlLsn+mCdCxqvG -nrDQWzilm1DefhiYtUU79nm06PcaewaD+9CL2rvHvRirCG88gGtAPxkZumWK5r7VXNM21+9AUiRg -OGcEMeyP84LG3rlV8zsxkVrctQgVrXYlCg17LofiDKYGvCYQbTed7N14jHyAxfDZd0jQ ------END CERTIFICATE----- - -Global Chambersign Root - 2008 -============================== ------BEGIN CERTIFICATE----- -MIIHSTCCBTGgAwIBAgIJAMnN0+nVfSPOMA0GCSqGSIb3DQEBBQUAMIGsMQswCQYDVQQGEwJFVTFD -MEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNv -bS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMu -QS4xJzAlBgNVBAMTHkdsb2JhbCBDaGFtYmVyc2lnbiBSb290IC0gMjAwODAeFw0wODA4MDExMjMx -NDBaFw0zODA3MzExMjMxNDBaMIGsMQswCQYDVQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUg -Y3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJ 
-QTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAlBgNVBAMTHkdsb2JhbCBD -aGFtYmVyc2lnbiBSb290IC0gMjAwODCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMDf -VtPkOpt2RbQT2//BthmLN0EYlVJH6xedKYiONWwGMi5HYvNJBL99RDaxccy9Wglz1dmFRP+RVyXf -XjaOcNFccUMd2drvXNL7G706tcuto8xEpw2uIRU/uXpbknXYpBI4iRmKt4DS4jJvVpyR1ogQC7N0 -ZJJ0YPP2zxhPYLIj0Mc7zmFLmY/CDNBAspjcDahOo7kKrmCgrUVSY7pmvWjg+b4aqIG7HkF4ddPB -/gBVsIdU6CeQNR1MM62X/JcumIS/LMmjv9GYERTtY/jKmIhYF5ntRQOXfjyGHoiMvvKRhI9lNNgA -TH23MRdaKXoKGCQwoze1eqkBfSbW+Q6OWfH9GzO1KTsXO0G2Id3UwD2ln58fQ1DJu7xsepeY7s2M -H/ucUa6LcL0nn3HAa6x9kGbo1106DbDVwo3VyJ2dwW3Q0L9R5OP4wzg2rtandeavhENdk5IMagfe -Ox2YItaswTXbo6Al/3K1dh3ebeksZixShNBFks4c5eUzHdwHU1SjqoI7mjcv3N2gZOnm3b2u/GSF -HTynyQbehP9r6GsaPMWis0L7iwk+XwhSx2LE1AVxv8Rk5Pihg+g+EpuoHtQ2TS9x9o0o9oOpE9Jh -wZG7SMA0j0GMS0zbaRL/UJScIINZc+18ofLx/d33SdNDWKBWY8o9PeU1VlnpDsogzCtLkykPAgMB -AAGjggFqMIIBZjASBgNVHRMBAf8ECDAGAQH/AgEMMB0GA1UdDgQWBBS5CcqcHtvTbDprru1U8VuT -BjUuXjCB4QYDVR0jBIHZMIHWgBS5CcqcHtvTbDprru1U8VuTBjUuXqGBsqSBrzCBrDELMAkGA1UE -BhMCRVUxQzBBBgNVBAcTOk1hZHJpZCAoc2VlIGN1cnJlbnQgYWRkcmVzcyBhdCB3d3cuY2FtZXJm -aXJtYS5jb20vYWRkcmVzcykxEjAQBgNVBAUTCUE4Mjc0MzI4NzEbMBkGA1UEChMSQUMgQ2FtZXJm -aXJtYSBTLkEuMScwJQYDVQQDEx5HbG9iYWwgQ2hhbWJlcnNpZ24gUm9vdCAtIDIwMDiCCQDJzdPp -1X0jzjAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCowKAYIKwYBBQUHAgEWHGh0 -dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZIhvcNAQEFBQADggIBAICIf3DekijZBZRG -/5BXqfEv3xoNa/p8DhxJJHkn2EaqbylZUohwEurdPfWbU1Rv4WCiqAm57OtZfMY18dwY6fFn5a+6 -ReAJ3spED8IXDneRRXozX1+WLGiLwUePmJs9wOzL9dWCkoQ10b42OFZyMVtHLaoXpGNR6woBrX/s -dZ7LoR/xfxKxueRkf2fWIyr0uDldmOghp+G9PUIadJpwr2hsUF1Jz//7Dl3mLEfXgTpZALVza2Mg -9jFFCDkO9HB+QHBaP9BrQql0PSgvAm11cpUJjUhjxsYjV5KTXjXBjfkK9yydYhz2rXzdpjEetrHH -foUm+qRqtdpjMNHvkzeyZi99Bffnt0uYlDXA2TopwZ2yUDMdSqlapskD7+3056huirRXhOukP9Du -qqqHW2Pok+JrqNS4cnhrG+055F3Lm6qH1U9OAP7Zap88MQ8oAgF9mOinsKJknnn4SPIVqczmyETr -P3iZ8ntxPjzxmKfFGBI/5rsoM0LpRQp8bfKGeS/Fghl9CYl8slR2iK7ewfPM4W7bMdaTrpmg7yVq -c5iJWzouE4gev8CSlDQb4ye3ix5vQv/n6TebUB0tovkC7stYWDpxvGjjqsGvHCgfotwjZT+B6q6Z -09gwzxMNTxXJhLynSC34MCN32EZLeW32jO06f2ARePTpm67VVMB0gNELQp/B ------END CERTIFICATE----- - Go Daddy Root Certificate Authority - G2 ======================================== -----BEGIN CERTIFICATE----- @@ -1527,60 +956,6 @@ tnRGEmyR7jTV7JqR50S+kDFy1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29 mvVXIwAHIRc/SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03 -----END CERTIFICATE----- -EC-ACC -====== ------BEGIN CERTIFICATE----- -MIIFVjCCBD6gAwIBAgIQ7is969Qh3hSoYqwE893EATANBgkqhkiG9w0BAQUFADCB8zELMAkGA1UE -BhMCRVMxOzA5BgNVBAoTMkFnZW5jaWEgQ2F0YWxhbmEgZGUgQ2VydGlmaWNhY2lvIChOSUYgUS0w -ODAxMTc2LUkpMSgwJgYDVQQLEx9TZXJ2ZWlzIFB1YmxpY3MgZGUgQ2VydGlmaWNhY2lvMTUwMwYD -VQQLEyxWZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbCAoYykwMzE1MDMGA1UE -CxMsSmVyYXJxdWlhIEVudGl0YXRzIGRlIENlcnRpZmljYWNpbyBDYXRhbGFuZXMxDzANBgNVBAMT -BkVDLUFDQzAeFw0wMzAxMDcyMzAwMDBaFw0zMTAxMDcyMjU5NTlaMIHzMQswCQYDVQQGEwJFUzE7 -MDkGA1UEChMyQWdlbmNpYSBDYXRhbGFuYSBkZSBDZXJ0aWZpY2FjaW8gKE5JRiBRLTA4MDExNzYt -SSkxKDAmBgNVBAsTH1NlcnZlaXMgUHVibGljcyBkZSBDZXJ0aWZpY2FjaW8xNTAzBgNVBAsTLFZl -Z2V1IGh0dHBzOi8vd3d3LmNhdGNlcnQubmV0L3ZlcmFycmVsIChjKTAzMTUwMwYDVQQLEyxKZXJh -cnF1aWEgRW50aXRhdHMgZGUgQ2VydGlmaWNhY2lvIENhdGFsYW5lczEPMA0GA1UEAxMGRUMtQUND -MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsyLHT+KXQpWIR4NA9h0X84NzJB5R85iK -w5K4/0CQBXCHYMkAqbWUZRkiFRfCQ2xmRJoNBD45b6VLeqpjt4pEndljkYRm4CgPukLjbo73FCeT -ae6RDqNfDrHrZqJyTxIThmV6PttPB/SnCWDaOkKZx7J/sxaVHMf5NLWUhdWZXqBIoH7nF2W4onW4 -HvPlQn2v7fOKSGRdghST2MDk/7NQcvJ29rNdQlB50JQ+awwAvthrDk4q7D7SzIKiGGUzE3eeml0a 
-E9jD2z3Il3rucO2n5nzbcc8tlGLfbdb1OL4/pYUKGbio2Al1QnDE6u/LDsg0qBIimAy4E5S2S+zw -0JDnJwIDAQABo4HjMIHgMB0GA1UdEQQWMBSBEmVjX2FjY0BjYXRjZXJ0Lm5ldDAPBgNVHRMBAf8E -BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUoMOLRKo3pUW/l4Ba0fF4opvpXY0wfwYD -VR0gBHgwdjB0BgsrBgEEAfV4AQMBCjBlMCwGCCsGAQUFBwIBFiBodHRwczovL3d3dy5jYXRjZXJ0 -Lm5ldC92ZXJhcnJlbDA1BggrBgEFBQcCAjApGidWZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5l -dC92ZXJhcnJlbCAwDQYJKoZIhvcNAQEFBQADggEBAKBIW4IB9k1IuDlVNZyAelOZ1Vr/sXE7zDkJ -lF7W2u++AVtd0x7Y/X1PzaBB4DSTv8vihpw3kpBWHNzrKQXlxJ7HNd+KDM3FIUPpqojlNcAZQmNa -Al6kSBg6hW/cnbw/nZzBh7h6YQjpdwt/cKt63dmXLGQehb+8dJahw3oS7AwaboMMPOhyRp/7SNVe -l+axofjk70YllJyJ22k4vuxcDlbHZVHlUIiIv0LVKz3l+bqeLrPK9HOSAgu+TGbrIP65y7WZf+a2 -E/rKS03Z7lNGBjvGTq2TWoF+bCpLagVFjPIhpDGQh2xlnJ2lYJU6Un/10asIbvPuW/mIPX64b24D -5EI= ------END CERTIFICATE----- - -Hellenic Academic and Research Institutions RootCA 2011 -======================================================= ------BEGIN CERTIFICATE----- -MIIEMTCCAxmgAwIBAgIBADANBgkqhkiG9w0BAQUFADCBlTELMAkGA1UEBhMCR1IxRDBCBgNVBAoT -O0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ2VydC4gQXV0aG9y -aXR5MUAwPgYDVQQDEzdIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25z -IFJvb3RDQSAyMDExMB4XDTExMTIwNjEzNDk1MloXDTMxMTIwMTEzNDk1MlowgZUxCzAJBgNVBAYT -AkdSMUQwQgYDVQQKEztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25z -IENlcnQuIEF1dGhvcml0eTFAMD4GA1UEAxM3SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2VhcmNo -IEluc3RpdHV0aW9ucyBSb290Q0EgMjAxMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB -AKlTAOMupvaO+mDYLZU++CwqVE7NuYRhlFhPjz2L5EPzdYmNUeTDN9KKiE15HrcS3UN4SoqS5tdI -1Q+kOilENbgH9mgdVc04UfCMJDGFr4PJfel3r+0ae50X+bOdOFAPplp5kYCvN66m0zH7tSYJnTxa -71HFK9+WXesyHgLacEnsbgzImjeN9/E2YEsmLIKe0HjzDQ9jpFEw4fkrJxIH2Oq9GGKYsFk3fb7u -8yBRQlqD75O6aRXxYp2fmTmCobd0LovUxQt7L/DICto9eQqakxylKHJzkUOap9FNhYS5qXSPFEDH -3N6sQWRstBmbAmNtJGSPRLIl6s5ddAxjMlyNh+UCAwEAAaOBiTCBhjAPBgNVHRMBAf8EBTADAQH/ -MAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQUppFC/RNhSiOeCKQp5dgTBCPuQSUwRwYDVR0eBEAwPqA8 -MAWCAy5ncjAFggMuZXUwBoIELmVkdTAGggQub3JnMAWBAy5ncjAFgQMuZXUwBoEELmVkdTAGgQQu -b3JnMA0GCSqGSIb3DQEBBQUAA4IBAQAf73lB4XtuP7KMhjdCSk4cNx6NZrokgclPEg8hwAOXhiVt -XdMiKahsog2p6z0GW5k6x8zDmjR/qw7IThzh+uTczQ2+vyT+bOdrwg3IBp5OjWEopmr95fZi6hg8 -TqBTnbI6nOulnJEWtk2C4AwFSKls9cz4y51JtPACpf1wA+2KIaWuE4ZJwzNzvoc7dIsXRSZMFpGD -/md9zU1jZ/rzAxKWeAaNsWftjj++n08C9bMJL/NMh98qy5V8AcysNnq/onN694/BtZqhFLKPM58N -7yLcZnuEvUUXBj08yrl3NI/K6s8/MT7jiOOASSXIl7WdmplNsDz4SgCbZN2fOUvRJ9e4 ------END CERTIFICATE----- - Actalis Authentication Root CA ============================== -----BEGIN CERTIFICATE----- @@ -1612,27 +987,6 @@ OR/qnuOf0GZvBeyqdn6/axag67XH/JJULysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9 vwGYT7JZVEc+NHt4bVaTLnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg== -----END CERTIFICATE----- -Trustis FPS Root CA -=================== ------BEGIN CERTIFICATE----- -MIIDZzCCAk+gAwIBAgIQGx+ttiD5JNM2a/fH8YygWTANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQG -EwJHQjEYMBYGA1UEChMPVHJ1c3RpcyBMaW1pdGVkMRwwGgYDVQQLExNUcnVzdGlzIEZQUyBSb290 -IENBMB4XDTAzMTIyMzEyMTQwNloXDTI0MDEyMTExMzY1NFowRTELMAkGA1UEBhMCR0IxGDAWBgNV -BAoTD1RydXN0aXMgTGltaXRlZDEcMBoGA1UECxMTVHJ1c3RpcyBGUFMgUm9vdCBDQTCCASIwDQYJ -KoZIhvcNAQEBBQADggEPADCCAQoCggEBAMVQe547NdDfxIzNjpvto8A2mfRC6qc+gIMPpqdZh8mQ -RUN+AOqGeSoDvT03mYlmt+WKVoaTnGhLaASMk5MCPjDSNzoiYYkchU59j9WvezX2fihHiTHcDnlk -H5nSW7r+f2C/revnPDgpai/lkQtV/+xvWNUtyd5MZnGPDNcE2gfmHhjjvSkCqPoc4Vu5g6hBSLwa -cY3nYuUtsuvffM/bq1rKMfFMIvMFE/eC+XN5DL7XSxzA0RU8k0Fk0ea+IxciAIleH2ulrG6nS4zt -o3Lmr2NNL4XSFDWaLk6M6jKYKIahkQlBOrTh4/L68MkKokHdqeMDx4gVOxzUGpTXn2RZEm0CAwEA 
-AaNTMFEwDwYDVR0TAQH/BAUwAwEB/zAfBgNVHSMEGDAWgBS6+nEleYtXQSUhhgtx67JkDoshZzAd -BgNVHQ4EFgQUuvpxJXmLV0ElIYYLceuyZA6LIWcwDQYJKoZIhvcNAQEFBQADggEBAH5Y//01GX2c -GE+esCu8jowU/yyg2kdbw++BLa8F6nRIW/M+TgfHbcWzk88iNVy2P3UnXwmWzaD+vkAMXBJV+JOC -yinpXj9WV4s4NvdFGkwozZ5BuO1WTISkQMi4sKUraXAEasP41BIy+Q7DsdwyhEQsb8tGD+pmQQ9P -8Vilpg0ND2HepZ5dfWWhPBfnqFVO76DH7cZEf1T1o+CP8HxVIo8ptoGj4W1OLBuAZ+ytIJ8MYmHV -l/9D7S3B2l0pKoU/rGXuhg8FjZBf3+6f9L/uHfuY5H+QK4R4EA5sSVPvFVtlRkpdr7r7OnIdzfYl -iB6XzCGcKQENZetX2fNXlrtIzYE= ------END CERTIFICATE----- - Buypass Class 2 Root CA ======================= -----BEGIN CERTIFICATE----- @@ -1715,30 +1069,6 @@ P0HHRwA11fXT91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4pTpPDpFQUWw== -----END CERTIFICATE----- -EE Certification Centre Root CA -=============================== ------BEGIN CERTIFICATE----- -MIIEAzCCAuugAwIBAgIQVID5oHPtPwBMyonY43HmSjANBgkqhkiG9w0BAQUFADB1MQswCQYDVQQG -EwJFRTEiMCAGA1UECgwZQVMgU2VydGlmaXRzZWVyaW1pc2tlc2t1czEoMCYGA1UEAwwfRUUgQ2Vy -dGlmaWNhdGlvbiBDZW50cmUgUm9vdCBDQTEYMBYGCSqGSIb3DQEJARYJcGtpQHNrLmVlMCIYDzIw -MTAxMDMwMTAxMDMwWhgPMjAzMDEyMTcyMzU5NTlaMHUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKDBlB -UyBTZXJ0aWZpdHNlZXJpbWlza2Vza3VzMSgwJgYDVQQDDB9FRSBDZXJ0aWZpY2F0aW9uIENlbnRy -ZSBSb290IENBMRgwFgYJKoZIhvcNAQkBFglwa2lAc2suZWUwggEiMA0GCSqGSIb3DQEBAQUAA4IB -DwAwggEKAoIBAQDIIMDs4MVLqwd4lfNE7vsLDP90jmG7sWLqI9iroWUyeuuOF0+W2Ap7kaJjbMeM -TC55v6kF/GlclY1i+blw7cNRfdCT5mzrMEvhvH2/UpvObntl8jixwKIy72KyaOBhU8E2lf/slLo2 -rpwcpzIP5Xy0xm90/XsY6KxX7QYgSzIwWFv9zajmofxwvI6Sc9uXp3whrj3B9UiHbCe9nyV0gVWw -93X2PaRka9ZP585ArQ/dMtO8ihJTmMmJ+xAdTX7Nfh9WDSFwhfYggx/2uh8Ej+p3iDXE/+pOoYtN -P2MbRMNE1CV2yreN1x5KZmTNXMWcg+HCCIia7E6j8T4cLNlsHaFLAgMBAAGjgYowgYcwDwYDVR0T -AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBLyWj7qVhy/zQas8fElyalL1BSZ -MEUGA1UdJQQ+MDwGCCsGAQUFBwMCBggrBgEFBQcDAQYIKwYBBQUHAwMGCCsGAQUFBwMEBggrBgEF -BQcDCAYIKwYBBQUHAwkwDQYJKoZIhvcNAQEFBQADggEBAHv25MANqhlHt01Xo/6tu7Fq1Q+e2+Rj -xY6hUFaTlrg4wCQiZrxTFGGVv9DHKpY5P30osxBAIWrEr7BSdxjhlthWXePdNl4dp1BUoMUq5KqM -lIpPnTX/dqQGE5Gion0ARD9V04I8GtVbvFZMIi5GQ4okQC3zErg7cBqklrkar4dBGmoYDQZPxz5u -uSlNDUmJEYcyW+ZLBMjkXOZ0c5RdFpgTlf7727FE5TpwrDdr5rMzcijJs1eg9gIWiAYLtqZLICjU -3j2LrTcFU3T+bsy8QxdxXvnFzBqpYe73dgzzcvRyrc9yAjYHR8/vGVCJYMzpJJUPwssd8m92kMfM -dcGWxZ0= ------END CERTIFICATE----- - D-TRUST Root Class 3 CA 2 2009 ============================== -----BEGIN CERTIFICATE----- @@ -1917,40 +1247,6 @@ Y2XQ8xwOFvVrhlhNGNTkDY6lnVuR3HYkUD/GKvvZt5y11ubQ2egZixVxSK236thZiNSQvxaz2ems WWFUyBy6ysHK4bkgTI86k4mloMy/0/Z1pHWWbVY= -----END CERTIFICATE----- -E-Tugra Certification Authority -=============================== ------BEGIN CERTIFICATE----- -MIIGSzCCBDOgAwIBAgIIamg+nFGby1MwDQYJKoZIhvcNAQELBQAwgbIxCzAJBgNVBAYTAlRSMQ8w -DQYDVQQHDAZBbmthcmExQDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamls -ZXJpIHZlIEhpem1ldGxlcmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBN -ZXJrZXppMSgwJgYDVQQDDB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTEzMDMw -NTEyMDk0OFoXDTIzMDMwMzEyMDk0OFowgbIxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHDAZBbmthcmEx -QDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhpem1ldGxl -cmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBNZXJrZXppMSgwJgYDVQQD -DB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0BAQEFAAOCAg8A -MIICCgKCAgEA4vU/kwVRHoViVF56C/UYB4Oufq9899SKa6VjQzm5S/fDxmSJPZQuVIBSOTkHS0vd -hQd2h8y/L5VMzH2nPbxHD5hw+IyFHnSOkm0bQNGZDbt1bsipa5rAhDGvykPL6ys06I+XawGb1Q5K -CKpbknSFQ9OArqGIW66z6l7LFpp3RMih9lRozt6Plyu6W0ACDGQXwLWTzeHxE2bODHnv0ZEoq1+g 
-ElIwcxmOj+GMB6LDu0rw6h8VqO4lzKRG+Bsi77MOQ7osJLjFLFzUHPhdZL3Dk14opz8n8Y4e0ypQ -BaNV2cvnOVPAmJ6MVGKLJrD3fY185MaeZkJVgkfnsliNZvcHfC425lAcP9tDJMW/hkd5s3kc91r0 -E+xs+D/iWR+V7kI+ua2oMoVJl0b+SzGPWsutdEcf6ZG33ygEIqDUD13ieU/qbIWGvaimzuT6w+Gz -rt48Ue7LE3wBf4QOXVGUnhMMti6lTPk5cDZvlsouDERVxcr6XQKj39ZkjFqzAQqptQpHF//vkUAq -jqFGOjGY5RH8zLtJVor8udBhmm9lbObDyz51Sf6Pp+KJxWfXnUYTTjF2OySznhFlhqt/7x3U+Lzn -rFpct1pHXFXOVbQicVtbC/DP3KBhZOqp12gKY6fgDT+gr9Oq0n7vUaDmUStVkhUXU8u3Zg5mTPj5 -dUyQ5xJwx0UCAwEAAaNjMGEwHQYDVR0OBBYEFC7j27JJ0JxUeVz6Jyr+zE7S6E5UMA8GA1UdEwEB -/wQFMAMBAf8wHwYDVR0jBBgwFoAULuPbsknQnFR5XPonKv7MTtLoTlQwDgYDVR0PAQH/BAQDAgEG -MA0GCSqGSIb3DQEBCwUAA4ICAQAFNzr0TbdF4kV1JI+2d1LoHNgQk2Xz8lkGpD4eKexd0dCrfOAK -kEh47U6YA5n+KGCRHTAduGN8qOY1tfrTYXbm1gdLymmasoR6d5NFFxWfJNCYExL/u6Au/U5Mh/jO -XKqYGwXgAEZKgoClM4so3O0409/lPun++1ndYYRP0lSWE2ETPo+Aab6TR7U1Q9Jauz1c77NCR807 -VRMGsAnb/WP2OogKmW9+4c4bU2pEZiNRCHu8W1Ki/QY3OEBhj0qWuJA3+GbHeJAAFS6LrVE1Uweo -a2iu+U48BybNCAVwzDk/dr2l02cmAYamU9JgO3xDf1WKvJUawSg5TB9D0pH0clmKuVb8P7Sd2nCc -dlqMQ1DujjByTd//SffGqWfZbawCEeI6FiWnWAjLb1NBnEg4R2gz0dfHj9R0IdTDBZB6/86WiLEV -KV0jq9BgoRJP3vQXzTLlyb/IQ639Lo7xr+L0mPoSHyDYwKcMhcWQ9DstliaxLL5Mq+ux0orJ23gT -Dx4JnW2PAJ8C2sH6H3p6CcRK5ogql5+Ji/03X186zjhZhkuvcQu02PJwT58yE+Owp1fl2tpDy4Q0 -8ijE6m30Ku/Ba3ba+367hTzSU8JNvnHhRdH9I2cNE3X7z2VnIp2usAnRCf8dNL/+I5c30jn6PQ0G -C7TbO6Orb1wdtn7os4I07QZcJA== ------END CERTIFICATE----- - T-TeleSec GlobalRoot Class 2 ============================ -----BEGIN CERTIFICATE----- @@ -2272,20 +1568,6 @@ HU6+4WMBzzuqQhFkoJ2UOQIReVx7Hfpkue4WQrO/isIJxOzksU0CMQDpKmFHjFJKS04YcPbWRNZu 9YO6bVi9JNlWSOrvxKJGgYhqOkbRqZtNyWHa0V1Xahg= -----END CERTIFICATE----- -GlobalSign ECC Root CA - R4 -=========================== ------BEGIN CERTIFICATE----- -MIIB4TCCAYegAwIBAgIRKjikHJYKBN5CsiilC+g0mAIwCgYIKoZIzj0EAwIwUDEkMCIGA1UECxMb -R2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI0MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQD -EwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoXDTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMb -R2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI0MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQD -EwpHbG9iYWxTaWduMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuMZ5049sJQ6fLjkZHAOkrprl -OQcJFspjsbmG+IpXwVfOQvpzofdlQv8ewQCybnMO/8ch5RikqtlxP6jUuc6MHaNCMEAwDgYDVR0P -AQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFSwe61FuOJAf/sKbvu+M8k8o4TV -MAoGCCqGSM49BAMCA0gAMEUCIQDckqGgE6bPA7DmxCGXkPoUVy0D7O48027KqGx2vKLeuwIgJ6iF -JzWbVsaj8kfSt24bAgAXqmemFZHe+pTsewv4n4Q= ------END CERTIFICATE----- - GlobalSign ECC Root CA - R5 =========================== -----BEGIN CERTIFICATE----- @@ -2301,66 +1583,6 @@ uglB4Zf4+/2a4n0Sye18ZNPLBSWLVtmg515dTguDnFt2KaAJJiFqYgIwcdK1j1zqO+F4CYWodZI7 yFz9SO8NdCKoCOJuxUnOxwy8p2Fp8fc74SrL+SvzZpA3 -----END CERTIFICATE----- -Staat der Nederlanden Root CA - G3 -================================== ------BEGIN CERTIFICATE----- -MIIFdDCCA1ygAwIBAgIEAJiiOTANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJOTDEeMBwGA1UE -CgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFhdCBkZXIgTmVkZXJsYW5kZW4g -Um9vdCBDQSAtIEczMB4XDTEzMTExNDExMjg0MloXDTI4MTExMzIzMDAwMFowWjELMAkGA1UEBhMC -TkwxHjAcBgNVBAoMFVN0YWF0IGRlciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5l -ZGVybGFuZGVuIFJvb3QgQ0EgLSBHMzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAL4y -olQPcPssXFnrbMSkUeiFKrPMSjTysF/zDsccPVMeiAho2G89rcKezIJnByeHaHE6n3WWIkYFsO2t -x1ueKt6c/DrGlaf1F2cY5y9JCAxcz+bMNO14+1Cx3Gsy8KL+tjzk7FqXxz8ecAgwoNzFs21v0IJy -EavSgWhZghe3eJJg+szeP4TrjTgzkApyI/o1zCZxMdFyKJLZWyNtZrVtB0LrpjPOktvA9mxjeM3K -Tj215VKb8b475lRgsGYeCasH/lSJEULR9yS6YHgamPfJEf0WwTUaVHXvQ9Plrk7O53vDxk5hUUur 
-mkVLoR9BvUhTFXFkC4az5S6+zqQbwSmEorXLCCN2QyIkHxcE1G6cxvx/K2Ya7Irl1s9N9WMJtxU5 -1nus6+N86U78dULI7ViVDAZCopz35HCz33JvWjdAidiFpNfxC95DGdRKWCyMijmev4SH8RY7Ngzp -07TKbBlBUgmhHbBqv4LvcFEhMtwFdozL92TkA1CvjJFnq8Xy7ljY3r735zHPbMk7ccHViLVlvMDo -FxcHErVc0qsgk7TmgoNwNsXNo42ti+yjwUOH5kPiNL6VizXtBznaqB16nzaeErAMZRKQFWDZJkBE -41ZgpRDUajz9QdwOWke275dhdU/Z/seyHdTtXUmzqWrLZoQT1Vyg3N9udwbRcXXIV2+vD3dbAgMB -AAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRUrfrHkleu -yjWcLhL75LpdINyUVzANBgkqhkiG9w0BAQsFAAOCAgEAMJmdBTLIXg47mAE6iqTnB/d6+Oea31BD -U5cqPco8R5gu4RV78ZLzYdqQJRZlwJ9UXQ4DO1t3ApyEtg2YXzTdO2PCwyiBwpwpLiniyMMB8jPq -KqrMCQj3ZWfGzd/TtiunvczRDnBfuCPRy5FOCvTIeuXZYzbB1N/8Ipf3YF3qKS9Ysr1YvY2WTxB1 -v0h7PVGHoTx0IsL8B3+A3MSs/mrBcDCw6Y5p4ixpgZQJut3+TcCDjJRYwEYgr5wfAvg1VUkvRtTA -8KCWAg8zxXHzniN9lLf9OtMJgwYh/WA9rjLA0u6NpvDntIJ8CsxwyXmA+P5M9zWEGYox+wrZ13+b -8KKaa8MFSu1BYBQw0aoRQm7TIwIEC8Zl3d1Sd9qBa7Ko+gE4uZbqKmxnl4mUnrzhVNXkanjvSr0r -mj1AfsbAddJu+2gw7OyLnflJNZoaLNmzlTnVHpL3prllL+U9bTpITAjc5CgSKL59NVzq4BZ+Extq -1z7XnvwtdbLBFNUjA9tbbws+eC8N3jONFrdI54OagQ97wUNNVQQXOEpR1VmiiXTTn74eS9fGbbeI -JG9gkaSChVtWQbzQRKtqE77RLFi3EjNYsjdj3BP1lB0/QFH1T/U67cjF68IeHRaVesd+QnGTbksV -tzDfqu1XhUisHWrdOWnk4Xl4vs4Fv6EM94B7IWcnMFk= ------END CERTIFICATE----- - -Staat der Nederlanden EV Root CA -================================ ------BEGIN CERTIFICATE----- -MIIFcDCCA1igAwIBAgIEAJiWjTANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQGEwJOTDEeMBwGA1UE -CgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSkwJwYDVQQDDCBTdGFhdCBkZXIgTmVkZXJsYW5kZW4g -RVYgUm9vdCBDQTAeFw0xMDEyMDgxMTE5MjlaFw0yMjEyMDgxMTEwMjhaMFgxCzAJBgNVBAYTAk5M -MR4wHAYDVQQKDBVTdGFhdCBkZXIgTmVkZXJsYW5kZW4xKTAnBgNVBAMMIFN0YWF0IGRlciBOZWRl -cmxhbmRlbiBFViBSb290IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA48d+ifkk -SzrSM4M1LGns3Amk41GoJSt5uAg94JG6hIXGhaTK5skuU6TJJB79VWZxXSzFYGgEt9nCUiY4iKTW -O0Cmws0/zZiTs1QUWJZV1VD+hq2kY39ch/aO5ieSZxeSAgMs3NZmdO3dZ//BYY1jTw+bbRcwJu+r -0h8QoPnFfxZpgQNH7R5ojXKhTbImxrpsX23Wr9GxE46prfNeaXUmGD5BKyF/7otdBwadQ8QpCiv8 -Kj6GyzyDOvnJDdrFmeK8eEEzduG/L13lpJhQDBXd4Pqcfzho0LKmeqfRMb1+ilgnQ7O6M5HTp5gV -XJrm0w912fxBmJc+qiXbj5IusHsMX/FjqTf5m3VpTCgmJdrV8hJwRVXj33NeN/UhbJCONVrJ0yPr -08C+eKxCKFhmpUZtcALXEPlLVPxdhkqHz3/KRawRWrUgUY0viEeXOcDPusBCAUCZSCELa6fS/ZbV -0b5GnUngC6agIk440ME8MLxwjyx1zNDFjFE7PZQIZCZhfbnDZY8UnCHQqv0XcgOPvZuM5l5Tnrmd -74K74bzickFbIZTTRTeU0d8JOV3nI6qaHcptqAqGhYqCvkIH1vI4gnPah1vlPNOePqc7nvQDs/nx -fRN0Av+7oeX6AHkcpmZBiFxgV6YuCcS6/ZrPpx9Aw7vMWgpVSzs4dlG4Y4uElBbmVvMCAwEAAaNC -MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFP6rAJCYniT8qcwa -ivsnuL8wbqg7MA0GCSqGSIb3DQEBCwUAA4ICAQDPdyxuVr5Os7aEAJSrR8kN0nbHhp8dB9O2tLsI -eK9p0gtJ3jPFrK3CiAJ9Brc1AsFgyb/E6JTe1NOpEyVa/m6irn0F3H3zbPB+po3u2dfOWBfoqSmu -c0iH55vKbimhZF8ZE/euBhD/UcabTVUlT5OZEAFTdfETzsemQUHSv4ilf0X8rLiltTMMgsT7B/Zq -5SWEXwbKwYY5EdtYzXc7LMJMD16a4/CrPmEbUCTCwPTxGfARKbalGAKb12NMcIxHowNDXLldRqAN -b/9Zjr7dn3LDWyvfjFvO5QxGbJKyCqNMVEIYFRIYvdr8unRu/8G2oGTYqV9Vrp9canaW2HNnh/tN -f1zuacpzEPuKqf2evTY4SUmH9A4U8OmHuD+nT3pajnnUk+S7aFKErGzp85hwVXIy+TSrK0m1zSBi -5Dp6Z2Orltxtrpfs/J92VoguZs9btsmksNcFuuEnL5O7Jiqik7Ab846+HUCjuTaPPoIaGl6I6lD4 -WeKDRikL40Rc4ZW2aZCaFG+XroHPaO+Zmr615+F/+PoTRxZMzG0IQOeLeG9QgkRQP2YGiqtDhFZK -DyAthg710tvSeopLzaXoTvFeJiUBWSOgftL2fiFX1ye8FVdMpEbB4IMeDExNH08GGeL5qPQ6gqGy -eUN51q1veieQA6TqJIc/2b3Z6fJfUEkc7uzXLg== ------END CERTIFICATE----- - IdenTrust Commercial Root CA 1 ============================== -----BEGIN CERTIFICATE----- @@ -2812,87 +2034,6 @@ F8Io2c9Si1vIY9RCPqAzekYu9wogRlR+ak8x8YF+QnQ4ZXMn7sZ8uI7XpTrXmKGcjBBV09tL7ECQ aaApJUqlyyvdimYHFngVV3Eb7PVHhPOeMTd61X8kreS8/f3MboPoDKi3QWwH3b08hpcv0g== 
-----END CERTIFICATE----- -TrustCor RootCert CA-1 -====================== ------BEGIN CERTIFICATE----- -MIIEMDCCAxigAwIBAgIJANqb7HHzA7AZMA0GCSqGSIb3DQEBCwUAMIGkMQswCQYDVQQGEwJQQTEP -MA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEgQ2l0eTEkMCIGA1UECgwbVHJ1c3RDb3Ig -U3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5UcnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3Jp -dHkxHzAdBgNVBAMMFlRydXN0Q29yIFJvb3RDZXJ0IENBLTEwHhcNMTYwMjA0MTIzMjE2WhcNMjkx -MjMxMTcyMzE2WjCBpDELMAkGA1UEBhMCUEExDzANBgNVBAgMBlBhbmFtYTEUMBIGA1UEBwwLUGFu -YW1hIENpdHkxJDAiBgNVBAoMG1RydXN0Q29yIFN5c3RlbXMgUy4gZGUgUi5MLjEnMCUGA1UECwwe -VHJ1c3RDb3IgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MR8wHQYDVQQDDBZUcnVzdENvciBSb290Q2Vy -dCBDQS0xMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAv463leLCJhJrMxnHQFgKq1mq -jQCj/IDHUHuO1CAmujIS2CNUSSUQIpidRtLByZ5OGy4sDjjzGiVoHKZaBeYei0i/mJZ0PmnK6bV4 -pQa81QBeCQryJ3pS/C3Vseq0iWEk8xoT26nPUu0MJLq5nux+AHT6k61sKZKuUbS701e/s/OojZz0 -JEsq1pme9J7+wH5COucLlVPat2gOkEz7cD+PSiyU8ybdY2mplNgQTsVHCJCZGxdNuWxu72CVEY4h -gLW9oHPY0LJ3xEXqWib7ZnZ2+AYfYW0PVcWDtxBWcgYHpfOxGgMFZA6dWorWhnAbJN7+KIor0Gqw -/Hqi3LJ5DotlDwIDAQABo2MwYTAdBgNVHQ4EFgQU7mtJPHo/DeOxCbeKyKsZn3MzUOcwHwYDVR0j -BBgwFoAU7mtJPHo/DeOxCbeKyKsZn3MzUOcwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC -AYYwDQYJKoZIhvcNAQELBQADggEBACUY1JGPE+6PHh0RU9otRCkZoB5rMZ5NDp6tPVxBb5UrJKF5 -mDo4Nvu7Zp5I/5CQ7z3UuJu0h3U/IJvOcs+hVcFNZKIZBqEHMwwLKeXx6quj7LUKdJDHfXLy11yf -ke+Ri7fc7Waiz45mO7yfOgLgJ90WmMCV1Aqk5IGadZQ1nJBfiDcGrVmVCrDRZ9MZyonnMlo2HD6C -qFqTvsbQZJG2z9m2GM/bftJlo6bEjhcxwft+dtvTheNYsnd6djtsL1Ac59v2Z3kf9YKVmgenFK+P -3CghZwnS1k1aHBkcjndcw5QkPTJrS37UeJSDvjdNzl/HHk484IkzlQsPpTLWPFp5LBk= ------END CERTIFICATE----- - -TrustCor RootCert CA-2 -====================== ------BEGIN CERTIFICATE----- -MIIGLzCCBBegAwIBAgIIJaHfyjPLWQIwDQYJKoZIhvcNAQELBQAwgaQxCzAJBgNVBAYTAlBBMQ8w -DQYDVQQIDAZQYW5hbWExFDASBgNVBAcMC1BhbmFtYSBDaXR5MSQwIgYDVQQKDBtUcnVzdENvciBT -eXN0ZW1zIFMuIGRlIFIuTC4xJzAlBgNVBAsMHlRydXN0Q29yIENlcnRpZmljYXRlIEF1dGhvcml0 -eTEfMB0GA1UEAwwWVHJ1c3RDb3IgUm9vdENlcnQgQ0EtMjAeFw0xNjAyMDQxMjMyMjNaFw0zNDEy -MzExNzI2MzlaMIGkMQswCQYDVQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5h -bWEgQ2l0eTEkMCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5U -cnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxHzAdBgNVBAMMFlRydXN0Q29yIFJvb3RDZXJ0 -IENBLTIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCnIG7CKqJiJJWQdsg4foDSq8Gb -ZQWU9MEKENUCrO2fk8eHyLAnK0IMPQo+QVqedd2NyuCb7GgypGmSaIwLgQ5WoD4a3SwlFIIvl9Nk -RvRUqdw6VC0xK5mC8tkq1+9xALgxpL56JAfDQiDyitSSBBtlVkxs1Pu2YVpHI7TYabS3OtB0PAx1 -oYxOdqHp2yqlO/rOsP9+aij9JxzIsekp8VduZLTQwRVtDr4uDkbIXvRR/u8OYzo7cbrPb1nKDOOb -XUm4TOJXsZiKQlecdu/vvdFoqNL0Cbt3Nb4lggjEFixEIFapRBF37120Hapeaz6LMvYHL1cEksr1 -/p3C6eizjkxLAjHZ5DxIgif3GIJ2SDpxsROhOdUuxTTCHWKF3wP+TfSvPd9cW436cOGlfifHhi5q -jxLGhF5DUVCcGZt45vz27Ud+ez1m7xMTiF88oWP7+ayHNZ/zgp6kPwqcMWmLmaSISo5uZk3vFsQP -eSghYA2FFn3XVDjxklb9tTNMg9zXEJ9L/cb4Qr26fHMC4P99zVvh1Kxhe1fVSntb1IVYJ12/+Ctg -rKAmrhQhJ8Z3mjOAPF5GP/fDsaOGM8boXg25NSyqRsGFAnWAoOsk+xWq5Gd/bnc/9ASKL3x74xdh -8N0JqSDIvgmk0H5Ew7IwSjiqqewYmgeCK9u4nBit2uBGF6zPXQIDAQABo2MwYTAdBgNVHQ4EFgQU -2f4hQG6UnrybPZx9mCAZ5YwwYrIwHwYDVR0jBBgwFoAU2f4hQG6UnrybPZx9mCAZ5YwwYrIwDwYD -VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQADggIBAJ5Fngw7tu/h -Osh80QA9z+LqBrWyOrsGS2h60COXdKcs8AjYeVrXWoSK2BKaG9l9XE1wxaX5q+WjiYndAfrs3fnp -kpfbsEZC89NiqpX+MWcUaViQCqoL7jcjx1BRtPV+nuN79+TMQjItSQzL/0kMmx40/W5ulop5A7Zv -2wnL/V9lFDfhOPXzYRZY5LVtDQsEGz9QLX+zx3oaFoBg+Iof6Rsqxvm6ARppv9JYx1RXCI/hOWB3 -S6xZhBqI8d3LT3jX5+EzLfzuQfogsL7L9ziUwOHQhQ+77Sxzq+3+knYaZH9bDTMJBzN7Bj8RpFxw -PIXAz+OQqIN3+tvmxYxoZxBnpVIt8MSZj3+/0WvitUfW2dCFmU2Umw9Lje4AWkcdEQOsQRivh7dv 
-DDqPys/cA8GiCcjl/YBeyGBCARsaU1q7N6a3vLqE6R5sGtRk2tRD/pOLS/IseRYQ1JMLiI+h2IYU -RpFHmygk71dSTlxCnKr3Sewn6EAes6aJInKc9Q0ztFijMDvd1GpUk74aTfOTlPf8hAs/hCBcNANE -xdqtvArBAs8e5ZTZ845b2EzwnexhF7sUMlQMAimTHpKG9n/v55IFDlndmQguLvqcAFLTxWYp5KeX -RKQOKIETNcX2b2TmQcTVL8w0RSXPQQCWPUouwpaYT05KnJe32x+SMsj/D1Fu1uwJ ------END CERTIFICATE----- - -TrustCor ECA-1 -============== ------BEGIN CERTIFICATE----- -MIIEIDCCAwigAwIBAgIJAISCLF8cYtBAMA0GCSqGSIb3DQEBCwUAMIGcMQswCQYDVQQGEwJQQTEP -MA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEgQ2l0eTEkMCIGA1UECgwbVHJ1c3RDb3Ig -U3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5UcnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3Jp -dHkxFzAVBgNVBAMMDlRydXN0Q29yIEVDQS0xMB4XDTE2MDIwNDEyMzIzM1oXDTI5MTIzMTE3Mjgw -N1owgZwxCzAJBgNVBAYTAlBBMQ8wDQYDVQQIDAZQYW5hbWExFDASBgNVBAcMC1BhbmFtYSBDaXR5 -MSQwIgYDVQQKDBtUcnVzdENvciBTeXN0ZW1zIFMuIGRlIFIuTC4xJzAlBgNVBAsMHlRydXN0Q29y -IENlcnRpZmljYXRlIEF1dGhvcml0eTEXMBUGA1UEAwwOVHJ1c3RDb3IgRUNBLTEwggEiMA0GCSqG -SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDPj+ARtZ+odnbb3w9U73NjKYKtR8aja+3+XzP4Q1HpGjOR -MRegdMTUpwHmspI+ap3tDvl0mEDTPwOABoJA6LHip1GnHYMma6ve+heRK9jGrB6xnhkB1Zem6g23 -xFUfJ3zSCNV2HykVh0A53ThFEXXQmqc04L/NyFIduUd+Dbi7xgz2c1cWWn5DkR9VOsZtRASqnKmc -p0yJF4OuowReUoCLHhIlERnXDH19MURB6tuvsBzvgdAsxZohmz3tQjtQJvLsznFhBmIhVE5/wZ0+ -fyCMgMsq2JdiyIMzkX2woloPV+g7zPIlstR8L+xNxqE6FXrntl019fZISjZFZtS6mFjBAgMBAAGj -YzBhMB0GA1UdDgQWBBREnkj1zG1I1KBLf/5ZJC+Dl5mahjAfBgNVHSMEGDAWgBREnkj1zG1I1KBL -f/5ZJC+Dl5mahjAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsF -AAOCAQEABT41XBVwm8nHc2FvcivUwo/yQ10CzsSUuZQRg2dd4mdsdXa/uwyqNsatR5Nj3B5+1t4u -/ukZMjgDfxT2AHMsWbEhBuH7rBiVDKP/mZb3Kyeb1STMHd3BOuCYRLDE5D53sXOpZCz2HAF8P11F -hcCF5yWPldwX8zyfGm6wyuMdKulMY/okYWLW2n62HGz1Ah3UKt1VkOsqEUc8Ll50soIipX1TH0Xs -J5F95yIW6MBoNtjG8U+ARDL54dHRHareqKucBK+tIA5kmE2la8BIWJZpTdwHjFGTot+fDz2LYLSC -jaoITmJF4PkL0uDgPFveXHEnJcLmA4GLEFPjx1WitJ/X5g== ------END CERTIFICATE----- - SSL.com Root Certification Authority RSA ======================================== -----BEGIN CERTIFICATE----- @@ -3037,96 +2178,6 @@ AwMDaAAwZQIwJsdpW9zV57LnyAyMjMPdeYwbY9XJUpROTYJKcx6ygISpJcBMWm1JKWB4E+J+SOtk AjEA2zQgMgj/mkkCtojeFK9dbJlxjRo/i9fgojaGHAeCOnZT/cKi7e97sIBPWA9LUzm9 -----END CERTIFICATE----- -GTS Root R1 -=========== ------BEGIN CERTIFICATE----- -MIIFWjCCA0KgAwIBAgIQbkepxUtHDA3sM9CJuRz04TANBgkqhkiG9w0BAQwFADBHMQswCQYDVQQG -EwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJv -b3QgUjEwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAwMDAwWjBHMQswCQYDVQQGEwJVUzEiMCAG -A1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwggIi -MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2EQKLHuOhd5s73L+UPreVp0A8of2C+X0yBoJx -9vaMf/vo27xqLpeXo4xL+Sv2sfnOhB2x+cWX3u+58qPpvBKJXqeqUqv4IyfLpLGcY9vXmX7wCl7r -aKb0xlpHDU0QM+NOsROjyBhsS+z8CZDfnWQpJSMHobTSPS5g4M/SCYe7zUjwTcLCeoiKu7rPWRnW -r4+wB7CeMfGCwcDfLqZtbBkOtdh+JhpFAz2weaSUKK0PfyblqAj+lug8aJRT7oM6iCsVlgmy4HqM -LnXWnOunVmSPlk9orj2XwoSPwLxAwAtcvfaHszVsrBhQf4TgTM2S0yDpM7xSma8ytSmzJSq0SPly -4cpk9+aCEI3oncKKiPo4Zor8Y/kB+Xj9e1x3+naH+uzfsQ55lVe0vSbv1gHR6xYKu44LtcXFilWr -06zqkUspzBmkMiVOKvFlRNACzqrOSbTqn3yDsEB750Orp2yjj32JgfpMpf/VjsPOS+C12LOORc92 -wO1AK/1TD7Cn1TsNsYqiA94xrcx36m97PtbfkSIS5r762DL8EGMUUXLeXdYWk70paDPvOmbsB4om -3xPXV2V4J95eSRQAogB/mqghtqmxlbCluQ0WEdrHbEg8QOB+DVrNVjzRlwW5y0vtOUucxD/SVRNu -JLDWcfr0wbrM7Rv1/oFB2ACYPTrIrnqYNxgFlQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYD -VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU5K8rJnEaK0gnhS9SZizv8IkTcT4wDQYJKoZIhvcNAQEM -BQADggIBADiWCu49tJYeX++dnAsznyvgyv3SjgofQXSlfKqE1OXyHuY3UjKcC9FhHb8owbZEKTV1 
-d5iyfNm9dKyKaOOpMQkpAWBz40d8U6iQSifvS9efk+eCNs6aaAyC58/UEBZvXw6ZXPYfcX3v73sv -fuo21pdwCxXu11xWajOl40k4DLh9+42FpLFZXvRq4d2h9mREruZRgyFmxhE+885H7pwoHyXa/6xm -ld01D1zvICxi/ZG6qcz8WpyTgYMpl0p8WnK0OdC3d8t5/Wk6kjftbjhlRn7pYL15iJdfOBL07q9b -gsiG1eGZbYwE8na6SfZu6W0eX6DvJ4J2QPim01hcDyxC2kLGe4g0x8HYRZvBPsVhHdljUEn2NIVq -4BjFbkerQUIpm/ZgDdIx02OYI5NaAIFItO/Nis3Jz5nu2Z6qNuFoS3FJFDYoOj0dzpqPJeaAcWEr -tXvM+SUWgeExX6GjfhaknBZqlxi9dnKlC54dNuYvoS++cJEPqOba+MSSQGwlfnuzCdyyF62ARPBo -pY+Udf90WuioAnwMCeKpSwughQtiue+hMZL77/ZRBIls6Kl0obsXs7X9SQ98POyDGCBDTtWTurQ0 -sR8WNh8M5mQ5Fkzc4P4dyKliPUDqysU0ArSuiYgzNdwsE3PYJ/HQcu51OyLemGhmW/HGY0dVHLql -CFF1pkgl ------END CERTIFICATE----- - -GTS Root R2 -=========== ------BEGIN CERTIFICATE----- -MIIFWjCCA0KgAwIBAgIQbkepxlqz5yDFMJo/aFLybzANBgkqhkiG9w0BAQwFADBHMQswCQYDVQQG -EwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJv -b3QgUjIwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAwMDAwWjBHMQswCQYDVQQGEwJVUzEiMCAG -A1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjIwggIi -MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDO3v2m++zsFDQ8BwZabFn3GTXd98GdVarTzTuk -k3LvCvptnfbwhYBboUhSnznFt+4orO/LdmgUud+tAWyZH8QiHZ/+cnfgLFuv5AS/T3KgGjSY6Dlo -7JUle3ah5mm5hRm9iYz+re026nO8/4Piy33B0s5Ks40FnotJk9/BW9BuXvAuMC6C/Pq8tBcKSOWI -m8Wba96wyrQD8Nr0kLhlZPdcTK3ofmZemde4wj7I0BOdre7kRXuJVfeKH2JShBKzwkCX44ofR5Gm -dFrS+LFjKBC4swm4VndAoiaYecb+3yXuPuWgf9RhD1FLPD+M2uFwdNjCaKH5wQzpoeJ/u1U8dgbu -ak7MkogwTZq9TwtImoS1mKPV+3PBV2HdKFZ1E66HjucMUQkQdYhMvI35ezzUIkgfKtzra7tEscsz -cTJGr61K8YzodDqs5xoic4DSMPclQsciOzsSrZYuxsN2B6ogtzVJV+mSSeh2FnIxZyuWfoqjx5RW -Ir9qS34BIbIjMt/kmkRtWVtd9QCgHJvGeJeNkP+byKq0rxFROV7Z+2et1VsRnTKaG73Vululycsl -aVNVJ1zgyjbLiGH7HrfQy+4W+9OmTN6SpdTi3/UGVN4unUu0kzCqgc7dGtxRcw1PcOnlthYhGXmy -5okLdWTK1au8CcEYof/UVKGFPP0UJAOyh9OktwIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYD -VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUu//KjiOfT5nK2+JopqUVJxce2Q4wDQYJKoZIhvcNAQEM -BQADggIBALZp8KZ3/p7uC4Gt4cCpx/k1HUCCq+YEtN/L9x0Pg/B+E02NjO7jMyLDOfxA325BS0JT -vhaI8dI4XsRomRyYUpOM52jtG2pzegVATX9lO9ZY8c6DR2Dj/5epnGB3GFW1fgiTz9D2PGcDFWEJ -+YF59exTpJ/JjwGLc8R3dtyDovUMSRqodt6Sm2T4syzFJ9MHwAiApJiS4wGWAqoC7o87xdFtCjMw -c3i5T1QWvwsHoaRc5svJXISPD+AVdyx+Jn7axEvbpxZ3B7DNdehyQtaVhJ2Gg/LkkM0JR9SLA3Da -WsYDQvTtN6LwG1BUSw7YhN4ZKJmBR64JGz9I0cNv4rBgF/XuIwKl2gBbbZCr7qLpGzvpx0QnRY5r -n/WkhLx3+WuXrD5RRaIRpsyF7gpo8j5QOHokYh4XIDdtak23CZvJ/KRY9bb7nE4Yu5UC56Gtmwfu -Nmsk0jmGwZODUNKBRqhfYlcsu2xkiAhu7xNUX90txGdj08+JN7+dIPT7eoOboB6BAFDC5AwiWVIQ -7UNWhwD4FFKnHYuTjKJNRn8nxnGbJN7k2oaLDX5rIMHAnuFl2GqjpuiFizoHCBy69Y9Vmhh1fuXs -gWbRIXOhNUQLgD1bnF5vKheW0YMjiGZt5obicDIvUiLnyOd/xCxgXS/Dr55FBcOEArf9LAhST4Ld -o/DUhgkC ------END CERTIFICATE----- - -GTS Root R3 -=========== ------BEGIN CERTIFICATE----- -MIICDDCCAZGgAwIBAgIQbkepx2ypcyRAiQ8DVd2NHTAKBggqhkjOPQQDAzBHMQswCQYDVQQGEwJV -UzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3Qg -UjMwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAwMDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UE -ChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjMwdjAQBgcq -hkjOPQIBBgUrgQQAIgNiAAQfTzOHMymKoYTey8chWEGJ6ladK0uFxh1MJ7x/JlFyb+Kf1qPKzEUU -Rout736GjOyxfi//qXGdGIRFBEFVbivqJn+7kAHjSxm65FSWRQmx1WyRRK2EE46ajA2ADDL24Cej -QjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTB8Sa6oC2uhYHP -0/EqEr24Cmf9vDAKBggqhkjOPQQDAwNpADBmAjEAgFukfCPAlaUs3L6JbyO5o91lAFJekazInXJ0 -glMLfalAvWhgxeG4VDvBNhcl2MG9AjEAnjWSdIUlUfUk7GRSJFClH9voy8l27OyCbvWFGFPouOOa -KaqW04MjyaR7YbPMAuhd ------END CERTIFICATE----- - -GTS Root R4 -=========== ------BEGIN CERTIFICATE----- 
-MIICCjCCAZGgAwIBAgIQbkepyIuUtui7OyrYorLBmTAKBggqhkjOPQQDAzBHMQswCQYDVQQGEwJV -UzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3Qg -UjQwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAwMDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UE -ChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjQwdjAQBgcq -hkjOPQIBBgUrgQQAIgNiAATzdHOnaItgrkO4NcWBMHtLSZ37wWHO5t5GvWvVYRg1rkDdc/eJkTBa -6zzuhXyiQHY7qca4R9gq55KRanPpsXI5nymfopjTX15YhmUPoYRlBtHci8nHc8iMai/lxKvRHYqj -QjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSATNbrdP9JNqPV -2Py1PsVq8JQdjDAKBggqhkjOPQQDAwNnADBkAjBqUFJ0CMRw3J5QdCHojXohw0+WbhXRIjVhLfoI -N+4Zba3bssx9BzT1YBkstTTZbyACMANxsbqjYAuG7ZoIapVon+Kz4ZNkfF6Tpt95LY2F45TPI11x -zPKwTdb+mciUqXWi4w== ------END CERTIFICATE----- - UCA Global G2 Root ================== -----BEGIN CERTIFICATE----- @@ -3451,4 +2502,957 @@ Sxfj03k9bWtJySgOLnRQvwzZRjoQhsmnP+mg7H/rpXdYaXHmgwo38oZJar55CJD2AhZkPuXaTH4M NMn5X7azKFGnpyuqSfqNZSlO42sTp5SjLVFteAxEy9/eCG/Oo2Sr05WE1LlSVHJ7liXMvGnjSG4N 0MedJ5qq+BOS3R7fY581qRY27Iy4g/Q9iY/NtBde17MXQRBdJ3NghVdJIgc= -----END CERTIFICATE----- + +Trustwave Global Certification Authority +======================================== +-----BEGIN CERTIFICATE----- +MIIF2jCCA8KgAwIBAgIMBfcOhtpJ80Y1LrqyMA0GCSqGSIb3DQEBCwUAMIGIMQswCQYDVQQGEwJV +UzERMA8GA1UECAwISWxsaW5vaXMxEDAOBgNVBAcMB0NoaWNhZ28xITAfBgNVBAoMGFRydXN0d2F2 +ZSBIb2xkaW5ncywgSW5jLjExMC8GA1UEAwwoVHJ1c3R3YXZlIEdsb2JhbCBDZXJ0aWZpY2F0aW9u +IEF1dGhvcml0eTAeFw0xNzA4MjMxOTM0MTJaFw00MjA4MjMxOTM0MTJaMIGIMQswCQYDVQQGEwJV +UzERMA8GA1UECAwISWxsaW5vaXMxEDAOBgNVBAcMB0NoaWNhZ28xITAfBgNVBAoMGFRydXN0d2F2 +ZSBIb2xkaW5ncywgSW5jLjExMC8GA1UEAwwoVHJ1c3R3YXZlIEdsb2JhbCBDZXJ0aWZpY2F0aW9u +IEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALldUShLPDeS0YLOvR29 +zd24q88KPuFd5dyqCblXAj7mY2Hf8g+CY66j96xz0XznswuvCAAJWX/NKSqIk4cXGIDtiLK0thAf +LdZfVaITXdHG6wZWiYj+rDKd/VzDBcdu7oaJuogDnXIhhpCujwOl3J+IKMujkkkP7NAP4m1ET4Bq +stTnoApTAbqOl5F2brz81Ws25kCI1nsvXwXoLG0R8+eyvpJETNKXpP7ScoFDB5zpET71ixpZfR9o +WN0EACyW80OzfpgZdNmcc9kYvkHHNHnZ9GLCQ7mzJ7Aiy/k9UscwR7PJPrhq4ufogXBeQotPJqX+ +OsIgbrv4Fo7NDKm0G2x2EOFYeUY+VM6AqFcJNykbmROPDMjWLBz7BegIlT1lRtzuzWniTY+HKE40 +Cz7PFNm73bZQmq131BnW2hqIyE4bJ3XYsgjxroMwuREOzYfwhI0Vcnyh78zyiGG69Gm7DIwLdVcE +uE4qFC49DxweMqZiNu5m4iK4BUBjECLzMx10coos9TkpoNPnG4CELcU9402x/RpvumUHO1jsQkUm ++9jaJXLE9gCxInm943xZYkqcBW89zubWR2OZxiRvchLIrH+QtAuRcOi35hYQcRfO3gZPSEF9NUqj +ifLJS3tBEW1ntwiYTOURGa5CgNz7kAXU+FDKvuStx8KU1xad5hePrzb7AgMBAAGjQjBAMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFJngGWcNYtt2s9o9uFvo/ULSMQ6HMA4GA1UdDwEB/wQEAwIB +BjANBgkqhkiG9w0BAQsFAAOCAgEAmHNw4rDT7TnsTGDZqRKGFx6W0OhUKDtkLSGm+J1WE2pIPU/H +PinbbViDVD2HfSMF1OQc3Og4ZYbFdada2zUFvXfeuyk3QAUHw5RSn8pk3fEbK9xGChACMf1KaA0H +ZJDmHvUqoai7PF35owgLEQzxPy0QlG/+4jSHg9bP5Rs1bdID4bANqKCqRieCNqcVtgimQlRXtpla +4gt5kNdXElE1GYhBaCXUNxeEFfsBctyV3lImIJgm4nb1J2/6ADtKYdkNy1GTKv0WBpanI5ojSP5R +vbbEsLFUzt5sQa0WZ37b/TjNuThOssFgy50X31ieemKyJo90lZvkWx3SD92YHJtZuSPTMaCm/zjd +zyBP6VhWOmfD0faZmZ26NraAL4hHT4a/RDqA5Dccprrql5gR0IRiR2Qequ5AvzSxnI9O4fKSTx+O +856X3vOmeWqJcU9LJxdI/uz0UA9PSX3MReO9ekDFQdxhVicGaeVyQYHTtgGJoC86cnn+OjC/QezH +Yj6RS8fZMXZC+fc8Y+wmjHMMfRod6qh8h6jCJ3zhM0EPz8/8AKAigJ5Kp28AsEFFtyLKaEjFQqKu +3R3y4G5OBVixwJAWKqQ9EEC+j2Jjg6mcgn0tAumDMHzLJ8n9HmYAsC7TIS+OMxZsmO0QqAfWzJPP +29FpHOTKyeC2nOnOcXHebD8WpHk= +-----END CERTIFICATE----- + +Trustwave Global ECC P256 Certification Authority +================================================= +-----BEGIN CERTIFICATE----- +MIICYDCCAgegAwIBAgIMDWpfCD8oXD5Rld9dMAoGCCqGSM49BAMCMIGRMQswCQYDVQQGEwJVUzER 
+MA8GA1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAfBgNVBAoTGFRydXN0d2F2ZSBI +b2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3YXZlIEdsb2JhbCBFQ0MgUDI1NiBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eTAeFw0xNzA4MjMxOTM1MTBaFw00MjA4MjMxOTM1MTBaMIGRMQswCQYD +VQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAfBgNVBAoTGFRy +dXN0d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3YXZlIEdsb2JhbCBFQ0MgUDI1 +NiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABH77bOYj +43MyCMpg5lOcunSNGLB4kFKA3TjASh3RqMyTpJcGOMoNFWLGjgEqZZ2q3zSRLoHB5DOSMcT9CTqm +P62jQzBBMA8GA1UdEwEB/wQFMAMBAf8wDwYDVR0PAQH/BAUDAwcGADAdBgNVHQ4EFgQUo0EGrJBt +0UrrdaVKEJmzsaGLSvcwCgYIKoZIzj0EAwIDRwAwRAIgB+ZU2g6gWrKuEZ+Hxbb/ad4lvvigtwjz +RM4q3wghDDcCIC0mA6AFvWvR9lz4ZcyGbbOcNEhjhAnFjXca4syc4XR7 +-----END CERTIFICATE----- + +Trustwave Global ECC P384 Certification Authority +================================================= +-----BEGIN CERTIFICATE----- +MIICnTCCAiSgAwIBAgIMCL2Fl2yZJ6SAaEc7MAoGCCqGSM49BAMDMIGRMQswCQYDVQQGEwJVUzER +MA8GA1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAfBgNVBAoTGFRydXN0d2F2ZSBI +b2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3YXZlIEdsb2JhbCBFQ0MgUDM4NCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eTAeFw0xNzA4MjMxOTM2NDNaFw00MjA4MjMxOTM2NDNaMIGRMQswCQYD +VQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAfBgNVBAoTGFRy +dXN0d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3YXZlIEdsb2JhbCBFQ0MgUDM4 +NCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTB2MBAGByqGSM49AgEGBSuBBAAiA2IABGvaDXU1CDFH +Ba5FmVXxERMuSvgQMSOjfoPTfygIOiYaOs+Xgh+AtycJj9GOMMQKmw6sWASr9zZ9lCOkmwqKi6vr +/TklZvFe/oyujUF5nQlgziip04pt89ZF1PKYhDhloKNDMEEwDwYDVR0TAQH/BAUwAwEB/zAPBgNV +HQ8BAf8EBQMDBwYAMB0GA1UdDgQWBBRVqYSJ0sEyvRjLbKYHTsjnnb6CkDAKBggqhkjOPQQDAwNn +ADBkAjA3AZKXRRJ+oPM+rRk6ct30UJMDEr5E0k9BpIycnR+j9sKS50gU/k6bpZFXrsY3crsCMGcl +CrEMXu6pY5Jv5ZAL/mYiykf9ijH3g/56vxC+GCsej/YpHpRZ744hN8tRmKVuSw== +-----END CERTIFICATE----- + +NAVER Global Root Certification Authority +========================================= +-----BEGIN CERTIFICATE----- +MIIFojCCA4qgAwIBAgIUAZQwHqIL3fXFMyqxQ0Rx+NZQTQ0wDQYJKoZIhvcNAQEMBQAwaTELMAkG +A1UEBhMCS1IxJjAkBgNVBAoMHU5BVkVSIEJVU0lORVNTIFBMQVRGT1JNIENvcnAuMTIwMAYDVQQD +DClOQVZFUiBHbG9iYWwgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0xNzA4MTgwODU4 +NDJaFw0zNzA4MTgyMzU5NTlaMGkxCzAJBgNVBAYTAktSMSYwJAYDVQQKDB1OQVZFUiBCVVNJTkVT +UyBQTEFURk9STSBDb3JwLjEyMDAGA1UEAwwpTkFWRVIgR2xvYmFsIFJvb3QgQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC21PGTXLVAiQqrDZBb +UGOukJR0F0Vy1ntlWilLp1agS7gvQnXp2XskWjFlqxcX0TM62RHcQDaH38dq6SZeWYp34+hInDEW ++j6RscrJo+KfziFTowI2MMtSAuXaMl3Dxeb57hHHi8lEHoSTGEq0n+USZGnQJoViAbbJAh2+g1G7 +XNr4rRVqmfeSVPc0W+m/6imBEtRTkZazkVrd/pBzKPswRrXKCAfHcXLJZtM0l/aM9BhK4dA9WkW2 +aacp+yPOiNgSnABIqKYPszuSjXEOdMWLyEz59JuOuDxp7W87UC9Y7cSw0BwbagzivESq2M0UXZR4 +Yb8ObtoqvC8MC3GmsxY/nOb5zJ9TNeIDoKAYv7vxvvTWjIcNQvcGufFt7QSUqP620wbGQGHfnZ3z +VHbOUzoBppJB7ASjjw2i1QnK1sua8e9DXcCrpUHPXFNwcMmIpi3Ua2FzUCaGYQ5fG8Ir4ozVu53B +A0K6lNpfqbDKzE0K70dpAy8i+/Eozr9dUGWokG2zdLAIx6yo0es+nPxdGoMuK8u180SdOqcXYZai +cdNwlhVNt0xz7hlcxVs+Qf6sdWA7G2POAN3aCJBitOUt7kinaxeZVL6HSuOpXgRM6xBtVNbv8ejy +YhbLgGvtPe31HzClrkvJE+2KAQHJuFFYwGY6sWZLxNUxAmLpdIQM201GLQIDAQABo0IwQDAdBgNV +HQ4EFgQU0p+I36HNLL3s9TsBAZMzJ7LrYEswDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMB +Af8wDQYJKoZIhvcNAQEMBQADggIBADLKgLOdPVQG3dLSLvCkASELZ0jKbY7gyKoNqo0hV4/GPnrK +21HUUrPUloSlWGB/5QuOH/XcChWB5Tu2tyIvCZwTFrFsDDUIbatjcu3cvuzHV+YwIHHW1xDBE1UB +jCpD5EHxzzp6U5LOogMFDTjfArsQLtk70pt6wKGm+LUx5vR1yblTmXVHIloUFcd4G7ad6Qz4G3bx +hYTeodoS76TiEJd6eN4MUZeoIUCLhr0N8F5OSza7OyAfikJW4Qsav3vQIkMsRIz75Sq0bBwcupTg 
+E34h5prCy8VCZLQelHsIJchxzIdFV4XTnyliIoNRlwAYl3dqmJLJfGBs32x9SuRwTMKeuB330DTH +D8z7p/8Dvq1wkNoL3chtl1+afwkyQf3NosxabUzyqkn+Zvjp2DXrDige7kgvOtB5CTh8piKCk5XQ +A76+AqAF3SAi428diDRgxuYKuQl1C/AH6GmWNcf7I4GOODm4RStDeKLRLBT/DShycpWbXgnbiUSY +qqFJu3FS8r/2/yehNq+4tneI3TqkbZs0kNwUXTC/t+sX5Ie3cdCh13cV1ELX8vMxmV2b3RZtP+oG +I/hGoiLtk/bdmuYqh7GYVPEi92tF4+KOdh2ajcQGjTa3FPOdVGm3jjzVpG2Tgbet9r1ke8LJaDmg +kpzNNIaRkPpkUZ3+/uul9XXeifdy +-----END CERTIFICATE----- + +AC RAIZ FNMT-RCM SERVIDORES SEGUROS +=================================== +-----BEGIN CERTIFICATE----- +MIICbjCCAfOgAwIBAgIQYvYybOXE42hcG2LdnC6dlTAKBggqhkjOPQQDAzB4MQswCQYDVQQGEwJF +UzERMA8GA1UECgwIRk5NVC1SQ00xDjAMBgNVBAsMBUNlcmVzMRgwFgYDVQRhDA9WQVRFUy1RMjgy +NjAwNEoxLDAqBgNVBAMMI0FDIFJBSVogRk5NVC1SQ00gU0VSVklET1JFUyBTRUdVUk9TMB4XDTE4 +MTIyMDA5MzczM1oXDTQzMTIyMDA5MzczM1oweDELMAkGA1UEBhMCRVMxETAPBgNVBAoMCEZOTVQt +UkNNMQ4wDAYDVQQLDAVDZXJlczEYMBYGA1UEYQwPVkFURVMtUTI4MjYwMDRKMSwwKgYDVQQDDCNB +QyBSQUlaIEZOTVQtUkNNIFNFUlZJRE9SRVMgU0VHVVJPUzB2MBAGByqGSM49AgEGBSuBBAAiA2IA +BPa6V1PIyqvfNkpSIeSX0oNnnvBlUdBeh8dHsVnyV0ebAAKTRBdp20LHsbI6GA60XYyzZl2hNPk2 +LEnb80b8s0RpRBNm/dfF/a82Tc4DTQdxz69qBdKiQ1oKUm8BA06Oi6NCMEAwDwYDVR0TAQH/BAUw +AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFAG5L++/EYZg8k/QQW6rcx/n0m5JMAoGCCqG +SM49BAMDA2kAMGYCMQCuSuMrQMN0EfKVrRYj3k4MGuZdpSRea0R7/DjiT8ucRRcRTBQnJlU5dUoD +zBOQn5ICMQD6SmxgiHPz7riYYqnOK8LZiqZwMR2vsJRM60/G49HzYqc8/5MuB1xJAWdpEgJyv+c= +-----END CERTIFICATE----- + +GlobalSign Root R46 +=================== +-----BEGIN CERTIFICATE----- +MIIFWjCCA0KgAwIBAgISEdK7udcjGJ5AXwqdLdDfJWfRMA0GCSqGSIb3DQEBDAUAMEYxCzAJBgNV +BAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRwwGgYDVQQDExNHbG9iYWxTaWduIFJv +b3QgUjQ2MB4XDTE5MDMyMDAwMDAwMFoXDTQ2MDMyMDAwMDAwMFowRjELMAkGA1UEBhMCQkUxGTAX +BgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExHDAaBgNVBAMTE0dsb2JhbFNpZ24gUm9vdCBSNDYwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCsrHQy6LNl5brtQyYdpokNRbopiLKkHWPd08Es +CVeJOaFV6Wc0dwxu5FUdUiXSE2te4R2pt32JMl8Nnp8semNgQB+msLZ4j5lUlghYruQGvGIFAha/ +r6gjA7aUD7xubMLL1aa7DOn2wQL7Id5m3RerdELv8HQvJfTqa1VbkNud316HCkD7rRlr+/fKYIje +2sGP1q7Vf9Q8g+7XFkyDRTNrJ9CG0Bwta/OrffGFqfUo0q3v84RLHIf8E6M6cqJaESvWJ3En7YEt +bWaBkoe0G1h6zD8K+kZPTXhc+CtI4wSEy132tGqzZfxCnlEmIyDLPRT5ge1lFgBPGmSXZgjPjHvj +K8Cd+RTyG/FWaha/LIWFzXg4mutCagI0GIMXTpRW+LaCtfOW3T3zvn8gdz57GSNrLNRyc0NXfeD4 +12lPFzYE+cCQYDdF3uYM2HSNrpyibXRdQr4G9dlkbgIQrImwTDsHTUB+JMWKmIJ5jqSngiCNI/on +ccnfxkF0oE32kRbcRoxfKWMxWXEM2G/CtjJ9++ZdU6Z+Ffy7dXxd7Pj2Fxzsx2sZy/N78CsHpdls +eVR2bJ0cpm4O6XkMqCNqo98bMDGfsVR7/mrLZqrcZdCinkqaByFrgY/bxFn63iLABJzjqls2k+g9 +vXqhnQt2sQvHnf3PmKgGwvgqo6GDoLclcqUC4wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYD +VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA1yrc4GHqMywptWU4jaWSf8FmSwwDQYJKoZIhvcNAQEM +BQADggIBAHx47PYCLLtbfpIrXTncvtgdokIzTfnvpCo7RGkerNlFo048p9gkUbJUHJNOxO97k4Vg +JuoJSOD1u8fpaNK7ajFxzHmuEajwmf3lH7wvqMxX63bEIaZHU1VNaL8FpO7XJqti2kM3S+LGteWy +gxk6x9PbTZ4IevPuzz5i+6zoYMzRx6Fcg0XERczzF2sUyQQCPtIkpnnpHs6i58FZFZ8d4kuaPp92 +CC1r2LpXFNqD6v6MVenQTqnMdzGxRBF6XLE+0xRFFRhiJBPSy03OXIPBNvIQtQ6IbbjhVp+J3pZm +OUdkLG5NrmJ7v2B0GbhWrJKsFjLtrWhV/pi60zTe9Mlhww6G9kuEYO4Ne7UyWHmRVSyBQ7N0H3qq +JZ4d16GLuc1CLgSkZoNNiTW2bKg2SnkheCLQQrzRQDGQob4Ez8pn7fXwgNNgyYMqIgXQBztSvwye +qiv5u+YfjyW6hY0XHgL+XVAEV8/+LbzvXMAaq7afJMbfc2hIkCwU9D9SGuTSyxTDYWnP4vkYxboz +nxSjBF25cfe1lNj2M8FawTSLfJvdkzrnE6JwYZ+vj+vYxXX4M2bUdGc6N3ec592kD3ZDZopD8p/7 +DEJ4Y9HiD2971KE9dJeFt0g5QdYg/NA6s/rob8SKunE3vouXsXgxT7PntgMTzlSdriVZzH81Xwj3 +QEUxeCp6 +-----END CERTIFICATE----- + +GlobalSign Root E46 +=================== +-----BEGIN CERTIFICATE----- 
+MIICCzCCAZGgAwIBAgISEdK7ujNu1LzmJGjFDYQdmOhDMAoGCCqGSM49BAMDMEYxCzAJBgNVBAYT +AkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRwwGgYDVQQDExNHbG9iYWxTaWduIFJvb3Qg +RTQ2MB4XDTE5MDMyMDAwMDAwMFoXDTQ2MDMyMDAwMDAwMFowRjELMAkGA1UEBhMCQkUxGTAXBgNV +BAoTEEdsb2JhbFNpZ24gbnYtc2ExHDAaBgNVBAMTE0dsb2JhbFNpZ24gUm9vdCBFNDYwdjAQBgcq +hkjOPQIBBgUrgQQAIgNiAAScDrHPt+ieUnd1NPqlRqetMhkytAepJ8qUuwzSChDH2omwlwxwEwkB +jtjqR+q+soArzfwoDdusvKSGN+1wCAB16pMLey5SnCNoIwZD7JIvU4Tb+0cUB+hflGddyXqBPCCj +QjBAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQxCpCPtsad0kRL +gLWi5h+xEk8blTAKBggqhkjOPQQDAwNoADBlAjEA31SQ7Zvvi5QCkxeCmb6zniz2C5GMn0oUsfZk +vLtoURMMA/cVi4RguYv/Uo7njLwcAjA8+RHUjE7AwWHCFUyqqx0LMV87HOIAl0Qx5v5zli/altP+ +CAezNIm8BZ/3Hobui3A= +-----END CERTIFICATE----- + +GLOBALTRUST 2020 +================ +-----BEGIN CERTIFICATE----- +MIIFgjCCA2qgAwIBAgILWku9WvtPilv6ZeUwDQYJKoZIhvcNAQELBQAwTTELMAkGA1UEBhMCQVQx +IzAhBgNVBAoTGmUtY29tbWVyY2UgbW9uaXRvcmluZyBHbWJIMRkwFwYDVQQDExBHTE9CQUxUUlVT +VCAyMDIwMB4XDTIwMDIxMDAwMDAwMFoXDTQwMDYxMDAwMDAwMFowTTELMAkGA1UEBhMCQVQxIzAh +BgNVBAoTGmUtY29tbWVyY2UgbW9uaXRvcmluZyBHbWJIMRkwFwYDVQQDExBHTE9CQUxUUlVTVCAy +MDIwMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAri5WrRsc7/aVj6B3GyvTY4+ETUWi +D59bRatZe1E0+eyLinjF3WuvvcTfk0Uev5E4C64OFudBc/jbu9G4UeDLgztzOG53ig9ZYybNpyrO +VPu44sB8R85gfD+yc/LAGbaKkoc1DZAoouQVBGM+uq/ufF7MpotQsjj3QWPKzv9pj2gOlTblzLmM +CcpL3TGQlsjMH/1WljTbjhzqLL6FLmPdqqmV0/0plRPwyJiT2S0WR5ARg6I6IqIoV6Lr/sCMKKCm +fecqQjuCgGOlYx8ZzHyyZqjC0203b+J+BlHZRYQfEs4kUmSFC0iAToexIiIwquuuvuAC4EDosEKA +A1GqtH6qRNdDYfOiaxaJSaSjpCuKAsR49GiKweR6NrFvG5Ybd0mN1MkGco/PU+PcF4UgStyYJ9OR +JitHHmkHr96i5OTUawuzXnzUJIBHKWk7buis/UDr2O1xcSvy6Fgd60GXIsUf1DnQJ4+H4xj04KlG +DfV0OoIu0G4skaMxXDtG6nsEEFZegB31pWXogvziB4xiRfUg3kZwhqG8k9MedKZssCz3AwyIDMvU +clOGvGBG85hqwvG/Q/lwIHfKN0F5VVJjjVsSn8VoxIidrPIwq7ejMZdnrY8XD2zHc+0klGvIg5rQ +mjdJBKuxFshsSUktq6HQjJLyQUp5ISXbY9e2nKd+Qmn7OmMCAwEAAaNjMGEwDwYDVR0TAQH/BAUw +AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFNwuH9FhN3nkq9XVsxJxaD1qaJwiMB8GA1Ud +IwQYMBaAFNwuH9FhN3nkq9XVsxJxaD1qaJwiMA0GCSqGSIb3DQEBCwUAA4ICAQCR8EICaEDuw2jA +VC/f7GLDw56KoDEoqoOOpFaWEhCGVrqXctJUMHytGdUdaG/7FELYjQ7ztdGl4wJCXtzoRlgHNQIw +4Lx0SsFDKv/bGtCwr2zD/cuz9X9tAy5ZVp0tLTWMstZDFyySCstd6IwPS3BD0IL/qMy/pJTAvoe9 +iuOTe8aPmxadJ2W8esVCgmxcB9CpwYhgROmYhRZf+I/KARDOJcP5YBugxZfD0yyIMaK9MOzQ0MAS +8cE54+X1+NZK3TTN+2/BT+MAi1bikvcoskJ3ciNnxz8RFbLEAwW+uxF7Cr+obuf/WEPPm2eggAe2 +HcqtbepBEX4tdJP7wry+UUTF72glJ4DjyKDUEuzZpTcdN3y0kcra1LGWge9oXHYQSa9+pTeAsRxS +vTOBTI/53WXZFM2KJVj04sWDpQmQ1GwUY7VA3+vA/MRYfg0UFodUJ25W5HCEuGwyEn6CMUO+1918 +oa2u1qsgEu8KwxCMSZY13At1XrFP1U80DhEgB3VDRemjEdqso5nCtnkn4rnvyOL2NSl6dPrFf4IF +YqYK6miyeUcGbvJXqBUzxvd4Sj1Ce2t+/vdG6tHrju+IaFvowdlxfv1k7/9nR4hYJS8+hge9+6jl +gqispdNpQ80xiEmEU5LAsTkbOYMBMMTyqfrQA71yN2BWHzZ8vTmR9W0Nv3vXkg== +-----END CERTIFICATE----- + +ANF Secure Server Root CA +========================= +-----BEGIN CERTIFICATE----- +MIIF7zCCA9egAwIBAgIIDdPjvGz5a7EwDQYJKoZIhvcNAQELBQAwgYQxEjAQBgNVBAUTCUc2MzI4 +NzUxMDELMAkGA1UEBhMCRVMxJzAlBgNVBAoTHkFORiBBdXRvcmlkYWQgZGUgQ2VydGlmaWNhY2lv +bjEUMBIGA1UECxMLQU5GIENBIFJhaXoxIjAgBgNVBAMTGUFORiBTZWN1cmUgU2VydmVyIFJvb3Qg +Q0EwHhcNMTkwOTA0MTAwMDM4WhcNMzkwODMwMTAwMDM4WjCBhDESMBAGA1UEBRMJRzYzMjg3NTEw +MQswCQYDVQQGEwJFUzEnMCUGA1UEChMeQU5GIEF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uMRQw +EgYDVQQLEwtBTkYgQ0EgUmFpejEiMCAGA1UEAxMZQU5GIFNlY3VyZSBTZXJ2ZXIgUm9vdCBDQTCC +AiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANvrayvmZFSVgpCjcqQZAZ2cC4Ffc0m6p6zz +BE57lgvsEeBbphzOG9INgxwruJ4dfkUyYA8H6XdYfp9qyGFOtibBTI3/TO80sh9l2Ll49a2pcbnv 
+T1gdpd50IJeh7WhM3pIXS7yr/2WanvtH2Vdy8wmhrnZEE26cLUQ5vPnHO6RYPUG9tMJJo8gN0pcv +B2VSAKduyK9o7PQUlrZXH1bDOZ8rbeTzPvY1ZNoMHKGESy9LS+IsJJ1tk0DrtSOOMspvRdOoiXse +zx76W0OLzc2oD2rKDF65nkeP8Nm2CgtYZRczuSPkdxl9y0oukntPLxB3sY0vaJxizOBQ+OyRp1RM +VwnVdmPF6GUe7m1qzwmd+nxPrWAI/VaZDxUse6mAq4xhj0oHdkLePfTdsiQzW7i1o0TJrH93PB0j +7IKppuLIBkwC/qxcmZkLLxCKpvR/1Yd0DVlJRfbwcVw5Kda/SiOL9V8BY9KHcyi1Swr1+KuCLH5z +JTIdC2MKF4EA/7Z2Xue0sUDKIbvVgFHlSFJnLNJhiQcND85Cd8BEc5xEUKDbEAotlRyBr+Qc5RQe +8TZBAQIvfXOn3kLMTOmJDVb3n5HUA8ZsyY/b2BzgQJhdZpmYgG4t/wHFzstGH6wCxkPmrqKEPMVO +Hj1tyRRM4y5Bu8o5vzY8KhmqQYdOpc5LMnndkEl/AgMBAAGjYzBhMB8GA1UdIwQYMBaAFJxf0Gxj +o1+TypOYCK2Mh6UsXME3MB0GA1UdDgQWBBScX9BsY6Nfk8qTmAitjIelLFzBNzAOBgNVHQ8BAf8E +BAMCAYYwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEATh65isagmD9uw2nAalxJ +UqzLK114OMHVVISfk/CHGT0sZonrDUL8zPB1hT+L9IBdeeUXZ701guLyPI59WzbLWoAAKfLOKyzx +j6ptBZNscsdW699QIyjlRRA96Gejrw5VD5AJYu9LWaL2U/HANeQvwSS9eS9OICI7/RogsKQOLHDt +dD+4E5UGUcjohybKpFtqFiGS3XNgnhAY3jyB6ugYw3yJ8otQPr0R4hUDqDZ9MwFsSBXXiJCZBMXM +5gf0vPSQ7RPi6ovDj6MzD8EpTBNO2hVWcXNyglD2mjN8orGoGjR0ZVzO0eurU+AagNjqOknkJjCb +5RyKqKkVMoaZkgoQI1YS4PbOTOK7vtuNknMBZi9iPrJyJ0U27U1W45eZ/zo1PqVUSlJZS2Db7v54 +EX9K3BR5YLZrZAPbFYPhor72I5dQ8AkzNqdxliXzuUJ92zg/LFis6ELhDtjTO0wugumDLmsx2d1H +hk9tl5EuT+IocTUW0fJz/iUrB0ckYyfI+PbZa/wSMVYIwFNCr5zQM378BvAxRAMU8Vjq8moNqRGy +g77FGr8H6lnco4g175x2MjxNBiLOFeXdntiP2t7SxDnlF4HPOEfrf4htWRvfn0IUrn7PqLBmZdo3 +r5+qPeoott7VMVgWglvquxl1AnMaykgaIZOQCo6ThKd9OyMYkomgjaw= +-----END CERTIFICATE----- + +Certum EC-384 CA +================ +-----BEGIN CERTIFICATE----- +MIICZTCCAeugAwIBAgIQeI8nXIESUiClBNAt3bpz9DAKBggqhkjOPQQDAzB0MQswCQYDVQQGEwJQ +TDEhMB8GA1UEChMYQXNzZWNvIERhdGEgU3lzdGVtcyBTLkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2Vy +dGlmaWNhdGlvbiBBdXRob3JpdHkxGTAXBgNVBAMTEENlcnR1bSBFQy0zODQgQ0EwHhcNMTgwMzI2 +MDcyNDU0WhcNNDMwMzI2MDcyNDU0WjB0MQswCQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERh +dGEgU3lzdGVtcyBTLkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkx +GTAXBgNVBAMTEENlcnR1bSBFQy0zODQgQ0EwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAATEKI6rGFtq +vm5kN2PkzeyrOvfMobgOgknXhimfoZTy42B4mIF4Bk3y7JoOV2CDn7TmFy8as10CW4kjPMIRBSqn +iBMY81CE1700LCeJVf/OTOffph8oxPBUw7l8t1Ot68KjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFI0GZnQkdjrzife81r1HfS+8EF9LMA4GA1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNo +ADBlAjADVS2m5hjEfO/JUG7BJw+ch69u1RsIGL2SKcHvlJF40jocVYli5RsJHrpka/F2tNQCMQC0 +QoSZ/6vnnvuRlydd3LBbMHHOXjgaatkl5+r3YZJW+OraNsKHZZYuciUvf9/DE8k= +-----END CERTIFICATE----- + +Certum Trusted Root CA +====================== +-----BEGIN CERTIFICATE----- +MIIFwDCCA6igAwIBAgIQHr9ZULjJgDdMBvfrVU+17TANBgkqhkiG9w0BAQ0FADB6MQswCQYDVQQG +EwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEgU3lzdGVtcyBTLkEuMScwJQYDVQQLEx5DZXJ0dW0g +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkxHzAdBgNVBAMTFkNlcnR1bSBUcnVzdGVkIFJvb3QgQ0Ew +HhcNMTgwMzE2MTIxMDEzWhcNNDMwMzE2MTIxMDEzWjB6MQswCQYDVQQGEwJQTDEhMB8GA1UEChMY +QXNzZWNvIERhdGEgU3lzdGVtcyBTLkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkxHzAdBgNVBAMTFkNlcnR1bSBUcnVzdGVkIFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEB +AQUAA4ICDwAwggIKAoICAQDRLY67tzbqbTeRn06TpwXkKQMlzhyC93yZn0EGze2jusDbCSzBfN8p +fktlL5On1AFrAygYo9idBcEq2EXxkd7fO9CAAozPOA/qp1x4EaTByIVcJdPTsuclzxFUl6s1wB52 +HO8AU5853BSlLCIls3Jy/I2z5T4IHhQqNwuIPMqw9MjCoa68wb4pZ1Xi/K1ZXP69VyywkI3C7Te2 +fJmItdUDmj0VDT06qKhF8JVOJVkdzZhpu9PMMsmN74H+rX2Ju7pgE8pllWeg8xn2A1bUatMn4qGt +g/BKEiJ3HAVz4hlxQsDsdUaakFjgao4rpUYwBI4Zshfjvqm6f1bxJAPXsiEodg42MEx51UGamqi4 +NboMOvJEGyCI98Ul1z3G4z5D3Yf+xOr1Uz5MZf87Sst4WmsXXw3Hw09Omiqi7VdNIuJGmj8PkTQk +fVXjjJU30xrwCSss0smNtA0Aq2cpKNgB9RkEth2+dv5yXMSFytKAQd8FqKPVhJBPC/PgP5sZ0jeJ 
+P/J7UhyM9uH3PAeXjA6iWYEMspA90+NZRu0PqafegGtaqge2Gcu8V/OXIXoMsSt0Puvap2ctTMSY +njYJdmZm/Bo/6khUHL4wvYBQv3y1zgD2DGHZ5yQD4OMBgQ692IU0iL2yNqh7XAjlRICMb/gv1SHK +HRzQ+8S1h9E6Tsd2tTVItQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSM+xx1 +vALTn04uSNn5YFSqxLNP+jAOBgNVHQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQENBQADggIBAEii1QAL +LtA/vBzVtVRJHlpr9OTy4EA34MwUe7nJ+jW1dReTagVphZzNTxl4WxmB82M+w85bj/UvXgF2Ez8s +ALnNllI5SW0ETsXpD4YN4fqzX4IS8TrOZgYkNCvozMrnadyHncI013nR03e4qllY/p0m+jiGPp2K +h2RX5Rc64vmNueMzeMGQ2Ljdt4NR5MTMI9UGfOZR0800McD2RrsLrfw9EAUqO0qRJe6M1ISHgCq8 +CYyqOhNf6DR5UMEQGfnTKB7U0VEwKbOukGfWHwpjscWpxkIxYxeU72nLL/qMFH3EQxiJ2fAyQOaA +4kZf5ePBAFmo+eggvIksDkc0C+pXwlM2/KfUrzHN/gLldfq5Jwn58/U7yn2fqSLLiMmq0Uc9Nneo +WWRrJ8/vJ8HjJLWG965+Mk2weWjROeiQWMODvA8s1pfrzgzhIMfatz7DP78v3DSk+yshzWePS/Tj +6tQ/50+6uaWTRRxmHyH6ZF5v4HaUMst19W7l9o/HuKTMqJZ9ZPskWkoDbGs4xugDQ5r3V7mzKWmT +OPQD8rv7gmsHINFSH5pkAnuYZttcTVoP0ISVoDwUQwbKytu4QTbaakRnh6+v40URFWkIsr4WOZck +bxJF0WddCajJFdr60qZfE2Efv4WstK2tBZQIgx51F9NxO5NQI1mg7TyRVJ12AMXDuDjb +-----END CERTIFICATE----- + +TunTrust Root CA +================ +-----BEGIN CERTIFICATE----- +MIIFszCCA5ugAwIBAgIUEwLV4kBMkkaGFmddtLu7sms+/BMwDQYJKoZIhvcNAQELBQAwYTELMAkG +A1UEBhMCVE4xNzA1BgNVBAoMLkFnZW5jZSBOYXRpb25hbGUgZGUgQ2VydGlmaWNhdGlvbiBFbGVj +dHJvbmlxdWUxGTAXBgNVBAMMEFR1blRydXN0IFJvb3QgQ0EwHhcNMTkwNDI2MDg1NzU2WhcNNDQw +NDI2MDg1NzU2WjBhMQswCQYDVQQGEwJUTjE3MDUGA1UECgwuQWdlbmNlIE5hdGlvbmFsZSBkZSBD +ZXJ0aWZpY2F0aW9uIEVsZWN0cm9uaXF1ZTEZMBcGA1UEAwwQVHVuVHJ1c3QgUm9vdCBDQTCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMPN0/y9BFPdDCA61YguBUtB9YOCfvdZn56eY+hz +2vYGqU8ftPkLHzmMmiDQfgbU7DTZhrx1W4eI8NLZ1KMKsmwb60ksPqxd2JQDoOw05TDENX37Jk0b +bjBU2PWARZw5rZzJJQRNmpA+TkBuimvNKWfGzC3gdOgFVwpIUPp6Q9p+7FuaDmJ2/uqdHYVy7BG7 +NegfJ7/Boce7SBbdVtfMTqDhuazb1YMZGoXRlJfXyqNlC/M4+QKu3fZnz8k/9YosRxqZbwUN/dAd +gjH8KcwAWJeRTIAAHDOFli/LQcKLEITDCSSJH7UP2dl3RxiSlGBcx5kDPP73lad9UKGAwqmDrViW +VSHbhlnUr8a83YFuB9tgYv7sEG7aaAH0gxupPqJbI9dkxt/con3YS7qC0lH4Zr8GRuR5KiY2eY8f +Tpkdso8MDhz/yV3A/ZAQprE38806JG60hZC/gLkMjNWb1sjxVj8agIl6qeIbMlEsPvLfe/ZdeikZ +juXIvTZxi11Mwh0/rViizz1wTaZQmCXcI/m4WEEIcb9PuISgjwBUFfyRbVinljvrS5YnzWuioYas +DXxU5mZMZl+QviGaAkYt5IPCgLnPSz7ofzwB7I9ezX/SKEIBlYrilz0QIX32nRzFNKHsLA4KUiwS +VXAkPcvCFDVDXSdOvsC9qnyW5/yeYa1E0wCXAgMBAAGjYzBhMB0GA1UdDgQWBBQGmpsfU33x9aTI +04Y+oXNZtPdEITAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFAaamx9TffH1pMjThj6hc1m0 +90QhMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAqgVutt0Vyb+zxiD2BkewhpMl +0425yAA/l/VSJ4hxyXT968pk21vvHl26v9Hr7lxpuhbI87mP0zYuQEkHDVneixCwSQXi/5E/S7fd +Ao74gShczNxtr18UnH1YeA32gAm56Q6XKRm4t+v4FstVEuTGfbvE7Pi1HE4+Z7/FXxttbUcoqgRY +YdZ2vyJ/0Adqp2RT8JeNnYA/u8EH22Wv5psymsNUk8QcCMNE+3tjEUPRahphanltkE8pjkcFwRJp +adbGNjHh/PqAulxPxOu3Mqz4dWEX1xAZufHSCe96Qp1bWgvUxpVOKs7/B9dPfhgGiPEZtdmYu65x +xBzndFlY7wyJz4sfdZMaBBSSSFCp61cpABbjNhzI+L/wM9VBD8TMPN3pM0MBkRArHtG5Xc0yGYuP +jCB31yLEQtyEFpslbei0VXF/sHyz03FJuc9SpAQ/3D2gu68zngowYI7bnV2UqL1g52KAdoGDDIzM +MEZJ4gzSqK/rYXHv5yJiqfdcZGyfFoxnNidF9Ql7v/YQCvGwjVRDjAS6oz/v4jXH+XTgbzRB0L9z +ZVcg+ZtnemZoJE6AZb0QmQZZ8mWvuMZHu/2QeItBcy6vVR/cO5JyboTT0GFMDcx2V+IthSIVNg3r +AZ3r2OvEhJn7wAzMMujjd9qDRIueVSjAi1jTkD5OGwDxFa2DK5o= +-----END CERTIFICATE----- + +HARICA TLS RSA Root CA 2021 +=========================== +-----BEGIN CERTIFICATE----- +MIIFpDCCA4ygAwIBAgIQOcqTHO9D88aOk8f0ZIk4fjANBgkqhkiG9w0BAQsFADBsMQswCQYDVQQG +EwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2VhcmNoIEluc3RpdHV0aW9u +cyBDQTEkMCIGA1UEAwwbSEFSSUNBIFRMUyBSU0EgUm9vdCBDQSAyMDIxMB4XDTIxMDIxOTEwNTUz +OFoXDTQ1MDIxMzEwNTUzN1owbDELMAkGA1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRl 
+bWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ0ExJDAiBgNVBAMMG0hBUklDQSBUTFMgUlNB +IFJvb3QgQ0EgMjAyMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAIvC569lmwVnlskN +JLnQDmT8zuIkGCyEf3dRywQRNrhe7Wlxp57kJQmXZ8FHws+RFjZiPTgE4VGC/6zStGndLuwRo0Xu +a2s7TL+MjaQenRG56Tj5eg4MmOIjHdFOY9TnuEFE+2uva9of08WRiFukiZLRgeaMOVig1mlDqa2Y +Ulhu2wr7a89o+uOkXjpFc5gH6l8Cct4MpbOfrqkdtx2z/IpZ525yZa31MJQjB/OCFks1mJxTuy/K +5FrZx40d/JiZ+yykgmvwKh+OC19xXFyuQnspiYHLA6OZyoieC0AJQTPb5lh6/a6ZcMBaD9YThnEv +dmn8kN3bLW7R8pv1GmuebxWMevBLKKAiOIAkbDakO/IwkfN4E8/BPzWr8R0RI7VDIp4BkrcYAuUR +0YLbFQDMYTfBKnya4dC6s1BG7oKsnTH4+yPiAwBIcKMJJnkVU2DzOFytOOqBAGMUuTNe3QvboEUH +GjMJ+E20pwKmafTCWQWIZYVWrkvL4N48fS0ayOn7H6NhStYqE613TBoYm5EPWNgGVMWX+Ko/IIqm +haZ39qb8HOLubpQzKoNQhArlT4b4UEV4AIHrW2jjJo3Me1xR9BQsQL4aYB16cmEdH2MtiKrOokWQ +CPxrvrNQKlr9qEgYRtaQQJKQCoReaDH46+0N0x3GfZkYVVYnZS6NRcUk7M7jAgMBAAGjQjBAMA8G +A1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFApII6ZgpJIKM+qTW8VX6iVNvRLuMA4GA1UdDwEB/wQE +AwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAPpBIqm5iFSVmewzVjIuJndftTgfvnNAUX15QvWiWkKQU +EapobQk1OUAJ2vQJLDSle1mESSmXdMgHHkdt8s4cUCbjnj1AUz/3f5Z2EMVGpdAgS1D0NTsY9FVq +QRtHBmg8uwkIYtlfVUKqrFOFrJVWNlar5AWMxajaH6NpvVMPxP/cyuN+8kyIhkdGGvMA9YCRotxD +QpSbIPDRzbLrLFPCU3hKTwSUQZqPJzLB5UkZv/HywouoCjkxKLR9YjYsTewfM7Z+d21+UPCfDtcR +j88YxeMn/ibvBZ3PzzfF0HvaO7AWhAw6k9a+F9sPPg4ZeAnHqQJyIkv3N3a6dcSFA1pj1bF1BcK5 +vZStjBWZp5N99sXzqnTPBIWUmAD04vnKJGW/4GKvyMX6ssmeVkjaef2WdhW+o45WxLM0/L5H9MG0 +qPzVMIho7suuyWPEdr6sOBjhXlzPrjoiUevRi7PzKzMHVIf6tLITe7pTBGIBnfHAT+7hOtSLIBD6 +Alfm78ELt5BGnBkpjNxvoEppaZS3JGWg/6w/zgH7IS79aPib8qXPMThcFarmlwDB31qlpzmq6YR/ +PFGoOtmUW4y/Twhx5duoXNTSpv4Ao8YWxw/ogM4cKGR0GQjTQuPOAF1/sdwTsOEFy9EgqoZ0njnn +kf3/W9b3raYvAwtt41dU63ZTGI0RmLo= +-----END CERTIFICATE----- + +HARICA TLS ECC Root CA 2021 +=========================== +-----BEGIN CERTIFICATE----- +MIICVDCCAdugAwIBAgIQZ3SdjXfYO2rbIvT/WeK/zjAKBggqhkjOPQQDAzBsMQswCQYDVQQGEwJH +UjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2VhcmNoIEluc3RpdHV0aW9ucyBD +QTEkMCIGA1UEAwwbSEFSSUNBIFRMUyBFQ0MgUm9vdCBDQSAyMDIxMB4XDTIxMDIxOTExMDExMFoX +DTQ1MDIxMzExMDEwOVowbDELMAkGA1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWlj +IGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ0ExJDAiBgNVBAMMG0hBUklDQSBUTFMgRUNDIFJv +b3QgQ0EgMjAyMTB2MBAGByqGSM49AgEGBSuBBAAiA2IABDgI/rGgltJ6rK9JOtDA4MM7KKrxcm1l +AEeIhPyaJmuqS7psBAqIXhfyVYf8MLA04jRYVxqEU+kw2anylnTDUR9YSTHMmE5gEYd103KUkE+b +ECUqqHgtvpBBWJAVcqeht6NCMEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUyRtTgRL+BNUW +0aq8mm+3oJUZbsowDgYDVR0PAQH/BAQDAgGGMAoGCCqGSM49BAMDA2cAMGQCMBHervjcToiwqfAi +rcJRQO9gcS3ujwLEXQNwSaSS6sUUiHCm0w2wqsosQJz76YJumgIwK0eaB8bRwoF8yguWGEEbo/Qw +CZ61IygNnxS2PFOiTAZpffpskcYqSUXm7LcT4Tps +-----END CERTIFICATE----- + +Autoridad de Certificacion Firmaprofesional CIF A62634068 +========================================================= +-----BEGIN CERTIFICATE----- +MIIGFDCCA/ygAwIBAgIIG3Dp0v+ubHEwDQYJKoZIhvcNAQELBQAwUTELMAkGA1UEBhMCRVMxQjBA +BgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1hcHJvZmVzaW9uYWwgQ0lGIEE2 +MjYzNDA2ODAeFw0xNDA5MjMxNTIyMDdaFw0zNjA1MDUxNTIyMDdaMFExCzAJBgNVBAYTAkVTMUIw +QAYDVQQDDDlBdXRvcmlkYWQgZGUgQ2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBB +NjI2MzQwNjgwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDD +Utd9thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQMcas9UX4P +B99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefGL9ItWY16Ck6WaVICqjaY +7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15iNA9wBj4gGFrO93IbJWyTdBSTo3OxDqqH +ECNZXyAFGUftaI6SEspd/NYrspI8IM/hX68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyI 
+plD9amML9ZMWGxmPsu2bm8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctX +MbScyJCyZ/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirjaEbsX +LZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/TKI8xWVvTyQKmtFLK +bpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF6NkBiDkal4ZkQdU7hwxu+g/GvUgU +vzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVhOSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMB0GA1Ud +DgQWBBRlzeurNR4APn7VdMActHNHDhpkLzASBgNVHRMBAf8ECDAGAQH/AgEBMIGmBgNVHSAEgZ4w +gZswgZgGBFUdIAAwgY8wLwYIKwYBBQUHAgEWI2h0dHA6Ly93d3cuZmlybWFwcm9mZXNpb25hbC5j +b20vY3BzMFwGCCsGAQUFBwICMFAeTgBQAGEAcwBlAG8AIABkAGUAIABsAGEAIABCAG8AbgBhAG4A +bwB2AGEAIAA0ADcAIABCAGEAcgBjAGUAbABvAG4AYQAgADAAOAAwADEANzAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQELBQADggIBAHSHKAIrdx9miWTtj3QuRhy7qPj4Cx2Dtjqn6EWKB7fgPiDL +4QjbEwj4KKE1soCzC1HA01aajTNFSa9J8OA9B3pFE1r/yJfY0xgsfZb43aJlQ3CTkBW6kN/oGbDb +LIpgD7dvlAceHabJhfa9NPhAeGIQcDq+fUs5gakQ1JZBu/hfHAsdCPKxsIl68veg4MSPi3i1O1il +I45PVf42O+AMt8oqMEEgtIDNrvx2ZnOorm7hfNoD6JQg5iKj0B+QXSBTFCZX2lSX3xZEEAEeiGaP +cjiT3SC3NL7X8e5jjkd5KAb881lFJWAiMxujX6i6KtoaPc1A6ozuBRWV1aUsIC+nmCjuRfzxuIgA +LI9C2lHVnOUTaHFFQ4ueCyE8S1wF3BqfmI7avSKecs2tCsvMo2ebKHTEm9caPARYpoKdrcd7b/+A +lun4jWq9GJAd/0kakFI3ky88Al2CdgtR5xbHV/g4+afNmyJU72OwFW1TZQNKXkqgsqeOSQBZONXH +9IBk9W6VULgRfhVwOEqwf9DEMnDAGf/JOC0ULGb0QkTmVXYbgBVX/8Cnp6o5qtjTcNAuuuuUavpf +NIbnYrX9ivAwhZTJryQCL2/W3Wf+47BVTwSYT6RBVuKT0Gro1vP7ZeDOdcQxWQzugsgMYDNKGbqE +ZycPvEJdvSRUDewdcAZfpLz6IHxV +-----END CERTIFICATE----- + +vTrus ECC Root CA +================= +-----BEGIN CERTIFICATE----- +MIICDzCCAZWgAwIBAgIUbmq8WapTvpg5Z6LSa6Q75m0c1towCgYIKoZIzj0EAwMwRzELMAkGA1UE +BhMCQ04xHDAaBgNVBAoTE2lUcnVzQ2hpbmEgQ28uLEx0ZC4xGjAYBgNVBAMTEXZUcnVzIEVDQyBS +b290IENBMB4XDTE4MDczMTA3MjY0NFoXDTQzMDczMTA3MjY0NFowRzELMAkGA1UEBhMCQ04xHDAa +BgNVBAoTE2lUcnVzQ2hpbmEgQ28uLEx0ZC4xGjAYBgNVBAMTEXZUcnVzIEVDQyBSb290IENBMHYw +EAYHKoZIzj0CAQYFK4EEACIDYgAEZVBKrox5lkqqHAjDo6LN/llWQXf9JpRCux3NCNtzslt188+c +ToL0v/hhJoVs1oVbcnDS/dtitN9Ti72xRFhiQgnH+n9bEOf+QP3A2MMrMudwpremIFUde4BdS49n +TPEQo0IwQDAdBgNVHQ4EFgQUmDnNvtiyjPeyq+GtJK97fKHbH88wDwYDVR0TAQH/BAUwAwEB/zAO +BgNVHQ8BAf8EBAMCAQYwCgYIKoZIzj0EAwMDaAAwZQIwV53dVvHH4+m4SVBrm2nDb+zDfSXkV5UT +QJtS0zvzQBm8JsctBp61ezaf9SXUY2sAAjEA6dPGnlaaKsyh2j/IZivTWJwghfqrkYpwcBE4YGQL +YgmRWAD5Tfs0aNoJrSEGGJTO +-----END CERTIFICATE----- + +vTrus Root CA +============= +-----BEGIN CERTIFICATE----- +MIIFVjCCAz6gAwIBAgIUQ+NxE9izWRRdt86M/TX9b7wFjUUwDQYJKoZIhvcNAQELBQAwQzELMAkG +A1UEBhMCQ04xHDAaBgNVBAoTE2lUcnVzQ2hpbmEgQ28uLEx0ZC4xFjAUBgNVBAMTDXZUcnVzIFJv +b3QgQ0EwHhcNMTgwNzMxMDcyNDA1WhcNNDMwNzMxMDcyNDA1WjBDMQswCQYDVQQGEwJDTjEcMBoG +A1UEChMTaVRydXNDaGluYSBDby4sTHRkLjEWMBQGA1UEAxMNdlRydXMgUm9vdCBDQTCCAiIwDQYJ +KoZIhvcNAQEBBQADggIPADCCAgoCggIBAL1VfGHTuB0EYgWgrmy3cLRB6ksDXhA/kFocizuwZots +SKYcIrrVQJLuM7IjWcmOvFjai57QGfIvWcaMY1q6n6MLsLOaXLoRuBLpDLvPbmyAhykUAyyNJJrI +ZIO1aqwTLDPxn9wsYTwaP3BVm60AUn/PBLn+NvqcwBauYv6WTEN+VRS+GrPSbcKvdmaVayqwlHeF +XgQPYh1jdfdr58tbmnDsPmcF8P4HCIDPKNsFxhQnL4Z98Cfe/+Z+M0jnCx5Y0ScrUw5XSmXX+6KA +YPxMvDVTAWqXcoKv8R1w6Jz1717CbMdHflqUhSZNO7rrTOiwCcJlwp2dCZtOtZcFrPUGoPc2BX70 +kLJrxLT5ZOrpGgrIDajtJ8nU57O5q4IikCc9Kuh8kO+8T/3iCiSn3mUkpF3qwHYw03dQ+A0Em5Q2 +AXPKBlim0zvc+gRGE1WKyURHuFE5Gi7oNOJ5y1lKCn+8pu8fA2dqWSslYpPZUxlmPCdiKYZNpGvu +/9ROutW04o5IWgAZCfEF2c6Rsffr6TlP9m8EQ5pV9T4FFL2/s1m02I4zhKOQUqqzApVg+QxMaPnu +1RcN+HFXtSXkKe5lXa/R7jwXC1pDxaWG6iSe4gUH3DRCEpHWOXSuTEGC2/KmSNGzm/MzqvOmwMVO +9fSddmPmAsYiS8GVP1BkLFTltvA8Kc9XAgMBAAGjQjBAMB0GA1UdDgQWBBRUYnBj8XWEQ1iO0RYg +scasGrz2iTAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOC 
+AgEAKbqSSaet8PFww+SX8J+pJdVrnjT+5hpk9jprUrIQeBqfTNqK2uwcN1LgQkv7bHbKJAs5EhWd +nxEt/Hlk3ODg9d3gV8mlsnZwUKT+twpw1aA08XXXTUm6EdGz2OyC/+sOxL9kLX1jbhd47F18iMjr +jld22VkE+rxSH0Ws8HqA7Oxvdq6R2xCOBNyS36D25q5J08FsEhvMKar5CKXiNxTKsbhm7xqC5PD4 +8acWabfbqWE8n/Uxy+QARsIvdLGx14HuqCaVvIivTDUHKgLKeBRtRytAVunLKmChZwOgzoy8sHJn +xDHO2zTlJQNgJXtxmOTAGytfdELSS8VZCAeHvsXDf+eW2eHcKJfWjwXj9ZtOyh1QRwVTsMo554Wg +icEFOwE30z9J4nfrI8iIZjs9OXYhRvHsXyO466JmdXTBQPfYaJqT4i2pLr0cox7IdMakLXogqzu4 +sEb9b91fUlV1YvCXoHzXOP0l382gmxDPi7g4Xl7FtKYCNqEeXxzP4padKar9mK5S4fNBUvupLnKW +nyfjqnN9+BojZns7q2WwMgFLFT49ok8MKzWixtlnEjUwzXYuFrOZnk1PTi07NEPhmg4NpGaXutIc +SkwsKouLgU9xGqndXHt7CMUADTdA43x7VF8vhV929vensBxXVsFy6K2ir40zSbofitzmdHxghm+H +l3s= +-----END CERTIFICATE----- + +ISRG Root X2 +============ +-----BEGIN CERTIFICATE----- +MIICGzCCAaGgAwIBAgIQQdKd0XLq7qeAwSxs6S+HUjAKBggqhkjOPQQDAzBPMQswCQYDVQQGEwJV +UzEpMCcGA1UEChMgSW50ZXJuZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElT +UkcgUm9vdCBYMjAeFw0yMDA5MDQwMDAwMDBaFw00MDA5MTcxNjAwMDBaME8xCzAJBgNVBAYTAlVT +MSkwJwYDVQQKEyBJbnRlcm5ldCBTZWN1cml0eSBSZXNlYXJjaCBHcm91cDEVMBMGA1UEAxMMSVNS +RyBSb290IFgyMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEzZvVn4CDCuwJSvMWSj5cz3es3mcFDR0H +ttwW+1qLFNvicWDEukWVEYmO6gbf9yoWHKS5xcUy4APgHoIYOIvXRdgKam7mAHf7AlF9ItgKbppb +d9/w+kHsOdx1ymgHDB/qo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV +HQ4EFgQUfEKWrt5LSDv6kviejM9ti6lyN5UwCgYIKoZIzj0EAwMDaAAwZQIwe3lORlCEwkSHRhtF +cP9Ymd70/aTSVaYgLXTWNLxBo1BfASdWtL4ndQavEi51mI38AjEAi/V3bNTIZargCyzuFJ0nN6T5 +U6VR5CmD1/iQMVtCnwr1/q4AaOeMSQ+2b1tbFfLn +-----END CERTIFICATE----- + +HiPKI Root CA - G1 +================== +-----BEGIN CERTIFICATE----- +MIIFajCCA1KgAwIBAgIQLd2szmKXlKFD6LDNdmpeYDANBgkqhkiG9w0BAQsFADBPMQswCQYDVQQG +EwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0ZC4xGzAZBgNVBAMMEkhpUEtJ +IFJvb3QgQ0EgLSBHMTAeFw0xOTAyMjIwOTQ2MDRaFw0zNzEyMzExNTU5NTlaME8xCzAJBgNVBAYT +AlRXMSMwIQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEbMBkGA1UEAwwSSGlQS0kg +Um9vdCBDQSAtIEcxMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA9B5/UnMyDHPkvRN0 +o9QwqNCuS9i233VHZvR85zkEHmpwINJaR3JnVfSl6J3VHiGh8Ge6zCFovkRTv4354twvVcg3Px+k +wJyz5HdcoEb+d/oaoDjq7Zpy3iu9lFc6uux55199QmQ5eiY29yTw1S+6lZgRZq2XNdZ1AYDgr/SE +YYwNHl98h5ZeQa/rh+r4XfEuiAU+TCK72h8q3VJGZDnzQs7ZngyzsHeXZJzA9KMuH5UHsBffMNsA +GJZMoYFL3QRtU6M9/Aes1MU3guvklQgZKILSQjqj2FPseYlgSGDIcpJQ3AOPgz+yQlda22rpEZfd +hSi8MEyr48KxRURHH+CKFgeW0iEPU8DtqX7UTuybCeyvQqww1r/REEXgphaypcXTT3OUM3ECoWqj +1jOXTyFjHluP2cFeRXF3D4FdXyGarYPM+l7WjSNfGz1BryB1ZlpK9p/7qxj3ccC2HTHsOyDry+K4 +9a6SsvfhhEvyovKTmiKe0xRvNlS9H15ZFblzqMF8b3ti6RZsR1pl8w4Rm0bZ/W3c1pzAtH2lsN0/ +Vm+h+fbkEkj9Bn8SV7apI09bA8PgcSojt/ewsTu8mL3WmKgMa/aOEmem8rJY5AIJEzypuxC00jBF +8ez3ABHfZfjcK0NVvxaXxA/VLGGEqnKG/uY6fsI/fe78LxQ+5oXdUG+3Se0CAwEAAaNCMEAwDwYD +VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU8ncX+l6o/vY9cdVouslGDDjYr7AwDgYDVR0PAQH/BAQD +AgGGMA0GCSqGSIb3DQEBCwUAA4ICAQBQUfB13HAE4/+qddRxosuej6ip0691x1TPOhwEmSKsxBHi +7zNKpiMdDg1H2DfHb680f0+BazVP6XKlMeJ45/dOlBhbQH3PayFUhuaVevvGyuqcSE5XCV0vrPSl +tJczWNWseanMX/mF+lLFjfiRFOs6DRfQUsJ748JzjkZ4Bjgs6FzaZsT0pPBWGTMpWmWSBUdGSquE +wx4noR8RkpkndZMPvDY7l1ePJlsMu5wP1G4wB9TcXzZoZjmDlicmisjEOf6aIW/Vcobpf2Lll07Q +JNBAsNB1CI69aO4I1258EHBGG3zgiLKecoaZAeO/n0kZtCW+VmWuF2PlHt/o/0elv+EmBYTksMCv +5wiZqAxeJoBF1PhoL5aPruJKHJwWDBNvOIf2u8g0X5IDUXlwpt/L9ZlNec1OvFefQ05rLisY+Gpz +jLrFNe85akEez3GoorKGB1s6yeHvP2UEgEcyRHCVTjFnanRbEEV16rCf0OY1/k6fi8wrkkVbbiVg +hUbN0aqwdmaTd5a+g744tiROJgvM7XpWGuDpWsZkrUx6AEhEL7lAuxM+vhV4nYWBSipX3tUZQ9rb +yltHhoMLP7YNdnhzeSJesYAfz77RP1YQmCuVh6EfnWQUYDksswBVLuT1sw5XxJFBAJw/6KXf6vb/ +yPCtbVKoF6ubYfwSUTXkJf2vqmqGOQ== +-----END 
CERTIFICATE----- + +GlobalSign ECC Root CA - R4 +=========================== +-----BEGIN CERTIFICATE----- +MIIB3DCCAYOgAwIBAgINAgPlfvU/k/2lCSGypjAKBggqhkjOPQQDAjBQMSQwIgYDVQQLExtHbG9i +YWxTaWduIEVDQyBSb290IENBIC0gUjQxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkds +b2JhbFNpZ24wHhcNMTIxMTEzMDAwMDAwWhcNMzgwMTE5MDMxNDA3WjBQMSQwIgYDVQQLExtHbG9i +YWxTaWduIEVDQyBSb290IENBIC0gUjQxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkds +b2JhbFNpZ24wWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAS4xnnTj2wlDp8uORkcA6SumuU5BwkW +ymOxuYb4ilfBV85C+nOh92VC/x7BALJucw7/xyHlGKSq2XE/qNS5zowdo0IwQDAOBgNVHQ8BAf8E +BAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUVLB7rUW44kB/+wpu+74zyTyjhNUwCgYI +KoZIzj0EAwIDRwAwRAIgIk90crlgr/HmnKAWBVBfw147bmF0774BxL4YSFlhgjICICadVGNA3jdg +UM/I2O2dgq43mLyjj0xMqTQrbO/7lZsm +-----END CERTIFICATE----- + +GTS Root R1 +=========== +-----BEGIN CERTIFICATE----- +MIIFVzCCAz+gAwIBAgINAgPlk28xsBNJiGuiFzANBgkqhkiG9w0BAQwFADBHMQswCQYDVQQGEwJV +UzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3Qg +UjEwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAwMDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UE +ChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwggIiMA0G +CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2EQKLHuOhd5s73L+UPreVp0A8of2C+X0yBoJx9vaM +f/vo27xqLpeXo4xL+Sv2sfnOhB2x+cWX3u+58qPpvBKJXqeqUqv4IyfLpLGcY9vXmX7wCl7raKb0 +xlpHDU0QM+NOsROjyBhsS+z8CZDfnWQpJSMHobTSPS5g4M/SCYe7zUjwTcLCeoiKu7rPWRnWr4+w +B7CeMfGCwcDfLqZtbBkOtdh+JhpFAz2weaSUKK0PfyblqAj+lug8aJRT7oM6iCsVlgmy4HqMLnXW +nOunVmSPlk9orj2XwoSPwLxAwAtcvfaHszVsrBhQf4TgTM2S0yDpM7xSma8ytSmzJSq0SPly4cpk +9+aCEI3oncKKiPo4Zor8Y/kB+Xj9e1x3+naH+uzfsQ55lVe0vSbv1gHR6xYKu44LtcXFilWr06zq +kUspzBmkMiVOKvFlRNACzqrOSbTqn3yDsEB750Orp2yjj32JgfpMpf/VjsPOS+C12LOORc92wO1A +K/1TD7Cn1TsNsYqiA94xrcx36m97PtbfkSIS5r762DL8EGMUUXLeXdYWk70paDPvOmbsB4om3xPX +V2V4J95eSRQAogB/mqghtqmxlbCluQ0WEdrHbEg8QOB+DVrNVjzRlwW5y0vtOUucxD/SVRNuJLDW +cfr0wbrM7Rv1/oFB2ACYPTrIrnqYNxgFlQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0T +AQH/BAUwAwEB/zAdBgNVHQ4EFgQU5K8rJnEaK0gnhS9SZizv8IkTcT4wDQYJKoZIhvcNAQEMBQAD +ggIBAJ+qQibbC5u+/x6Wki4+omVKapi6Ist9wTrYggoGxval3sBOh2Z5ofmmWJyq+bXmYOfg6LEe +QkEzCzc9zolwFcq1JKjPa7XSQCGYzyI0zzvFIoTgxQ6KfF2I5DUkzps+GlQebtuyh6f88/qBVRRi +ClmpIgUxPoLW7ttXNLwzldMXG+gnoot7TiYaelpkttGsN/H9oPM47HLwEXWdyzRSjeZ2axfG34ar +J45JK3VmgRAhpuo+9K4l/3wV3s6MJT/KYnAK9y8JZgfIPxz88NtFMN9iiMG1D53Dn0reWVlHxYci +NuaCp+0KueIHoI17eko8cdLiA6EfMgfdG+RCzgwARWGAtQsgWSl4vflVy2PFPEz0tv/bal8xa5me +LMFrUKTX5hgUvYU/Z6tGn6D/Qqc6f1zLXbBwHSs09dR2CQzreExZBfMzQsNhFRAbd03OIozUhfJF +fbdT6u9AWpQKXCBfTkBdYiJ23//OYb2MI3jSNwLgjt7RETeJ9r/tSQdirpLsQBqvFAnZ0E6yove+ +7u7Y/9waLd64NnHi/Hm3lCXRSHNboTXns5lndcEZOitHTtNCjv0xyBZm2tIMPNuzjsmhDYAPexZ3 +FL//2wmUspO8IFgV6dtxQ/PeEMMA3KgqlbbC1j+Qa3bbbP6MvPJwNQzcmRk13NfIRmPVNnGuV/u3 +gm3c +-----END CERTIFICATE----- + +GTS Root R2 +=========== +-----BEGIN CERTIFICATE----- +MIIFVzCCAz+gAwIBAgINAgPlrsWNBCUaqxElqjANBgkqhkiG9w0BAQwFADBHMQswCQYDVQQGEwJV +UzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3Qg +UjIwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAwMDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UE +ChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjIwggIiMA0G +CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDO3v2m++zsFDQ8BwZabFn3GTXd98GdVarTzTukk3Lv +CvptnfbwhYBboUhSnznFt+4orO/LdmgUud+tAWyZH8QiHZ/+cnfgLFuv5AS/T3KgGjSY6Dlo7JUl +e3ah5mm5hRm9iYz+re026nO8/4Piy33B0s5Ks40FnotJk9/BW9BuXvAuMC6C/Pq8tBcKSOWIm8Wb +a96wyrQD8Nr0kLhlZPdcTK3ofmZemde4wj7I0BOdre7kRXuJVfeKH2JShBKzwkCX44ofR5GmdFrS ++LFjKBC4swm4VndAoiaYecb+3yXuPuWgf9RhD1FLPD+M2uFwdNjCaKH5wQzpoeJ/u1U8dgbuak7M 
+kogwTZq9TwtImoS1mKPV+3PBV2HdKFZ1E66HjucMUQkQdYhMvI35ezzUIkgfKtzra7tEscszcTJG +r61K8YzodDqs5xoic4DSMPclQsciOzsSrZYuxsN2B6ogtzVJV+mSSeh2FnIxZyuWfoqjx5RWIr9q +S34BIbIjMt/kmkRtWVtd9QCgHJvGeJeNkP+byKq0rxFROV7Z+2et1VsRnTKaG73VululycslaVNV +J1zgyjbLiGH7HrfQy+4W+9OmTN6SpdTi3/UGVN4unUu0kzCqgc7dGtxRcw1PcOnlthYhGXmy5okL +dWTK1au8CcEYof/UVKGFPP0UJAOyh9OktwIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0T +AQH/BAUwAwEB/zAdBgNVHQ4EFgQUu//KjiOfT5nK2+JopqUVJxce2Q4wDQYJKoZIhvcNAQEMBQAD +ggIBAB/Kzt3HvqGf2SdMC9wXmBFqiN495nFWcrKeGk6c1SuYJF2ba3uwM4IJvd8lRuqYnrYb/oM8 +0mJhwQTtzuDFycgTE1XnqGOtjHsB/ncw4c5omwX4Eu55MaBBRTUoCnGkJE+M3DyCB19m3H0Q/gxh +swWV7uGugQ+o+MePTagjAiZrHYNSVc61LwDKgEDg4XSsYPWHgJ2uNmSRXbBoGOqKYcl3qJfEycel +/FVL8/B/uWU9J2jQzGv6U53hkRrJXRqWbTKH7QMgyALOWr7Z6v2yTcQvG99fevX4i8buMTolUVVn +jWQye+mew4K6Ki3pHrTgSAai/GevHyICc/sgCq+dVEuhzf9gR7A/Xe8bVr2XIZYtCtFenTgCR2y5 +9PYjJbigapordwj6xLEokCZYCDzifqrXPW+6MYgKBesntaFJ7qBFVHvmJ2WZICGoo7z7GJa7Um8M +7YNRTOlZ4iBgxcJlkoKM8xAfDoqXvneCbT+PHV28SSe9zE8P4c52hgQjxcCMElv924SgJPFI/2R8 +0L5cFtHvma3AH/vLrrw4IgYmZNralw4/KBVEqE8AyvCazM90arQ+POuV7LXTWtiBmelDGDfrs7vR +WGJB82bSj6p4lVQgw1oudCvV0b4YacCs1aTPObpRhANl6WLAYv7YTVWW4tAR+kg0Eeye7QUd5MjW +HYbL +-----END CERTIFICATE----- + +GTS Root R3 +=========== +-----BEGIN CERTIFICATE----- +MIICCTCCAY6gAwIBAgINAgPluILrIPglJ209ZjAKBggqhkjOPQQDAzBHMQswCQYDVQQGEwJVUzEi +MCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjMw +HhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAwMDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZ +R29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjMwdjAQBgcqhkjO +PQIBBgUrgQQAIgNiAAQfTzOHMymKoYTey8chWEGJ6ladK0uFxh1MJ7x/JlFyb+Kf1qPKzEUURout +736GjOyxfi//qXGdGIRFBEFVbivqJn+7kAHjSxm65FSWRQmx1WyRRK2EE46ajA2ADDL24CejQjBA +MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTB8Sa6oC2uhYHP0/Eq +Er24Cmf9vDAKBggqhkjOPQQDAwNpADBmAjEA9uEglRR7VKOQFhG/hMjqb2sXnh5GmCCbn9MN2azT +L818+FsuVbu/3ZL3pAzcMeGiAjEA/JdmZuVDFhOD3cffL74UOO0BzrEXGhF16b0DjyZ+hOXJYKaV +11RZt+cRLInUue4X +-----END CERTIFICATE----- + +GTS Root R4 +=========== +-----BEGIN CERTIFICATE----- +MIICCTCCAY6gAwIBAgINAgPlwGjvYxqccpBQUjAKBggqhkjOPQQDAzBHMQswCQYDVQQGEwJVUzEi +MCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjQw +HhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAwMDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZ +R29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjQwdjAQBgcqhkjO +PQIBBgUrgQQAIgNiAATzdHOnaItgrkO4NcWBMHtLSZ37wWHO5t5GvWvVYRg1rkDdc/eJkTBa6zzu +hXyiQHY7qca4R9gq55KRanPpsXI5nymfopjTX15YhmUPoYRlBtHci8nHc8iMai/lxKvRHYqjQjBA +MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSATNbrdP9JNqPV2Py1 +PsVq8JQdjDAKBggqhkjOPQQDAwNpADBmAjEA6ED/g94D9J+uHXqnLrmvT/aDHQ4thQEd0dlq7A/C +r8deVl5c1RxYIigL9zC2L7F8AjEA8GE8p/SgguMh1YQdc4acLa/KNJvxn7kjNuK8YAOdgLOaVsjh +4rsUecrNIdSUtUlD +-----END CERTIFICATE----- + +Telia Root CA v2 +================ +-----BEGIN CERTIFICATE----- +MIIFdDCCA1ygAwIBAgIPAWdfJ9b+euPkrL4JWwWeMA0GCSqGSIb3DQEBCwUAMEQxCzAJBgNVBAYT +AkZJMRowGAYDVQQKDBFUZWxpYSBGaW5sYW5kIE95ajEZMBcGA1UEAwwQVGVsaWEgUm9vdCBDQSB2 +MjAeFw0xODExMjkxMTU1NTRaFw00MzExMjkxMTU1NTRaMEQxCzAJBgNVBAYTAkZJMRowGAYDVQQK +DBFUZWxpYSBGaW5sYW5kIE95ajEZMBcGA1UEAwwQVGVsaWEgUm9vdCBDQSB2MjCCAiIwDQYJKoZI +hvcNAQEBBQADggIPADCCAgoCggIBALLQPwe84nvQa5n44ndp586dpAO8gm2h/oFlH0wnrI4AuhZ7 +6zBqAMCzdGh+sq/H1WKzej9Qyow2RCRj0jbpDIX2Q3bVTKFgcmfiKDOlyzG4OiIjNLh9vVYiQJ3q +9HsDrWj8soFPmNB06o3lfc1jw6P23pLCWBnglrvFxKk9pXSW/q/5iaq9lRdU2HhE8Qx3FZLgmEKn +pNaqIJLNwaCzlrI6hEKNfdWV5Nbb6WLEWLN5xYzTNTODn3WhUidhOPFZPY5Q4L15POdslv5e2QJl 
+tI5c0BE0312/UqeBAMN/mUWZFdUXyApT7GPzmX3MaRKGwhfwAZ6/hLzRUssbkmbOpFPlob/E2wnW +5olWK8jjfN7j/4nlNW4o6GwLI1GpJQXrSPjdscr6bAhR77cYbETKJuFzxokGgeWKrLDiKca5JLNr +RBH0pUPCTEPlcDaMtjNXepUugqD0XBCzYYP2AgWGLnwtbNwDRm41k9V6lS/eINhbfpSQBGq6WT0E +BXWdN6IOLj3rwaRSg/7Qa9RmjtzG6RJOHSpXqhC8fF6CfaamyfItufUXJ63RDolUK5X6wK0dmBR4 +M0KGCqlztft0DbcbMBnEWg4cJ7faGND/isgFuvGqHKI3t+ZIpEYslOqodmJHixBTB0hXbOKSTbau +BcvcwUpej6w9GU7C7WB1K9vBykLVAgMBAAGjYzBhMB8GA1UdIwQYMBaAFHKs5DN5qkWH9v2sHZ7W +xy+G2CQ5MB0GA1UdDgQWBBRyrOQzeapFh/b9rB2e1scvhtgkOTAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAoDtZpwmUPjaE0n4vOaWWl/oRrfxn83EJ +8rKJhGdEr7nv7ZbsnGTbMjBvZ5qsfl+yqwE2foH65IRe0qw24GtixX1LDoJt0nZi0f6X+J8wfBj5 +tFJ3gh1229MdqfDBmgC9bXXYfef6xzijnHDoRnkDry5023X4blMMA8iZGok1GTzTyVR8qPAs5m4H +eW9q4ebqkYJpCh3DflminmtGFZhb069GHWLIzoBSSRE/yQQSwxN8PzuKlts8oB4KtItUsiRnDe+C +y748fdHif64W1lZYudogsYMVoe+KTTJvQS8TUoKU1xrBeKJR3Stwbbca+few4GeXVtt8YVMJAygC +QMez2P2ccGrGKMOF6eLtGpOg3kuYooQ+BXcBlj37tCAPnHICehIv1aO6UXivKitEZU61/Qrowc15 +h2Er3oBXRb9n8ZuRXqWk7FlIEA04x7D6w0RtBPV4UBySllva9bguulvP5fBqnUsvWHMtTy3EHD70 +sz+rFQ47GUGKpMFXEmZxTPpT41frYpUJnlTd0cI8Vzy9OK2YZLe4A5pTVmBds9hCG1xLEooc6+t9 +xnppxyd/pPiL8uSUZodL6ZQHCRJ5irLrdATczvREWeAWysUsWNc8e89ihmpQfTU2Zqf7N+cox9jQ +raVplI/owd8k+BsHMYeB2F326CjYSlKArBPuUBQemMc= +-----END CERTIFICATE----- + +D-TRUST BR Root CA 1 2020 +========================= +-----BEGIN CERTIFICATE----- +MIIC2zCCAmCgAwIBAgIQfMmPK4TX3+oPyWWa00tNljAKBggqhkjOPQQDAzBIMQswCQYDVQQGEwJE +RTEVMBMGA1UEChMMRC1UcnVzdCBHbWJIMSIwIAYDVQQDExlELVRSVVNUIEJSIFJvb3QgQ0EgMSAy +MDIwMB4XDTIwMDIxMTA5NDUwMFoXDTM1MDIxMTA5NDQ1OVowSDELMAkGA1UEBhMCREUxFTATBgNV +BAoTDEQtVHJ1c3QgR21iSDEiMCAGA1UEAxMZRC1UUlVTVCBCUiBSb290IENBIDEgMjAyMDB2MBAG +ByqGSM49AgEGBSuBBAAiA2IABMbLxyjR+4T1mu9CFCDhQ2tuda38KwOE1HaTJddZO0Flax7mNCq7 +dPYSzuht56vkPE4/RAiLzRZxy7+SmfSk1zxQVFKQhYN4lGdnoxwJGT11NIXe7WB9xwy0QVK5buXu +QqOCAQ0wggEJMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFHOREKv/VbNafAkl1bK6CKBrqx9t +MA4GA1UdDwEB/wQEAwIBBjCBxgYDVR0fBIG+MIG7MD6gPKA6hjhodHRwOi8vY3JsLmQtdHJ1c3Qu +bmV0L2NybC9kLXRydXN0X2JyX3Jvb3RfY2FfMV8yMDIwLmNybDB5oHegdYZzbGRhcDovL2RpcmVj +dG9yeS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwQlIlMjBSb290JTIwQ0ElMjAxJTIwMjAyMCxP +PUQtVHJ1c3QlMjBHbWJILEM9REU/Y2VydGlmaWNhdGVyZXZvY2F0aW9ubGlzdDAKBggqhkjOPQQD +AwNpADBmAjEAlJAtE/rhY/hhY+ithXhUkZy4kzg+GkHaQBZTQgjKL47xPoFWwKrY7RjEsK70Pvom +AjEA8yjixtsrmfu3Ubgko6SUeho/5jbiA1czijDLgsfWFBHVdWNbFJWcHwHP2NVypw87 +-----END CERTIFICATE----- + +D-TRUST EV Root CA 1 2020 +========================= +-----BEGIN CERTIFICATE----- +MIIC2zCCAmCgAwIBAgIQXwJB13qHfEwDo6yWjfv/0DAKBggqhkjOPQQDAzBIMQswCQYDVQQGEwJE +RTEVMBMGA1UEChMMRC1UcnVzdCBHbWJIMSIwIAYDVQQDExlELVRSVVNUIEVWIFJvb3QgQ0EgMSAy +MDIwMB4XDTIwMDIxMTEwMDAwMFoXDTM1MDIxMTA5NTk1OVowSDELMAkGA1UEBhMCREUxFTATBgNV +BAoTDEQtVHJ1c3QgR21iSDEiMCAGA1UEAxMZRC1UUlVTVCBFViBSb290IENBIDEgMjAyMDB2MBAG +ByqGSM49AgEGBSuBBAAiA2IABPEL3YZDIBnfl4XoIkqbz52Yv7QFJsnL46bSj8WeeHsxiamJrSc8 +ZRCC/N/DnU7wMyPE0jL1HLDfMxddxfCxivnvubcUyilKwg+pf3VlSSowZ/Rk99Yad9rDwpdhQntJ +raOCAQ0wggEJMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH8QARY3OqQo5FD4pPfsazK2/umL +MA4GA1UdDwEB/wQEAwIBBjCBxgYDVR0fBIG+MIG7MD6gPKA6hjhodHRwOi8vY3JsLmQtdHJ1c3Qu +bmV0L2NybC9kLXRydXN0X2V2X3Jvb3RfY2FfMV8yMDIwLmNybDB5oHegdYZzbGRhcDovL2RpcmVj +dG9yeS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwRVYlMjBSb290JTIwQ0ElMjAxJTIwMjAyMCxP +PUQtVHJ1c3QlMjBHbWJILEM9REU/Y2VydGlmaWNhdGVyZXZvY2F0aW9ubGlzdDAKBggqhkjOPQQD +AwNpADBmAjEAyjzGKnXCXnViOTYAYFqLwZOZzNnbQTs7h5kXO9XMT8oi96CAy/m0sRtW9XLS/BnR +AjEAkfcwkz8QRitxpNA7RJvAKQIFskF3UfN5Wp6OFKBOQtJbgfM0agPnIjhQW+0ZT0MW +-----END 
CERTIFICATE----- + +DigiCert TLS ECC P384 Root G5 +============================= +-----BEGIN CERTIFICATE----- +MIICGTCCAZ+gAwIBAgIQCeCTZaz32ci5PhwLBCou8zAKBggqhkjOPQQDAzBOMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMORGlnaUNlcnQsIEluYy4xJjAkBgNVBAMTHURpZ2lDZXJ0IFRMUyBFQ0MgUDM4 +NCBSb290IEc1MB4XDTIxMDExNTAwMDAwMFoXDTQ2MDExNDIzNTk1OVowTjELMAkGA1UEBhMCVVMx +FzAVBgNVBAoTDkRpZ2lDZXJ0LCBJbmMuMSYwJAYDVQQDEx1EaWdpQ2VydCBUTFMgRUNDIFAzODQg +Um9vdCBHNTB2MBAGByqGSM49AgEGBSuBBAAiA2IABMFEoc8Rl1Ca3iOCNQfN0MsYndLxf3c1Tzvd +lHJS7cI7+Oz6e2tYIOyZrsn8aLN1udsJ7MgT9U7GCh1mMEy7H0cKPGEQQil8pQgO4CLp0zVozptj +n4S1mU1YoI71VOeVyaNCMEAwHQYDVR0OBBYEFMFRRVBZqz7nLFr6ICISB4CIfBFqMA4GA1UdDwEB +/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MAoGCCqGSM49BAMDA2gAMGUCMQCJao1H5+z8blUD2Wds +Jk6Dxv3J+ysTvLd6jLRl0mlpYxNjOyZQLgGheQaRnUi/wr4CMEfDFXuxoJGZSZOoPHzoRgaLLPIx +AJSdYsiJvRmEFOml+wG4DXZDjC5Ty3zfDBeWUA== +-----END CERTIFICATE----- + +DigiCert TLS RSA4096 Root G5 +============================ +-----BEGIN CERTIFICATE----- +MIIFZjCCA06gAwIBAgIQCPm0eKj6ftpqMzeJ3nzPijANBgkqhkiG9w0BAQwFADBNMQswCQYDVQQG +EwJVUzEXMBUGA1UEChMORGlnaUNlcnQsIEluYy4xJTAjBgNVBAMTHERpZ2lDZXJ0IFRMUyBSU0E0 +MDk2IFJvb3QgRzUwHhcNMjEwMTE1MDAwMDAwWhcNNDYwMTE0MjM1OTU5WjBNMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMORGlnaUNlcnQsIEluYy4xJTAjBgNVBAMTHERpZ2lDZXJ0IFRMUyBSU0E0MDk2 +IFJvb3QgRzUwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCz0PTJeRGd/fxmgefM1eS8 +7IE+ajWOLrfn3q/5B03PMJ3qCQuZvWxX2hhKuHisOjmopkisLnLlvevxGs3npAOpPxG02C+JFvuU +AT27L/gTBaF4HI4o4EXgg/RZG5Wzrn4DReW+wkL+7vI8toUTmDKdFqgpwgscONyfMXdcvyej/Ces +tyu9dJsXLfKB2l2w4SMXPohKEiPQ6s+d3gMXsUJKoBZMpG2T6T867jp8nVid9E6P/DsjyG244gXa +zOvswzH016cpVIDPRFtMbzCe88zdH5RDnU1/cHAN1DrRN/BsnZvAFJNY781BOHW8EwOVfH/jXOnV +DdXifBBiqmvwPXbzP6PosMH976pXTayGpxi0KcEsDr9kvimM2AItzVwv8n/vFfQMFawKsPHTDU9q +TXeXAaDxZre3zu/O7Oyldcqs4+Fj97ihBMi8ez9dLRYiVu1ISf6nL3kwJZu6ay0/nTvEF+cdLvvy +z6b84xQslpghjLSR6Rlgg/IwKwZzUNWYOwbpx4oMYIwo+FKbbuH2TbsGJJvXKyY//SovcfXWJL5/ +MZ4PbeiPT02jP/816t9JXkGPhvnxd3lLG7SjXi/7RgLQZhNeXoVPzthwiHvOAbWWl9fNff2C+MIk +wcoBOU+NosEUQB+cZtUMCUbW8tDRSHZWOkPLtgoRObqME2wGtZ7P6wIDAQABo0IwQDAdBgNVHQ4E +FgQUUTMc7TZArxfTJc1paPKvTiM+s0EwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8w +DQYJKoZIhvcNAQEMBQADggIBAGCmr1tfV9qJ20tQqcQjNSH/0GEwhJG3PxDPJY7Jv0Y02cEhJhxw +GXIeo8mH/qlDZJY6yFMECrZBu8RHANmfGBg7sg7zNOok992vIGCukihfNudd5N7HPNtQOa27PShN +lnx2xlv0wdsUpasZYgcYQF+Xkdycx6u1UQ3maVNVzDl92sURVXLFO4uJ+DQtpBflF+aZfTCIITfN +MBc9uPK8qHWgQ9w+iUuQrm0D4ByjoJYJu32jtyoQREtGBzRj7TG5BO6jm5qu5jF49OokYTurWGT/ +u4cnYiWB39yhL/btp/96j1EuMPikAdKFOV8BmZZvWltwGUb+hmA+rYAQCd05JS9Yf7vSdPD3Rh9G +OUrYU9DzLjtxpdRv/PNn5AeP3SYZ4Y1b+qOTEZvpyDrDVWiakuFSdjjo4bq9+0/V77PnSIMx8IIh +47a+p6tv75/fTM8BuGJqIz3nCU2AG3swpMPdB380vqQmsvZB6Akd4yCYqjdP//fx4ilwMUc/dNAU +FvohigLVigmUdy7yWSiLfFCSCmZ4OIN1xLVaqBHG5cGdZlXPU8Sv13WFqUITVuwhd4GTWgzqltlJ +yqEI8pc7bZsEGCREjnwB8twl2F6GmrE52/WRMmrRpnCKovfepEWFJqgejF0pW8hL2JpqA15w8oVP +bEtoL8pU9ozaMv7Da4M/OMZ+ +-----END CERTIFICATE----- + +Certainly Root R1 +================= +-----BEGIN CERTIFICATE----- +MIIFRzCCAy+gAwIBAgIRAI4P+UuQcWhlM1T01EQ5t+AwDQYJKoZIhvcNAQELBQAwPTELMAkGA1UE +BhMCVVMxEjAQBgNVBAoTCUNlcnRhaW5seTEaMBgGA1UEAxMRQ2VydGFpbmx5IFJvb3QgUjEwHhcN +MjEwNDAxMDAwMDAwWhcNNDYwNDAxMDAwMDAwWjA9MQswCQYDVQQGEwJVUzESMBAGA1UEChMJQ2Vy +dGFpbmx5MRowGAYDVQQDExFDZXJ0YWlubHkgUm9vdCBSMTCCAiIwDQYJKoZIhvcNAQEBBQADggIP +ADCCAgoCggIBANA21B/q3avk0bbm+yLA3RMNansiExyXPGhjZjKcA7WNpIGD2ngwEc/csiu+kr+O +5MQTvqRoTNoCaBZ0vrLdBORrKt03H2As2/X3oXyVtwxwhi7xOu9S98zTm/mLvg7fMbedaFySpvXl +8wo0tf97ouSHocavFwDvA5HtqRxOcT3Si2yJ9HiG5mpJoM610rCrm/b01C7jcvk2xusVtyWMOvwl 
+DbMicyF0yEqWYZL1LwsYpfSt4u5BvQF5+paMjRcCMLT5r3gajLQ2EBAHBXDQ9DGQilHFhiZ5shGI +XsXwClTNSaa/ApzSRKft43jvRl5tcdF5cBxGX1HpyTfcX35pe0HfNEXgO4T0oYoKNp43zGJS4YkN +KPl6I7ENPT2a/Z2B7yyQwHtETrtJ4A5KVpK8y7XdeReJkd5hiXSSqOMyhb5OhaRLWcsrxXiOcVTQ +AjeZjOVJ6uBUcqQRBi8LjMFbvrWhsFNunLhgkR9Za/kt9JQKl7XsxXYDVBtlUrpMklZRNaBA2Cnb +rlJ2Oy0wQJuK0EJWtLeIAaSHO1OWzaMWj/Nmqhexx2DgwUMFDO6bW2BvBlyHWyf5QBGenDPBt+U1 +VwV/J84XIIwc/PH72jEpSe31C4SnT8H2TsIonPru4K8H+zMReiFPCyEQtkA6qyI6BJyLm4SGcprS +p6XEtHWRqSsjAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud +DgQWBBTgqj8ljZ9EXME66C6ud0yEPmcM9DANBgkqhkiG9w0BAQsFAAOCAgEAuVevuBLaV4OPaAsz +HQNTVfSVcOQrPbA56/qJYv331hgELyE03fFo8NWWWt7CgKPBjcZq91l3rhVkz1t5BXdm6ozTaw3d +8VkswTOlMIAVRQdFGjEitpIAq5lNOo93r6kiyi9jyhXWx8bwPWz8HA2YEGGeEaIi1wrykXprOQ4v +MMM2SZ/g6Q8CRFA3lFV96p/2O7qUpUzpvD5RtOjKkjZUbVwlKNrdrRT90+7iIgXr0PK3aBLXWopB +GsaSpVo7Y0VPv+E6dyIvXL9G+VoDhRNCX8reU9ditaY1BMJH/5n9hN9czulegChB8n3nHpDYT3Y+ +gjwN/KUD+nsa2UUeYNrEjvn8K8l7lcUq/6qJ34IxD3L/DCfXCh5WAFAeDJDBlrXYFIW7pw0WwfgH +JBu6haEaBQmAupVjyTrsJZ9/nbqkRxWbRHDxakvWOF5D8xh+UG7pWijmZeZ3Gzr9Hb4DJqPb1OG7 +fpYnKx3upPvaJVQTA945xsMfTZDsjxtK0hzthZU4UHlG1sGQUDGpXJpuHfUzVounmdLyyCwzk5Iw +x06MZTMQZBf9JBeW0Y3COmor6xOLRPIh80oat3df1+2IpHLlOR+Vnb5nwXARPbv0+Em34yaXOp/S +X3z7wJl8OSngex2/DaeP0ik0biQVy96QXr8axGbqwua6OV+KmalBWQewLK8= +-----END CERTIFICATE----- + +Certainly Root E1 +================= +-----BEGIN CERTIFICATE----- +MIIB9zCCAX2gAwIBAgIQBiUzsUcDMydc+Y2aub/M+DAKBggqhkjOPQQDAzA9MQswCQYDVQQGEwJV +UzESMBAGA1UEChMJQ2VydGFpbmx5MRowGAYDVQQDExFDZXJ0YWlubHkgUm9vdCBFMTAeFw0yMTA0 +MDEwMDAwMDBaFw00NjA0MDEwMDAwMDBaMD0xCzAJBgNVBAYTAlVTMRIwEAYDVQQKEwlDZXJ0YWlu +bHkxGjAYBgNVBAMTEUNlcnRhaW5seSBSb290IEUxMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE3m/4 +fxzf7flHh4axpMCK+IKXgOqPyEpeKn2IaKcBYhSRJHpcnqMXfYqGITQYUBsQ3tA3SybHGWCA6TS9 +YBk2QNYphwk8kXr2vBMj3VlOBF7PyAIcGFPBMdjaIOlEjeR2o0IwQDAOBgNVHQ8BAf8EBAMCAQYw +DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU8ygYy2R17ikq6+2uI1g4hevIIgcwCgYIKoZIzj0E +AwMDaAAwZQIxALGOWiDDshliTd6wT99u0nCK8Z9+aozmut6Dacpps6kFtZaSF4fC0urQe87YQVt8 +rgIwRt7qy12a7DLCZRawTDBcMPPaTnOGBtjOiQRINzf43TNRnXCve1XYAS59BWQOhriR +-----END CERTIFICATE----- + +Security Communication RootCA3 +============================== +-----BEGIN CERTIFICATE----- +MIIFfzCCA2egAwIBAgIJAOF8N0D9G/5nMA0GCSqGSIb3DQEBDAUAMF0xCzAJBgNVBAYTAkpQMSUw +IwYDVQQKExxTRUNPTSBUcnVzdCBTeXN0ZW1zIENPLixMVEQuMScwJQYDVQQDEx5TZWN1cml0eSBD +b21tdW5pY2F0aW9uIFJvb3RDQTMwHhcNMTYwNjE2MDYxNzE2WhcNMzgwMTE4MDYxNzE2WjBdMQsw +CQYDVQQGEwJKUDElMCMGA1UEChMcU0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UE +AxMeU2VjdXJpdHkgQ29tbXVuaWNhdGlvbiBSb290Q0EzMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A +MIICCgKCAgEA48lySfcw3gl8qUCBWNO0Ot26YQ+TUG5pPDXC7ltzkBtnTCHsXzW7OT4rCmDvu20r +hvtxosis5FaU+cmvsXLUIKx00rgVrVH+hXShuRD+BYD5UpOzQD11EKzAlrenfna84xtSGc4RHwsE +NPXY9Wk8d/Nk9A2qhd7gCVAEF5aEt8iKvE1y/By7z/MGTfmfZPd+pmaGNXHIEYBMwXFAWB6+oHP2 +/D5Q4eAvJj1+XCO1eXDe+uDRpdYMQXF79+qMHIjH7Iv10S9VlkZ8WjtYO/u62C21Jdp6Ts9EriGm +npjKIG58u4iFW/vAEGK78vknR+/RiTlDxN/e4UG/VHMgly1s2vPUB6PmudhvrvyMGS7TZ2crldtY +XLVqAvO4g160a75BflcJdURQVc1aEWEhCmHCqYj9E7wtiS/NYeCVvsq1e+F7NGcLH7YMx3weGVPK +p7FKFSBWFHA9K4IsD50VHUeAR/94mQ4xr28+j+2GaR57GIgUssL8gjMunEst+3A7caoreyYn8xrC +3PsXuKHqy6C0rtOUfnrQq8PsOC0RLoi/1D+tEjtCrI8Cbn3M0V9hvqG8OmpI6iZVIhZdXw3/JzOf +GAN0iltSIEdrRU0id4xVJ/CvHozJgyJUt5rQT9nO/NkuHJYosQLTA70lUhw0Zk8jq/R3gpYd0Vcw +CBEF/VfR2ccCAwEAAaNCMEAwHQYDVR0OBBYEFGQUfPxYchamCik0FW8qy7z8r6irMA4GA1UdDwEB +/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBDAUAA4ICAQDcAiMI4u8hOscNtybS +YpOnpSNyByCCYN8Y11StaSWSntkUz5m5UoHPrmyKO1o5yGwBQ8IibQLwYs1OY0PAFNr0Y/Dq9HHu 
+Tofjcan0yVflLl8cebsjqodEV+m9NU1Bu0soo5iyG9kLFwfl9+qd9XbXv8S2gVj/yP9kaWJ5rW4O +H3/uHWnlt3Jxs/6lATWUVCvAUm2PVcTJ0rjLyjQIUYWg9by0F1jqClx6vWPGOi//lkkZhOpn2ASx +YfQAW0q3nHE3GYV5v4GwxxMOdnE+OoAGrgYWp421wsTL/0ClXI2lyTrtcoHKXJg80jQDdwj98ClZ +XSEIx2C/pHF7uNkegr4Jr2VvKKu/S7XuPghHJ6APbw+LP6yVGPO5DtxnVW5inkYO0QR4ynKudtml ++LLfiAlhi+8kTtFZP1rUPcmTPCtk9YENFpb3ksP+MW/oKjJ0DvRMmEoYDjBU1cXrvMUVnuiZIesn +KwkK2/HmcBhWuwzkvvnoEKQTkrgc4NtnHVMDpCKn3F2SEDzq//wbEBrD2NCcnWXL0CsnMQMeNuE9 +dnUM/0Umud1RvCPHX9jYhxBAEg09ODfnRDwYwFMJZI//1ZqmfHAuc1Uh6N//g7kdPjIe1qZ9LPFm +6Vwdp6POXiUyK+OVrCoHzrQoeIY8LaadTdJ0MN1kURXbg4NR16/9M51NZg== +-----END CERTIFICATE----- + +Security Communication ECC RootCA1 +================================== +-----BEGIN CERTIFICATE----- +MIICODCCAb6gAwIBAgIJANZdm7N4gS7rMAoGCCqGSM49BAMDMGExCzAJBgNVBAYTAkpQMSUwIwYD +VQQKExxTRUNPTSBUcnVzdCBTeXN0ZW1zIENPLixMVEQuMSswKQYDVQQDEyJTZWN1cml0eSBDb21t +dW5pY2F0aW9uIEVDQyBSb290Q0ExMB4XDTE2MDYxNjA1MTUyOFoXDTM4MDExODA1MTUyOFowYTEL +MAkGA1UEBhMCSlAxJTAjBgNVBAoTHFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xKzApBgNV +BAMTIlNlY3VyaXR5IENvbW11bmljYXRpb24gRUNDIFJvb3RDQTEwdjAQBgcqhkjOPQIBBgUrgQQA +IgNiAASkpW9gAwPDvTH00xecK4R1rOX9PVdu12O/5gSJko6BnOPpR27KkBLIE+CnnfdldB9sELLo +5OnvbYUymUSxXv3MdhDYW72ixvnWQuRXdtyQwjWpS4g8EkdtXP9JTxpKULGjQjBAMB0GA1UdDgQW +BBSGHOf+LaVKiwj+KBH6vqNm+GBZLzAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAK +BggqhkjOPQQDAwNoADBlAjAVXUI9/Lbu9zuxNuie9sRGKEkz0FhDKmMpzE2xtHqiuQ04pV1IKv3L +snNdo4gIxwwCMQDAqy0Obe0YottT6SXbVQjgUMzfRGEWgqtJsLKB7HOHeLRMsmIbEvoWTSVLY70e +N9k= +-----END CERTIFICATE----- + +BJCA Global Root CA1 +==================== +-----BEGIN CERTIFICATE----- +MIIFdDCCA1ygAwIBAgIQVW9l47TZkGobCdFsPsBsIDANBgkqhkiG9w0BAQsFADBUMQswCQYDVQQG +EwJDTjEmMCQGA1UECgwdQkVJSklORyBDRVJUSUZJQ0FURSBBVVRIT1JJVFkxHTAbBgNVBAMMFEJK +Q0EgR2xvYmFsIFJvb3QgQ0ExMB4XDTE5MTIxOTAzMTYxN1oXDTQ0MTIxMjAzMTYxN1owVDELMAkG +A1UEBhMCQ04xJjAkBgNVBAoMHUJFSUpJTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZMR0wGwYDVQQD +DBRCSkNBIEdsb2JhbCBSb290IENBMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAPFm +CL3ZxRVhy4QEQaVpN3cdwbB7+sN3SJATcmTRuHyQNZ0YeYjjlwE8R4HyDqKYDZ4/N+AZspDyRhyS +sTphzvq3Rp4Dhtczbu33RYx2N95ulpH3134rhxfVizXuhJFyV9xgw8O558dnJCNPYwpj9mZ9S1Wn +P3hkSWkSl+BMDdMJoDIwOvqfwPKcxRIqLhy1BDPapDgRat7GGPZHOiJBhyL8xIkoVNiMpTAK+BcW +yqw3/XmnkRd4OJmtWO2y3syJfQOcs4ll5+M7sSKGjwZteAf9kRJ/sGsciQ35uMt0WwfCyPQ10WRj +eulumijWML3mG90Vr4TqnMfK9Q7q8l0ph49pczm+LiRvRSGsxdRpJQaDrXpIhRMsDQa4bHlW/KNn +MoH1V6XKV0Jp6VwkYe/iMBhORJhVb3rCk9gZtt58R4oRTklH2yiUAguUSiz5EtBP6DF+bHq/pj+b +OT0CFqMYs2esWz8sgytnOYFcuX6U1WTdno9uruh8W7TXakdI136z1C2OVnZOz2nxbkRs1CTqjSSh +GL+9V/6pmTW12xB3uD1IutbB5/EjPtffhZ0nPNRAvQoMvfXnjSXWgXSHRtQpdaJCbPdzied9v3pK +H9MiyRVVz99vfFXQpIsHETdfg6YmV6YBW37+WGgHqel62bno/1Afq8K0wM7o6v0PvY1NuLxxAgMB +AAGjQjBAMB0GA1UdDgQWBBTF7+3M2I0hxkjk49cULqcWk+WYATAPBgNVHRMBAf8EBTADAQH/MA4G +A1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAUoKsITQfI/Ki2Pm4rzc2IInRNwPWaZ+4 +YRC6ojGYWUfo0Q0lHhVBDOAqVdVXUsv45Mdpox1NcQJeXyFFYEhcCY5JEMEE3KliawLwQ8hOnThJ +dMkycFRtwUf8jrQ2ntScvd0g1lPJGKm1Vrl2i5VnZu69mP6u775u+2D2/VnGKhs/I0qUJDAnyIm8 +60Qkmss9vk/Ves6OF8tiwdneHg56/0OGNFK8YT88X7vZdrRTvJez/opMEi4r89fO4aL/3Xtw+zuh +TaRjAv04l5U/BXCga99igUOLtFkNSoxUnMW7gZ/NfaXvCyUeOiDbHPwfmGcCCtRzRBPbUYQaVQNW +4AB+dAb/OMRyHdOoP2gxXdMJxy6MW2Pg6Nwe0uxhHvLe5e/2mXZgLR6UcnHGCyoyx5JO1UbXHfmp +GQrI+pXObSOYqgs4rZpWDW+N8TEAiMEXnM0ZNjX+VVOg4DwzX5Ze4jLp3zO7Bkqp2IRzznfSxqxx +4VyjHQy7Ct9f4qNx2No3WqB4K/TUfet27fJhcKVlmtOJNBir+3I+17Q9eVzYH6Eze9mCUAyTF6ps +3MKCuwJXNq+YJyo5UOGwifUll35HaBC07HPKs5fRJNz2YqAo07WjuGS3iGJCz51TzZm+ZGiPTx4S +SPfSKcOYKMryMguTjClPPGAyzQWWYezyr/6zcCwupvI= +-----END 
CERTIFICATE----- + +BJCA Global Root CA2 +==================== +-----BEGIN CERTIFICATE----- +MIICJTCCAaugAwIBAgIQLBcIfWQqwP6FGFkGz7RK6zAKBggqhkjOPQQDAzBUMQswCQYDVQQGEwJD +TjEmMCQGA1UECgwdQkVJSklORyBDRVJUSUZJQ0FURSBBVVRIT1JJVFkxHTAbBgNVBAMMFEJKQ0Eg +R2xvYmFsIFJvb3QgQ0EyMB4XDTE5MTIxOTAzMTgyMVoXDTQ0MTIxMjAzMTgyMVowVDELMAkGA1UE +BhMCQ04xJjAkBgNVBAoMHUJFSUpJTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZMR0wGwYDVQQDDBRC +SkNBIEdsb2JhbCBSb290IENBMjB2MBAGByqGSM49AgEGBSuBBAAiA2IABJ3LgJGNU2e1uVCxA/jl +SR9BIgmwUVJY1is0j8USRhTFiy8shP8sbqjV8QnjAyEUxEM9fMEsxEtqSs3ph+B99iK++kpRuDCK +/eHeGBIK9ke35xe/J4rUQUyWPGCWwf0VHKNCMEAwHQYDVR0OBBYEFNJKsVF/BvDRgh9Obl+rg/xI +1LCRMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMAoGCCqGSM49BAMDA2gAMGUCMBq8 +W9f+qdJUDkpd0m2xQNz0Q9XSSpkZElaA94M04TVOSG0ED1cxMDAtsaqdAzjbBgIxAMvMh1PLet8g +UXOQwKhbYdDFUDn9hf7B43j4ptZLvZuHjw/l1lOWqzzIQNph91Oj9w== +-----END CERTIFICATE----- + +Sectigo Public Server Authentication Root E46 +============================================= +-----BEGIN CERTIFICATE----- +MIICOjCCAcGgAwIBAgIQQvLM2htpN0RfFf51KBC49DAKBggqhkjOPQQDAzBfMQswCQYDVQQGEwJH +QjEYMBYGA1UEChMPU2VjdGlnbyBMaW1pdGVkMTYwNAYDVQQDEy1TZWN0aWdvIFB1YmxpYyBTZXJ2 +ZXIgQXV0aGVudGljYXRpb24gUm9vdCBFNDYwHhcNMjEwMzIyMDAwMDAwWhcNNDYwMzIxMjM1OTU5 +WjBfMQswCQYDVQQGEwJHQjEYMBYGA1UEChMPU2VjdGlnbyBMaW1pdGVkMTYwNAYDVQQDEy1TZWN0 +aWdvIFB1YmxpYyBTZXJ2ZXIgQXV0aGVudGljYXRpb24gUm9vdCBFNDYwdjAQBgcqhkjOPQIBBgUr +gQQAIgNiAAR2+pmpbiDt+dd34wc7qNs9Xzjoq1WmVk/WSOrsfy2qw7LFeeyZYX8QeccCWvkEN/U0 +NSt3zn8gj1KjAIns1aeibVvjS5KToID1AZTc8GgHHs3u/iVStSBDHBv+6xnOQ6OjQjBAMB0GA1Ud +DgQWBBTRItpMWfFLXyY4qp3W7usNw/upYTAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB +/zAKBggqhkjOPQQDAwNnADBkAjAn7qRaqCG76UeXlImldCBteU/IvZNeWBj7LRoAasm4PdCkT0RH +lAFWovgzJQxC36oCMB3q4S6ILuH5px0CMk7yn2xVdOOurvulGu7t0vzCAxHrRVxgED1cf5kDW21U +SAGKcw== +-----END CERTIFICATE----- + +Sectigo Public Server Authentication Root R46 +============================================= +-----BEGIN CERTIFICATE----- +MIIFijCCA3KgAwIBAgIQdY39i658BwD6qSWn4cetFDANBgkqhkiG9w0BAQwFADBfMQswCQYDVQQG +EwJHQjEYMBYGA1UEChMPU2VjdGlnbyBMaW1pdGVkMTYwNAYDVQQDEy1TZWN0aWdvIFB1YmxpYyBT +ZXJ2ZXIgQXV0aGVudGljYXRpb24gUm9vdCBSNDYwHhcNMjEwMzIyMDAwMDAwWhcNNDYwMzIxMjM1 +OTU5WjBfMQswCQYDVQQGEwJHQjEYMBYGA1UEChMPU2VjdGlnbyBMaW1pdGVkMTYwNAYDVQQDEy1T +ZWN0aWdvIFB1YmxpYyBTZXJ2ZXIgQXV0aGVudGljYXRpb24gUm9vdCBSNDYwggIiMA0GCSqGSIb3 +DQEBAQUAA4ICDwAwggIKAoICAQCTvtU2UnXYASOgHEdCSe5jtrch/cSV1UgrJnwUUxDaef0rty2k +1Cz66jLdScK5vQ9IPXtamFSvnl0xdE8H/FAh3aTPaE8bEmNtJZlMKpnzSDBh+oF8HqcIStw+Kxwf +GExxqjWMrfhu6DtK2eWUAtaJhBOqbchPM8xQljeSM9xfiOefVNlI8JhD1mb9nxc4Q8UBUQvX4yMP +FF1bFOdLvt30yNoDN9HWOaEhUTCDsG3XME6WW5HwcCSrv0WBZEMNvSE6Lzzpng3LILVCJ8zab5vu +ZDCQOc2TZYEhMbUjUDM3IuM47fgxMMxF/mL50V0yeUKH32rMVhlATc6qu/m1dkmU8Sf4kaWD5Qaz +Yw6A3OASVYCmO2a0OYctyPDQ0RTp5A1NDvZdV3LFOxxHVp3i1fuBYYzMTYCQNFu31xR13NgESJ/A +wSiItOkcyqex8Va3e0lMWeUgFaiEAin6OJRpmkkGj80feRQXEgyDet4fsZfu+Zd4KKTIRJLpfSYF +plhym3kT2BFfrsU4YjRosoYwjviQYZ4ybPUHNs2iTG7sijbt8uaZFURww3y8nDnAtOFr94MlI1fZ +EoDlSfB1D++N6xybVCi0ITz8fAr/73trdf+LHaAZBav6+CuBQug4urv7qv094PPK306Xlynt8xhW +6aWWrL3DkJiy4Pmi1KZHQ3xtzwIDAQABo0IwQDAdBgNVHQ4EFgQUVnNYZJX5khqwEioEYnmhQBWI +IUkwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAC9c +mTz8Bl6MlC5w6tIyMY208FHVvArzZJ8HXtXBc2hkeqK5Duj5XYUtqDdFqij0lgVQYKlJfp/imTYp +E0RHap1VIDzYm/EDMrraQKFz6oOht0SmDpkBm+S8f74TlH7Kph52gDY9hAaLMyZlbcp+nv4fjFg4 +exqDsQ+8FxG75gbMY/qB8oFM2gsQa6H61SilzwZAFv97fRheORKkU55+MkIQpiGRqRxOF3yEvJ+M +0ejf5lG5Nkc/kLnHvALcWxxPDkjBJYOcCj+esQMzEhonrPcibCTRAUH4WAP+JWgiH5paPHxsnnVI 
+84HxZmduTILA7rpXDhjvLpr3Etiga+kFpaHpaPi8TD8SHkXoUsCjvxInebnMMTzD9joiFgOgyY9m +pFuiTdaBJQbpdqQACj7LzTWb4OE4y2BThihCQRxEV+ioratF4yUQvNs+ZUH7G6aXD+u5dHn5Hrwd +Vw1Hr8Mvn4dGp+smWg9WY7ViYG4A++MnESLn/pmPNPW56MORcr3Ywx65LvKRRFHQV80MNNVIIb/b +E/FmJUNS0nAiNs2fxBx1IK1jcmMGDw4nztJqDby1ORrp0XZ60Vzk50lJLVU3aPAaOpg+VBeHVOmm +J1CJeyAvP/+/oYtKR5j/K3tJPsMpRmAYQqszKbrAKbkTidOIijlBO8n9pu0f9GBj39ItVQGL +-----END CERTIFICATE----- + +SSL.com TLS RSA Root CA 2022 +============================ +-----BEGIN CERTIFICATE----- +MIIFiTCCA3GgAwIBAgIQb77arXO9CEDii02+1PdbkTANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQG +EwJVUzEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9uMSUwIwYDVQQDDBxTU0wuY29tIFRMUyBSU0Eg +Um9vdCBDQSAyMDIyMB4XDTIyMDgyNTE2MzQyMloXDTQ2MDgxOTE2MzQyMVowTjELMAkGA1UEBhMC +VVMxGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjElMCMGA1UEAwwcU1NMLmNvbSBUTFMgUlNBIFJv +b3QgQ0EgMjAyMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANCkCXJPQIgSYT41I57u +9nTPL3tYPc48DRAokC+X94xI2KDYJbFMsBFMF3NQ0CJKY7uB0ylu1bUJPiYYf7ISf5OYt6/wNr/y +7hienDtSxUcZXXTzZGbVXcdotL8bHAajvI9AI7YexoS9UcQbOcGV0insS657Lb85/bRi3pZ7Qcac +oOAGcvvwB5cJOYF0r/c0WRFXCsJbwST0MXMwgsadugL3PnxEX4MN8/HdIGkWCVDi1FW24IBydm5M +R7d1VVm0U3TZlMZBrViKMWYPHqIbKUBOL9975hYsLfy/7PO0+r4Y9ptJ1O4Fbtk085zx7AGL0SDG +D6C1vBdOSHtRwvzpXGk3R2azaPgVKPC506QVzFpPulJwoxJF3ca6TvvC0PeoUidtbnm1jPx7jMEW +TO6Af77wdr5BUxIzrlo4QqvXDz5BjXYHMtWrifZOZ9mxQnUjbvPNQrL8VfVThxc7wDNY8VLS+YCk +8OjwO4s4zKTGkH8PnP2L0aPP2oOnaclQNtVcBdIKQXTbYxE3waWglksejBYSd66UNHsef8JmAOSq +g+qKkK3ONkRN0VHpvB/zagX9wHQfJRlAUW7qglFA35u5CCoGAtUjHBPW6dvbxrB6y3snm/vg1UYk +7RBLY0ulBY+6uB0rpvqR4pJSvezrZ5dtmi2fgTIFZzL7SAg/2SW4BCUvAgMBAAGjYzBhMA8GA1Ud +EwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAU+y437uOEeicuzRk1sTN8/9REQrkwHQYDVR0OBBYEFPsu +N+7jhHonLs0ZNbEzfP/UREK5MA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAjYlt +hEUY8U+zoO9opMAdrDC8Z2awms22qyIZZtM7QbUQnRC6cm4pJCAcAZli05bg4vsMQtfhWsSWTVTN +j8pDU/0quOr4ZcoBwq1gaAafORpR2eCNJvkLTqVTJXojpBzOCBvfR4iyrT7gJ4eLSYwfqUdYe5by +iB0YrrPRpgqU+tvT5TgKa3kSM/tKWTcWQA673vWJDPFs0/dRa1419dvAJuoSc06pkZCmF8NsLzjU +o3KUQyxi4U5cMj29TH0ZR6LDSeeWP4+a0zvkEdiLA9z2tmBVGKaBUfPhqBVq6+AL8BQx1rmMRTqo +ENjwuSfr98t67wVylrXEj5ZzxOhWc5y8aVFjvO9nHEMaX3cZHxj4HCUp+UmZKbaSPaKDN7Egkaib +MOlqbLQjk2UEqxHzDh1TJElTHaE/nUiSEeJ9DU/1172iWD54nR4fK/4huxoTtrEoZP2wAgDHbICi +vRZQIA9ygV/MlP+7mea6kMvq+cYMwq7FGc4zoWtcu358NFcXrfA/rs3qr5nsLFR+jM4uElZI7xc7 +P0peYNLcdDa8pUNjyw9bowJWCZ4kLOGGgYz+qxcs+sjiMho6/4UIyYOf8kpIEFR3N+2ivEC+5BB0 +9+Rbu7nzifmPQdjH5FCQNYA+HLhNkNPU98OwoX6EyneSMSy4kLGCenROmxMmtNVQZlR4rmA= +-----END CERTIFICATE----- + +SSL.com TLS ECC Root CA 2022 +============================ +-----BEGIN CERTIFICATE----- +MIICOjCCAcCgAwIBAgIQFAP1q/s3ixdAW+JDsqXRxDAKBggqhkjOPQQDAzBOMQswCQYDVQQGEwJV +UzEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9uMSUwIwYDVQQDDBxTU0wuY29tIFRMUyBFQ0MgUm9v +dCBDQSAyMDIyMB4XDTIyMDgyNTE2MzM0OFoXDTQ2MDgxOTE2MzM0N1owTjELMAkGA1UEBhMCVVMx +GDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjElMCMGA1UEAwwcU1NMLmNvbSBUTFMgRUNDIFJvb3Qg +Q0EgMjAyMjB2MBAGByqGSM49AgEGBSuBBAAiA2IABEUpNXP6wrgjzhR9qLFNoFs27iosU8NgCTWy +JGYmacCzldZdkkAZDsalE3D07xJRKF3nzL35PIXBz5SQySvOkkJYWWf9lCcQZIxPBLFNSeR7T5v1 +5wj4A4j3p8OSSxlUgaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAfBgNVHSMEGDAWgBSJjy+j6CugFFR7 +81a4Jl9nOAuc0DAdBgNVHQ4EFgQUiY8vo+groBRUe/NWuCZfZzgLnNAwDgYDVR0PAQH/BAQDAgGG +MAoGCCqGSM49BAMDA2gAMGUCMFXjIlbp15IkWE8elDIPDAI2wv2sdDJO4fscgIijzPvX6yv/N33w +7deedWo1dlJF4AIxAMeNb0Igj762TVntd00pxCAgRWSGOlDGxK0tk/UYfXLtqc/ErFc2KAhl3zx5 +Zn6g6g== +-----END CERTIFICATE----- + +Atos TrustedRoot Root CA ECC TLS 2021 +===================================== +-----BEGIN CERTIFICATE----- 
+MIICFTCCAZugAwIBAgIQPZg7pmY9kGP3fiZXOATvADAKBggqhkjOPQQDAzBMMS4wLAYDVQQDDCVB +dG9zIFRydXN0ZWRSb290IFJvb3QgQ0EgRUNDIFRMUyAyMDIxMQ0wCwYDVQQKDARBdG9zMQswCQYD +VQQGEwJERTAeFw0yMTA0MjIwOTI2MjNaFw00MTA0MTcwOTI2MjJaMEwxLjAsBgNVBAMMJUF0b3Mg +VHJ1c3RlZFJvb3QgUm9vdCBDQSBFQ0MgVExTIDIwMjExDTALBgNVBAoMBEF0b3MxCzAJBgNVBAYT +AkRFMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEloZYKDcKZ9Cg3iQZGeHkBQcfl+3oZIK59sRxUM6K +DP/XtXa7oWyTbIOiaG6l2b4siJVBzV3dscqDY4PMwL502eCdpO5KTlbgmClBk1IQ1SQ4AjJn8ZQS +b+/Xxd4u/RmAo0IwQDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBR2KCXWfeBmmnoJsmo7jjPX +NtNPojAOBgNVHQ8BAf8EBAMCAYYwCgYIKoZIzj0EAwMDaAAwZQIwW5kp85wxtolrbNa9d+F851F+ +uDrNozZffPc8dz7kUK2o59JZDCaOMDtuCCrCp1rIAjEAmeMM56PDr9NJLkaCI2ZdyQAUEv049OGY +a3cpetskz2VAv9LcjBHo9H1/IISpQuQo +-----END CERTIFICATE----- + +Atos TrustedRoot Root CA RSA TLS 2021 +===================================== +-----BEGIN CERTIFICATE----- +MIIFZDCCA0ygAwIBAgIQU9XP5hmTC/srBRLYwiqipDANBgkqhkiG9w0BAQwFADBMMS4wLAYDVQQD +DCVBdG9zIFRydXN0ZWRSb290IFJvb3QgQ0EgUlNBIFRMUyAyMDIxMQ0wCwYDVQQKDARBdG9zMQsw +CQYDVQQGEwJERTAeFw0yMTA0MjIwOTIxMTBaFw00MTA0MTcwOTIxMDlaMEwxLjAsBgNVBAMMJUF0 +b3MgVHJ1c3RlZFJvb3QgUm9vdCBDQSBSU0EgVExTIDIwMjExDTALBgNVBAoMBEF0b3MxCzAJBgNV +BAYTAkRFMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAtoAOxHm9BYx9sKOdTSJNy/BB +l01Z4NH+VoyX8te9j2y3I49f1cTYQcvyAh5x5en2XssIKl4w8i1mx4QbZFc4nXUtVsYvYe+W/CBG +vevUez8/fEc4BKkbqlLfEzfTFRVOvV98r61jx3ncCHvVoOX3W3WsgFWZkmGbzSoXfduP9LVq6hdK +ZChmFSlsAvFr1bqjM9xaZ6cF4r9lthawEO3NUDPJcFDsGY6wx/J0W2tExn2WuZgIWWbeKQGb9Cpt +0xU6kGpn8bRrZtkh68rZYnxGEFzedUlnnkL5/nWpo63/dgpnQOPF943HhZpZnmKaau1Fh5hnstVK +PNe0OwANwI8f4UDErmwh3El+fsqyjW22v5MvoVw+j8rtgI5Y4dtXz4U2OLJxpAmMkokIiEjxQGMY +sluMWuPD0xeqqxmjLBvk1cbiZnrXghmmOxYsL3GHX0WelXOTwkKBIROW1527k2gV+p2kHYzygeBY +Br3JtuP2iV2J+axEoctr+hbxx1A9JNr3w+SH1VbxT5Aw+kUJWdo0zuATHAR8ANSbhqRAvNncTFd+ +rrcztl524WWLZt+NyteYr842mIycg5kDcPOvdO3GDjbnvezBc6eUWsuSZIKmAMFwoW4sKeFYV+xa +fJlrJaSQOoD0IJ2azsct+bJLKZWD6TWNp0lIpw9MGZHQ9b8Q4HECAwEAAaNCMEAwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQUdEmZ0f+0emhFdcN+tNzMzjkz2ggwDgYDVR0PAQH/BAQDAgGGMA0G +CSqGSIb3DQEBDAUAA4ICAQAjQ1MkYlxt/T7Cz1UAbMVWiLkO3TriJQ2VSpfKgInuKs1l+NsW4AmS +4BjHeJi78+xCUvuppILXTdiK/ORO/auQxDh1MoSf/7OwKwIzNsAQkG8dnK/haZPso0UvFJ/1TCpl +Q3IM98P4lYsU84UgYt1UU90s3BiVaU+DR3BAM1h3Egyi61IxHkzJqM7F78PRreBrAwA0JrRUITWX +AdxfG/F851X6LWh3e9NpzNMOa7pNdkTWwhWaJuywxfW70Xp0wmzNxbVe9kzmWy2B27O3Opee7c9G +slA9hGCZcbUztVdF5kJHdWoOsAgMrr3e97sPWD2PAzHoPYJQyi9eDF20l74gNAf0xBLh7tew2Vkt +afcxBPTy+av5EzH4AXcOPUIjJsyacmdRIXrMPIWo6iFqO9taPKU0nprALN+AnCng33eU0aKAQv9q +TFsR0PXNor6uzFFcw9VUewyu1rkGd4Di7wcaaMxZUa1+XGdrudviB0JbuAEFWDlN5LuYo7Ey7Nmj +1m+UI/87tyll5gfp77YZ6ufCOB0yiJA8EytuzO+rdwY0d4RPcuSBhPm5dDTedk+SKlOxJTnbPP/l +PqYO5Wue/9vsL3SD3460s6neFE3/MaNFcyT6lSnMEpcEoji2jbDwN/zIIX8/syQbPYtuzE2wFg2W +HYMfRsCbvUOZ58SWLs5fyQ== +-----END CERTIFICATE----- ` diff --git a/vendor/github.com/snowflakedb/gosnowflake/chunk_downloader.go b/vendor/github.com/snowflakedb/gosnowflake/chunk_downloader.go index 86c74509..b68f9ece 100644 --- a/vendor/github.com/snowflakedb/gosnowflake/chunk_downloader.go +++ b/vendor/github.com/snowflakedb/gosnowflake/chunk_downloader.go @@ -17,9 +17,9 @@ import ( "sync" "time" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/ipc" - "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/ipc" + "github.com/apache/arrow/go/v14/arrow/memory" ) type chunkDownloader interface { @@ -106,10 +106,13 @@ func (scd *snowflakeChunkDownloader) start() error { // if the rowsetbase64 
retrieved from the server is empty, move on to downloading chunks var err error var loc *time.Location - if scd.sc != nil { + if scd.sc != nil && scd.sc.cfg != nil { loc = getCurrentLocation(scd.sc.cfg.Params) } - firstArrowChunk := buildFirstArrowChunk(scd.RowSet.RowSetBase64, loc, scd.pool) + firstArrowChunk, err := buildFirstArrowChunk(scd.RowSet.RowSetBase64, loc, scd.pool) + if err != nil { + return err + } higherPrecision := higherPrecisionEnabled(scd.ctx) scd.CurrentChunk, err = firstArrowChunk.decodeArrowChunk(scd.RowSet.RowType, higherPrecision) scd.CurrentChunkSize = firstArrowChunk.rowCount @@ -264,21 +267,28 @@ func getChunk( if err != nil { return nil, err } - return newRetryHTTP(ctx, sc.rest.Client, http.NewRequest, u, headers, timeout).execute() + return newRetryHTTP(ctx, sc.rest.Client, http.NewRequest, u, headers, timeout, sc.rest.MaxRetryCount, sc.currentTimeProvider, sc.cfg).execute() } func (scd *snowflakeChunkDownloader) startArrowBatches() error { + if scd.RowSet.RowSetBase64 == "" { + return nil + } var err error chunkMetaLen := len(scd.ChunkMetas) var loc *time.Location - if scd.sc != nil { + if scd.sc != nil && scd.sc.cfg != nil { loc = getCurrentLocation(scd.sc.cfg.Params) } - firstArrowChunk := buildFirstArrowChunk(scd.RowSet.RowSetBase64, loc, scd.pool) + firstArrowChunk, err := buildFirstArrowChunk(scd.RowSet.RowSetBase64, loc, scd.pool) + if err != nil { + return err + } scd.FirstBatch = &ArrowBatch{ idx: 0, scd: scd, funcDownloadHelper: scd.FuncDownloadHelper, + loc: loc, } // decode first chunk if possible if firstArrowChunk.allocator != nil { @@ -293,6 +303,7 @@ func (scd *snowflakeChunkDownloader) startArrowBatches() error { idx: i, scd: scd, funcDownloadHelper: scd.FuncDownloadHelper, + loc: loc, } } return nil @@ -430,7 +441,7 @@ func decodeChunk(scd *snowflakeChunkDownloader, idx int, bufStream *bufio.Reader return err } var loc *time.Location - if scd.sc != nil { + if scd.sc != nil && scd.sc.cfg != nil { loc = getCurrentLocation(scd.sc.cfg.Params) } arc := arrowResultChunk{ @@ -636,7 +647,7 @@ func (f *httpStreamChunkFetcher) fetch(URL string, rows chan<- []*string) error if err != nil { return err } - res, err := newRetryHTTP(context.Background(), f.client, http.NewRequest, fullURL, f.headers, 0).execute() + res, err := newRetryHTTP(context.Background(), f.client, http.NewRequest, fullURL, f.headers, 0, 0, defaultTimeProvider, nil).execute() if err != nil { return err } @@ -707,6 +718,14 @@ type ArrowBatch struct { rowCount int scd *snowflakeChunkDownloader funcDownloadHelper func(context.Context, *snowflakeChunkDownloader, int) error + ctx context.Context + loc *time.Location +} + +// WithContext sets the context which will be used for this ArrowBatch. 
+func (rb *ArrowBatch) WithContext(ctx context.Context) *ArrowBatch { + rb.ctx = ctx + return rb } // Fetch returns an array of records representing a chunk in the query @@ -717,7 +736,13 @@ func (rb *ArrowBatch) Fetch() (*[]arrow.Record, error) { rb.rowCount = countArrowBatchRows(rb.rec) return rb.rec, nil } - if err := rb.funcDownloadHelper(context.Background(), rb.scd, rb.idx); err != nil { + var ctx context.Context + if rb.ctx != nil { + ctx = rb.ctx + } else { + ctx = context.Background() + } + if err := rb.funcDownloadHelper(ctx, rb.scd, rb.idx); err != nil { return nil, err } return rb.rec, nil diff --git a/vendor/github.com/snowflakedb/gosnowflake/client.go b/vendor/github.com/snowflakedb/gosnowflake/client.go index 358be47b..14f0e5f1 100644 --- a/vendor/github.com/snowflakedb/gosnowflake/client.go +++ b/vendor/github.com/snowflakedb/gosnowflake/client.go @@ -12,7 +12,7 @@ import ( // InternalClient is implemented by HTTPClient type InternalClient interface { Get(context.Context, *url.URL, map[string]string, time.Duration) (*http.Response, error) - Post(context.Context, *url.URL, map[string]string, []byte, time.Duration, bool) (*http.Response, error) + Post(context.Context, *url.URL, map[string]string, []byte, time.Duration, currentTimeProvider) (*http.Response, error) } type httpClient struct { @@ -33,6 +33,6 @@ func (cli *httpClient) Post( headers map[string]string, body []byte, timeout time.Duration, - raise4xx bool) (*http.Response, error) { - return cli.sr.FuncPost(ctx, cli.sr, url, headers, body, timeout, raise4xx) + currentTimeProvider currentTimeProvider) (*http.Response, error) { + return cli.sr.FuncPost(ctx, cli.sr, url, headers, body, timeout, currentTimeProvider, nil) } diff --git a/vendor/github.com/snowflakedb/gosnowflake/client_configuration.go b/vendor/github.com/snowflakedb/gosnowflake/client_configuration.go new file mode 100644 index 00000000..a7271812 --- /dev/null +++ b/vendor/github.com/snowflakedb/gosnowflake/client_configuration.go @@ -0,0 +1,148 @@ +// Copyright (c) 2023 Snowflake Computing Inc. All rights reserved. 
+
+package gosnowflake
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"os"
+	"path"
+	"strings"
+)
+
+// log levels for easy logging
+const (
+	levelOff   string = "OFF"   // log level for logging switched off
+	levelError string = "ERROR" // error log level
+	levelWarn  string = "WARN"  // warn log level
+	levelInfo  string = "INFO"  // info log level
+	levelDebug string = "DEBUG" // debug log level
+	levelTrace string = "TRACE" // trace log level
+)
+
+const (
+	defaultConfigName = "sf_client_config.json"
+	clientConfEnvName = "SF_CLIENT_CONFIG_FILE"
+)
+
+func getClientConfig(filePathFromConnectionString string) (*ClientConfig, error) {
+	configPredefinedFilePaths := clientConfigPredefinedDirs()
+	filePath := findClientConfigFilePath(filePathFromConnectionString, configPredefinedFilePaths)
+	if filePath == "" { // we did not find a config file
+		return nil, nil
+	}
+	return parseClientConfiguration(filePath)
+}
+
+func findClientConfigFilePath(filePathFromConnectionString string, configPredefinedDirs []string) string {
+	if filePathFromConnectionString != "" {
+		return filePathFromConnectionString
+	}
+	envConfigFilePath := os.Getenv(clientConfEnvName)
+	if envConfigFilePath != "" {
+		return envConfigFilePath
+	}
+	return searchForConfigFile(configPredefinedDirs)
+}
+
+func searchForConfigFile(directories []string) string {
+	for _, dir := range directories {
+		filePath := path.Join(dir, defaultConfigName)
+		exists, err := existsFile(filePath)
+		if err != nil {
+			logger.Errorf("Error while searching for the client config in %s directory: %s", dir, err)
+			continue
+		}
+		if exists {
+			return filePath
+		}
+	}
+	return ""
+}
+
+func existsFile(filePath string) (bool, error) {
+	_, err := os.Stat(filePath)
+	if err == nil {
+		return true, nil
+	}
+	if errors.Is(err, os.ErrNotExist) {
+		return false, nil
+	}
+	return false, err
+}
+
+func clientConfigPredefinedDirs() []string {
+	homeDir, err := os.UserHomeDir()
+	if err != nil {
+		logger.Warnf("Home dir could not be determined: %v", err)
+		return []string{".", os.TempDir()}
+	}
+	return []string{".", homeDir, os.TempDir()}
+}
+
+// ClientConfig config root
+type ClientConfig struct {
+	Common *ClientConfigCommonProps `json:"common"`
+}
+
+// ClientConfigCommonProps properties from "common" section
+type ClientConfigCommonProps struct {
+	LogLevel string `json:"log_level,omitempty"`
+	LogPath  string `json:"log_path,omitempty"`
+}
+
+func parseClientConfiguration(filePath string) (*ClientConfig, error) {
+	if filePath == "" {
+		return nil, nil
+	}
+	fileContents, err := os.ReadFile(filePath)
+	if err != nil {
+		return nil, parsingClientConfigError(err)
+	}
+	var clientConfig ClientConfig
+	err = json.Unmarshal(fileContents, &clientConfig)
+	if err != nil {
+		return nil, parsingClientConfigError(err)
+	}
+	err = validateClientConfiguration(&clientConfig)
+	if err != nil {
+		return nil, parsingClientConfigError(err)
+	}
+	return &clientConfig, nil
+}
+
+func parsingClientConfigError(err error) error {
+	return fmt.Errorf("parsing client config failed: %w", err)
+}
+
+func validateClientConfiguration(clientConfig *ClientConfig) error {
+	if clientConfig == nil {
+		return errors.New("client config not found")
+	}
+	if clientConfig.Common == nil {
+		return errors.New("common section in client config not found")
+	}
+	return validateLogLevel(*clientConfig)
+}
+
+func validateLogLevel(clientConfig ClientConfig) error {
+	var logLevel = clientConfig.Common.LogLevel
+	if logLevel != "" {
+		_, err := toLogLevel(logLevel)
+		if err != nil {
+			return err
+ } + } + return nil +} + +func toLogLevel(logLevelString string) (string, error) { + var logLevel = strings.ToUpper(logLevelString) + switch logLevel { + case levelOff, levelError, levelWarn, levelInfo, levelDebug, levelTrace: + return logLevel, nil + default: + return "", errors.New("unknown log level: " + logLevelString) + } +} diff --git a/vendor/github.com/snowflakedb/gosnowflake/codecov.yml b/vendor/github.com/snowflakedb/gosnowflake/codecov.yml new file mode 100644 index 00000000..d2380343 --- /dev/null +++ b/vendor/github.com/snowflakedb/gosnowflake/codecov.yml @@ -0,0 +1,3 @@ +parsers: + go: + partials_as_hits: true \ No newline at end of file diff --git a/vendor/github.com/snowflakedb/gosnowflake/connection.go b/vendor/github.com/snowflakedb/gosnowflake/connection.go index cd08366d..72829062 100644 --- a/vendor/github.com/snowflakedb/gosnowflake/connection.go +++ b/vendor/github.com/snowflakedb/gosnowflake/connection.go @@ -22,7 +22,7 @@ import ( "sync/atomic" "time" - "github.com/apache/arrow/go/v12/arrow/ipc" + "github.com/apache/arrow/go/v14/arrow/ipc" ) const ( @@ -34,12 +34,15 @@ const ( httpHeaderHost = "Host" httpHeaderValueOctetStream = "application/octet-stream" httpHeaderContentEncoding = "Content-Encoding" + httpClientAppID = "CLIENT_APP_ID" + httpClientAppVersion = "CLIENT_APP_VERSION" ) const ( - statementTypeIDMulti = int64(0x1000) + statementTypeIDSelect = int64(0x1000) statementTypeIDDml = int64(0x3000) statementTypeIDMultiTableInsert = statementTypeIDDml + int64(0x500) + statementTypeIDMultistatement = int64(0xA000) ) const ( @@ -57,17 +60,24 @@ const ( queryResultType resultType = "query" ) +type execKey string + +const ( + executionType execKey = "executionType" + executionTypeStatement string = "statement" +) + const privateLinkSuffix = "privatelink.snowflakecomputing.com" type snowflakeConn struct { - ctx context.Context - cfg *Config - rest *snowflakeRestful - SequenceCounter uint64 - QueryID string - SQLState string - telemetry *snowflakeTelemetry - internal InternalClient + ctx context.Context + cfg *Config + rest *snowflakeRestful + SequenceCounter uint64 + telemetry *snowflakeTelemetry + internal InternalClient + queryContextCache *queryContextCache + currentTimeProvider currentTimeProvider } var ( @@ -86,6 +96,10 @@ func (sc *snowflakeConn) exec( var err error counter := atomic.AddUint64(&sc.SequenceCounter, 1) // query sequence counter + queryContext, err := buildQueryContext(sc.queryContextCache) + if err != nil { + logger.Errorf("error while building query context: %v", err) + } req := execRequest{ SQLText: query, AsyncExec: noResult, @@ -93,10 +107,14 @@ func (sc *snowflakeConn) exec( IsInternal: isInternal, DescribeOnly: describeOnly, SequenceID: counter, + QueryContext: queryContext, } if key := ctx.Value(multiStatementCount); key != nil { req.Parameters[string(multiStatementCount)] = key } + if tag := ctx.Value(queryTag); tag != nil { + req.Parameters[string(queryTag)] = tag + } logger.WithContext(ctx).Infof("parameters: %v", req.Parameters) // handle bindings, if required @@ -138,7 +156,17 @@ func (sc *snowflakeConn) exec( } logger.WithContext(ctx).Infof("Success: %v, Code: %v", data.Success, code) if !data.Success { - return nil, (populateErrorFields(code, data)).exceptionTelemetry(sc) + err = (populateErrorFields(code, data)).exceptionTelemetry(sc) + return nil, err + } + + if !sc.cfg.DisableQueryContextCache && data.Data.QueryContext != nil { + queryContext, err := extractQueryContext(data) + if err != nil { + logger.Errorf("error while 
decoding query context: %v", err)
+		} else {
+			sc.queryContextCache.add(sc, queryContext.Entries...)
+		}
 	}
 
 	// handle PUT/GET commands
@@ -150,16 +178,49 @@
 	}
 	logger.WithContext(ctx).Info("Exec/Query SUCCESS")
-	sc.cfg.Database = data.Data.FinalDatabaseName
-	sc.cfg.Schema = data.Data.FinalSchemaName
-	sc.cfg.Role = data.Data.FinalRoleName
-	sc.cfg.Warehouse = data.Data.FinalWarehouseName
-	sc.QueryID = data.Data.QueryID
-	sc.SQLState = data.Data.SQLState
+	if data.Data.FinalDatabaseName != "" {
+		sc.cfg.Database = data.Data.FinalDatabaseName
+	}
+	if data.Data.FinalSchemaName != "" {
+		sc.cfg.Schema = data.Data.FinalSchemaName
+	}
+	if data.Data.FinalWarehouseName != "" {
+		sc.cfg.Warehouse = data.Data.FinalWarehouseName
+	}
+	if data.Data.FinalRoleName != "" {
+		sc.cfg.Role = data.Data.FinalRoleName
+	}
 	sc.populateSessionParameters(data.Data.Parameters)
 	return data, err
 }
 
+func extractQueryContext(data *execResponse) (queryContext, error) {
+	var queryContext queryContext
+	err := json.Unmarshal(data.Data.QueryContext, &queryContext)
+	return queryContext, err
+}
+
+func buildQueryContext(qcc *queryContextCache) (requestQueryContext, error) {
+	rqc := requestQueryContext{}
+	if qcc == nil || len(qcc.entries) == 0 {
+		logger.Debugf("empty qcc")
+		return rqc, nil
+	}
+	for _, qce := range qcc.entries {
+		contextData := contextData{}
+		if qce.Context != "" {
+			contextData.Base64Data = qce.Context
+		}
+		rqc.Entries = append(rqc.Entries, requestQueryContextEntry{
+			ID:        qce.ID,
+			Priority:  qce.Priority,
+			Timestamp: qce.Timestamp,
+			Context:   contextData,
+		})
+	}
+	return rqc, nil
+}
+
 func (sc *snowflakeConn) Begin() (driver.Tx, error) {
 	return sc.BeginTx(sc.ctx, driver.TxOptions{})
 }
@@ -209,7 +270,7 @@ func (sc *snowflakeConn) Close() (err error) {
 	sc.stopHeartBeat()
 	defer sc.cleanup()
 
-	if !sc.cfg.KeepSessionAlive {
+	if sc.cfg != nil && !sc.cfg.KeepSessionAlive {
 		if err = sc.rest.FuncCloseSession(sc.ctx, sc.rest, sc.rest.RequestTimeout); err != nil {
 			logger.Error(err)
 		}
@@ -278,12 +339,21 @@ func (sc *snowflakeConn) ExecContext(
 		return &snowflakeResult{
 			affectedRows: updatedRows,
 			insertID:     -1,
-			queryID:      sc.QueryID,
+			queryID:      data.Data.QueryID,
 		}, nil // last insert id is not supported by Snowflake
 	} else if isMultiStmt(&data.Data) {
 		return sc.handleMultiExec(ctx, data.Data)
+	} else if isDql(&data.Data) {
+		logger.WithContext(ctx).Debugf("DQL")
+		if isStatementContext(ctx) {
+			return &snowflakeResultNoRows{queryID: data.Data.QueryID}, nil
+		}
+		return driver.ResultNoRows, nil
 	}
 	logger.Debug("DDL")
+	if isStatementContext(ctx) {
+		return &snowflakeResultNoRows{queryID: data.Data.QueryID}, nil
+	}
 	return driver.ResultNoRows, nil
 }
 
@@ -349,7 +419,7 @@ func (sc *snowflakeConn) queryContextInternal(
 	rows := new(snowflakeRows)
 	rows.sc = sc
-	rows.queryID = sc.QueryID
+	rows.queryID = data.Data.QueryID
 
 	if isMultiStmt(&data.Data) {
 		// handleMultiQuery is responsible to fill rows with childResults
@@ -360,7 +430,7 @@ func (sc *snowflakeConn) queryContextInternal(
 		rows.addDownloader(populateChunkDownloader(ctx, sc, data.Data))
 	}
 
-	rows.ChunkDownloader.start()
+	err = rows.ChunkDownloader.start()
 	return rows, err
 }
 
@@ -390,6 +460,7 @@ func (sc *snowflakeConn) Ping(ctx context.Context) error {
 	noResult := isAsyncMode(ctx)
 	isDesc := isDescribeOnly(ctx)
 	// TODO: handle isInternal
+	ctx = setResultType(ctx, execResultType)
 	_, err := sc.exec(ctx, "SELECT 1", noResult, false, /* isInternal */
 		isDesc, []driver.NamedValue{})
 	return err
@@ -398,10 +469,10 @@ func (sc
*snowflakeConn) Ping(ctx context.Context) error { // CheckNamedValue determines which types are handled by this driver aside from // the instances captured by driver.Value func (sc *snowflakeConn) CheckNamedValue(nv *driver.NamedValue) error { - if supported := supportedArrayBind(nv); !supported { - return driver.ErrSkip + if supportedNullBind(nv) || supportedArrayBind(nv) { + return nil } - return nil + return driver.ErrSkip } func (sc *snowflakeConn) GetQueryStatus( @@ -430,7 +501,8 @@ func (sc *snowflakeConn) GetQueryStatus( func (sc *snowflakeConn) QueryArrowStream(ctx context.Context, query string, bindings ...driver.NamedValue) (ArrowStreamLoader, error) { ctx = WithArrowBatches(context.WithValue(ctx, asyncMode, false)) ctx = setResultType(ctx, queryResultType) - data, err := sc.exec(ctx, query, false, false /* isinternal */, false, bindings) + isDesc := isDescribeOnly(ctx) + data, err := sc.exec(ctx, query, false, false /* isinternal */, isDesc, bindings) if err != nil { logger.WithContext(ctx).Errorf("error: %v", err) if data != nil { @@ -574,11 +646,18 @@ func (asb *ArrowStreamBatch) GetStream(ctx context.Context) (io.ReadCloser, erro // ArrowStreamLoader is a convenience interface for downloading // Snowflake results via multiple Arrow Record Batch streams. +// +// Some queries from Snowflake do not return Arrow data regardless +// of the settings, such as "SHOW WAREHOUSES". In these cases, +// you'll find TotalRows() > 0 but GetBatches returns no batches +// and no errors. In this case, the data is accessible via JSONData +// with the actual types matching up to the metadata in RowTypes. type ArrowStreamLoader interface { GetBatches() ([]ArrowStreamBatch, error) TotalRows() int64 RowTypes() []execResponseRowType Location() *time.Location + JSONData() [][]*string } type snowflakeArrowStreamChunkDownloader struct { @@ -601,6 +680,9 @@ func (scd *snowflakeArrowStreamChunkDownloader) TotalRows() int64 { return scd.T func (scd *snowflakeArrowStreamChunkDownloader) RowTypes() []execResponseRowType { return scd.RowSet.RowType } +func (scd *snowflakeArrowStreamChunkDownloader) JSONData() [][]*string { + return scd.RowSet.JSON +} // the server might have had an empty first batch, check if we can decode // that first batch, if not we skip it. 
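
The ArrowStreamLoader doc comment above describes the JSON fallback for result sets that report rows but ship no Arrow batches. A minimal consumer sketch, assuming only the interface exactly as declared in this patch (the helper name drainArrowStream is hypothetical):

package gosnowflake

import (
	"context"
	"fmt"
	"io"
)

// drainArrowStream drains every Arrow batch of a loader and falls back to
// the JSON row data for result sets (e.g. "SHOW WAREHOUSES") that report
// rows but return no Arrow batches.
func drainArrowStream(ctx context.Context, loader ArrowStreamLoader) error {
	batches, err := loader.GetBatches()
	if err != nil {
		return err
	}
	if len(batches) == 0 && loader.TotalRows() > 0 {
		// No Arrow payload: rows are only available as JSON strings,
		// typed according to the metadata in loader.RowTypes().
		for _, row := range loader.JSONData() {
			for _, col := range row {
				if col != nil {
					fmt.Printf("%s\t", *col)
				}
			}
			fmt.Println()
		}
		return nil
	}
	for _, batch := range batches {
		rdr, err := batch.GetStream(ctx)
		if err != nil {
			return err
		}
		// A real consumer would feed rdr to an Arrow IPC reader; here it is drained.
		_, err = io.Copy(io.Discard, rdr)
		rdr.Close()
		if err != nil {
			return err
		}
	}
	return nil
}
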
@@ -669,10 +751,17 @@ func (scd *snowflakeArrowStreamChunkDownloader) GetBatches() (out []ArrowStreamB func buildSnowflakeConn(ctx context.Context, config Config) (*snowflakeConn, error) { sc := &snowflakeConn{ - SequenceCounter: 0, - ctx: ctx, - cfg: &config, - } + SequenceCounter: 0, + ctx: ctx, + cfg: &config, + queryContextCache: (&queryContextCache{}).init(), + currentTimeProvider: defaultTimeProvider, + } + // Easy logging is temporarily disabled + //err := initEasyLogging(config.ClientConfigFile) + //if err != nil { + // return nil, err + //} var st http.RoundTripper = SnowflakeTransport if sc.cfg.Transporter == nil { if sc.cfg.InsecureMode { @@ -714,11 +803,17 @@ func buildSnowflakeConn(ctx context.Context, config Config) (*snowflakeConn, err Timeout: sc.cfg.ClientTimeout, Transport: st, }, + JWTClient: &http.Client{ + Timeout: sc.cfg.JWTClientTimeout, + Transport: st, + }, TokenAccessor: tokenAccessor, LoginTimeout: sc.cfg.LoginTimeout, RequestTimeout: sc.cfg.RequestTimeout, + MaxRetryCount: sc.cfg.MaxRetryCount, FuncPost: postRestful, FuncGet: getRestful, + FuncAuthPost: postAuthRestful, FuncPostQuery: postRestfulQuery, FuncPostQueryHelper: postRestfulQueryHelper, FuncRenewSession: renewRestfulSession, diff --git a/vendor/github.com/snowflakedb/gosnowflake/connection_util.go b/vendor/github.com/snowflakedb/gosnowflake/connection_util.go index 597bdc2c..54390522 100644 --- a/vendor/github.com/snowflakedb/gosnowflake/connection_util.go +++ b/vendor/github.com/snowflakedb/gosnowflake/connection_util.go @@ -24,20 +24,24 @@ func (sc *snowflakeConn) isClientSessionKeepAliveEnabled() bool { } func (sc *snowflakeConn) startHeartBeat() { - if !sc.isClientSessionKeepAliveEnabled() { + if sc.cfg != nil && !sc.isClientSessionKeepAliveEnabled() { return } - sc.rest.HeartBeat = &heartbeat{ - restful: sc.rest, + if sc.rest != nil { + sc.rest.HeartBeat = &heartbeat{ + restful: sc.rest, + } + sc.rest.HeartBeat.start() } - sc.rest.HeartBeat.start() } func (sc *snowflakeConn) stopHeartBeat() { - if !sc.isClientSessionKeepAliveEnabled() { + if sc.cfg != nil && !sc.isClientSessionKeepAliveEnabled() { return } - sc.rest.HeartBeat.stop() + if sc.rest != nil && sc.rest.HeartBeat != nil { + sc.rest.HeartBeat.stop() + } } func (sc *snowflakeConn) getArrayBindStageThreshold() int { @@ -193,7 +197,14 @@ func isDml(v int64) bool { return statementTypeIDDml <= v && v <= statementTypeIDMultiTableInsert } +func isDql(data *execResponseData) bool { + return data.StatementTypeID == statementTypeIDSelect && !isMultiStmt(data) +} + func updateRows(data execResponseData) (int64, error) { + if data.RowSet == nil { + return 0, nil + } var count int64 for i, n := 0, len(data.RowType); i < n; i++ { v, err := strconv.ParseInt(*data.RowSet[0][i], 10, 64) @@ -209,8 +220,8 @@ func updateRows(data execResponseData) (int64, error) { // Note that the statement type code is also equivalent to type INSERT, so an // additional check of the name is required func isMultiStmt(data *execResponseData) bool { - return data.StatementTypeID == statementTypeIDMulti && - data.RowType[0].Name == "multiple statement execution" + var isMultistatementByReturningSelect = data.StatementTypeID == statementTypeIDSelect && data.RowType[0].Name == "multiple statement execution" + return isMultistatementByReturningSelect || data.StatementTypeID == statementTypeIDMultistatement } func getResumeQueryID(ctx context.Context) (string, error) { @@ -277,12 +288,19 @@ func populateChunkDownloader( func (sc *snowflakeConn) setupOCSPPrivatelink(app string, 
host string) error { ocspCacheServer := fmt.Sprintf("http://ocsp.%v/ocsp_response_cache.json", host) + logger.Debugf("OCSP Cache Server for Privatelink: %v\n", ocspCacheServer) if err := os.Setenv(cacheServerURLEnv, ocspCacheServer); err != nil { return err } - ocspRetryHost := fmt.Sprintf("http://ocsp.%v/retry/", host) + "%v/%v" - if err := os.Setenv(ocspRetryURLEnv, ocspRetryHost); err != nil { + ocspRetryHostTemplate := fmt.Sprintf("http://ocsp.%v/retry/", host) + "%v/%v" + logger.Debugf("OCSP Retry URL for Privatelink: %v\n", ocspRetryHostTemplate) + if err := os.Setenv(ocspRetryURLEnv, ocspRetryHostTemplate); err != nil { return err } return nil } + +func isStatementContext(ctx context.Context) bool { + v := ctx.Value(executionType) + return v == executionTypeStatement +} diff --git a/vendor/github.com/snowflakedb/gosnowflake/converter.go b/vendor/github.com/snowflakedb/gosnowflake/converter.go index bc01a5c3..ee6aec7d 100644 --- a/vendor/github.com/snowflakedb/gosnowflake/converter.go +++ b/vendor/github.com/snowflakedb/gosnowflake/converter.go @@ -4,6 +4,7 @@ package gosnowflake import ( "context" + "database/sql" "database/sql/driver" "encoding/hex" "fmt" @@ -14,11 +15,11 @@ import ( "strings" "time" - "github.com/apache/arrow/go/v12/arrow" - "github.com/apache/arrow/go/v12/arrow/array" - "github.com/apache/arrow/go/v12/arrow/compute" - "github.com/apache/arrow/go/v12/arrow/decimal128" - "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/apache/arrow/go/v14/arrow" + "github.com/apache/arrow/go/v14/arrow/array" + "github.com/apache/arrow/go/v14/arrow/compute" + "github.com/apache/arrow/go/v14/arrow/decimal128" + "github.com/apache/arrow/go/v14/arrow/memory" ) const format = "2006-01-02 15:04:05.999999999" @@ -58,13 +59,13 @@ func isInterfaceArrayBinding(t interface{}) bool { // goTypeToSnowflake translates Go data type to Snowflake data type. 
func goTypeToSnowflake(v driver.Value, tsmode snowflakeType) snowflakeType { switch t := v.(type) { - case int64: + case int64, sql.NullInt64: return fixedType - case float64: + case float64, sql.NullFloat64: return realType - case bool: + case bool, sql.NullBool: return booleanType - case string: + case string, sql.NullString: return textType case []byte: if tsmode == binaryType { @@ -80,7 +81,7 @@ func goTypeToSnowflake(v driver.Value, tsmode snowflakeType) snowflakeType { return unSupportedType } return changeType - case time.Time: + case time.Time, sql.NullTime: return tsmode } if supportedArrayBind(&driver.NamedValue{Value: v}) { @@ -147,34 +148,68 @@ func valueToString(v driver.Value, tsmode snowflakeType) (*string, error) { s := v1.String() return &s, nil case reflect.Struct: - if tm, ok := v.(time.Time); ok { - switch tsmode { - case dateType: - _, offset := tm.Zone() - tm = tm.Add(time.Second * time.Duration(offset)) - s := strconv.FormatInt(tm.Unix()*1000, 10) - return &s, nil - case timeType: - s := fmt.Sprintf("%d", - (tm.Hour()*3600+tm.Minute()*60+tm.Second())*1e9+tm.Nanosecond()) - return &s, nil - case timestampNtzType, timestampLtzType: - unixTime, _ := new(big.Int).SetString(fmt.Sprintf("%d", tm.Unix()), 10) - m, _ := new(big.Int).SetString(strconv.FormatInt(1e9, 10), 10) - unixTime.Mul(unixTime, m) - tmNanos, _ := new(big.Int).SetString(fmt.Sprintf("%d", tm.Nanosecond()), 10) - s := unixTime.Add(unixTime, tmNanos).String() - return &s, nil - case timestampTzType: - _, offset := tm.Zone() - s := fmt.Sprintf("%v %v", tm.UnixNano(), offset/60+1440) - return &s, nil + switch typedVal := v.(type) { + case time.Time: + return timeTypeValueToString(typedVal, tsmode) + case sql.NullTime: + if !typedVal.Valid { + return nil, nil + } + return timeTypeValueToString(typedVal.Time, tsmode) + case sql.NullBool: + if !typedVal.Valid { + return nil, nil } + s := strconv.FormatBool(typedVal.Bool) + return &s, nil + case sql.NullInt64: + if !typedVal.Valid { + return nil, nil + } + s := strconv.FormatInt(typedVal.Int64, 10) + return &s, nil + case sql.NullFloat64: + if !typedVal.Valid { + return nil, nil + } + s := strconv.FormatFloat(typedVal.Float64, 'g', -1, 32) + return &s, nil + case sql.NullString: + if !typedVal.Valid { + return nil, nil + } + return &typedVal.String, nil } } return nil, fmt.Errorf("unsupported type: %v", v1.Kind()) } +func timeTypeValueToString(tm time.Time, tsmode snowflakeType) (*string, error) { + switch tsmode { + case dateType: + _, offset := tm.Zone() + tm = tm.Add(time.Second * time.Duration(offset)) + s := strconv.FormatInt(tm.Unix()*1000, 10) + return &s, nil + case timeType: + s := fmt.Sprintf("%d", + (tm.Hour()*3600+tm.Minute()*60+tm.Second())*1e9+tm.Nanosecond()) + return &s, nil + case timestampNtzType, timestampLtzType: + unixTime, _ := new(big.Int).SetString(fmt.Sprintf("%d", tm.Unix()), 10) + m, _ := new(big.Int).SetString(strconv.FormatInt(1e9, 10), 10) + unixTime.Mul(unixTime, m) + tmNanos, _ := new(big.Int).SetString(fmt.Sprintf("%d", tm.Nanosecond()), 10) + s := unixTime.Add(unixTime, tmNanos).String() + return &s, nil + case timestampTzType: + _, offset := tm.Zone() + s := fmt.Sprintf("%v %v", tm.UnixNano(), offset/60+1440) + return &s, nil + } + return nil, fmt.Errorf("unsupported time type: %v", tsmode) +} + // extractTimestamp extracts the internal timestamp data to epoch time in seconds and milliseconds func extractTimestamp(srcValue *string) (sec int64, nsec int64, err error) { logger.Debugf("SRC: %v", srcValue) @@ -319,6 +354,79 @@ func 
decimalToBigFloat(num decimal128.Num, scale int64) *big.Float { return new(big.Float).Quo(f, s) } +// ArrowSnowflakeTimestampToTime converts original timestamp returned by Snowflake to time.Time +func (rb *ArrowBatch) ArrowSnowflakeTimestampToTime(rec arrow.Record, colIdx int, recIdx int) *time.Time { + scale := int(rb.scd.RowSet.RowType[colIdx].Scale) + dbType := rb.scd.RowSet.RowType[colIdx].Type + return arrowSnowflakeTimestampToTime(rec.Column(colIdx), getSnowflakeType(dbType), scale, recIdx, rb.loc) +} + +func arrowSnowflakeTimestampToTime( + column arrow.Array, + sfType snowflakeType, + scale int, + recIdx int, + loc *time.Location) *time.Time { + + if column.IsNull(recIdx) { + return nil + } + var ret time.Time + switch sfType { + case timestampNtzType: + if column.DataType().ID() == arrow.STRUCT { + structData := column.(*array.Struct) + epoch := structData.Field(0).(*array.Int64).Int64Values() + fraction := structData.Field(1).(*array.Int32).Int32Values() + ret = time.Unix(epoch[recIdx], int64(fraction[recIdx])).UTC() + } else { + intData := column.(*array.Int64) + value := intData.Value(recIdx) + epoch := extractEpoch(value, scale) + fraction := extractFraction(value, scale) + ret = time.Unix(epoch, fraction).UTC() + } + case timestampLtzType: + if column.DataType().ID() == arrow.STRUCT { + structData := column.(*array.Struct) + epoch := structData.Field(0).(*array.Int64).Int64Values() + fraction := structData.Field(1).(*array.Int32).Int32Values() + ret = time.Unix(epoch[recIdx], int64(fraction[recIdx])).In(loc) + } else { + intData := column.(*array.Int64) + value := intData.Value(recIdx) + epoch := extractEpoch(value, scale) + fraction := extractFraction(value, scale) + ret = time.Unix(epoch, fraction).In(loc) + } + case timestampTzType: + structData := column.(*array.Struct) + if structData.NumField() == 2 { + value := structData.Field(0).(*array.Int64).Int64Values() + timezone := structData.Field(1).(*array.Int32).Int32Values() + epoch := extractEpoch(value[recIdx], scale) + fraction := extractFraction(value[recIdx], scale) + locTz := Location(int(timezone[recIdx]) - 1440) + ret = time.Unix(epoch, fraction).In(locTz) + } else { + epoch := structData.Field(0).(*array.Int64).Int64Values() + fraction := structData.Field(1).(*array.Int32).Int32Values() + timezone := structData.Field(2).(*array.Int32).Int32Values() + locTz := Location(int(timezone[recIdx]) - 1440) + ret = time.Unix(epoch[recIdx], int64(fraction[recIdx])).In(locTz) + } + } + return &ret +} + +func extractEpoch(value int64, scale int) int64 { + return value / int64(math.Pow10(scale)) +} + +func extractFraction(value int64, scale int) int64 { + return (value % int64(math.Pow10(scale))) * int64(math.Pow10(9-scale)) +} + // Arrow Interface (Column) converter. This is called when Arrow chunks are // downloaded to convert to the corresponding row type. func arrowToValue( @@ -334,7 +442,8 @@ func arrowToValue( } logger.Debugf("snowflake data type: %v, arrow data type: %v", srcColumnMeta.Type, srcValue.DataType()) - switch getSnowflakeType(strings.ToUpper(srcColumnMeta.Type)) { + snowflakeType := getSnowflakeType(srcColumnMeta.Type) + switch snowflakeType { case fixedType: // Snowflake data types that are fixed-point numbers will fall into this category // e.g. 
NUMBER, DECIMAL/NUMERIC, INT/INTEGER @@ -493,69 +602,11 @@ func arrowToValue( } } return err - case timestampNtzType: - if srcValue.DataType().ID() == arrow.STRUCT { - structData := srcValue.(*array.Struct) - epoch := structData.Field(0).(*array.Int64).Int64Values() - fraction := structData.Field(1).(*array.Int32).Int32Values() - for i := range destcol { - if !srcValue.IsNull(i) { - destcol[i] = time.Unix(epoch[i], int64(fraction[i])).UTC() - } - } - } else { - for i, t := range srcValue.(*array.Int64).Int64Values() { - if !srcValue.IsNull(i) { - scale := int(srcColumnMeta.Scale) - epoch := t / int64(math.Pow10(scale)) - fraction := (t % int64(math.Pow10(scale))) * int64(math.Pow10(9-scale)) - destcol[i] = time.Unix(epoch, fraction).UTC() - } - } - } - return err - case timestampLtzType: - if srcValue.DataType().ID() == arrow.STRUCT { - structData := srcValue.(*array.Struct) - epoch := structData.Field(0).(*array.Int64).Int64Values() - fraction := structData.Field(1).(*array.Int32).Int32Values() - for i := range destcol { - if !srcValue.IsNull(i) { - destcol[i] = time.Unix(epoch[i], int64(fraction[i])).In(loc) - } - } - } else { - for i, t := range srcValue.(*array.Int64).Int64Values() { - if !srcValue.IsNull(i) { - q := t / int64(math.Pow10(int(srcColumnMeta.Scale))) - r := t % int64(math.Pow10(int(srcColumnMeta.Scale))) - destcol[i] = time.Unix(q, r).In(loc) - } - } - } - return err - case timestampTzType: - structData := srcValue.(*array.Struct) - if structData.NumField() == 2 { - epoch := structData.Field(0).(*array.Int64).Int64Values() - timezone := structData.Field(1).(*array.Int32).Int32Values() - for i := range destcol { - if !srcValue.IsNull(i) { - loc := Location(int(timezone[i]) - 1440) - tt := time.Unix(epoch[i], 0) - destcol[i] = tt.In(loc) - } - } - } else { - epoch := structData.Field(0).(*array.Int64).Int64Values() - fraction := structData.Field(1).(*array.Int32).Int32Values() - timezone := structData.Field(2).(*array.Int32).Int32Values() - for i := range destcol { - if !srcValue.IsNull(i) { - loc := Location(int(timezone[i]) - 1440) - tt := time.Unix(epoch[i], int64(fraction[i])) - destcol[i] = tt.In(loc) - } + case timestampNtzType, timestampLtzType, timestampTzType: + for i := range destcol { + var ts = arrowSnowflakeTimestampToTime(srcValue, snowflakeType, int(srcColumnMeta.Scale), i, loc) + if ts != nil { + destcol[i] = *ts } } return err @@ -917,22 +968,34 @@ func higherPrecisionEnabled(ctx context.Context) bool { return ok && d } -func arrowToRecord(record arrow.Record, pool memory.Allocator, rowType []execResponseRowType, loc *time.Location) (arrow.Record, error) { - s, err := recordToSchema(record.Schema(), rowType, loc) +func originalTimestampEnabled(ctx context.Context) bool { + v := ctx.Value(enableOriginalTimestamp) + if v == nil { + return false + } + d, ok := v.(bool) + return ok && d +} + +func arrowToRecord(ctx context.Context, record arrow.Record, pool memory.Allocator, rowType []execResponseRowType, loc *time.Location) (arrow.Record, error) { + useOriginalTimestamp := originalTimestampEnabled(ctx) + + s, err := recordToSchema(record.Schema(), rowType, loc, useOriginalTimestamp) if err != nil { return nil, err } var cols []arrow.Array numRows := record.NumRows() - ctx := compute.WithAllocator(context.Background(), pool) + ctxAlloc := compute.WithAllocator(ctx, pool) for i, col := range record.Columns() { srcColumnMeta := rowType[i] // TODO: confirm that it is okay to be using higher precision logic for conversions newCol := col - switch 
getSnowflakeType(strings.ToUpper(srcColumnMeta.Type)) { + snowflakeType := getSnowflakeType(srcColumnMeta.Type) + switch snowflakeType { case fixedType: var toType arrow.DataType if col.DataType().ID() == arrow.DECIMAL || col.DataType().ID() == arrow.DECIMAL256 { @@ -943,13 +1006,13 @@ func arrowToRecord(record arrow.Record, pool memory.Allocator, rowType []execRes } // we're fine truncating so no error for data loss here. // so we use UnsafeCastOptions. - newCol, err = compute.CastArray(ctx, col, compute.UnsafeCastOptions(toType)) + newCol, err = compute.CastArray(ctxAlloc, col, compute.UnsafeCastOptions(toType)) if err != nil { return nil, err } defer newCol.Release() } else if srcColumnMeta.Scale != 0 { - result, err := compute.Divide(ctx, compute.ArithmeticOptions{NoCheckOverflow: true}, + result, err := compute.Divide(ctxAlloc, compute.ArithmeticOptions{NoCheckOverflow: true}, &compute.ArrayDatum{Value: newCol.Data()}, compute.NewDatum(math.Pow10(int(srcColumnMeta.Scale)))) if err != nil { @@ -960,108 +1023,51 @@ func arrowToRecord(record arrow.Record, pool memory.Allocator, rowType []execRes defer newCol.Release() } case timeType: - newCol, err = compute.CastArray(ctx, col, compute.SafeCastOptions(arrow.FixedWidthTypes.Time64ns)) + newCol, err = compute.CastArray(ctxAlloc, col, compute.SafeCastOptions(arrow.FixedWidthTypes.Time64ns)) if err != nil { return nil, err } defer newCol.Release() - case timestampNtzType: - tb := array.NewTimestampBuilder(pool, &arrow.TimestampType{Unit: arrow.Nanosecond}) - if col.DataType().ID() == arrow.STRUCT { - structData := col.(*array.Struct) - epoch := structData.Field(0).(*array.Int64).Int64Values() - fraction := structData.Field(1).(*array.Int32).Int32Values() - for i := 0; i < int(numRows); i++ { - if !col.IsNull(i) { - val := time.Unix(epoch[i], int64(fraction[i])) - tb.Append(arrow.Timestamp(val.UnixNano())) - } else { - tb.AppendNull() - } - } + case timestampNtzType, timestampLtzType, timestampTzType: + if useOriginalTimestamp { + // do nothing - return timestamp as is } else { - for i, t := range col.(*array.Timestamp).TimestampValues() { - if !col.IsNull(i) { - val := time.Unix(0, int64(t)*int64(math.Pow10(9-int(srcColumnMeta.Scale)))).UTC() - tb.Append(arrow.Timestamp(val.UnixNano())) - } else { - tb.AppendNull() - } - } - } - newCol = tb.NewArray() - defer newCol.Release() - tb.Release() - case timestampLtzType: - tb := array.NewTimestampBuilder(pool, &arrow.TimestampType{Unit: arrow.Nanosecond, TimeZone: loc.String()}) - if col.DataType().ID() == arrow.STRUCT { - structData := col.(*array.Struct) - epoch := structData.Field(0).(*array.Int64).Int64Values() - fraction := structData.Field(1).(*array.Int32).Int32Values() - for i := 0; i < int(numRows); i++ { - if !col.IsNull(i) { - val := time.Unix(epoch[i], int64(fraction[i])) - tb.Append(arrow.Timestamp(val.UnixNano())) - } else { - tb.AppendNull() - } - } - } else { - for i, t := range col.(*array.Timestamp).TimestampValues() { - if !col.IsNull(i) { - q := int64(t) / int64(math.Pow10(int(srcColumnMeta.Scale))) - r := int64(t) % int64(math.Pow10(int(srcColumnMeta.Scale))) - val := time.Unix(q, r) - tb.Append(arrow.Timestamp(val.UnixNano())) - } else { - tb.AppendNull() - } - } - } - newCol = tb.NewArray() - defer newCol.Release() - tb.Release() - case timestampTzType: - tb := array.NewTimestampBuilder(pool, &arrow.TimestampType{Unit: arrow.Nanosecond}) - structData := col.(*array.Struct) - if structData.NumField() == 2 { - epoch := structData.Field(0).(*array.Int64).Int64Values() - 
timezone := structData.Field(1).(*array.Int32).Int32Values() - for i := 0; i < int(numRows); i++ { - if !col.IsNull(i) { - loc := Location(int(timezone[i]) - 1440) - tt := time.Unix(epoch[i], 0) - val := tt.In(loc) - tb.Append(arrow.Timestamp(val.UnixNano())) - } else { - tb.AppendNull() - } + var tb *array.TimestampBuilder + if snowflakeType == timestampLtzType { + tb = array.NewTimestampBuilder(pool, &arrow.TimestampType{Unit: arrow.Nanosecond, TimeZone: loc.String()}) + } else { + tb = array.NewTimestampBuilder(pool, &arrow.TimestampType{Unit: arrow.Nanosecond}) } - } else { - epoch := structData.Field(0).(*array.Int64).Int64Values() - fraction := structData.Field(1).(*array.Int32).Int32Values() - timezone := structData.Field(2).(*array.Int32).Int32Values() + defer tb.Release() + for i := 0; i < int(numRows); i++ { - if !col.IsNull(i) { - loc := Location(int(timezone[i]) - 1440) - tt := time.Unix(epoch[i], int64(fraction[i])) - val := tt.In(loc) - tb.Append(arrow.Timestamp(val.UnixNano())) + ts := arrowSnowflakeTimestampToTime(col, snowflakeType, int(srcColumnMeta.Scale), i, loc) + if ts != nil { + ar := arrow.Timestamp(ts.UnixNano()) + // in case of overflow in arrow timestamp return error + if ts.Year() != ar.ToTime(arrow.Nanosecond).Year() { + return nil, &SnowflakeError{ + Number: ErrTooHighTimestampPrecision, + SQLState: SQLStateInvalidDataTimeFormat, + Message: fmt.Sprintf("Cannot convert timestamp %v in column %v to Arrow.Timestamp data type due to too high precision. Please use context with WithOriginalTimestamp.", ts.UTC(), srcColumnMeta.Name), + } + } + tb.Append(ar) } else { tb.AppendNull() } } + + newCol = tb.NewArray() + defer newCol.Release() } - newCol = tb.NewArray() - defer newCol.Release() - tb.Release() } cols = append(cols, newCol) } return array.NewRecord(s, cols, numRows), nil } -func recordToSchema(sc *arrow.Schema, rowType []execResponseRowType, loc *time.Location) (*arrow.Schema, error) { +func recordToSchema(sc *arrow.Schema, rowType []execResponseRowType, loc *time.Location, useOriginalTimestamp bool) (*arrow.Schema, error) { var fields []arrow.Field for i := 0; i < len(sc.Fields()); i++ { f := sc.Field(i) @@ -1069,7 +1075,7 @@ func recordToSchema(sc *arrow.Schema, rowType []execResponseRowType, loc *time.L converted := true var t arrow.DataType - switch getSnowflakeType(strings.ToUpper(srcColumnMeta.Type)) { + switch getSnowflakeType(srcColumnMeta.Type) { case fixedType: switch f.Type.ID() { case arrow.DECIMAL: @@ -1088,9 +1094,19 @@ func recordToSchema(sc *arrow.Schema, rowType []execResponseRowType, loc *time.L case timeType: t = &arrow.Time64Type{Unit: arrow.Nanosecond} case timestampNtzType, timestampTzType: - t = &arrow.TimestampType{Unit: arrow.Nanosecond} + if useOriginalTimestamp { + // do nothing - return timestamp as is + converted = false + } else { + t = &arrow.TimestampType{Unit: arrow.Nanosecond} + } case timestampLtzType: - t = &arrow.TimestampType{Unit: arrow.Nanosecond, TimeZone: loc.String()} + if useOriginalTimestamp { + // do nothing - return timestamp as is + converted = false + } else { + t = &arrow.TimestampType{Unit: arrow.Nanosecond, TimeZone: loc.String()} + } default: converted = false } @@ -1109,3 +1125,26 @@ func recordToSchema(sc *arrow.Schema, rowType []execResponseRowType, loc *time.L meta := sc.Metadata() return arrow.NewSchema(fields, &meta), nil } + +// TypedNullTime is required to properly bind the null value with the snowflakeType as the Snowflake functions +// require the type of the field to be provided explicitly for 
the null values +type TypedNullTime struct { + Time sql.NullTime + TzType timezoneType +} + +func convertTzTypeToSnowflakeType(tzType timezoneType) snowflakeType { + switch tzType { + case TimestampNTZType: + return timestampNtzType + case TimestampLTZType: + return timestampLtzType + case TimestampTZType: + return timestampTzType + case DateType: + return dateType + case TimeType: + return timeType + } + return unSupportedType +} diff --git a/vendor/github.com/snowflakedb/gosnowflake/data1.txt.gz b/vendor/github.com/snowflakedb/gosnowflake/data1.txt.gz new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/snowflakedb/gosnowflake/datatype.go b/vendor/github.com/snowflakedb/gosnowflake/datatype.go index 2c44bebd..db61a90e 100644 --- a/vendor/github.com/snowflakedb/gosnowflake/datatype.go +++ b/vendor/github.com/snowflakedb/gosnowflake/datatype.go @@ -7,6 +7,7 @@ import ( "database/sql" "database/sql/driver" "fmt" + "strings" ) type snowflakeType int @@ -32,27 +33,48 @@ const ( unSupportedType ) -var snowflakeTypes = [...]string{"FIXED", "REAL", "TEXT", "DATE", "VARIANT", - "TIMESTAMP_LTZ", "TIMESTAMP_NTZ", "TIMESTAMP_TZ", "OBJECT", "ARRAY", - "BINARY", "TIME", "BOOLEAN", "NULL", "SLICE", "CHANGE_TYPE", "NOT_SUPPORTED"} +var snowflakeToDriverType = map[string]snowflakeType{ + "FIXED": fixedType, + "REAL": realType, + "TEXT": textType, + "DATE": dateType, + "VARIANT": variantType, + "TIMESTAMP_LTZ": timestampLtzType, + "TIMESTAMP_NTZ": timestampNtzType, + "TIMESTAMP_TZ": timestampTzType, + "OBJECT": objectType, + "ARRAY": arrayType, + "BINARY": binaryType, + "TIME": timeType, + "BOOLEAN": booleanType, + "NULL": nullType, + "SLICE": sliceType, + "CHANGE_TYPE": changeType, + "NOT_SUPPORTED": unSupportedType} -func (st snowflakeType) String() string { - return snowflakeTypes[st] +var driverTypeToSnowflake = invertMap(snowflakeToDriverType) + +func invertMap(m map[string]snowflakeType) map[snowflakeType]string { + inv := make(map[snowflakeType]string) + for k, v := range m { + if _, ok := inv[v]; ok { + panic("failed to create driverTypeToSnowflake map due to duplicated values") + } + inv[v] = k + } + return inv } func (st snowflakeType) Byte() byte { return byte(st) } +func (st snowflakeType) String() string { + return driverTypeToSnowflake[st] +} + func getSnowflakeType(typ string) snowflakeType { - for i, sft := range snowflakeTypes { - if sft == typ { - return snowflakeType(i) - } else if snowflakeType(i) == nullType { - break - } - } - return nullType + return snowflakeToDriverType[strings.ToUpper(typ)] } var ( diff --git a/vendor/github.com/snowflakedb/gosnowflake/doc.go b/vendor/github.com/snowflakedb/gosnowflake/doc.go index 74a4211a..caffeeda 100644 --- a/vendor/github.com/snowflakedb/gosnowflake/doc.go +++ b/vendor/github.com/snowflakedb/gosnowflake/doc.go @@ -7,6 +7,8 @@ Clients can use the database/sql package directly. For example: "database/sql" _ "github.com/snowflakedb/gosnowflake" + + "log" ) func main() { @@ -118,6 +120,12 @@ The following connection parameters are supported: - tracing: Specifies the logging level to be used. Set to error by default. Valid values are trace, debug, info, print, warning, error, fatal, panic. + - disableQueryContextCache: disables parsing of the query context returned from the server and resending it back to the server. + Default value is false. + + - clientConfigFile: specifies the location of the client configuration json file. + In this file you can configure the Easy Logging feature.
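To make the two parameters above concrete, here is a minimal sketch of a connection string that sets them. The account, credentials and both paths are placeholders; the parameter names come from the parseDSNParams cases added later in this patch, and slashes in path values must be percent-encoded:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/snowflakedb/gosnowflake"
)

func main() {
	// All identifiers and paths below are placeholders.
	dsn := "user:password@myaccount/mydb/myschema" +
		"?clientConfigFile=%2Fetc%2Fsf%2Fclient_config.json" + // Easy Logging configuration file
		"&disableQueryContextCache=true" + // opt out of HTAP query context handling
		"&tmpDirPath=%2Fother%2Ftmp" // temporary directory for PUT/GET compression and encryption

	db, err := sql.Open("snowflake", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```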
+
All other parameters are interpreted as session parameters (https://docs.snowflake.com/en/sql-reference/parameters.html). For example, the TIMESTAMP_OUTPUT_FORMAT session parameter can be set by adding: @@ -159,6 +167,16 @@ Users can use SetLogger in driver.go to set a customized logger for gosnowflake In order to enable debug logging for the driver, user could use SetLogLevel("debug") in SFLogger interface as shown in demo code at cmd/logger.go. To redirect the logs SFlogger.SetOutput method could do the work. +# Query tag + +A custom query tag can be set in the context. Each query run with this context +will include the custom query tag as metadata that will appear in the Query Tag +column in the Query History log. For example: + + queryTag := "my custom query tag" + ctxWithQueryTag := WithQueryTag(ctx, queryTag) + rows, err := db.QueryContext(ctxWithQueryTag, query) + # Query request ID A specific query request ID can be set in the context and will be passed through @@ -168,6 +186,36 @@ in place of the default randomized request ID. For example: ctxWithID := WithRequestID(ctx, requestID) rows, err := db.QueryContext(ctxWithID, query) +# Last query ID + +If you need the query ID of your query, you have to use a raw connection. + +For queries: +``` + + err := conn.Raw(func(x any) error { + stmt, err := x.(driver.ConnPrepareContext).PrepareContext(ctx, "SELECT 1") + rows, err := stmt.(driver.StmtQueryContext).QueryContext(ctx, nil) + rows.(SnowflakeRows).GetQueryID() + stmt.(SnowflakeStmt).GetQueryID() + return nil + }) + +``` + +For execs: +``` + + err := conn.Raw(func(x any) error { + stmt, err := x.(driver.ConnPrepareContext).PrepareContext(ctx, "INSERT INTO TestStatementQueryIdForExecs VALUES (1)") + result, err := stmt.(driver.StmtExecContext).ExecContext(ctx, nil) + result.(SnowflakeResult).GetQueryID() + stmt.(SnowflakeStmt).GetQueryID() + return nil + }) + +``` + # Canceling Query by CtrlC From 0.5.0, a signal handling responsibility has moved to the applications. If you want to cancel a @@ -400,6 +448,15 @@ The “?“ inside the “VALUES“ clause specifies that the SQL statement uses Binding data that involves time zones can require special handling. For details, see the section titled "Timestamps with Time Zones". +Version 1.6.23 (and later) of the driver takes advantage of the sql.Null types, which enables the proper handling of null parameters inside function calls, e.g.: + + rows, err := db.Query("SELECT * FROM TABLE(SOMEFUNCTION(?))", sql.NullBool{}) + +Timestamp nullability has to be achieved by wrapping the sql.NullTime type, as Snowflake provides several date and time types +which are all mapped to the single Go time.Time type: + + rows, err := db.Query("SELECT * FROM TABLE(SOMEFUNCTION(?))", sf.TypedNullTime{sql.NullTime{}, sf.TimestampLTZType}) + # Binding Parameters to Array Variables Version 1.3.9 (and later) of the Go Snowflake Driver supports the ability to bind an array variable to a parameter in a SQL @@ -568,8 +625,8 @@ or using a Config structure specifying: config := &Config{ ... - Authenticator: "SNOWFLAKE_JWT" - PrivateKey: "" + Authenticator: AuthTypeJwt, + PrivateKey: "", } The private key should be a base64 URL encoded PKCS8 rsa private key string. One way to encode a byte slice to URL @@ -599,6 +656,34 @@ Note: As of February 2020, Golang's official library does not support passcode-e For security purposes, Snowflake highly recommends that you store the passcode-encrypted private key on the disk and decrypt the key in your application using a library you trust.
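As a worked example of the key-pair (JWT) authentication just described, the following sketch builds a Config programmatically rather than through a DSN string. It assumes an unencrypted PKCS8 RSA key on disk; the key path, account and user are placeholders, while Config, AuthTypeJwt and DSN are exported by this driver:

```go
package main

import (
	"crypto/rsa"
	"crypto/x509"
	"database/sql"
	"encoding/pem"
	"log"
	"os"
	"time"

	sf "github.com/snowflakedb/gosnowflake"
)

func main() {
	// Placeholder path; the key must be an unencrypted PKCS8 RSA key.
	raw, err := os.ReadFile("/path/to/rsa_key.p8")
	if err != nil {
		log.Fatal(err)
	}
	block, _ := pem.Decode(raw)
	if block == nil {
		log.Fatal("no PEM block found in key file")
	}
	parsed, err := x509.ParsePKCS8PrivateKey(block.Bytes)
	if err != nil {
		log.Fatal(err)
	}
	key, ok := parsed.(*rsa.PrivateKey)
	if !ok {
		log.Fatalf("expected *rsa.PrivateKey, got %T", parsed)
	}

	cfg := &sf.Config{
		Account:          "myaccount", // placeholder
		User:             "myuser",    // placeholder
		Authenticator:    sf.AuthTypeJwt,
		PrivateKey:       key,
		JWTExpireTimeout: 60 * time.Second, // lifetime of the JWT `exp` claim (jwtTimeout)
		JWTClientTimeout: 10 * time.Second, // per-attempt HTTP timeout (jwtClientTimeout)
	}
	dsn, err := sf.DSN(cfg)
	if err != nil {
		log.Fatal(err)
	}
	db, err := sql.Open("snowflake", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```

Serializing the Config through DSN() keeps the programmatic form and the connection-string form interchangeable; the retry behavior of the resulting login requests is described next.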
+JWT tokens are recreated on each retry and they are valid (`exp` claim) for `jwtTimeout` seconds. +Each retry timeout is configured by `jwtClientTimeout`. +Retries are limited by the total time of `loginTimeout`. + +# External browser authentication + +The driver allows authenticating via an external browser. + +When a connection is created, the driver will open a browser window and ask the user to sign in. + +To enable this feature, construct the DSN with the field "authenticator=EXTERNALBROWSER" or use a Config structure with the +following Authenticator specified: + + config := &Config{ + ... + Authenticator: AuthTypeExternalBrowser, + } + +The external browser authentication implements a timeout mechanism. This prevents the driver from hanging indefinitely when the +browser window is closed or not responding. + +The timeout defaults to 120s and can be changed by setting the DSN field "externalBrowserTimeout=240" (time in seconds) +or using a Config structure with the following ExternalBrowserTimeout specified: + + config := &Config{ + ExternalBrowserTimeout: 240 * time.Second, // Requires time.Duration + } + # Executing Multiple Statements in One Call This feature is available in version 1.3.8 or later of the driver. @@ -775,12 +860,12 @@ Because the example code above executes only one query and no other activity, th no significant difference in behavior between asynchronous and synchronous behavior. The differences become significant if, for example, you want to perform some other activity after the query starts and before it completes. The example code below starts -multiple queries, which run in the background, and then retrieves the results later. +a query, which runs in the background, and then retrieves the results later. This example uses small SELECT statements that do not retrieve enough data to require asynchronous handling. However, the technique works for larger data sets, and for situations where the programmer might want to do other work after starting the queries -and before retrieving the results. +and before retrieving the results. For a more elaborate example, please see cmd/async/async.go package gosnowflake ... func DemonstrateAsyncMode(db *sql.DB) { - // Enable asynchronous mode. - ctx := WithAsyncMode(context.Background()) - // Establish connection - conn, _ := db.Conn(ctx) - - // Unwrap connection - err = conn.Raw(func(x interface{}) error { - // Execute asynchronous query - rows, _ := x.(driver.QueryerContext).QueryContext(ctx, "select 1", nil) - defer rows.Close() - - // Retrieve and check results of the query after casting the result - status := rows.(SnowflakeResult).GetStatus() - if status == QueryStatusComplete { - // do something - } else if status == QueryStatusInProgress { - // do something - } else if status == QueryFailed { - // do something - } - return nil - }) + // Enable asynchronous mode + ctx := sf.WithAsyncMode(context.Background()) + + // Run the query with the asynchronous context + rows, err := db.QueryContext(ctx, "select 1") + if err != nil { + // handle error + } + + // do something as the workflow continues while the query is computing in the background + ... + + // Get the data when you are ready to handle it + var val int + rows.Next() + err = rows.Scan(&val) + if err != nil { + // handle error + } + + ...
} # Support For PUT and GET @@ -834,7 +917,7 @@ See the following for information on the syntax and supported parameters: - PUT: https://docs.snowflake.com/en/sql-reference/sql/put.html - GET: https://docs.snowflake.com/en/sql-reference/sql/get.html -# Using PUT +## Using PUT The following example shows how to run a PUT command by passing a string to the db.Query() function: @@ -854,7 +937,7 @@ both an escape character and as a separator in path names. To send information from a stream (rather than a file) use code similar to the code below. (The ReplaceAll() function is needed on Windows to handle backslashes in the path to the file.) - fileStream, _ := os.OpenFile(fname, os.O_RDONLY, os.ModePerm) + fileStream, _ := os.Open(fname) defer func() { if fileStream != nil { fileStream.Close() @@ -870,16 +953,30 @@ To send information from a stream (rather than a file) use code similar to the c Note: PUT statements are not supported for multi-statement queries. -# Using GET +## Using GET The following example shows how to run a GET command by passing a string to the db.Query() function: - db.Query("GET file:// ") "" should include the file path as well as the name. Snowflake recommends using an absolute path rather than a relative path. For example: - db.Query("GET file:///tmp/my_data_file @~ auto_compress=false overwrite=false") + db.Query("GET @~ file:///tmp/my_data_file auto_compress=false overwrite=false") + +## Specifying temporary directory for encryption and compression + +Putting and getting requires compression and/or encryption, which is done in the OS temporary directory. +If you cannot use the default temporary directory for your OS, or you want to specify it yourself, you can use the "tmpDirPath" DSN parameter. +Remember to encode slashes. +Example: + + u:p@a.r.c.snowflakecomputing.com/db/s?account=a.r.c&tmpDirPath=%2Fother%2Ftmp + +## Using custom configuration for PUT/GET + +If you want to override some default configuration options, you can use the `WithFileTransferOptions` context. +There are multiple config parameters, including progress bars and compression. */ package gosnowflake diff --git a/vendor/github.com/snowflakedb/gosnowflake/driver.go b/vendor/github.com/snowflakedb/gosnowflake/driver.go index c088e5d9..6a565be4 100644 --- a/vendor/github.com/snowflakedb/gosnowflake/driver.go +++ b/vendor/github.com/snowflakedb/gosnowflake/driver.go @@ -18,7 +18,7 @@ type SnowflakeDriver struct{} // Open creates a new connection. func (d SnowflakeDriver) Open(dsn string) (driver.Conn, error) { logger.Info("Open") - ctx := context.TODO() + ctx := context.Background() cfg, err := ParseDSN(dsn) if err != nil { return nil, err } @@ -27,10 +27,10 @@ func (d SnowflakeDriver) Open(dsn string) (driver.Conn, error) { } // OpenWithConfig creates a new connection with the given Config.
-func (d SnowflakeDriver) OpenWithConfig( - ctx context.Context, - config Config) ( - driver.Conn, error) { +func (d SnowflakeDriver) OpenWithConfig(ctx context.Context, config Config) (driver.Conn, error) { + if err := config.Validate(); err != nil { + return nil, err + } if config.Tracing != "" { logger.SetLogLevel(config.Tracing) } diff --git a/vendor/github.com/snowflakedb/gosnowflake/dsn.go b/vendor/github.com/snowflakedb/gosnowflake/dsn.go index ae4decd6..341e8e2b 100644 --- a/vendor/github.com/snowflakedb/gosnowflake/dsn.go +++ b/vendor/github.com/snowflakedb/gosnowflake/dsn.go @@ -4,22 +4,29 @@ package gosnowflake import ( "crypto/rsa" + "crypto/x509" "encoding/base64" + "encoding/pem" + "errors" "fmt" "net" "net/http" "net/url" + "os" "strconv" "strings" "time" ) const ( - defaultClientTimeout = 900 * time.Second // Timeout for network round trip + read out http response - defaultLoginTimeout = 60 * time.Second // Timeout for retry for login EXCLUDING clientTimeout - defaultRequestTimeout = 0 * time.Second // Timeout for retry for request EXCLUDING clientTimeout - defaultJWTTimeout = 60 * time.Second - defaultDomain = ".snowflakecomputing.com" + defaultClientTimeout = 900 * time.Second // Timeout for network round trip + read out http response + defaultJWTClientTimeout = 10 * time.Second // Timeout for network round trip + read out http response but used for JWT auth + defaultLoginTimeout = 300 * time.Second // Timeout for retry for login EXCLUDING clientTimeout + defaultRequestTimeout = 0 * time.Second // Timeout for retry for request EXCLUDING clientTimeout + defaultJWTTimeout = 60 * time.Second + defaultExternalBrowserTimeout = 120 * time.Second // Timeout for external browser login + defaultMaxRetryCount = 7 // specifies maximum number of subsequent retries + defaultDomain = ".snowflakecomputing.com" ) // ConfigBool is a type to represent true or false in the Config @@ -62,10 +69,13 @@ type Config struct { OktaURL *url.URL - LoginTimeout time.Duration // Login retry timeout EXCLUDING network roundtrip and read out http response - RequestTimeout time.Duration // request retry timeout EXCLUDING network roundtrip and read out http response - JWTExpireTimeout time.Duration // JWT expire after timeout - ClientTimeout time.Duration // Timeout for network round trip + read out http response + LoginTimeout time.Duration // Login retry timeout EXCLUDING network roundtrip and read out http response + RequestTimeout time.Duration // request retry timeout EXCLUDING network roundtrip and read out http response + JWTExpireTimeout time.Duration // JWT expire after timeout + ClientTimeout time.Duration // Timeout for network round trip + read out http response + JWTClientTimeout time.Duration // Timeout for network round trip + read out http response used when JWT token auth is taking place + ExternalBrowserTimeout time.Duration // Timeout for external browser login + MaxRetryCount int // Specifies how many times non-periodic HTTP request can be retried Application string // application name. InsecureMode bool // driver doesn't check certificate revocation status @@ -83,10 +93,29 @@ type Config struct { Tracing string // sets logging level + TmpDirPath string // sets temporary directory used by a driver for operations like encrypting, compressing etc + MfaToken string // Internally used to cache the MFA token IDToken string // Internally used to cache the Id Token for external browser ClientRequestMfaToken ConfigBool // When true the MFA token is cached in the credential manager. 
True by default in Windows/OSX. False for Linux. ClientStoreTemporaryCredential ConfigBool // When true the ID token is cached in the credential manager. True by default in Windows/OSX. False for Linux. + + DisableQueryContextCache bool // Should HTAP query context cache be disabled + + IncludeRetryReason ConfigBool // Should retried request contain retry reason + + ClientConfigFile string // File path to the client configuration json file +} + +// Validate enables testing if config is correct. +// A driver client may call it manually, but it is also called during opening first connection. +func (c *Config) Validate() error { + if c.TmpDirPath != "" { + if _, err := os.Stat(c.TmpDirPath); err != nil { + return err + } + } + return nil } // ocspMode returns the OCSP mode in string INSECURE, FAIL_OPEN, FAIL_CLOSED @@ -118,7 +147,7 @@ func DSN(cfg *Config) (dsn string, err error) { posDot := strings.Index(cfg.Account, ".") if posDot > 0 { if cfg.Region != "" { - return "", ErrInvalidRegion + return "", errInvalidRegion() } cfg.Region = cfg.Account[posDot+1:] cfg.Account = cfg.Account[:posDot] @@ -163,6 +192,9 @@ func DSN(cfg *Config) (dsn string, err error) { if cfg.ClientTimeout != defaultClientTimeout { params.Add("clientTimeout", strconv.FormatInt(int64(cfg.ClientTimeout/time.Second), 10)) } + if cfg.JWTClientTimeout != defaultJWTClientTimeout { + params.Add("jwtClientTimeout", strconv.FormatInt(int64(cfg.JWTClientTimeout/time.Second), 10)) + } if cfg.LoginTimeout != defaultLoginTimeout { params.Add("loginTimeout", strconv.FormatInt(int64(cfg.LoginTimeout/time.Second), 10)) } @@ -172,6 +204,12 @@ func DSN(cfg *Config) (dsn string, err error) { if cfg.JWTExpireTimeout != defaultJWTTimeout { params.Add("jwtTimeout", strconv.FormatInt(int64(cfg.JWTExpireTimeout/time.Second), 10)) } + if cfg.ExternalBrowserTimeout != defaultExternalBrowserTimeout { + params.Add("externalBrowserTimeout", strconv.FormatInt(int64(cfg.ExternalBrowserTimeout/time.Second), 10)) + } + if cfg.MaxRetryCount != defaultMaxRetryCount { + params.Add("maxRetryCount", strconv.Itoa(cfg.MaxRetryCount)) + } if cfg.Application != clientType { params.Add("application", cfg.Application) } @@ -200,6 +238,15 @@ func DSN(cfg *Config) (dsn string, err error) { if cfg.Tracing != "" { params.Add("tracing", cfg.Tracing) } + if cfg.TmpDirPath != "" { + params.Add("tmpDirPath", cfg.TmpDirPath) + } + if cfg.DisableQueryContextCache { + params.Add("disableQueryContextCache", "true") + } + if cfg.IncludeRetryReason == ConfigBoolFalse { + params.Add("includeRetryReason", "false") + } params.Add("ocspFailOpen", strconv.FormatBool(cfg.OCSPFailOpen != OCSPFailOpenFalse)) @@ -212,6 +259,9 @@ func DSN(cfg *Config) (dsn string, err error) { if cfg.ClientStoreTemporaryCredential != configBoolNotSet { params.Add("clientStoreTemporaryCredential", strconv.FormatBool(cfg.ClientStoreTemporaryCredential != ConfigBoolFalse)) } + if cfg.ClientConfigFile != "" { + params.Add("clientConfigFile", cfg.ClientConfigFile) + } dsn = fmt.Sprintf("%v:%v@%v:%v", url.QueryEscape(cfg.User), url.QueryEscape(cfg.Password), cfg.Host, cfg.Port) if params.Encode() != "" { @@ -373,20 +423,15 @@ func fillMissingConfigParameters(cfg *Config) error { } } if strings.Trim(cfg.Account, " ") == "" { - return ErrEmptyAccount + return errEmptyAccount() } - if cfg.Authenticator != AuthTypeOAuth && strings.Trim(cfg.User, " ") == "" { - // oauth does not require a username - return ErrEmptyUsername + if authRequiresUser(cfg) && strings.TrimSpace(cfg.User) == "" { + return 
errEmptyUsername() } - if cfg.Authenticator != AuthTypeExternalBrowser && - cfg.Authenticator != AuthTypeOAuth && - cfg.Authenticator != AuthTypeJwt && - strings.Trim(cfg.Password, " ") == "" { - // no password parameter is required for EXTERNALBROWSER, OAUTH or JWT. - return ErrEmptyPassword + if authRequiresPassword(cfg) && strings.TrimSpace(cfg.Password) == "" { + return errEmptyPassword() } if strings.Trim(cfg.Protocol, " ") == "" { cfg.Protocol = "https" @@ -425,6 +470,15 @@ func fillMissingConfigParameters(cfg *Config) error { if cfg.ClientTimeout == 0 { cfg.ClientTimeout = defaultClientTimeout } + if cfg.JWTClientTimeout == 0 { + cfg.JWTClientTimeout = defaultJWTClientTimeout + } + if cfg.ExternalBrowserTimeout == 0 { + cfg.ExternalBrowserTimeout = defaultExternalBrowserTimeout + } + if cfg.MaxRetryCount == 0 { + cfg.MaxRetryCount = defaultMaxRetryCount + } if strings.Trim(cfg.Application, " ") == "" { cfg.Application = clientType } @@ -437,6 +491,10 @@ func fillMissingConfigParameters(cfg *Config) error { cfg.ValidateDefaultParameters = ConfigBoolTrue } + if cfg.IncludeRetryReason == configBoolNotSet { + cfg.IncludeRetryReason = ConfigBoolTrue + } + if strings.HasSuffix(cfg.Host, defaultDomain) && len(cfg.Host) == len(defaultDomain) { return &SnowflakeError{ Number: ErrCodeFailedToParseHost, @@ -447,6 +505,19 @@ func fillMissingConfigParameters(cfg *Config) error { return nil } +func authRequiresUser(cfg *Config) bool { + return cfg.Authenticator != AuthTypeOAuth && + cfg.Authenticator != AuthTypeTokenAccessor && + cfg.Authenticator != AuthTypeExternalBrowser +} + +func authRequiresPassword(cfg *Config) bool { + return cfg.Authenticator != AuthTypeOAuth && + cfg.Authenticator != AuthTypeTokenAccessor && + cfg.Authenticator != AuthTypeExternalBrowser && + cfg.Authenticator != AuthTypeJwt +} + // transformAccountToHost transforms host to account name func transformAccountToHost(cfg *Config) (err error) { if cfg.Port == 0 && !strings.HasSuffix(cfg.Host, defaultDomain) && cfg.Host != "" { @@ -554,6 +625,11 @@ func parseDSNParams(cfg *Config, params string) (err error) { if err != nil { return } + case "jwtClientTimeout": + cfg.JWTClientTimeout, err = parseTimeout(value) + if err != nil { + return + } case "loginTimeout": cfg.LoginTimeout, err = parseTimeout(value) if err != nil { @@ -569,6 +645,16 @@ func parseDSNParams(cfg *Config, params string) (err error) { if err != nil { return err } + case "externalBrowserTimeout": + cfg.ExternalBrowserTimeout, err = parseTimeout(value) + if err != nil { + return err + } + case "maxRetryCount": + cfg.MaxRetryCount, err = strconv.Atoi(value) + if err != nil { + return err + } case "application": cfg.Application = value case "authenticator": @@ -646,6 +732,28 @@ func parseDSNParams(cfg *Config, params string) (err error) { } case "tracing": cfg.Tracing = value + case "tmpDirPath": + cfg.TmpDirPath = value + case "disableQueryContextCache": + var b bool + b, err = strconv.ParseBool(value) + if err != nil { + return + } + cfg.DisableQueryContextCache = b + case "includeRetryReason": + var vv bool + vv, err = strconv.ParseBool(value) + if err != nil { + return + } + if vv { + cfg.IncludeRetryReason = ConfigBoolTrue + } else { + cfg.IncludeRetryReason = ConfigBoolFalse + } + case "clientConfigFile": + cfg.ClientConfigFile = value default: if cfg.Params == nil { cfg.Params = make(map[string]*string) @@ -665,3 +773,105 @@ func parseTimeout(value string) (time.Duration, error) { } return time.Duration(vv * int64(time.Second)), nil } + +// ConfigParam is 
used to bind the name of the Config field with the environment variable and set the requirement for it +type ConfigParam struct { + Name string + EnvName string + FailOnMissing bool +} + +// GetConfigFromEnv is used to parse the environment variable values to specific fields of the Config +func GetConfigFromEnv(properties []*ConfigParam) (*Config, error) { + var account, user, password, role, host, portStr, protocol, warehouse, database, schema, region, passcode, application string + var privateKey *rsa.PrivateKey + var err error + if len(properties) == 0 || properties == nil { + return nil, errors.New("missing configuration parameters for the connection") + } + for _, prop := range properties { + value, err := GetFromEnv(prop.EnvName, prop.FailOnMissing) + if err != nil { + return nil, err + } + switch prop.Name { + case "Account": + account = value + case "User": + user = value + case "Password": + password = value + case "Role": + role = value + case "Host": + host = value + case "Port": + portStr = value + case "Protocol": + protocol = value + case "Warehouse": + warehouse = value + case "Database": + database = value + case "Region": + region = value + case "Passcode": + passcode = value + case "Schema": + schema = value + case "Application": + application = value + case "PrivateKey": + privateKey, err = parsePrivateKeyFromFile(value) + if err != nil { + return nil, err + } + } + } + + port := 443 // snowflake default port + if len(portStr) > 0 { + port, err = strconv.Atoi(portStr) + if err != nil { + return nil, err + } + } + + cfg := &Config{ + Account: account, + User: user, + Password: password, + Role: role, + Host: host, + Port: port, + Protocol: protocol, + Warehouse: warehouse, + Database: database, + Schema: schema, + PrivateKey: privateKey, + Region: region, + Passcode: passcode, + Application: application, + } + return cfg, nil +} + +func parsePrivateKeyFromFile(path string) (*rsa.PrivateKey, error) { + bytes, err := os.ReadFile(path) + if err != nil { + return nil, err + } + block, _ := pem.Decode(bytes) + if block == nil { + return nil, errors.New("failed to parse PEM block containing the private key") + } + privateKey, err := x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + return nil, err + } + pk, ok := privateKey.(*rsa.PrivateKey) + if !ok { + return nil, fmt.Errorf("interface convertion. 
expected type *rsa.PrivateKey, but got %T", privateKey) + } + return pk, nil +} diff --git a/vendor/github.com/snowflakedb/gosnowflake/easy_logging.go b/vendor/github.com/snowflakedb/gosnowflake/easy_logging.go new file mode 100644 index 00000000..a4d782dc --- /dev/null +++ b/vendor/github.com/snowflakedb/gosnowflake/easy_logging.go @@ -0,0 +1,154 @@ +package gosnowflake + +import ( + "errors" + "io" + "os" + "path" + "strings" +) + +type initTrials struct { + everTriedToInitialize bool + clientConfigFileInput string + configureCounter int +} + +var easyLoggingInitTrials = initTrials{ + everTriedToInitialize: false, + clientConfigFileInput: "", + configureCounter: 0, +} + +func (i *initTrials) setInitTrial(clientConfigFileInput string) { + i.everTriedToInitialize = true + i.clientConfigFileInput = clientConfigFileInput +} + +func (i *initTrials) increaseReconfigureCounter() { + i.configureCounter++ +} + +func (i *initTrials) reset() { + i.everTriedToInitialize = false + i.clientConfigFileInput = "" + i.configureCounter = 0 +} + +//lint:ignore U1000 Ignore unused function +func initEasyLogging(clientConfigFileInput string) error { + if !allowedToInitialize(clientConfigFileInput) { + return nil + } + config, err := getClientConfig(clientConfigFileInput) + if err != nil { + return easyLoggingInitError(err) + } + if config == nil { + easyLoggingInitTrials.setInitTrial(clientConfigFileInput) + return nil + } + var logLevel string + logLevel, err = getLogLevel(config.Common.LogLevel) + if err != nil { + return easyLoggingInitError(err) + } + var logPath string + logPath, err = getLogPath(config.Common.LogPath) + if err != nil { + return easyLoggingInitError(err) + } + err = reconfigureEasyLogging(logLevel, logPath) + easyLoggingInitTrials.setInitTrial(clientConfigFileInput) + easyLoggingInitTrials.increaseReconfigureCounter() + return err +} + +func easyLoggingInitError(err error) error { + return &SnowflakeError{ + Number: ErrCodeClientConfigFailed, + Message: errMsgClientConfigFailed, + MessageArgs: []interface{}{err.Error()}, + } +} + +func reconfigureEasyLogging(logLevel string, logPath string) error { + newLogger := CreateDefaultLogger() + err := newLogger.SetLogLevel(logLevel) + if err != nil { + return err + } + var output io.Writer + var file *os.File + output, file, err = createLogWriter(logPath) + if err != nil { + return err + } + newLogger.SetOutput(output) + err = newLogger.CloseFileOnLoggerReplace(file) + if err != nil { + logger.Errorf("%s", err) + } + logger.Replace(&newLogger) + return nil +} + +func createLogWriter(logPath string) (io.Writer, *os.File, error) { + if strings.EqualFold(logPath, "STDOUT") { + return os.Stdout, nil, nil + } + logFileName := path.Join(logPath, "snowflake.log") + file, err := os.OpenFile(logFileName, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0640) + if err != nil { + return nil, nil, err + } + return file, file, nil +} + +func allowedToInitialize(clientConfigFileInput string) bool { + triedToInitializeWithoutConfigFile := easyLoggingInitTrials.everTriedToInitialize && easyLoggingInitTrials.clientConfigFileInput == "" + isAllowedToInitialize := !easyLoggingInitTrials.everTriedToInitialize || (triedToInitializeWithoutConfigFile && clientConfigFileInput != "") + if !isAllowedToInitialize && easyLoggingInitTrials.clientConfigFileInput != clientConfigFileInput { + logger.Warnf("Easy logging will not be configured for CLIENT_CONFIG_FILE=%s because it was previously configured for a different client config", clientConfigFileInput) + } + return 
isAllowedToInitialize +} + +func getLogLevel(logLevel string) (string, error) { + if logLevel == "" { + logger.Warn("LogLevel in client config not found. Using default value: OFF") + return levelOff, nil + } + return toLogLevel(logLevel) +} + +func getLogPath(logPath string) (string, error) { + logPathOrDefault := logPath + if logPath == "" { + logPathOrDefault = os.TempDir() + logger.Warnf("LogPath in client config not found. Using temporary directory as a default value: %s", logPathOrDefault) + } + pathWithGoSubdir := path.Join(logPathOrDefault, "go") + exists, err := dirExists(pathWithGoSubdir) + if err != nil { + return "", err + } + if !exists { + err = os.MkdirAll(pathWithGoSubdir, 0755) + if err != nil { + return "", err + } + } + return pathWithGoSubdir, nil +} + +func dirExists(dirPath string) (bool, error) { + stat, err := os.Stat(dirPath) + if err == nil { + return stat.IsDir(), nil + } + if errors.Is(err, os.ErrNotExist) { + return false, nil + } + return false, err +} diff --git a/vendor/github.com/snowflakedb/gosnowflake/encrypt_util.go b/vendor/github.com/snowflakedb/gosnowflake/encrypt_util.go index cf4ca152..08179891 100644 --- a/vendor/github.com/snowflakedb/gosnowflake/encrypt_util.go +++ b/vendor/github.com/snowflakedb/gosnowflake/encrypt_util.go @@ -176,10 +176,13 @@ func encryptFile( if err != nil { return nil, "", err } - infile, err := os.OpenFile(filename, os.O_CREATE|os.O_RDONLY, os.ModePerm) + defer tmpOutputFile.Close() + infile, err := os.OpenFile(filename, os.O_CREATE|os.O_RDONLY, readWriteFileMode) if err != nil { return nil, "", err } + defer infile.Close() + meta, err := encryptStream(sfe, infile, tmpOutputFile, chunkSize) if err != nil { return nil, "", err @@ -232,7 +235,7 @@ func decryptFile( return "", err } defer tmpOutputFile.Close() - infile, err := os.OpenFile(filename, os.O_RDONLY, os.ModePerm) + infile, err := os.Open(filename) if err != nil { return "", err } diff --git a/vendor/github.com/snowflakedb/gosnowflake/errors.go b/vendor/github.com/snowflakedb/gosnowflake/errors.go index c49fc921..b41ad272 100644 --- a/vendor/github.com/snowflakedb/gosnowflake/errors.go +++ b/vendor/github.com/snowflakedb/gosnowflake/errors.go @@ -66,7 +66,7 @@ func (se *SnowflakeError) generateTelemetryExceptionData() *telemetryData { } func (se *SnowflakeError) sendExceptionTelemetry(sc *snowflakeConn, data *telemetryData) error { - if sc != nil { + if sc != nil && sc.telemetry != nil { return sc.telemetry.addLog(data) } return nil // TODO oob telemetry @@ -82,7 +82,7 @@ func (se *SnowflakeError) exceptionTelemetry(sc *snowflakeConn) *SnowflakeError // return populated error fields replacing the default response func populateErrorFields(code int, data *execResponse) *SnowflakeError { - err := ErrUnknownError + err := errUnknownError() if code != -1 { err.Number = code } @@ -125,6 +125,8 @@ const ( ErrCodePrivateKeyParseError = 260010 // ErrCodeFailedToParseAuthenticator is an error code for the case where a DNS includes an invalid authenticator ErrCodeFailedToParseAuthenticator = 260011 + // ErrCodeClientConfigFailed is an error code for the case where clientConfigFile is invalid or applying client configuration fails + ErrCodeClientConfigFailed = 260012 /* network */ @@ -216,6 +218,8 @@ const ( ErrInvalidOffsetStr = 268001 // ErrInvalidBinaryHexForm is an error code for the case where a binary data in hex form is invalid. 
ErrInvalidBinaryHexForm = 268002 + // ErrTooHighTimestampPrecision is an error code for the case where cannot convert Snowflake timestamp to arrow.Timestamp + ErrTooHighTimestampPrecision = 268003 /* OCSP */ @@ -288,34 +292,55 @@ const ( errMsgNoResultIDs = "no result IDs returned with the multi-statement query" errMsgQueryStatus = "server ErrorCode=%s, ErrorMessage=%s" errMsgInvalidPadding = "invalid padding on input" + errMsgClientConfigFailed = "client configuration failed: %v" ) -var ( - // ErrEmptyAccount is returned if a DNS doesn't include account parameter. - ErrEmptyAccount = &SnowflakeError{ +// Returned if a DNS doesn't include account parameter. +func errEmptyAccount() *SnowflakeError { + return &SnowflakeError{ Number: ErrCodeEmptyAccountCode, Message: "account is empty", } - // ErrEmptyUsername is returned if a DNS doesn't include user parameter. - ErrEmptyUsername = &SnowflakeError{ +} + +// Returned if a DNS doesn't include user parameter. +func errEmptyUsername() *SnowflakeError { + return &SnowflakeError{ Number: ErrCodeEmptyUsernameCode, Message: "user is empty", } - // ErrEmptyPassword is returned if a DNS doesn't include password parameter. - ErrEmptyPassword = &SnowflakeError{ +} + +// Returned if a DNS doesn't include password parameter. +func errEmptyPassword() *SnowflakeError { + return &SnowflakeError{ Number: ErrCodeEmptyPasswordCode, - Message: "password is empty"} + Message: "password is empty", + } +} - // ErrInvalidRegion is returned if a DSN's implicit region from account parameter and explicit region parameter conflict. - ErrInvalidRegion = &SnowflakeError{ +// Returned if a DSN's implicit region from account parameter and explicit region parameter conflict. +func errInvalidRegion() *SnowflakeError { + return &SnowflakeError{ Number: ErrCodeRegionOverlap, - Message: "two regions specified"} + Message: "two regions specified", + } +} + +// Returned if a DSN includes an invalid authenticator. +func errFailedToParseAuthenticator() *SnowflakeError { + return &SnowflakeError{ + Number: ErrCodeFailedToParseAuthenticator, + Message: "failed to parse an authenticator", + } +} - // ErrUnknownError is returned if the server side returns an error without meaningful message. - ErrUnknownError = &SnowflakeError{ +// Returned if the server side returns an error without meaningful message. 
+func errUnknownError() *SnowflakeError { + return &SnowflakeError{ Number: -1, SQLState: "-1", Message: "an unknown server side error occurred", QueryID: "-1", } -) +} diff --git a/vendor/github.com/snowflakedb/gosnowflake/file_transfer_agent.go b/vendor/github.com/snowflakedb/gosnowflake/file_transfer_agent.go index 7c0809e4..ae63f9a8 100644 --- a/vendor/github.com/snowflakedb/gosnowflake/file_transfer_agent.go +++ b/vendor/github.com/snowflakedb/gosnowflake/file_transfer_agent.go @@ -92,7 +92,6 @@ type SnowflakeFileTransferOptions struct { compressSourceFromStream bool /* PUT */ - DisablePutOverwrite bool putCallback *snowflakeProgressPercentage putAzureCallback *snowflakeProgressPercentage putCallbackOutputStream *io.Writer @@ -176,7 +175,7 @@ func (sfa *snowflakeFileTransferAgent) execute() error { if sfa.stageLocationType != local { sizeThreshold := sfa.options.MultiPartThreshold meta.options.MultiPartThreshold = sizeThreshold - if meta.srcFileSize > sizeThreshold { + if meta.srcFileSize > sizeThreshold && sfa.commandType == uploadCommand { meta.parallel = sfa.parallel largeFileMetas = append(largeFileMetas, meta) } else { @@ -194,7 +193,7 @@ func (sfa *snowflakeFileTransferAgent) execute() error { return err } } else { - if err = sfa.download(largeFileMetas, smallFileMetas); err != nil { + if err = sfa.download(smallFileMetas); err != nil { return err } } @@ -268,7 +267,7 @@ func (sfa *snowflakeFileTransferAgent) parseCommand() error { if sfa.data.Parallel != 0 { sfa.parallel = sfa.data.Parallel } - sfa.overwrite = !sfa.options.DisablePutOverwrite + sfa.overwrite = sfa.data.Overwrite sfa.stageLocationType = cloudType(strings.ToUpper(sfa.data.StageInfo.LocationType)) sfa.stageInfo = &sfa.data.StageInfo sfa.presignedURLs = make([]string, 0) @@ -697,31 +696,19 @@ func (sfa *snowflakeFileTransferAgent) upload( } func (sfa *snowflakeFileTransferAgent) download( - largeFileMetadata []*fileMetadata, - smallFileMetadata []*fileMetadata) error { + fileMetadata []*fileMetadata) error { client, err := sfa.getStorageClient(sfa.stageLocationType). 
createClient(sfa.stageInfo, sfa.useAccelerateEndpoint) if err != nil { return err } - for _, meta := range smallFileMetadata { - meta.client = client - } - for _, meta := range largeFileMetadata { + for _, meta := range fileMetadata { meta.client = client } - if len(smallFileMetadata) > 0 { - logger.WithContext(sfa.sc.ctx).Infof("downloading %v small files", len(smallFileMetadata)) - if err = sfa.downloadFilesParallel(smallFileMetadata); err != nil { - return err - } - } - if len(largeFileMetadata) > 0 { - logger.WithContext(sfa.sc.ctx).Infof("downloading %v large files", len(largeFileMetadata)) - if err = sfa.downloadFilesSequential(largeFileMetadata); err != nil { - return err - } + logger.WithContext(sfa.sc.ctx).Infof("downloading %v files", len(fileMetadata)) + if err = sfa.downloadFilesParallel(fileMetadata); err != nil { + return err } return nil } @@ -746,6 +733,23 @@ func (sfa *snowflakeFileTransferAgent) uploadFilesParallel(fileMetas []*fileMeta } wg.Wait() + // append errors with no result associated to separate array + var errorMessages []string + for i, result := range results { + if result == nil { + if errors[i] == nil { + errorMessages = append(errorMessages, "unknown error") + } else { + errorMessages = append(errorMessages, errors[i].Error()) + } + } + } + if errorMessages != nil { + // sort the error messages to be more deterministic as the goroutines may finish in different order each time + sort.Strings(errorMessages) + return fmt.Errorf("errors during file upload:\n%v", strings.Join(errorMessages, "\n")) + } + retryMeta := make([]*fileMetadata, 0) for i, result := range results { result.errorDetails = errors[i] @@ -831,7 +835,7 @@ func (sfa *snowflakeFileTransferAgent) uploadFilesSequential(fileMetas []*fileMe func (sfa *snowflakeFileTransferAgent) uploadOneFile(meta *fileMetadata) (*fileMetadata, error) { meta.realSrcFileName = meta.srcFileName - tmpDir, err := os.MkdirTemp("", "") + tmpDir, err := os.MkdirTemp(sfa.sc.cfg.TmpDirPath, "") if err != nil { return nil, err } @@ -945,40 +949,8 @@ func (sfa *snowflakeFileTransferAgent) downloadFilesParallel(fileMetas []*fileMe return err } -func (sfa *snowflakeFileTransferAgent) downloadFilesSequential(fileMetas []*fileMetadata) error { - idx := 0 - fileMetaLen := len(fileMetas) - for idx < fileMetaLen { - res, err := sfa.downloadOneFile(fileMetas[idx]) - if err != nil { - return err - } - - if res.resStatus == renewToken { - client, err := sfa.renewExpiredClient() - if err != nil { - return err - } - for i := idx; i < fileMetaLen; i++ { - fileMetas[i].client = client - } - continue - } else if res.resStatus == renewPresignedURL { - sfa.updateFileMetadataWithPresignedURL() - continue - } - - sfa.results = append(sfa.results, res) - idx++ - if injectWaitPut > 0 { - time.Sleep(injectWaitPut) - } - } - return nil -} - func (sfa *snowflakeFileTransferAgent) downloadOneFile(meta *fileMetadata) (*fileMetadata, error) { - tmpDir, err := os.MkdirTemp("", "") + tmpDir, err := os.MkdirTemp(sfa.sc.cfg.TmpDirPath, "") if err != nil { return nil, err } @@ -1183,21 +1155,21 @@ type snowflakeProgressPercentage struct { func (spp *snowflakeProgressPercentage) call(bytesAmount int64) { if spp.outputStream != nil { spp.seenSoFar += bytesAmount - percentage := percent(spp.seenSoFar, spp.fileSize) + percentage := spp.percent(spp.seenSoFar, spp.fileSize) if !spp.done { - spp.done = updateProgress(spp.filename, spp.startTime, spp.fileSize, percentage, spp.outputStream, spp.showProgressBar) + spp.done = spp.updateProgress(spp.filename, 
spp.startTime, spp.fileSize, percentage, spp.outputStream, spp.showProgressBar) } } } -func percent(seenSoFar int64, size float64) float64 { +func (spp *snowflakeProgressPercentage) percent(seenSoFar int64, size float64) float64 { if float64(seenSoFar) >= size || size <= 0 { return 1.0 } return float64(seenSoFar) / size } -func updateProgress(filename string, startTime time.Time, totalSize float64, progress float64, outputStream *io.Writer, showProgressBar bool) bool { +func (spp *snowflakeProgressPercentage) updateProgress(filename string, startTime time.Time, totalSize float64, progress float64, outputStream *io.Writer, showProgressBar bool) bool { barLength := 10 totalSize /= mb status := "" diff --git a/vendor/github.com/snowflakedb/gosnowflake/file_util.go b/vendor/github.com/snowflakedb/gosnowflake/file_util.go index 4a0b08dd..13740bb8 100644 --- a/vendor/github.com/snowflakedb/gosnowflake/file_util.go +++ b/vendor/github.com/snowflakedb/gosnowflake/file_util.go @@ -19,7 +19,8 @@ type snowflakeFileUtil struct { } const ( - fileChunkSize = 16 * 4 * 1024 + fileChunkSize = 16 * 4 * 1024 + readWriteFileMode os.FileMode = 0666 ) func (util *snowflakeFileUtil) compressFileWithGzipFromStream(srcStream **bytes.Buffer) (*bytes.Buffer, int, error) { @@ -39,12 +40,12 @@ func (util *snowflakeFileUtil) compressFileWithGzip(fileName string, tmpDir stri basename := baseName(fileName) gzipFileName := filepath.Join(tmpDir, basename+"_c.gz") - fr, err := os.OpenFile(fileName, os.O_RDONLY, os.ModePerm) + fr, err := os.Open(fileName) if err != nil { return "", -1, err } defer fr.Close() - fw, err := os.OpenFile(gzipFileName, os.O_WRONLY|os.O_CREATE, os.ModePerm) + fw, err := os.OpenFile(gzipFileName, os.O_WRONLY|os.O_CREATE, readWriteFileMode) if err != nil { return "", -1, err } diff --git a/vendor/github.com/snowflakedb/gosnowflake/gcs_storage_client.go b/vendor/github.com/snowflakedb/gosnowflake/gcs_storage_client.go index dbd674bc..b52f0e32 100644 --- a/vendor/github.com/snowflakedb/gosnowflake/gcs_storage_client.go +++ b/vendor/github.com/snowflakedb/gosnowflake/gcs_storage_client.go @@ -34,7 +34,7 @@ func (util *snowflakeGcsClient) createClient(info *execResponseStageInfo, _ bool logger.Debug("Using GCS downscoped token") return info.Creds.GcsAccessToken, nil } - logger.Debug("No access token received from GS, using presigned url") + logger.Debugf("No access token received from GS, using presigned url: %s", info.PresignedURL) return "", nil } @@ -54,7 +54,10 @@ func (util *snowflakeGcsClient) getFileHeader(meta *fileMetadata, filename strin if err != nil { return nil, err } - accessToken := meta.client.(string) + accessToken, ok := meta.client.(string) + if !ok { + return nil, fmt.Errorf("interface conversion. expected type string but got %T", meta.client) + } gcsHeaders := map[string]string{ "Authorization": "Bearer " + accessToken, } @@ -66,8 +69,7 @@ func (util *snowflakeGcsClient) getFileHeader(meta *fileMetadata, filename strin for k, v := range gcsHeaders { req.Header.Add(k, v) } - var client gcsAPI - client = &http.Client{} + client := newGcsClient() // for testing only if meta.mockGcsClient != nil { client = meta.mockGcsClient } @@ -145,7 +147,11 @@ func (util *snowflakeGcsClient) uploadFile( if err != nil { return err } - accessToken = meta.client.(string) + var ok bool + accessToken, ok = meta.client.(string) + if !ok { + return fmt.Errorf("interface conversion. expected type string but got %T", meta.client) + } } var contentEncoding string @@ -196,7 +202,7 @@ func (util *snowflakeGcsClient) uploadFile( uploadSrc = meta.realSrcStream } } else { - uploadSrc, err = os.OpenFile(dataFile, os.O_RDONLY, os.ModePerm) + uploadSrc, err = os.Open(dataFile) if err != nil { return err } @@ -209,8 +215,7 @@ func (util *snowflakeGcsClient) uploadFile( for k, v := range gcsHeaders { req.Header.Add(k, v) } - var client gcsAPI - client = &http.Client{} + client := newGcsClient() // for testing only if meta.mockGcsClient != nil { client = meta.mockGcsClient } @@ -271,7 +276,11 @@ func (util *snowflakeGcsClient) nativeDownloadFile( if err != nil { return err } - accessToken = meta.client.(string) + var ok bool + accessToken, ok = meta.client.(string) + if !ok { + return fmt.Errorf("interface conversion. expected type string but got %T", meta.client) + } if accessToken != "" { gcsHeaders["Authorization"] = "Bearer " + accessToken } @@ -284,8 +293,7 @@ func (util *snowflakeGcsClient) nativeDownloadFile( for k, v := range gcsHeaders { req.Header.Add(k, v) } - var client gcsAPI - client = &http.Client{} + client := newGcsClient() // for testing only if meta.mockGcsClient != nil { client = meta.mockGcsClient } @@ -314,7 +322,7 @@ func (util *snowflakeGcsClient) nativeDownloadFile( return meta.lastError } - f, err := os.OpenFile(fullDstFileName, os.O_CREATE|os.O_WRONLY, os.ModePerm) + f, err := os.OpenFile(fullDstFileName, os.O_CREATE|os.O_WRONLY, readWriteFileMode) if err != nil { return err } @@ -379,3 +387,9 @@ func (util *snowflakeGcsClient) generateFileURL(stageLocation string, filename s func (util *snowflakeGcsClient) isTokenExpired(resp *http.Response) bool { return resp.StatusCode == 401 } + +func newGcsClient() gcsAPI { + return &http.Client{ + Transport: SnowflakeTransport, + } +} diff --git a/vendor/github.com/snowflakedb/gosnowflake/heartbeat.go b/vendor/github.com/snowflakedb/gosnowflake/heartbeat.go index 8f9b020d..4b40ef60 100644 --- a/vendor/github.com/snowflakedb/gosnowflake/heartbeat.go +++ b/vendor/github.com/snowflakedb/gosnowflake/heartbeat.go @@ -62,7 +62,7 @@ func (hc *heartbeat) heartbeatMain() error { fullURL := hc.restful.getFullURL(heartBeatPath, params) timeout := hc.restful.RequestTimeout - resp, err := hc.restful.FuncPost(context.Background(), hc.restful, fullURL, headers, nil, timeout, false) + resp, err := hc.restful.FuncPost(context.Background(), hc.restful, fullURL, headers, nil, timeout, defaultTimeProvider, nil) if err != nil { return err } diff --git a/vendor/github.com/snowflakedb/gosnowflake/htap.go b/vendor/github.com/snowflakedb/gosnowflake/htap.go new file mode 100644 index 00000000..3d1e7c70 --- /dev/null +++ b/vendor/github.com/snowflakedb/gosnowflake/htap.go @@ -0,0 +1,94 @@ +package gosnowflake + +import ( + "sort" + "strconv" + "sync" +) + +const ( + queryContextCacheSizeParamName = "QUERY_CONTEXT_CACHE_SIZE" + defaultQueryContextCacheSize = 5 +) + +type queryContext struct { + Entries []queryContextEntry `json:"entries,omitempty"` +} + +type queryContextEntry struct { + ID int `json:"id"` + Timestamp int64 `json:"timestamp"` + Priority int `json:"priority"` + Context string `json:"context,omitempty"` +} + +type queryContextCache struct { + mutex *sync.Mutex + entries []queryContextEntry +} + +func (qcc *queryContextCache) init() *queryContextCache { + qcc.mutex = &sync.Mutex{} + return qcc +} + +func (qcc *queryContextCache) add(sc *snowflakeConn, qces ...queryContextEntry) { + qcc.mutex.Lock() + defer qcc.mutex.Unlock() + if
len(qces) == 0 { + qcc.prune(0) + } else { + for _, newQce := range qces { + logger.Debugf("adding query context: %v", newQce) + newQceProcessed := false + for existingQceIdx, existingQce := range qcc.entries { + if newQce.ID == existingQce.ID { + newQceProcessed = true + if newQce.Timestamp > existingQce.Timestamp { + qcc.entries[existingQceIdx] = newQce + } else if newQce.Timestamp == existingQce.Timestamp { + if newQce.Priority != existingQce.Priority { + qcc.entries[existingQceIdx] = newQce + } + } + } + } + if !newQceProcessed { + for existingQceIdx, existingQce := range qcc.entries { + if newQce.Priority == existingQce.Priority { + qcc.entries[existingQceIdx] = newQce + newQceProcessed = true + } + } + } + if !newQceProcessed { + qcc.entries = append(qcc.entries, newQce) + } + } + sort.Slice(qcc.entries, func(idx1, idx2 int) bool { + return qcc.entries[idx1].Priority < qcc.entries[idx2].Priority + }) + qcc.prune(qcc.getQueryContextCacheSize(sc)) + } +} + +func (qcc *queryContextCache) prune(size int) { + if len(qcc.entries) > size { + qcc.entries = qcc.entries[0:size] + } +} + +func (qcc *queryContextCache) getQueryContextCacheSize(sc *snowflakeConn) int { + paramsMutex.Lock() + sizeStr, ok := sc.cfg.Params[queryContextCacheSizeParamName] + paramsMutex.Unlock() + if ok { + size, err := strconv.Atoi(*sizeStr) + if err != nil { + logger.Warnf("cannot parse %v as int for query context cache size: %v", sizeStr, err) + } else { + return size + } + } + return defaultQueryContextCacheSize +} diff --git a/vendor/github.com/snowflakedb/gosnowflake/local_storage_client.go b/vendor/github.com/snowflakedb/gosnowflake/local_storage_client.go index 883b8e3f..2ae072b6 100644 --- a/vendor/github.com/snowflakedb/gosnowflake/local_storage_client.go +++ b/vendor/github.com/snowflakedb/gosnowflake/local_storage_client.go @@ -47,7 +47,7 @@ func (util *localUtil) uploadOneFileWithRetry(meta *fileMetadata) error { return nil } } - output, err := os.OpenFile(filepath.Join(user, meta.dstFileName), os.O_CREATE|os.O_WRONLY, os.ModePerm) + output, err := os.OpenFile(filepath.Join(user, meta.dstFileName), os.O_CREATE|os.O_WRONLY, readWriteFileMode) if err != nil { return err } @@ -100,7 +100,7 @@ func (util *localUtil) downloadOneFile(meta *fileMetadata) error { if err != nil { return err } - if err = os.WriteFile(fullDstFileName, data, os.ModePerm); err != nil { + if err = os.WriteFile(fullDstFileName, data, readWriteFileMode); err != nil { return err } fi, err := os.Stat(fullDstFileName) diff --git a/vendor/github.com/snowflakedb/gosnowflake/location.go b/vendor/github.com/snowflakedb/gosnowflake/location.go index 15575799..6209f318 100644 --- a/vendor/github.com/snowflakedb/gosnowflake/location.go +++ b/vendor/github.com/snowflakedb/gosnowflake/location.go @@ -95,7 +95,7 @@ func getCurrentLocation(params map[string]*string) *time.Location { loc := time.Now().Location() var err error paramsMutex.Lock() - if tz, ok := params["timezone"]; ok { + if tz, ok := params["timezone"]; ok && tz != nil { loc, err = time.LoadLocation(*tz) if err != nil { loc = time.Now().Location() diff --git a/vendor/github.com/snowflakedb/gosnowflake/log.go b/vendor/github.com/snowflakedb/gosnowflake/log.go index 87fb6986..b48294cb 100644 --- a/vendor/github.com/snowflakedb/gosnowflake/log.go +++ b/vendor/github.com/snowflakedb/gosnowflake/log.go @@ -7,8 +7,10 @@ import ( "fmt" rlog "github.com/sirupsen/logrus" "io" + "os" "path" "runtime" + "strings" "time" ) @@ -25,8 +27,11 @@ var LogKeys = [...]contextKey{SFSessionIDKey,
SFSessionUserKey} type SFLogger interface { rlog.Ext1FieldLogger SetLogLevel(level string) error + GetLogLevel() string WithContext(ctx context.Context) *rlog.Entry SetOutput(output io.Writer) + CloseFileOnLoggerReplace(file *os.File) error + Replace(newLogger *SFLogger) } // SFCallerPrettyfier to provide base file name and function name from calling frame used in SFLogger @@ -35,19 +40,57 @@ func SFCallerPrettyfier(frame *runtime.Frame) (string, string) { } type defaultLogger struct { - inner *rlog.Logger + inner *rlog.Logger + enabled bool + file *os.File } // SetLogLevel set logging level for calling defaultLogger func (log *defaultLogger) SetLogLevel(level string) error { - actualLevel, err := rlog.ParseLevel(level) - if err != nil { - return err + newEnabled := strings.ToUpper(level) != "OFF" + log.enabled = newEnabled + if newEnabled { + actualLevel, err := rlog.ParseLevel(level) + if err != nil { + return err + } + log.inner.SetLevel(actualLevel) } - log.inner.SetLevel(actualLevel) return nil } +// GetLogLevel returns the current log level +func (log *defaultLogger) GetLogLevel() string { + if !log.enabled { + return "OFF" + } + return log.inner.GetLevel().String() +} + +// CloseFileOnLoggerReplace sets a file to be closed when releasing resources occupied by the logger +func (log *defaultLogger) CloseFileOnLoggerReplace(file *os.File) error { + if log.file != nil && log.file != file { + return fmt.Errorf("could not set a file to close on logger reset because one was already set") + } + log.file = file + return nil +} + +// Replace substitutes the logger with a given one +func (log *defaultLogger) Replace(newLogger *SFLogger) { + SetLogger(newLogger) + closeLogFile(log.file) +} + +func closeLogFile(file *os.File) { + if file != nil { + err := file.Close() + if err != nil { + logger.Errorf("failed to close log file: %s", err) + } + } +} + // WithContext return Entry to include fields in context func (log *defaultLogger) WithContext(ctx context.Context) *rlog.Entry { fields := context2Fields(ctx) @@ -60,7 +103,7 @@ func CreateDefaultLogger() SFLogger { var formatter = rlog.TextFormatter{CallerPrettyfier: SFCallerPrettyfier} rLogger.SetReportCaller(true) rLogger.SetFormatter(&formatter) - var ret = defaultLogger{inner: rLogger} + var ret = defaultLogger{inner: rLogger, enabled: true} return &ret //(&ret).(*SFLogger) } @@ -95,39 +138,57 @@ func (log *defaultLogger) Logf(level rlog.Level, format string, args ...interfac } func (log *defaultLogger) Tracef(format string, args ...interface{}) { - log.inner.Tracef(format, args...) + if log.enabled { + log.inner.Tracef(format, args...) + } } func (log *defaultLogger) Debugf(format string, args ...interface{}) { - log.inner.Debugf(format, args...) + if log.enabled { + log.inner.Debugf(format, args...) + } } func (log *defaultLogger) Infof(format string, args ...interface{}) { - log.inner.Infof(format, args...) + if log.enabled { + log.inner.Infof(format, args...) + } } func (log *defaultLogger) Printf(format string, args ...interface{}) { - log.inner.Printf(format, args...) + if log.enabled { + log.inner.Printf(format, args...) + } } func (log *defaultLogger) Warnf(format string, args ...interface{}) { - log.inner.Warnf(format, args...) + if log.enabled { + log.inner.Warnf(format, args...) + } } func (log *defaultLogger) Warningf(format string, args ...interface{}) { - log.inner.Warningf(format, args...) + if log.enabled { + log.inner.Warningf(format, args...)
+ } } func (log *defaultLogger) Errorf(format string, args ...interface{}) { - log.inner.Errorf(format, args...) + if log.enabled { + log.inner.Errorf(format, args...) + } } func (log *defaultLogger) Fatalf(format string, args ...interface{}) { - log.inner.Fatalf(format, args...) + if log.enabled { + log.inner.Fatalf(format, args...) + } } func (log *defaultLogger) Panicf(format string, args ...interface{}) { - log.inner.Panicf(format, args...) + if log.enabled { + log.inner.Panicf(format, args...) + } } func (log *defaultLogger) Log(level rlog.Level, args ...interface{}) { @@ -139,75 +200,111 @@ func (log *defaultLogger) LogFn(level rlog.Level, fn rlog.LogFunction) { } func (log *defaultLogger) Trace(args ...interface{}) { - log.inner.Trace(args...) + if log.enabled { + log.inner.Trace(args...) + } } func (log *defaultLogger) Debug(args ...interface{}) { - log.inner.Debug(args...) + if log.enabled { + log.inner.Debug(args...) + } } func (log *defaultLogger) Info(args ...interface{}) { - log.inner.Info(args...) + if log.enabled { + log.inner.Info(args...) + } } func (log *defaultLogger) Print(args ...interface{}) { - log.inner.Print(args...) + if log.enabled { + log.inner.Print(args...) + } } func (log *defaultLogger) Warn(args ...interface{}) { - log.inner.Warn(args...) + if log.enabled { + log.inner.Warn(args...) + } } func (log *defaultLogger) Warning(args ...interface{}) { - log.inner.Warning(args...) + if log.enabled { + log.inner.Warning(args...) + } } func (log *defaultLogger) Error(args ...interface{}) { - log.inner.Error(args...) + if log.enabled { + log.inner.Error(args...) + } } func (log *defaultLogger) Fatal(args ...interface{}) { - log.inner.Fatal(args...) + if log.enabled { + log.inner.Fatal(args...) + } } func (log *defaultLogger) Panic(args ...interface{}) { - log.inner.Panic(args...) + if log.enabled { + log.inner.Panic(args...) + } } func (log *defaultLogger) TraceFn(fn rlog.LogFunction) { - log.inner.TraceFn(fn) + if log.enabled { + log.inner.TraceFn(fn) + } } func (log *defaultLogger) DebugFn(fn rlog.LogFunction) { - log.inner.DebugFn(fn) + if log.enabled { + log.inner.DebugFn(fn) + } } func (log *defaultLogger) InfoFn(fn rlog.LogFunction) { - log.inner.InfoFn(fn) + if log.enabled { + log.inner.InfoFn(fn) + } } func (log *defaultLogger) PrintFn(fn rlog.LogFunction) { - log.inner.PrintFn(fn) + if log.enabled { + log.inner.PrintFn(fn) + } } func (log *defaultLogger) WarnFn(fn rlog.LogFunction) { - log.inner.PrintFn(fn) + if log.enabled { + log.inner.PrintFn(fn) + } } func (log *defaultLogger) WarningFn(fn rlog.LogFunction) { - log.inner.WarningFn(fn) + if log.enabled { + log.inner.WarningFn(fn) + } } func (log *defaultLogger) ErrorFn(fn rlog.LogFunction) { - log.inner.ErrorFn(fn) + if log.enabled { + log.inner.ErrorFn(fn) + } } func (log *defaultLogger) FatalFn(fn rlog.LogFunction) { - log.inner.FatalFn(fn) + if log.enabled { + log.inner.FatalFn(fn) + } } func (log *defaultLogger) PanicFn(fn rlog.LogFunction) { - log.inner.PanicFn(fn) + if log.enabled { + log.inner.PanicFn(fn) + } } func (log *defaultLogger) Logln(level rlog.Level, args ...interface{}) { @@ -215,39 +312,57 @@ func (log *defaultLogger) Logln(level rlog.Level, args ...interface{}) { } func (log *defaultLogger) Traceln(args ...interface{}) { - log.inner.Traceln(args...) + if log.enabled { + log.inner.Traceln(args...) + } } func (log *defaultLogger) Debugln(args ...interface{}) { - log.inner.Debugln(args...) + if log.enabled { + log.inner.Debugln(args...) 
+ } } func (log *defaultLogger) Infoln(args ...interface{}) { - log.inner.Infoln(args...) + if log.enabled { + log.inner.Infoln(args...) + } } func (log *defaultLogger) Println(args ...interface{}) { - log.inner.Println(args...) + if log.enabled { + log.inner.Println(args...) + } } func (log *defaultLogger) Warnln(args ...interface{}) { - log.inner.Warnln(args...) + if log.enabled { + log.inner.Warnln(args...) + } } func (log *defaultLogger) Warningln(args ...interface{}) { - log.inner.Warningln(args...) + if log.enabled { + log.inner.Warningln(args...) + } } func (log *defaultLogger) Errorln(args ...interface{}) { - log.inner.Errorln(args...) + if log.enabled { + log.inner.Errorln(args...) + } } func (log *defaultLogger) Fatalln(args ...interface{}) { - log.inner.Fatalln(args...) + if log.enabled { + log.inner.Fatalln(args...) + } } func (log *defaultLogger) Panicln(args ...interface{}) { - log.inner.Panicln(args...) + if log.enabled { + log.inner.Panicln(args...) + } } func (log *defaultLogger) Exit(code int) { diff --git a/vendor/github.com/snowflakedb/gosnowflake/monitoring.go b/vendor/github.com/snowflakedb/gosnowflake/monitoring.go index 8ac4c1dc..a138ecc8 100644 --- a/vendor/github.com/snowflakedb/gosnowflake/monitoring.go +++ b/vendor/github.com/snowflakedb/gosnowflake/monitoring.go @@ -9,7 +9,6 @@ import ( "fmt" "net/url" "strconv" - "time" ) const urlQueriesResultFmt = "/queries/%s/result" @@ -137,7 +136,7 @@ func (sc *snowflakeConn) checkQueryStatus( if tok, _, _ := sc.rest.TokenAccessor.GetTokens(); tok != "" { headers[headerAuthorizationKey] = fmt.Sprintf(headerSnowflakeToken, tok) } - resultPath := fmt.Sprintf("/monitoring/queries/%s", qid) + resultPath := fmt.Sprintf("%s/%s", monitoringQueriesPath, qid) url := sc.rest.getFullURL(resultPath, ¶m) res, err := sc.rest.FuncGet(ctx, sc.rest, url, headers, sc.rest.RequestTimeout) @@ -208,7 +207,7 @@ func (sc *snowflakeConn) getQueryResultResp( paramsMutex.Unlock() param := make(url.Values) param.Add(requestIDKey, getOrGenerateRequestIDFromContext(ctx).String()) - param.Add("clientStartTime", strconv.FormatInt(time.Now().Unix(), 10)) + param.Add("clientStartTime", strconv.FormatInt(sc.currentTimeProvider.currentTime(), 10)) param.Add(requestGUIDKey, NewUUID().String()) token, _, _ := sc.rest.TokenAccessor.GetTokens() if token != "" { @@ -266,6 +265,6 @@ func (sc *snowflakeConn) buildRowsForRunningQuery( if err := sc.rowsForRunningQuery(ctx, qid, rows); err != nil { return nil, err } - rows.ChunkDownloader.start() - return rows, nil + err := rows.ChunkDownloader.start() + return rows, err } diff --git a/vendor/github.com/snowflakedb/gosnowflake/multistatement.go b/vendor/github.com/snowflakedb/gosnowflake/multistatement.go index c8b13e21..ce9d9910 100644 --- a/vendor/github.com/snowflakedb/gosnowflake/multistatement.go +++ b/vendor/github.com/snowflakedb/gosnowflake/multistatement.go @@ -78,7 +78,7 @@ func (sc *snowflakeConn) handleMultiExec( return &snowflakeResult{ affectedRows: updatedRows, insertID: -1, - queryID: sc.QueryID, + queryID: data.QueryID, }, nil } diff --git a/vendor/github.com/snowflakedb/gosnowflake/ocsp.go b/vendor/github.com/snowflakedb/gosnowflake/ocsp.go index 8894a343..b700a416 100644 --- a/vendor/github.com/snowflakedb/gosnowflake/ocsp.go +++ b/vendor/github.com/snowflakedb/gosnowflake/ocsp.go @@ -141,9 +141,21 @@ type certIDKey struct { SerialNumber string } +type certCacheValue struct { + ts float64 + ocspRespBase64 string +} + +type parsedOcspRespKey struct { + ocspRespBase64 string + certIDBase64 string +} 
+ var ( - ocspResponseCache map[certIDKey][]interface{} - ocspResponseCacheLock *sync.RWMutex + ocspResponseCache map[certIDKey]*certCacheValue + ocspParsedRespCache map[parsedOcspRespKey]*ocspStatus + ocspResponseCacheLock *sync.RWMutex + ocspParsedRespCacheLock *sync.Mutex ) // copied from crypto/ocsp @@ -216,7 +228,7 @@ func extractCertIDKeyFromRequest(ocspReq []byte) (*certIDKey, *ocspStatus) { } } -func encodeCertIDKey(certIDKeyBase64 string) *certIDKey { +func decodeCertIDKey(certIDKeyBase64 string) *certIDKey { r, err := base64.StdEncoding.DecodeString(certIDKeyBase64) if err != nil { return nil @@ -239,7 +251,7 @@ func encodeCertIDKey(certIDKeyBase64 string) *certIDKey { } } -func decodeCertIDKey(k *certIDKey) string { +func encodeCertIDKey(k *certIDKey) string { serialNumber := new(big.Int) serialNumber.SetString(k.SerialNumber, 10) nameHash, err := base64.StdEncoding.DecodeString(k.NameHash) @@ -265,26 +277,36 @@ func decodeCertIDKey(k *certIDKey) string { return base64.StdEncoding.EncodeToString(encodedCertID) } -func checkOCSPResponseCache(encodedCertID *certIDKey, subject, issuer *x509.Certificate) *ocspStatus { +func checkOCSPResponseCache(certIDKey *certIDKey, subject, issuer *x509.Certificate) *ocspStatus { if strings.EqualFold(os.Getenv(cacheServerEnabledEnv), "false") { return &ocspStatus{code: ocspNoServer} } - ocspResponseCacheLock.RLock() - gotValueFromCache := ocspResponseCache[*encodedCertID] - ocspResponseCacheLock.RUnlock() - status := extractOCSPCacheResponseValue(gotValueFromCache, subject, issuer) + gotValueFromCache, ok := func() (*certCacheValue, bool) { + ocspResponseCacheLock.RLock() + defer ocspResponseCacheLock.RUnlock() + valueFromCache, ok := ocspResponseCache[*certIDKey] + return valueFromCache, ok + }() + if !ok { + return &ocspStatus{ + code: ocspMissedCache, + err: fmt.Errorf("miss cache data. subject: %v", subject), + } + } + + status := extractOCSPCacheResponseValue(certIDKey, gotValueFromCache, subject, issuer) if !isValidOCSPStatus(status.code) { - deleteOCSPCache(encodedCertID) + deleteOCSPCache(certIDKey) } return status } func deleteOCSPCache(encodedCertID *certIDKey) { ocspResponseCacheLock.Lock() + defer ocspResponseCacheLock.Unlock() delete(ocspResponseCache, *encodedCertID) cacheUpdated = true - ocspResponseCacheLock.Unlock() } func validateOCSP(ocspRes *ocsp.Response) *ocspStatus { @@ -354,20 +376,20 @@ func checkOCSPCacheServer( req requestFunc, ocspServerHost *url.URL, totalTimeout time.Duration) ( - cacheContent *map[string][]interface{}, + cacheContent *map[string]*certCacheValue, ocspS *ocspStatus) { var respd map[string][]interface{} headers := make(map[string]string) - res, err := newRetryHTTP(ctx, client, req, ocspServerHost, headers, totalTimeout).execute() + res, err := newRetryHTTP(ctx, client, req, ocspServerHost, headers, totalTimeout, defaultMaxRetryCount, defaultTimeProvider, nil).execute() if err != nil { - logger.Errorf("failed to get OCSP cache from OCSP Cache Server. %v\n", err) + logger.Errorf("failed to get OCSP cache from OCSP Cache Server. 
%v", err) return nil, &ocspStatus{ code: ocspFailedSubmit, err: err, } } defer res.Body.Close() - logger.Debugf("StatusCode from OCSP Cache Server: %v\n", res.StatusCode) + logger.Debugf("StatusCode from OCSP Cache Server: %v", res.StatusCode) if res.StatusCode != http.StatusOK { return nil, &ocspStatus{ code: ocspFailedResponse, @@ -381,14 +403,22 @@ func checkOCSPCacheServer( if err := dec.Decode(&respd); err == io.EOF { break } else if err != nil { - logger.Errorf("failed to decode OCSP cache. %v\n", err) + logger.Errorf("failed to decode OCSP cache. %v", err) return nil, &ocspStatus{ code: ocspFailedExtractResponse, err: err, } } } - return &respd, &ocspStatus{ + buf := make(map[string]*certCacheValue) + for key, value := range respd { + ok, ts, ocspRespBase64 := extractTsAndOcspRespBase64(value) + if !ok { + continue + } + buf[key] = &certCacheValue{ts, ocspRespBase64} + } + return &buf, &ocspStatus{ code: ocspSuccess, } } @@ -413,7 +443,7 @@ func retryOCSP( } res, err := newRetryHTTP( ctx, client, req, ocspHost, headers, - totalTimeout*time.Duration(multiplier)).doPost().setBody(reqBody).execute() + totalTimeout*time.Duration(multiplier), defaultMaxRetryCount, defaultTimeProvider, nil).doPost().setBody(reqBody).execute() if err != nil { return ocspRes, ocspResBytes, &ocspStatus{ code: ocspFailedSubmit, err: err, } } @@ -428,7 +458,6 @@ func retryOCSP( err: fmt.Errorf("HTTP code is not OK. %v: %v", res.StatusCode, res.Status), } } - logger.Debug("reading contents") ocspResBytes, err = io.ReadAll(res.Body) if err != nil { return ocspRes, ocspResBytes, &ocspStatus{ @@ -436,7 +465,59 @@ func retryOCSP( err: err, } } - logger.Debug("parsing OCSP response") + ocspRes, err = ocsp.ParseResponse(ocspResBytes, issuer) + if err != nil { + logger.Warnf("error when parsing ocsp response: %v", err) + logger.Warnf("performing GET fallback request to OCSP") + return fallbackRetryOCSPToGETRequest(ctx, client, req, ocspHost, headers, issuer, totalTimeout) + } + + logger.Debugf("OCSP Status from server: %v", printStatus(ocspRes)) + return ocspRes, ocspResBytes, &ocspStatus{ + code: ocspSuccess, + } +} + +// fallbackRetryOCSPToGETRequest is the third level of retry. Some OCSP responders do not support POST requests +// and will return a "malformed request" error. In that case we also try to perform a GET request +func fallbackRetryOCSPToGETRequest( + ctx context.Context, + client clientInterface, + req requestFunc, + ocspHost *url.URL, + headers map[string]string, + issuer *x509.Certificate, + totalTimeout time.Duration) ( + ocspRes *ocsp.Response, + ocspResBytes []byte, + ocspS *ocspStatus) { + multiplier := 1 + if atomic.LoadUint32((*uint32)(&ocspFailOpen)) == (uint32)(OCSPFailOpenFalse) { + multiplier = 3 // up to 3 times for Fail Close mode + } + res, err := newRetryHTTP(ctx, client, req, ocspHost, headers, + totalTimeout*time.Duration(multiplier), defaultMaxRetryCount, defaultTimeProvider, nil).execute() + if err != nil { + return ocspRes, ocspResBytes, &ocspStatus{ + code: ocspFailedSubmit, + err: err, + } + } + defer res.Body.Close() + logger.Debugf("GET fallback StatusCode from OCSP Server: %v", res.StatusCode) + if res.StatusCode != http.StatusOK { + return ocspRes, ocspResBytes, &ocspStatus{ + code: ocspFailedResponse, + err: fmt.Errorf("HTTP code is not OK.
%v: %v", res.StatusCode, res.Status), + } + } + ocspResBytes, err = io.ReadAll(res.Body) + if err != nil { + return ocspRes, ocspResBytes, &ocspStatus{ + code: ocspFailedExtractResponse, + err: err, + } + } ocspRes, err = ocsp.ParseResponse(ocspResBytes, issuer) if err != nil { return ocspRes, ocspResBytes, &ocspStatus{ @@ -445,14 +526,39 @@ func retryOCSP( } } + logger.Debugf("GET fallback OCSP Status from server: %v", printStatus(ocspRes)) return ocspRes, ocspResBytes, &ocspStatus{ code: ocspSuccess, } } +func printStatus(response *ocsp.Response) string { + switch response.Status { + case ocsp.Good: + return "Good" + case ocsp.Revoked: + return "Revoked" + case ocsp.Unknown: + return "Unknown" + default: + return fmt.Sprintf("%d", response.Status) + } +} + +func fullOCSPURL(url *url.URL) string { + fullURL := url.Hostname() + if url.Path != "" { + if !strings.HasPrefix(url.Path, "/") { + fullURL += "/" + } + fullURL += url.Path + } + return fullURL +} + // getRevocationStatus checks the certificate revocation status for subject using issuer certificate. func getRevocationStatus(ctx context.Context, subject, issuer *x509.Certificate) *ocspStatus { - logger.Infof("Subject: %v, Issuer: %v\n", subject.Subject, issuer.Subject) + logger.Infof("Subject: %v, Issuer: %v", subject.Subject, issuer.Subject) status, ocspReq, encodedCertID := validateWithCache(subject, issuer) if isValidOCSPStatus(status.code) { @@ -461,8 +567,8 @@ func getRevocationStatus(ctx context.Context, subject, issuer *x509.Certificate) if ocspReq == nil || encodedCertID == nil { return status } - logger.Infof("cache missed\n") - logger.Infof("OCSP Server: %v\n", subject.OCSPServer) + logger.Infof("cache missed") + logger.Infof("OCSP Server: %v", subject.OCSPServer) if len(subject.OCSPServer) == 0 || isTestNoOCSPURL() { return &ocspStatus{ code: ocspNoServer, @@ -484,9 +590,14 @@ func getRevocationStatus(ctx context.Context, subject, issuer *x509.Certificate) hostnameStr := os.Getenv(ocspTestResponderURLEnv) var hostname string if retryURL := os.Getenv(ocspRetryURLEnv); retryURL != "" { - hostname = fmt.Sprintf(retryURL, u.Hostname(), base64.StdEncoding.EncodeToString(ocspReq)) + hostname = fmt.Sprintf(retryURL, fullOCSPURL(u), base64.StdEncoding.EncodeToString(ocspReq)) + u0, err := url.Parse(hostname) + if err == nil { + hostname = u0.Hostname() + u = u0 + } } else { - hostname = u.Hostname() + hostname = fullOCSPURL(u) } if hostnameStr != "" { u0, err := url.Parse(hostnameStr) @@ -495,6 +606,10 @@ func getRevocationStatus(ctx context.Context, subject, issuer *x509.Certificate) u = u0 } } + + logger.Debugf("Fetching OCSP response from server: %v", u) + logger.Debugf("Host in headers: %v", hostname) + headers := make(map[string]string) headers[httpHeaderContentType] = "application/ocsp-request" headers[httpHeaderAccept] = "application/ocsp-response" @@ -523,7 +638,7 @@ func getRevocationStatus(ctx context.Context, subject, issuer *x509.Certificate) if !isValidOCSPStatus(ret.code) { return ret // return invalid } - v := []interface{}{float64(time.Now().UTC().Unix()), base64.StdEncoding.EncodeToString(ocspResBytes)} + v := &certCacheValue{float64(time.Now().UTC().Unix()), base64.StdEncoding.EncodeToString(ocspResBytes)} ocspResponseCacheLock.Lock() ocspResponseCache[*encodedCertID] = v cacheUpdated = true @@ -666,18 +781,18 @@ func downloadOCSPCacheServer() { Timeout: timeout, Transport: snowflakeInsecureTransport, } - ret, ocspStatus := checkOCSPCacheServer(context.TODO(), ocspClient, http.NewRequest, u, timeout) + ret, 
ocspStatus := checkOCSPCacheServer(context.Background(), ocspClient, http.NewRequest, u, timeout) if ocspStatus.code != ocspSuccess { return } ocspResponseCacheLock.Lock() for k, cacheValue := range *ret { - status := extractOCSPCacheResponseValueWithoutSubject(cacheValue) + cacheKey := decodeCertIDKey(k) + status := extractOCSPCacheResponseValueWithoutSubject(cacheKey, cacheValue) if !isValidOCSPStatus(status.code) { continue } - cacheKey := encodeCertIDKey(k) ocspResponseCache[*cacheKey] = cacheValue } cacheUpdated = true @@ -703,7 +818,7 @@ func getAllRevocationStatus(ctx context.Context, verifiedChains []*x509.Certific // verifyPeerCertificateSerial verifies the certificate revocation status in serial. func verifyPeerCertificateSerial(_ [][]byte, verifiedChains [][]*x509.Certificate) (err error) { overrideCacheDir() - return verifyPeerCertificate(context.TODO(), verifiedChains) + return verifyPeerCertificate(context.Background(), verifiedChains) } func overrideCacheDir() { @@ -719,11 +834,13 @@ func initOCSPCache() { if strings.EqualFold(os.Getenv(cacheServerEnabledEnv), "false") { return } - ocspResponseCache = make(map[certIDKey][]interface{}) + ocspResponseCache = make(map[certIDKey]*certCacheValue) + ocspParsedRespCache = make(map[parsedOcspRespKey]*ocspStatus) ocspResponseCacheLock = &sync.RWMutex{} + ocspParsedRespCacheLock = &sync.Mutex{} logger.Infof("reading OCSP Response cache file. %v\n", cacheFileName) - f, err := os.OpenFile(cacheFileName, os.O_CREATE|os.O_RDONLY, os.ModePerm) + f, err := os.OpenFile(cacheFileName, os.O_CREATE|os.O_RDONLY, readWriteFileMode) if err != nil { logger.Debugf("failed to open. Ignored. %v\n", err) return @@ -731,7 +848,6 @@ func initOCSPCache() { defer f.Close() buf := make(map[string][]interface{}) - r := bufio.NewReader(f) dec := json.NewDecoder(r) for { @@ -742,54 +858,73 @@ func initOCSPCache() { return } } + for k, cacheValue := range buf { - status := extractOCSPCacheResponseValueWithoutSubject(cacheValue) + ok, ts, ocspRespBase64 := extractTsAndOcspRespBase64(cacheValue) + if !ok { + continue + } + certValue := &certCacheValue{ts, ocspRespBase64} + cacheKey := decodeCertIDKey(k) + status := extractOCSPCacheResponseValueWithoutSubject(cacheKey, certValue) if !isValidOCSPStatus(status.code) { continue } - cacheKey := encodeCertIDKey(k) - ocspResponseCache[*cacheKey] = cacheValue + ocspResponseCache[*cacheKey] = certValue } cacheUpdated = false } -func extractOCSPCacheResponseValueWithoutSubject(cacheValue []interface{}) *ocspStatus { - return extractOCSPCacheResponseValue(cacheValue, nil, nil) + +func extractTsAndOcspRespBase64(value []interface{}) (bool, float64, string) { + ts, ok := value[0].(float64) + if !ok { + logger.Warnf("cannot cast %v as float64", value[0]) + return false, -1, "" + } + ocspRespBase64, ok := value[1].(string) + if !ok { + logger.Warnf("cannot cast %v as string", value[1]) + return false, -1, "" + } + return true, ts, ocspRespBase64 +} + +func extractOCSPCacheResponseValueWithoutSubject(cacheKey *certIDKey, cacheValue *certCacheValue) *ocspStatus { + return extractOCSPCacheResponseValue(cacheKey, cacheValue, nil, nil) } -func extractOCSPCacheResponseValue(cacheValue []interface{}, subject, issuer *x509.Certificate) *ocspStatus { +func extractOCSPCacheResponseValue(certIDKey *certIDKey, certCacheValue *certCacheValue, subject, issuer *x509.Certificate) *ocspStatus { subjectName := "Unknown" if subject != nil { subjectName = subject.Subject.CommonName } curTime := time.Now() - if len(cacheValue) != 2 { + currentTime 
:= float64(curTime.UTC().Unix()) + if currentTime-certCacheValue.ts >= cacheExpire { return &ocspStatus{ - code: ocspMissedCache, - err: fmt.Errorf("miss cache data. subject: %v", subjectName), + code: ocspCacheExpired, + err: fmt.Errorf("cache expired. current: %v, cache: %v", + time.Unix(int64(currentTime), 0).UTC(), time.Unix(int64(certCacheValue.ts), 0).UTC()), } } - if ts, ok := cacheValue[0].(float64); ok { - currentTime := float64(curTime.UTC().Unix()) - if currentTime-ts >= cacheExpire { - return &ocspStatus{ - code: ocspCacheExpired, - err: fmt.Errorf("cache expired. current: %v, cache: %v", - time.Unix(int64(currentTime), 0).UTC(), time.Unix(int64(ts), 0).UTC()), - } - } + + ocspParsedRespCacheLock.Lock() + defer ocspParsedRespCacheLock.Unlock() + + var cacheKey parsedOcspRespKey + if certIDKey != nil { + cacheKey = parsedOcspRespKey{certCacheValue.ocspRespBase64, encodeCertIDKey(certIDKey)} } else { - return &ocspStatus{ - code: ocspFailedDecodeResponse, - err: errors.New("the first cache element is not float64"), - } + cacheKey = parsedOcspRespKey{certCacheValue.ocspRespBase64, ""} } - var err error - var r *ocsp.Response - if s, ok := cacheValue[1].(string); ok { + status, ok := ocspParsedRespCache[cacheKey] + if !ok { + logger.Debugf("OCSP status not found in cache; certIdKey: %v", certIDKey) + var err error var b []byte - b, err = base64.StdEncoding.DecodeString(s) + b, err = base64.StdEncoding.DecodeString(certCacheValue.ocspRespBase64) if err != nil { return &ocspStatus{ code: ocspFailedDecodeResponse, @@ -797,7 +932,8 @@ func extractOCSPCacheResponseValue(cacheValue []interface{}, subject, issuer *x5 } } // check the revocation status here - r, err = ocsp.ParseResponse(b, issuer) + ocspResponse, err := ocsp.ParseResponse(b, issuer) + if err != nil { logger.Warnf("the second cache element is not a valid OCSP Response. Ignored. subject: %v\n", subjectName) return &ocspStatus{ @@ -805,14 +941,11 @@ func extractOCSPCacheResponseValue(cacheValue []interface{}, subject, issuer *x5 err: fmt.Errorf("failed to parse OCSP Respose. subject: %v, err: %v", subjectName, err), } } - } else { - return &ocspStatus{ - code: ocspFailedDecodeResponse, - err: errors.New("the second cache element is not string"), - } - + status = validateOCSP(ocspResponse) + ocspParsedRespCache[cacheKey] = status } - return validateOCSP(r) + logger.Debugf("OCSP status found in cache: %v; certIdKey: %v", status, certIDKey) + return status } // writeOCSPCacheFile writes a OCSP Response cache file. This is called if all revocation status is success. 
@@ -853,8 +986,8 @@ func writeOCSPCacheFile() { buf := make(map[string][]interface{}) for k, v := range ocspResponseCache { - cacheKeyInBase64 := decodeCertIDKey(&k) - buf[cacheKeyInBase64] = v + cacheKeyInBase64 := encodeCertIDKey(&k) + buf[cacheKeyInBase64] = []interface{}{v.ts, v.ocspRespBase64} } j, err := json.Marshal(buf) diff --git a/vendor/github.com/snowflakedb/gosnowflake/query.go b/vendor/github.com/snowflakedb/gosnowflake/query.go index db76d162..162b4523 100644 --- a/vendor/github.com/snowflakedb/gosnowflake/query.go +++ b/vendor/github.com/snowflakedb/gosnowflake/query.go @@ -3,6 +3,7 @@ package gosnowflake import ( + "encoding/json" "time" ) @@ -27,6 +28,22 @@ type execRequest struct { Parameters map[string]interface{} `json:"parameters,omitempty"` Bindings map[string]execBindParameter `json:"bindings,omitempty"` BindStage string `json:"bindStage,omitempty"` + QueryContext requestQueryContext `json:"queryContextDTO,omitempty"` +} + +type requestQueryContext struct { + Entries []requestQueryContextEntry `json:"entries,omitempty"` +} + +type requestQueryContextEntry struct { + Context contextData `json:"context,omitempty"` + ID int `json:"id"` + Priority int `json:"priority"` + Timestamp int64 `json:"timestamp,omitempty"` +} + +type contextData struct { + Base64Data string `json:"base64Data,omitempty"` } type execResponseRowType struct { @@ -110,6 +127,7 @@ type execResponseData struct { Parallel int64 `json:"parallel,omitempty"` Threshold int64 `json:"threshold,omitempty"` AutoCompress bool `json:"autoCompress,omitempty"` + Overwrite bool `json:"overwrite,omitempty"` SourceCompression string `json:"sourceCompression,omitempty"` ShowEncryptionParameter bool `json:"clientShowEncryptionParameter,omitempty"` EncryptionMaterial encryptionWrapper `json:"encryptionMaterial,omitempty"` @@ -118,6 +136,9 @@ type execResponseData struct { Command string `json:"command,omitempty"` Kind string `json:"kind,omitempty"` Operation string `json:"operation,omitempty"` + + // HTAP + QueryContext json.RawMessage `json:"queryContext,omitempty"` } type execResponse struct { diff --git a/vendor/github.com/snowflakedb/gosnowflake/restful.go b/vendor/github.com/snowflakedb/gosnowflake/restful.go index 04da332c..c92d9c76 100644 --- a/vendor/github.com/snowflakedb/gosnowflake/restful.go +++ b/vendor/github.com/snowflakedb/gosnowflake/restful.go @@ -37,23 +37,32 @@ const ( tokenRequestPath = "/session/token-request" abortRequestPath = "/queries/v1/abort-request" authenticatorRequestPath = "/session/authenticator-request" + monitoringQueriesPath = "/monitoring/queries" sessionRequestPath = "/session" heartBeatPath = "/session/heartbeat" ) type ( - funcGetType func(context.Context, *snowflakeRestful, *url.URL, map[string]string, time.Duration) (*http.Response, error) - funcPostType func(context.Context, *snowflakeRestful, *url.URL, map[string]string, []byte, time.Duration, bool) (*http.Response, error) + funcGetType func(context.Context, *snowflakeRestful, *url.URL, map[string]string, time.Duration) (*http.Response, error) + funcPostType func(context.Context, *snowflakeRestful, *url.URL, map[string]string, []byte, time.Duration, currentTimeProvider, *Config) (*http.Response, error) + funcAuthPostType func(context.Context, *http.Client, *url.URL, map[string]string, bodyCreatorType, time.Duration, int) (*http.Response, error) + bodyCreatorType func() ([]byte, error) ) +var emptyBodyCreator = func() ([]byte, error) { + return []byte{}, nil +} + type snowflakeRestful struct { Host string Port int Protocol 
string LoginTimeout time.Duration // Login timeout RequestTimeout time.Duration // request timeout + MaxRetryCount int Client *http.Client + JWTClient *http.Client TokenAccessor TokenAccessor HeartBeat *heartbeat @@ -63,11 +72,12 @@ type snowflakeRestful struct { FuncPostQueryHelper func(context.Context, *snowflakeRestful, *url.Values, map[string]string, []byte, time.Duration, UUID, *Config) (*execResponse, error) FuncPost funcPostType FuncGet funcGetType + FuncAuthPost funcAuthPostType FuncRenewSession func(context.Context, *snowflakeRestful, time.Duration) error - FuncPostAuth func(context.Context, *snowflakeRestful, *url.Values, map[string]string, []byte, time.Duration) (*authResponse, error) FuncCloseSession func(context.Context, *snowflakeRestful, time.Duration) error FuncCancelQuery func(context.Context, *snowflakeRestful, UUID, time.Duration) error + FuncPostAuth func(context.Context, *snowflakeRestful, *http.Client, *url.Values, map[string]string, bodyCreatorType, time.Duration) (*authResponse, error) FuncPostAuthSAML func(context.Context, *snowflakeRestful, map[string]string, []byte, time.Duration) (*authResponse, error) FuncPostAuthOKTA func(context.Context, *snowflakeRestful, map[string]string, []byte, string, time.Duration) (*authOKTAResponse, error) FuncGetSSO func(context.Context, *snowflakeRestful, *url.Values, map[string]string, string, time.Duration) ([]byte, error) @@ -92,6 +102,16 @@ func (sr *snowflakeRestful) getFullURL(path string, params *url.Values) *url.URL return ret } +// We need a separate client for JWT because, if token processing takes too long, the token may already be expired. +func (sr *snowflakeRestful) getClientFor(authType AuthType) *http.Client { + switch authType { + case AuthTypeJwt: + return sr.JWTClient + default: + return sr.Client + } +} + // Renew the snowflake session if the current token is still the stale token specified func (sr *snowflakeRestful) renewExpiredSessionToken(ctx context.Context, timeout time.Duration, expiredToken string) error { err := sr.TokenAccessor.Lock() @@ -144,10 +164,13 @@ func postRestful( headers map[string]string, body []byte, timeout time.Duration, - raise4XX bool) ( + currentTimeProvider currentTimeProvider, + cfg *Config) ( *http.Response, error) { - return newRetryHTTP( - ctx, sr.Client, http.NewRequest, fullURL, headers, timeout).doPost().setBody(body).doRaise4XX(raise4XX).execute() + return newRetryHTTP(ctx, sr.Client, http.NewRequest, fullURL, headers, timeout, sr.MaxRetryCount, currentTimeProvider, cfg). + doPost(). + setBody(body). + execute() } func getRestful( @@ -157,8 +180,22 @@ func getRestful( headers map[string]string, timeout time.Duration) ( *http.Response, error) { - return newRetryHTTP( - ctx, sr.Client, http.NewRequest, fullURL, headers, timeout).execute() + return newRetryHTTP(ctx, sr.Client, http.NewRequest, fullURL, headers, timeout, sr.MaxRetryCount, defaultTimeProvider, nil).execute() } + +func postAuthRestful( + ctx context.Context, + client *http.Client, + fullURL *url.URL, + headers map[string]string, + bodyCreator bodyCreatorType, + timeout time.Duration, + maxRetryCount int) ( + *http.Response, error) { + return newRetryHTTP(ctx, client, http.NewRequest, fullURL, headers, timeout, maxRetryCount, defaultTimeProvider, nil). + doPost(). + setBodyCreator(bodyCreator).
+ execute() } func postRestfulQuery( @@ -179,7 +216,7 @@ func postRestfulQuery( return data, err } - if err = sr.FuncCancelQuery(context.TODO(), sr, requestID, timeout); err != nil { + if err = sr.FuncCancelQuery(context.Background(), sr, requestID, timeout); err != nil { return nil, err } return nil, ctx.Err() @@ -197,7 +234,6 @@ func postRestfulQueryHelper( data *execResponse, err error) { logger.Infof("params: %v", params) params.Add(requestIDKey, requestID.String()) - params.Add("clientStartTime", strconv.FormatInt(time.Now().Unix(), 10)) params.Add(requestGUIDKey, NewUUID().String()) token, _, _ := sr.TokenAccessor.GetTokens() if token != "" { @@ -206,7 +242,7 @@ func postRestfulQueryHelper( var resp *http.Response fullURL := sr.getFullURL(queryRequestPath, params) - resp, err = sr.FuncPost(ctx, sr, fullURL, headers, body, timeout, true) + resp, err = sr.FuncPost(ctx, sr, fullURL, headers, body, timeout, defaultTimeProvider, cfg) if err != nil { return nil, err } @@ -298,7 +334,7 @@ func closeSession(ctx context.Context, sr *snowflakeRestful, timeout time.Durati token, _, _ := sr.TokenAccessor.GetTokens() headers[headerAuthorizationKey] = fmt.Sprintf(headerSnowflakeToken, token) - resp, err := sr.FuncPost(ctx, sr, fullURL, headers, nil, 5*time.Second, false) + resp, err := sr.FuncPost(ctx, sr, fullURL, headers, nil, 5*time.Second, defaultTimeProvider, nil) if err != nil { return err } @@ -357,7 +393,7 @@ func renewRestfulSession(ctx context.Context, sr *snowflakeRestful, timeout time return err } - resp, err := sr.FuncPost(ctx, sr, fullURL, headers, reqBody, timeout, false) + resp, err := sr.FuncPost(ctx, sr, fullURL, headers, reqBody, timeout, defaultTimeProvider, nil) if err != nil { return err } @@ -429,7 +465,7 @@ func cancelQuery(ctx context.Context, sr *snowflakeRestful, requestID UUID, time return err } - resp, err := sr.FuncPost(ctx, sr, fullURL, headers, reqByte, timeout, false) + resp, err := sr.FuncPost(ctx, sr, fullURL, headers, reqByte, timeout, defaultTimeProvider, nil) if err != nil { return err } diff --git a/vendor/github.com/snowflakedb/gosnowflake/result.go b/vendor/github.com/snowflakedb/gosnowflake/result.go index e08f4190..c2a71830 100644 --- a/vendor/github.com/snowflakedb/gosnowflake/result.go +++ b/vendor/github.com/snowflakedb/gosnowflake/result.go @@ -2,6 +2,8 @@ package gosnowflake +import "errors" + type queryStatus string const ( @@ -73,3 +75,19 @@ func (res *snowflakeResult) waitForAsyncExecStatus() error { } return nil } + +type snowflakeResultNoRows struct { + queryID string +} + +func (*snowflakeResultNoRows) LastInsertId() (int64, error) { + return 0, errors.New("no LastInsertId available") +} + +func (*snowflakeResultNoRows) RowsAffected() (int64, error) { + return 0, errors.New("no RowsAffected available") +} + +func (rnr *snowflakeResultNoRows) GetQueryID() string { + return rnr.queryID +} diff --git a/vendor/github.com/snowflakedb/gosnowflake/retry.go b/vendor/github.com/snowflakedb/gosnowflake/retry.go index 07a7709f..5292f621 100644 --- a/vendor/github.com/snowflakedb/gosnowflake/retry.go +++ b/vendor/github.com/snowflakedb/gosnowflake/retry.go @@ -5,30 +5,53 @@ package gosnowflake import ( "bytes" "context" - "crypto/x509" "fmt" "io" + "math" "math/rand" "net/http" "net/url" - "runtime" "strconv" "strings" "sync" "time" ) +type waitAlgo struct { + mutex *sync.Mutex // required for *rand.Rand usage + random *rand.Rand + base time.Duration // base wait time + cap time.Duration // maximum wait time +} + var random *rand.Rand +var defaultWaitAlgo 
*waitAlgo + +var authEndpoints = []string{ + loginRequestPath, + tokenRequestPath, + authenticatorRequestPath, +} + +var clientErrorsStatusCodesEligibleForRetry = []int{ + http.StatusTooManyRequests, + http.StatusRequestTimeout, +} func init() { random = rand.New(rand.NewSource(time.Now().UnixNano())) + defaultWaitAlgo = &waitAlgo{mutex: &sync.Mutex{}, random: random, base: 5 * time.Second, cap: 160 * time.Second} } const ( // requestGUIDKey is attached to every request against Snowflake requestGUIDKey string = "request_guid" - // retryCounterKey is attached to query-request from the second time - retryCounterKey string = "retryCounter" + // retryCountKey is attached to query-request from the second time + retryCountKey string = "retryCount" + // retryReasonKey contains last HTTP status or 0 if timeout + retryReasonKey string = "retryReason" + // clientStartTime contains a time when client started request (first request, not retries) + clientStartTimeKey string = "clientStartTime" // requestIDKey is attached to all requests to Snowflake requestIDKey string = "requestId" ) @@ -86,56 +109,109 @@ func (replacer *requestGUIDReplace) replace() *url.URL { return replacer.urlPtr } -type retryCounterUpdater interface { +type retryCountUpdater interface { replaceOrAdd(retry int) *url.URL } -type retryCounterUpdate struct { +type retryCountUpdate struct { urlPtr *url.URL urlValues url.Values } // this replacer does nothing but replace the url -type transientReplaceOrAdd struct { +type transientRetryCountUpdater struct { urlPtr *url.URL } -func (replaceOrAdder *transientReplaceOrAdd) replaceOrAdd(retry int) *url.URL { +func (replaceOrAdder *transientRetryCountUpdater) replaceOrAdd(retry int) *url.URL { return replaceOrAdder.urlPtr } -func (replacer *retryCounterUpdate) replaceOrAdd(retry int) *url.URL { - replacer.urlValues.Del(retryCounterKey) - replacer.urlValues.Add(retryCounterKey, strconv.Itoa(retry)) +func (replacer *retryCountUpdate) replaceOrAdd(retry int) *url.URL { + replacer.urlValues.Del(retryCountKey) + replacer.urlValues.Add(retryCountKey, strconv.Itoa(retry)) replacer.urlPtr.RawQuery = replacer.urlValues.Encode() return replacer.urlPtr } -func newRetryUpdate(urlPtr *url.URL) retryCounterUpdater { - if !strings.HasPrefix(urlPtr.Path, queryRequestPath) { +func newRetryCountUpdater(urlPtr *url.URL) retryCountUpdater { + if !isQueryRequest(urlPtr) { // nop if not query-request - return &transientReplaceOrAdd{urlPtr} + return &transientRetryCountUpdater{urlPtr} } values, err := url.ParseQuery(urlPtr.RawQuery) if err != nil { // nop if the URL is not valid - return &transientReplaceOrAdd{urlPtr} + return &transientRetryCountUpdater{urlPtr} } - return &retryCounterUpdate{urlPtr, values} + return &retryCountUpdate{urlPtr, values} } -type waitAlgo struct { - mutex *sync.Mutex // required for random.Int63n - base time.Duration // base wait time - cap time.Duration // maximum wait time +type retryReasonUpdater interface { + replaceOrAdd(reason int) *url.URL } -func randSecondDuration(n time.Duration) time.Duration { - return time.Duration(random.Int63n(int64(n/time.Second))) * time.Second +type retryReasonUpdate struct { + url *url.URL +} + +func (retryReasonUpdater *retryReasonUpdate) replaceOrAdd(reason int) *url.URL { + query := retryReasonUpdater.url.Query() + query.Del(retryReasonKey) + query.Add(retryReasonKey, strconv.Itoa(reason)) + retryReasonUpdater.url.RawQuery = query.Encode() + return retryReasonUpdater.url +} + +type transientRetryReasonUpdater struct { + url *url.URL +} + +func 
(retryReasonUpdater *transientRetryReasonUpdater) replaceOrAdd(_ int) *url.URL { + return retryReasonUpdater.url } -// decorrelated jitter backoff -func (w *waitAlgo) decorr(attempt int, sleep time.Duration) time.Duration { +func newRetryReasonUpdater(url *url.URL, cfg *Config) retryReasonUpdater { + // not a query request + if !isQueryRequest(url) { + return &transientRetryReasonUpdater{url} + } + // implicitly disabled retry reason + if cfg != nil && cfg.IncludeRetryReason == ConfigBoolFalse { + return &transientRetryReasonUpdater{url} + } + return &retryReasonUpdate{url} +} + +func ensureClientStartTimeIsSet(url *url.URL, clientStartTime string) *url.URL { + if !isQueryRequest(url) { + // nop if not query-request + return url + } + query := url.Query() + if query.Has(clientStartTimeKey) { + return url + } + query.Add(clientStartTimeKey, clientStartTime) + url.RawQuery = query.Encode() + return url +} + +func isQueryRequest(url *url.URL) bool { + return strings.HasPrefix(url.Path, queryRequestPath) +} + +// jitter backoff in seconds +func (w *waitAlgo) calculateWaitBeforeRetryForAuthRequest(attempt int, currWaitTimeDuration time.Duration) time.Duration { + w.mutex.Lock() + defer w.mutex.Unlock() + currWaitTimeInSeconds := currWaitTimeDuration.Seconds() + jitterAmount := w.getJitter(currWaitTimeInSeconds) + jitteredSleepTime := chooseRandomFromRange(currWaitTimeInSeconds+jitterAmount, math.Pow(2, float64(attempt))+jitterAmount) + return time.Duration(jitteredSleepTime * float64(time.Second)) +} + +func (w *waitAlgo) calculateWaitBeforeRetry(attempt int, sleep time.Duration) time.Duration { w.mutex.Lock() defer w.mutex.Unlock() t := 3*sleep - w.base @@ -148,10 +224,14 @@ func (w *waitAlgo) decorr(attempt int, sleep time.Duration) time.Duration { return w.base } -var defaultWaitAlgo = &waitAlgo{ - mutex: &sync.Mutex{}, - base: 5 * time.Second, - cap: 160 * time.Second, +func randSecondDuration(n time.Duration) time.Duration { + return time.Duration(random.Int63n(int64(n/time.Second))) * time.Second +} + +func (w *waitAlgo) getJitter(currWaitTime float64) float64 { + multiplicationFactor := chooseRandomFromRange(-1, 1) + jitterAmount := 0.5 * currWaitTime * multiplicationFactor + return jitterAmount } type requestFunc func(method, urlStr string, body io.Reader) (*http.Request, error) @@ -161,15 +241,17 @@ type clientInterface interface { } type retryHTTP struct { - ctx context.Context - client clientInterface - req requestFunc - method string - fullURL *url.URL - headers map[string]string - body []byte - timeout time.Duration - raise4XX bool + ctx context.Context + client clientInterface + req requestFunc + method string + fullURL *url.URL + headers map[string]string + bodyCreator bodyCreatorType + timeout time.Duration + maxRetryCount int + currentTimeProvider currentTimeProvider + cfg *Config } func newRetryHTTP(ctx context.Context, @@ -177,7 +259,10 @@ func newRetryHTTP(ctx context.Context, req requestFunc, fullURL *url.URL, headers map[string]string, - timeout time.Duration) *retryHTTP { + timeout time.Duration, + maxRetryCount int, + currentTimeProvider currentTimeProvider, + cfg *Config) *retryHTTP { instance := retryHTTP{} instance.ctx = ctx instance.client = client @@ -185,24 +270,28 @@ func newRetryHTTP(ctx context.Context, instance.method = "GET" instance.fullURL = fullURL instance.headers = headers - instance.body = nil instance.timeout = timeout - instance.raise4XX = false + instance.maxRetryCount = maxRetryCount + instance.bodyCreator = emptyBodyCreator + 
instance.currentTimeProvider = currentTimeProvider + instance.cfg = cfg return &instance } -func (r *retryHTTP) doRaise4XX(raise4XX bool) *retryHTTP { - r.raise4XX = raise4XX - return r -} - func (r *retryHTTP) doPost() *retryHTTP { r.method = "POST" return r } func (r *retryHTTP) setBody(body []byte) *retryHTTP { - r.body = body + r.bodyCreator = func() ([]byte, error) { + return body, nil + } + return r +} + +func (r *retryHTTP) setBodyCreator(bodyCreator bodyCreatorType) *retryHTTP { + r.bodyCreator = bodyCreator return r } @@ -210,14 +299,20 @@ func (r *retryHTTP) execute() (res *http.Response, err error) { totalTimeout := r.timeout logger.WithContext(r.ctx).Infof("retryHTTP.totalTimeout: %v", totalTimeout) retryCounter := 0 - sleepTime := time.Duration(0) + sleepTime := time.Duration(time.Second) + clientStartTime := strconv.FormatInt(r.currentTimeProvider.currentTime(), 10) - var rIDReplacer requestGUIDReplacer - var rUpdater retryCounterUpdater + var requestGUIDReplacer requestGUIDReplacer + var retryCountUpdater retryCountUpdater + var retryReasonUpdater retryReasonUpdater for { logger.Debugf("retry count: %v", retryCounter) - req, err := r.req(r.method, r.fullURL.String(), bytes.NewReader(r.body)) + body, err := r.bodyCreator() + if err != nil { + return nil, err + } + req, err := r.req(r.method, r.fullURL.String(), bytes.NewReader(body)) if err != nil { return nil, err } @@ -229,54 +324,60 @@ func (r *retryHTTP) execute() (res *http.Response, err error) { req.Header.Set(k, v) } res, err = r.client.Do(req) + // check if it can retry. + retryable, err := isRetryableError(req, res, err) + if !retryable { + return res, err + } if err != nil { - // check if it can retry. - doExit, err := r.isRetryableError(err) - if doExit { - return res, err - } - // cannot just return 4xx and 5xx status as the error can be sporadic. run often helps. logger.WithContext(r.ctx).Warningf( - "failed http connection. no response is returned. err: %v. retrying...\n", err) + "failed http connection. err: %v. retrying...\n", err) } else { - if res.StatusCode == http.StatusOK || r.raise4XX && res != nil && res.StatusCode >= 400 && res.StatusCode < 500 && res.StatusCode != 429 { - // exit if success - // or - // abort connection if raise4XX flag is enabled and the range of HTTP status code are 4XX. - // This is currently used for Snowflake login. The caller must generate an error object based on HTTP status. - break - } logger.WithContext(r.ctx).Warningf( "failed http connection. HTTP Status: %v. retrying...\n", res.StatusCode) res.Body.Close() } - // uses decorrelated jitter backoff - sleepTime = defaultWaitAlgo.decorr(retryCounter, sleepTime) + // uses exponential jitter backoff + retryCounter++ + if isLoginRequest(req) { + sleepTime = defaultWaitAlgo.calculateWaitBeforeRetryForAuthRequest(retryCounter, sleepTime) + } else { + sleepTime = defaultWaitAlgo.calculateWaitBeforeRetry(retryCounter, sleepTime) + } if totalTimeout > 0 { logger.WithContext(r.ctx).Infof("to timeout: %v", totalTimeout) // if any timeout is set totalTimeout -= sleepTime - if totalTimeout <= 0 { + if totalTimeout <= 0 || retryCounter > r.maxRetryCount { if err != nil { return nil, err } if res != nil { - return nil, fmt.Errorf("timeout after %s. HTTP Status: %v. Hanging?", r.timeout, res.StatusCode) + return nil, fmt.Errorf("timeout after %s and %v retries. HTTP Status: %v. Hanging?", r.timeout, retryCounter, res.StatusCode) } - return nil, fmt.Errorf("timeout after %s. 
Hanging?", r.timeout) + return nil, fmt.Errorf("timeout after %s and %v retries. Hanging?", r.timeout, retryCounter) } } - retryCounter++ - if rIDReplacer == nil { - rIDReplacer = newRequestGUIDReplace(r.fullURL) + if requestGUIDReplacer == nil { + requestGUIDReplacer = newRequestGUIDReplace(r.fullURL) + } + r.fullURL = requestGUIDReplacer.replace() + if retryCountUpdater == nil { + retryCountUpdater = newRetryCountUpdater(r.fullURL) + } + r.fullURL = retryCountUpdater.replaceOrAdd(retryCounter) + if retryReasonUpdater == nil { + retryReasonUpdater = newRetryReasonUpdater(r.fullURL, r.cfg) } - r.fullURL = rIDReplacer.replace() - if rUpdater == nil { - rUpdater = newRetryUpdate(r.fullURL) + retryReason := 0 + if res != nil { + retryReason = res.StatusCode } - r.fullURL = rUpdater.replaceOrAdd(retryCounter) + r.fullURL = retryReasonUpdater.replaceOrAdd(retryReason) + r.fullURL = ensureClientStartTimeIsSet(r.fullURL, clientStartTime) logger.WithContext(r.ctx).Infof("sleeping %v. to timeout: %v. retrying", sleepTime, totalTimeout) + logger.WithContext(r.ctx).Infof("retry count: %v, retry reason: %v", retryCounter, retryReason) await := time.NewTimer(sleepTime) select { @@ -287,36 +388,22 @@ func (r *retryHTTP) execute() (res *http.Response, err error) { return res, r.ctx.Err() } } - return res, err } -func (r *retryHTTP) isRetryableError(err error) (bool, error) { - urlError, isURLError := err.(*url.Error) - if isURLError { - // context cancel or timeout - if urlError.Err == context.DeadlineExceeded || urlError.Err == context.Canceled { - return true, urlError.Err - } - if driverError, ok := urlError.Err.(*SnowflakeError); ok { - // Certificate Revoked - if driverError.Number == ErrOCSPStatusRevoked { - return true, err - } - } - if _, ok := urlError.Err.(x509.CertificateInvalidError); ok { - // Certificate is invalid - return true, err - } - if _, ok := urlError.Err.(x509.UnknownAuthorityError); ok { - // Certificate is self-signed - return true, err - } - errString := urlError.Err.Error() - if runtime.GOOS == "darwin" && strings.HasPrefix(errString, "x509:") && strings.HasSuffix(errString, "certificate is expired") { - // Certificate is expired - return true, err - } - +func isRetryableError(req *http.Request, res *http.Response, err error) (bool, error) { + if err != nil && res == nil { // Failed http connection. Most probably client timeout. 
+ return true, err + } + if res == nil || req == nil { + return false, err } - return false, err + return isRetryableStatus(res.StatusCode), err +} + +func isRetryableStatus(statusCode int) bool { + return (statusCode >= 500 && statusCode < 600) || contains(clientErrorsStatusCodesEligibleForRetry, statusCode) +} + +func isLoginRequest(req *http.Request) bool { + return contains(authEndpoints, req.URL.Path) } diff --git a/vendor/github.com/snowflakedb/gosnowflake/rows.go b/vendor/github.com/snowflakedb/gosnowflake/rows.go index 034acb67..f8e5d50f 100644 --- a/vendor/github.com/snowflakedb/gosnowflake/rows.go +++ b/vendor/github.com/snowflakedb/gosnowflake/rows.go @@ -43,6 +43,14 @@ type snowflakeRows struct { status queryStatus err error errChannel chan error + location *time.Location +} + +func (rows *snowflakeRows) getLocation() *time.Location { + if rows.location == nil && rows.sc != nil && rows.sc.cfg != nil { + rows.location = getCurrentLocation(rows.sc.cfg.Params) + } + return rows.location } type snowflakeValue interface{} @@ -140,7 +148,7 @@ func (rows *snowflakeRows) ColumnTypeScanType(index int) reflect.Type { return nil } return snowflakeTypeToGo( - getSnowflakeType(strings.ToUpper(rows.ChunkDownloader.getRowType()[index].Type)), + getSnowflakeType(rows.ChunkDownloader.getRowType()[index].Type), rows.ChunkDownloader.getRowType()[index].Scale) } @@ -184,11 +192,7 @@ func (rows *snowflakeRows) Next(dest []driver.Value) (err error) { for i, n := 0, len(row.RowSet); i < n; i++ { // could move to chunk downloader so that each go routine // can convert data - var loc *time.Location - if rows.sc != nil { - loc = getCurrentLocation(rows.sc.cfg.Params) - } - err = stringToValue(&dest[i], rows.ChunkDownloader.getRowType()[i], row.RowSet[i], loc) + err = stringToValue(&dest[i], rows.ChunkDownloader.getRowType()[i], row.RowSet[i], rows.getLocation()) if err != nil { return err } @@ -213,7 +217,9 @@ func (rows *snowflakeRows) NextResultSet() error { return io.EOF } rows.ChunkDownloader = rows.ChunkDownloader.getNextChunkDownloader() - rows.ChunkDownloader.start() + if err := rows.ChunkDownloader.start(); err != nil { + return err + } } return rows.ChunkDownloader.nextResultSet() } diff --git a/vendor/github.com/snowflakedb/gosnowflake/s3_storage_client.go b/vendor/github.com/snowflakedb/gosnowflake/s3_storage_client.go index 28376d9e..37799c82 100644 --- a/vendor/github.com/snowflakedb/gosnowflake/s3_storage_client.go +++ b/vendor/github.com/snowflakedb/gosnowflake/s3_storage_client.go @@ -8,6 +8,7 @@ import ( "errors" "fmt" "io" + "net/http" "os" "strings" @@ -52,6 +53,9 @@ func (util *snowflakeS3Client) createClient(info *execResponseStageInfo, useAcce stageCredentials.AwsToken)), EndpointResolver: resolver, UseAccelerate: useAccelerateEndpoint, + HTTPClient: &http.Client{ + Transport: SnowflakeTransport, + }, }), nil } @@ -80,11 +84,7 @@ func (util *snowflakeS3Client) getFileHeader(meta *fileMetadata, filename string if errors.As(err, &ae) { if ae.ErrorCode() == notFound { meta.resStatus = notFoundFile - return &fileHeader{ - digest: "", - contentLength: 0, - encryptionMetadata: nil, - }, nil + return nil, fmt.Errorf("could not find file") } else if ae.ErrorCode() == expiredToken { meta.resStatus = renewToken return nil, fmt.Errorf("received expired token. 
renewing") @@ -93,6 +93,9 @@ func (util *snowflakeS3Client) getFileHeader(meta *fileMetadata, filename string meta.lastError = err return nil, fmt.Errorf("error while retrieving header") } + meta.resStatus = errStatus + meta.lastError = err + return nil, fmt.Errorf("unexpected error while retrieving header: %v", err) } meta.resStatus = uploaded @@ -104,13 +107,27 @@ func (util *snowflakeS3Client) getFileHeader(meta *fileMetadata, filename string out.Metadata[amzMatdesc], } } + contentLength := convertContentLength(out.ContentLength) return &fileHeader{ out.Metadata[sfcDigest], - out.ContentLength, + contentLength, &encMeta, }, nil } +// SNOW-974548 remove this function after upgrading AWS SDK +func convertContentLength(contentLength any) int64 { + switch t := contentLength.(type) { + case int64: + return t + case *int64: + if t != nil { + return *t + } + } + return 0 +} + type s3UploadAPI interface { Upload(ctx context.Context, params *s3.PutObjectInput, optFns ...func(*manager.Uploader)) (*manager.UploadOutput, error) } @@ -217,7 +234,7 @@ func (util *snowflakeS3Client) nativeDownloadFile( } } - f, err := os.OpenFile(fullDstFileName, os.O_CREATE|os.O_WRONLY, os.ModePerm) + f, err := os.OpenFile(fullDstFileName, os.O_CREATE|os.O_WRONLY, readWriteFileMode) if err != nil { return err } diff --git a/vendor/github.com/snowflakedb/gosnowflake/statement.go b/vendor/github.com/snowflakedb/gosnowflake/statement.go index 20300998..e3ce5b74 100644 --- a/vendor/github.com/snowflakedb/gosnowflake/statement.go +++ b/vendor/github.com/snowflakedb/gosnowflake/statement.go @@ -1,15 +1,23 @@ -// Copyright (c) 2017-2022 Snowflake Computing Inc. All rights reserved. +// Copyright (c) 2017-2023 Snowflake Computing Inc. All rights reserved. package gosnowflake import ( "context" "database/sql/driver" + "errors" + "fmt" ) +// SnowflakeStmt represents the prepared statement in driver. +type SnowflakeStmt interface { + GetQueryID() string +} + type snowflakeStmt struct { - sc *snowflakeConn - query string + sc *snowflakeConn + query string + lastQueryID string } func (stmt *snowflakeStmt) Close() error { @@ -26,20 +34,75 @@ func (stmt *snowflakeStmt) NumInput() int { func (stmt *snowflakeStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) { logger.WithContext(stmt.sc.ctx).Infoln("Stmt.ExecContext") - return stmt.sc.ExecContext(ctx, stmt.query, args) + return stmt.execInternal(ctx, args) } func (stmt *snowflakeStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) { logger.WithContext(stmt.sc.ctx).Infoln("Stmt.QueryContext") - return stmt.sc.QueryContext(ctx, stmt.query, args) + rows, err := stmt.sc.QueryContext(ctx, stmt.query, args) + if err != nil { + stmt.setQueryIDFromError(err) + return nil, err + } + r, ok := rows.(SnowflakeRows) + if !ok { + return nil, fmt.Errorf("interface convertion. 
expected type SnowflakeRows but got %T", rows) + } + stmt.lastQueryID = r.GetQueryID() + return rows, nil } func (stmt *snowflakeStmt) Exec(args []driver.Value) (driver.Result, error) { logger.WithContext(stmt.sc.ctx).Infoln("Stmt.Exec") - return stmt.sc.Exec(stmt.query, args) + return stmt.execInternal(context.Background(), toNamedValues(args)) +} + +func (stmt *snowflakeStmt) execInternal(ctx context.Context, args []driver.NamedValue) (driver.Result, error) { + logger.WithContext(stmt.sc.ctx).Debugln("Stmt.execInternal") + if ctx == nil { + ctx = context.Background() + } + stmtCtx := context.WithValue(ctx, executionType, executionTypeStatement) + result, err := stmt.sc.ExecContext(stmtCtx, stmt.query, args) + if err != nil { + stmt.setQueryIDFromError(err) + return nil, err + } + rnr, ok := result.(*snowflakeResultNoRows) + if ok { + stmt.lastQueryID = rnr.GetQueryID() + return driver.ResultNoRows, nil + } + r, ok := result.(SnowflakeResult) + if !ok { + return nil, fmt.Errorf("interface convertion. expected type SnowflakeResult but got %T", result) + } + stmt.lastQueryID = r.GetQueryID() + return result, err } func (stmt *snowflakeStmt) Query(args []driver.Value) (driver.Rows, error) { logger.WithContext(stmt.sc.ctx).Infoln("Stmt.Query") - return stmt.sc.Query(stmt.query, args) + rows, err := stmt.sc.Query(stmt.query, args) + if err != nil { + stmt.setQueryIDFromError(err) + return nil, err + } + r, ok := rows.(SnowflakeRows) + if !ok { + return nil, fmt.Errorf("interface convertion. expected type SnowflakeRows but got %T", rows) + } + stmt.lastQueryID = r.GetQueryID() + return rows, err +} + +func (stmt *snowflakeStmt) GetQueryID() string { + return stmt.lastQueryID +} + +func (stmt *snowflakeStmt) setQueryIDFromError(err error) { + var snowflakeError *SnowflakeError + if errors.As(err, &snowflakeError) { + stmt.lastQueryID = snowflakeError.QueryID + } } diff --git a/vendor/github.com/snowflakedb/gosnowflake/storage_client.go b/vendor/github.com/snowflakedb/gosnowflake/storage_client.go index a7385c27..ee746a64 100644 --- a/vendor/github.com/snowflakedb/gosnowflake/storage_client.go +++ b/vendor/github.com/snowflakedb/gosnowflake/storage_client.go @@ -88,7 +88,12 @@ func (rsu *remoteStorageUtil) uploadOneFile(meta *fileMetadata) error { for retry := 0; retry < maxRetry; retry++ { if !meta.overwrite { header, err := utilClass.getFileHeader(meta, meta.dstFileName) - if err != nil { + if meta.resStatus == notFoundFile { + err := utilClass.uploadFile(dataFile, meta, encryptMeta, maxConcurrency, meta.options.MultiPartThreshold) + if err != nil { + logger.Warnf("Error uploading %v. err: %v", dataFile, err) + } + } else if err != nil { return err } if header != nil && meta.resStatus == uploaded { diff --git a/vendor/github.com/snowflakedb/gosnowflake/telemetry.go b/vendor/github.com/snowflakedb/gosnowflake/telemetry.go index 542b8a0c..1c11c9a5 100644 --- a/vendor/github.com/snowflakedb/gosnowflake/telemetry.go +++ b/vendor/github.com/snowflakedb/gosnowflake/telemetry.go @@ -97,7 +97,7 @@ func (st *snowflakeTelemetry) sendBatch() error { } resp, err := st.sr.FuncPost(context.Background(), st.sr, st.sr.getFullURL(telemetryPath, nil), headers, body, - defaultTelemetryTimeout, true) + defaultTelemetryTimeout, defaultTimeProvider, nil) if err != nil { logger.Info("failed to upload metrics to telemetry. 
err: %v", err) return err diff --git a/vendor/github.com/snowflakedb/gosnowflake/util.go b/vendor/github.com/snowflakedb/gosnowflake/util.go index 17db5f8d..42350284 100644 --- a/vendor/github.com/snowflakedb/gosnowflake/util.go +++ b/vendor/github.com/snowflakedb/gosnowflake/util.go @@ -5,27 +5,32 @@ package gosnowflake import ( "context" "database/sql/driver" + "fmt" "io" + "math/rand" + "os" "strings" "sync" "time" - "github.com/apache/arrow/go/v12/arrow/memory" + "github.com/apache/arrow/go/v14/arrow/memory" ) type contextKey string const ( - multiStatementCount contextKey = "MULTI_STATEMENT_COUNT" - asyncMode contextKey = "ASYNC_MODE_QUERY" - queryIDChannel contextKey = "QUERY_ID_CHANNEL" - snowflakeRequestIDKey contextKey = "SNOWFLAKE_REQUEST_ID" - fetchResultByID contextKey = "SF_FETCH_RESULT_BY_ID" - fileStreamFile contextKey = "STREAMING_PUT_FILE" - fileTransferOptions contextKey = "FILE_TRANSFER_OPTIONS" - enableHigherPrecision contextKey = "ENABLE_HIGHER_PRECISION" - arrowBatches contextKey = "ARROW_BATCHES" - arrowAlloc contextKey = "ARROW_ALLOC" + multiStatementCount contextKey = "MULTI_STATEMENT_COUNT" + asyncMode contextKey = "ASYNC_MODE_QUERY" + queryIDChannel contextKey = "QUERY_ID_CHANNEL" + snowflakeRequestIDKey contextKey = "SNOWFLAKE_REQUEST_ID" + fetchResultByID contextKey = "SF_FETCH_RESULT_BY_ID" + fileStreamFile contextKey = "STREAMING_PUT_FILE" + fileTransferOptions contextKey = "FILE_TRANSFER_OPTIONS" + enableHigherPrecision contextKey = "ENABLE_HIGHER_PRECISION" + arrowBatches contextKey = "ARROW_BATCHES" + arrowAlloc contextKey = "ARROW_ALLOC" + enableOriginalTimestamp contextKey = "ENABLE_ORIGINAL_TIMESTAMP" + queryTag contextKey = "QUERY_TAG" ) const ( @@ -34,6 +39,10 @@ const ( streamChunkDownload contextKey = "STREAM_CHUNK_DOWNLOAD" ) +var ( + defaultTimeProvider = &unixTimeProvider{} +) + // WithMultiStatement returns a context that allows the user to execute the desired number of sql queries in one query func WithMultiStatement(ctx context.Context, num int) (context.Context, error) { return context.WithValue(ctx, multiStatementCount, num), nil @@ -99,6 +108,19 @@ func WithArrowAllocator(ctx context.Context, pool memory.Allocator) context.Cont return context.WithValue(ctx, arrowAlloc, pool) } +// WithOriginalTimestamp in combination with WithArrowBatches returns a context +// that allows users to retrieve arrow.Record with original timestamp struct returned by Snowflake. +// It can be used in case arrow.Timestamp cannot fit original timestamp values. 
+func WithOriginalTimestamp(ctx context.Context) context.Context { + return context.WithValue(ctx, enableOriginalTimestamp, true) +} + +// WithQueryTag returns a context that will set the given tag as the QUERY_TAG +// parameter on any queries that are run +func WithQueryTag(ctx context.Context, tag string) context.Context { + return context.WithValue(ctx, queryTag, tag) +} + // Get the request ID from the context if specified, otherwise generate one func getOrGenerateRequestIDFromContext(ctx context.Context) UUID { requestID, ok := ctx.Value(snowflakeRequestIDKey).(UUID) @@ -222,3 +244,38 @@ func escapeForCSV(value string) string { } return value } + +// GetFromEnv is used to get the value of an environment variable from the system +func GetFromEnv(name string, failOnMissing bool) (string, error) { + if value := os.Getenv(name); value != "" { + return value, nil + } + if failOnMissing { + return "", fmt.Errorf("%v environment variable is not set", name) + } + return "", nil +} + +type currentTimeProvider interface { + currentTime() int64 +} + +type unixTimeProvider struct { +} + +func (utp *unixTimeProvider) currentTime() int64 { + return time.Now().UnixMilli() +} + +func contains[T comparable](s []T, e T) bool { + for _, v := range s { + if v == e { + return true + } + } + return false +} + +func chooseRandomFromRange(min float64, max float64) float64 { + return rand.Float64()*(max-min) + min +} diff --git a/vendor/github.com/snowflakedb/gosnowflake/version.go b/vendor/github.com/snowflakedb/gosnowflake/version.go index e3b8c145..a719774a 100644 --- a/vendor/github.com/snowflakedb/gosnowflake/version.go +++ b/vendor/github.com/snowflakedb/gosnowflake/version.go @@ -3,4 +3,4 @@ package gosnowflake // SnowflakeGoDriverVersion is the version of Go Snowflake Driver. -const SnowflakeGoDriverVersion = "1.6.22" +const SnowflakeGoDriverVersion = "1.7.2" diff --git a/vendor/github.com/vertica/vertica-sql-go/CONTRIBUTING.md b/vendor/github.com/vertica/vertica-sql-go/CONTRIBUTING.md index a92a86f9..92fb2c6d 100644 --- a/vendor/github.com/vertica/vertica-sql-go/CONTRIBUTING.md +++ b/vendor/github.com/vertica/vertica-sql-go/CONTRIBUTING.md @@ -10,7 +10,7 @@ This document will guide you through the contribution process. There are a numbe If you find a bug, submit an [issue](https://github.com/vertica/vertica-sql-go/issues) with a complete and reproducible bug report. If the issue can't be reproduced, it will be closed. If you opened an issue, but figured out the answer later on your own, comment on the issue to let people know, then close the issue. -For issues (e.g. security related issues) that are **not suitable** to be reported publicly on the GitHub issue system, report your issues to [Vertica team](mailto:vertica-opensrc@microfocus.com) directly or file a case with Vertica support if you have a support account. +For issues (e.g. security related issues) that are **not suitable** to be reported publicly on the GitHub issue system, report your issues to [Vertica team](mailto:vertica-opensrc@opentext.com) directly or file a case with Vertica support if you have a support account. 
# Feature Requests @@ -75,13 +75,10 @@ were args are one of the following: | Query Argument | Description | Values | |----------------|-------------|--------| -| use_prepared_statements | whether to use client-side query interpolation or server-side argument binding | true = (default) use server-side bindings | -| | | false = user client side interpolation | -| tlsmode | the ssl policy for this connection | 'none' (default) = don't use SSL for this connection | -| | | 'server' = server must support SSL, but skip verification (INSECURE!) | -| | | 'server-strict' = server must support SSL | -| locator | host and port of the Vertica connection | (default) localhost:5433 -| user | Vertica user ID | (default) the userid of the running user | +| use_prepared_statements | whether to use client-side query interpolation or server-side argument binding |
  • true = (default) use server-side bindings
  • false = use client-side interpolation
  | +| tlsmode | the ssl policy for this connection |
  • none (default) = don't use SSL for this connection
  • server = server must support SSL, but skip verification (INSECURE!)
  • server-strict = server must support SSL
  • custom = use custom TLS config (Need to generate certs with `resources/tests/genCerts.sh` in advance)
  |
+| locator | host and port of the Vertica connection | (default) localhost:5433 |
+| user | Vertica user name | (default) dbadmin |
| password | Vertica password for the connecting user | (default) (empty) |
| oauth_access_token | the OAuth Access Token to connect to Vertica, only used for OAuth Authentication tests | (default) (empty) |
diff --git a/vendor/github.com/vertica/vertica-sql-go/README.md b/vendor/github.com/vertica/vertica-sql-go/README.md
index 5124c844..bb37f75d 100644
--- a/vendor/github.com/vertica/vertica-sql-go/README.md
+++ b/vendor/github.com/vertica/vertica-sql-go/README.md
@@ -8,7 +8,7 @@ vertica-sql-go is a native Go adapter for the Vertica (http://www.vertica.com) d
Please check out [release notes](https://github.com/vertica/vertica-sql-go/releases) to learn about the latest improvements.
-vertica-sql-go has been tested with Vertica 12.0.3 and Go 1.16/1.17/1.18/1.19/1.20.
+vertica-sql-go has been tested with Vertica 23.3.0 and Go 1.16/1.17/1.18/1.19/1.20.
## Installation
@@ -101,6 +101,7 @@ Currently supported query arguments are:
| client_label | Sets a label for the connection on the server. This value appears in the `client_label` column of the SESSIONS system table. | (default) vertica-sql-go-{version}-{pid}-{timestamp} |
| autocommit | Controls whether the connection automatically commits transactions. | 1 = (default) on
    0 = off| | oauth_access_token | To authenticate via OAuth, provide an OAuth Access Token that authorizes a user to the database. | unspecified by default, if specified then *user* is optional | +| workload | Sets workload property of the session, enabling use of workload routing | empty string by default. Valid values are workload names that already exist in a workload routing rule on the server. If a workload name that doesn't exist is entered, the server will reject it and it will be set to the default empty string | To ping the server and validate a connection (as the connection isn't necessarily created at that moment), simply call the *PingContext()* method. diff --git a/vendor/github.com/vertica/vertica-sql-go/connection.go b/vendor/github.com/vertica/vertica-sql-go/connection.go index 316aebbf..f212c984 100644 --- a/vendor/github.com/vertica/vertica-sql-go/connection.go +++ b/vendor/github.com/vertica/vertica-sql-go/connection.go @@ -113,6 +113,7 @@ type connection struct { serverTZOffset string dead bool // used if a ROLLBACK severity error is encountered sessMutex sync.Mutex + workload string } // Begin - Begin starts and returns a new transaction. (DEPRECATED) @@ -260,6 +261,9 @@ func newConnection(connString string) (*connection, error) { sslFlag = tlsModeNone } + // Read Workload flag + result.workload = result.connURL.Query().Get("workload") + result.conn, err = result.establishSocketConnection() if err != nil { @@ -444,6 +448,7 @@ func (v *connection) handshake() error { ClientPID: v.clientPID, Autocommit: v.autocommit, OAuthAccessToken: v.oauthaccesstoken, + Workload: v.workload, } if err := v.sendMessage(msg); err != nil { @@ -507,13 +512,23 @@ func (v *connection) initializeSession() error { return fmt.Errorf("can't get server timezone: %s", str) } - v.serverTZOffset = str[len(str)-3:] + v.serverTZOffset = getTimeZoneOffset(str) - connectionLogger.Debug("Setting server timezone offset to %s", str[len(str)-3:]) + connectionLogger.Debug("Setting server timezone offset to %s", v.serverTZOffset) return nil } +func getTimeZoneOffset(str string) string { + for i := len(str) - 1; i >= 0 && i >= len(str)-8; i-- { + ch := str[i] + if ch == '+' || ch == '-' { + return str[i:] + } + } + return "+00" +} + func (v *connection) defaultMessageHandler(bMsg msgs.BackEndMsg) (bool, error) { handled := true diff --git a/vendor/github.com/vertica/vertica-sql-go/driver.go b/vendor/github.com/vertica/vertica-sql-go/driver.go index 8c2b10bd..bee34c4e 100644 --- a/vendor/github.com/vertica/vertica-sql-go/driver.go +++ b/vendor/github.com/vertica/vertica-sql-go/driver.go @@ -46,8 +46,8 @@ type Driver struct{} const ( driverName string = "vertica-sql-go" - driverVersion string = "1.3.2" - protocolVersion uint32 = 0x0003000C + driverVersion string = "1.3.3" + protocolVersion uint32 = 0x0003000F // 3.15 ) var driverLogger = logger.New("driver") diff --git a/vendor/github.com/vertica/vertica-sql-go/msgs/festartupmsg.go b/vendor/github.com/vertica/vertica-sql-go/msgs/festartupmsg.go index 7625d7f6..d1e62b7b 100644 --- a/vendor/github.com/vertica/vertica-sql-go/msgs/festartupmsg.go +++ b/vendor/github.com/vertica/vertica-sql-go/msgs/festartupmsg.go @@ -34,6 +34,7 @@ package msgs import ( "fmt" + "os" "os/user" "github.com/elastic/go-sysinfo" @@ -52,6 +53,8 @@ type FEStartupMsg struct { OSUsername string Autocommit string OAuthAccessToken string + ClientOSHostname string + Workload string } // Flatten docs @@ -70,6 +73,12 @@ func (m *FEStartupMsg) Flatten() ([]byte, byte) { m.OSUsername = 
currentUser.Username
}
+
+	m.ClientOSHostname = ""
+	hostname, err := os.Hostname()
+	if err == nil {
+		m.ClientOSHostname = hostname
+	}
+
buf := newMsgBuffer()
const fixedProtocolVersion uint32 = 0x00030005
buf.appendUint32(fixedProtocolVersion)
@@ -97,6 +106,9 @@ func (m *FEStartupMsg) Flatten() ([]byte, byte) {
buf.appendLabeledString("client_os", m.ClientOS)
buf.appendLabeledString("client_os_user_name", m.OSUsername)
buf.appendLabeledString("autocommit", m.Autocommit)
+	buf.appendLabeledString("protocol_compat", "VER")
+	buf.appendLabeledString("client_os_hostname", m.ClientOSHostname)
+	buf.appendLabeledString("workload", m.Workload)
buf.appendBytes([]byte{0})
return buf.bytes(), 0
@@ -104,7 +116,7 @@ func (m *FEStartupMsg) Flatten() ([]byte, byte) {
func (m *FEStartupMsg) String() string {
return fmt.Sprintf(
-		"Startup (packet): ProtocolVersion:%08X, DriverName='%s', DriverVersion='%s', UserName='%s', Database='%s', SessionID='%s', ClientPID=%d, ClientOS='%s', ClientOSUserName='%s', Autocommit='%s', OAuthAccessToken=",
+		"Startup (packet): ProtocolVersion:%08X, DriverName='%s', DriverVersion='%s', UserName='%s', Database='%s', SessionID='%s', ClientPID=%d, ClientOS='%s', ClientOSUserName='%s', ClientOSHostname='%s', Autocommit='%s', OAuthAccessToken=, Workload='%s'",
m.ProtocolVersion,
m.DriverName,
m.DriverVersion,
@@ -114,6 +126,8 @@ func (m *FEStartupMsg) String() string {
m.ClientPID,
m.ClientOS,
m.OSUsername,
+		m.ClientOSHostname,
m.Autocommit,
-		len(m.OAuthAccessToken))
+		len(m.OAuthAccessToken),
+		m.Workload)
}
diff --git a/vendor/github.com/google/flatbuffers/LICENSE.txt b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE
similarity index 99%
rename from vendor/github.com/google/flatbuffers/LICENSE.txt
rename to vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE
index d6456956..261eeb9e 100644
--- a/vendor/github.com/google/flatbuffers/LICENSE.txt
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE
@@ -1,4 +1,3 @@
-
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go
new file mode 100644
index 00000000..92b8cf73
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go
@@ -0,0 +1,61 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"net/url"
+	"strings"
+)
+
+// DefaultClient is the default Client and is used by Get, Head, Post and PostForm.
+// Please be careful of initialization order - for example, if you change
+// the global propagator, the DefaultClient might still be using the old one.
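+//
+// A minimal usage sketch (assuming a ctx context.Context is in scope):
+//
+//	resp, err := otelhttp.Get(ctx, "https://example.com/")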
+var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)} + +// Get is a convenient replacement for http.Get that adds a span around the request. +func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) { + req, err := http.NewRequestWithContext(ctx, "GET", targetURL, nil) + if err != nil { + return nil, err + } + return DefaultClient.Do(req) +} + +// Head is a convenient replacement for http.Head that adds a span around the request. +func Head(ctx context.Context, targetURL string) (resp *http.Response, err error) { + req, err := http.NewRequestWithContext(ctx, "HEAD", targetURL, nil) + if err != nil { + return nil, err + } + return DefaultClient.Do(req) +} + +// Post is a convenient replacement for http.Post that adds a span around the request. +func Post(ctx context.Context, targetURL, contentType string, body io.Reader) (resp *http.Response, err error) { + req, err := http.NewRequestWithContext(ctx, "POST", targetURL, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", contentType) + return DefaultClient.Do(req) +} + +// PostForm is a convenient replacement for http.PostForm that adds a span around the request. +func PostForm(ctx context.Context, targetURL string, data url.Values) (resp *http.Response, err error) { + return Post(ctx, targetURL, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go new file mode 100644 index 00000000..9509014e --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go @@ -0,0 +1,46 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "net/http" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +// Attribute keys that can be added to a span. +const ( + ReadBytesKey = attribute.Key("http.read_bytes") // if anything was read from the request body, the total number of bytes read + ReadErrorKey = attribute.Key("http.read_error") // If an error occurred while reading a request, the string of the error (io.EOF is not recorded) + WroteBytesKey = attribute.Key("http.wrote_bytes") // if anything was written to the response writer, the total number of bytes written + WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded) +) + +// Server HTTP metrics. 
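+// The hc instance below wires these keys to the semconv v1.20.0 package imported above.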
+const ( + RequestCount = "http.server.request_count" // Incoming request count total + RequestContentLength = "http.server.request_content_length" // Incoming request bytes total + ResponseContentLength = "http.server.response_content_length" // Incoming response bytes total + ServerLatency = "http.server.duration" // Incoming end to end duration, milliseconds +) + +// Filter is a predicate used to determine whether a given http.request should +// be traced. A Filter must return true if the request should be traced. +type Filter func(*http.Request) bool + +func newTracer(tp trace.TracerProvider) trace.Tracer { + return tp.Tracer(ScopeName, trace.WithInstrumentationVersion(Version())) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go new file mode 100644 index 00000000..a1b5b5e5 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go @@ -0,0 +1,207 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "context" + "net/http" + "net/http/httptrace" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" +) + +// ScopeName is the instrumentation scope name. +const ScopeName = "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +// config represents the configuration options available for the http.Handler +// and http.Transport types. +type config struct { + ServerName string + Tracer trace.Tracer + Meter metric.Meter + Propagators propagation.TextMapPropagator + SpanStartOptions []trace.SpanStartOption + PublicEndpoint bool + PublicEndpointFn func(*http.Request) bool + ReadEvent bool + WriteEvent bool + Filters []Filter + SpanNameFormatter func(string, *http.Request) string + ClientTrace func(context.Context) *httptrace.ClientTrace + + TracerProvider trace.TracerProvider + MeterProvider metric.MeterProvider +} + +// Option interface used for setting optional config properties. +type Option interface { + apply(*config) +} + +type optionFunc func(*config) + +func (o optionFunc) apply(c *config) { + o(c) +} + +// newConfig creates a new config struct and applies opts to it. +func newConfig(opts ...Option) *config { + c := &config{ + Propagators: otel.GetTextMapPropagator(), + MeterProvider: otel.GetMeterProvider(), + } + for _, opt := range opts { + opt.apply(c) + } + + // Tracer is only initialized if manually specified. Otherwise, can be passed with the tracing context. + if c.TracerProvider != nil { + c.Tracer = newTracer(c.TracerProvider) + } + + c.Meter = c.MeterProvider.Meter( + ScopeName, + metric.WithInstrumentationVersion(Version()), + ) + + return c +} + +// WithTracerProvider specifies a tracer provider to use for creating a tracer. 
+// If none is specified, the global provider is used.
+func WithTracerProvider(provider trace.TracerProvider) Option {
+	return optionFunc(func(cfg *config) {
+		if provider != nil {
+			cfg.TracerProvider = provider
+		}
+	})
+}
+
+// WithMeterProvider specifies a meter provider to use for creating a meter.
+// If none is specified, the global provider is used.
+func WithMeterProvider(provider metric.MeterProvider) Option {
+	return optionFunc(func(cfg *config) {
+		if provider != nil {
+			cfg.MeterProvider = provider
+		}
+	})
+}
+
+// WithPublicEndpoint configures the Handler to link the span with an incoming
+// span context. If this option is not provided, then the association is a child
+// association instead of a link.
+func WithPublicEndpoint() Option {
+	return optionFunc(func(c *config) {
+		c.PublicEndpoint = true
+	})
+}
+
+// WithPublicEndpointFn runs with every request, and allows conditionally
+// configuring the Handler to link the span with an incoming span context. If
+// this option is not provided or returns false, then the association is a
+// child association instead of a link.
+// Note: WithPublicEndpoint takes precedence over WithPublicEndpointFn.
+func WithPublicEndpointFn(fn func(*http.Request) bool) Option {
+	return optionFunc(func(c *config) {
+		c.PublicEndpointFn = fn
+	})
+}
+
+// WithPropagators configures specific propagators. If this
+// option isn't specified, then the global TextMapPropagator is used.
+func WithPropagators(ps propagation.TextMapPropagator) Option {
+	return optionFunc(func(c *config) {
+		if ps != nil {
+			c.Propagators = ps
+		}
+	})
+}
+
+// WithSpanOptions configures an additional set of
+// trace.SpanOptions, which are applied to each new span.
+func WithSpanOptions(opts ...trace.SpanStartOption) Option {
+	return optionFunc(func(c *config) {
+		c.SpanStartOptions = append(c.SpanStartOptions, opts...)
+	})
+}
+
+// WithFilter adds a filter to the list of filters used by the handler.
+// If any filter indicates to exclude a request then the request will not be
+// traced. All filters must allow a request to be traced for a Span to be created.
+// If no filters are provided then all requests are traced.
+// Filters will be invoked for each processed request; it is advised to make them
+// simple and fast.
+func WithFilter(f Filter) Option {
+	return optionFunc(func(c *config) {
+		c.Filters = append(c.Filters, f)
+	})
+}
+
+type event int
+
+// Different types of events that can be recorded, see WithMessageEvents.
+const (
+	ReadEvents event = iota
+	WriteEvents
+)
+
+// WithMessageEvents configures the Handler to record the specified events
+// (span.AddEvent) on spans. By default only summary attributes are added at the
+// end of the request.
+//
+// Valid events are:
+//   - ReadEvents: Record the number of bytes read after every http.Request.Body.Read
+//     using the ReadBytesKey
+//   - WriteEvents: Record the number of bytes written after every http.ResponseWriter.Write
+//     using the WriteBytesKey
+func WithMessageEvents(events ...event) Option {
+	return optionFunc(func(c *config) {
+		for _, e := range events {
+			switch e {
+			case ReadEvents:
+				c.ReadEvent = true
+			case WriteEvents:
+				c.WriteEvent = true
+			}
+		}
+	})
+}
+
+// WithSpanNameFormatter takes a function that will be called on every
+// request and the returned string will become the Span Name.
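+//
+// A minimal sketch of such a formatter:
+//
+//	WithSpanNameFormatter(func(operation string, r *http.Request) string {
+//		return operation + " " + r.Method
+//	})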
+func WithSpanNameFormatter(f func(operation string, r *http.Request) string) Option { + return optionFunc(func(c *config) { + c.SpanNameFormatter = f + }) +} + +// WithClientTrace takes a function that returns client trace instance that will be +// applied to the requests sent through the otelhttp Transport. +func WithClientTrace(f func(context.Context) *httptrace.ClientTrace) Option { + return optionFunc(func(c *config) { + c.ClientTrace = f + }) +} + +// WithServerName returns an Option that sets the name of the (virtual) server +// handling requests. +func WithServerName(server string) Option { + return optionFunc(func(c *config) { + c.ServerName = server + }) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go new file mode 100644 index 00000000..38c7f01c --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go @@ -0,0 +1,18 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package otelhttp provides an http.Handler and functions that are intended +// to be used to add tracing by wrapping existing handlers (with Handler) and +// routes WithRouteTag. +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go new file mode 100644 index 00000000..af84f0e4 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -0,0 +1,283 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "io" + "net/http" + "time" + + "github.com/felixge/httpsnoop" + + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/propagation" + semconv "go.opentelemetry.io/otel/semconv/v1.20.0" + "go.opentelemetry.io/otel/trace" +) + +// middleware is an http middleware which wraps the next handler in a span. 
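+// A middleware value is built once by NewMiddleware and shared by every request
+// the returned handler serves.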
+type middleware struct { + operation string + server string + + tracer trace.Tracer + meter metric.Meter + propagators propagation.TextMapPropagator + spanStartOptions []trace.SpanStartOption + readEvent bool + writeEvent bool + filters []Filter + spanNameFormatter func(string, *http.Request) string + publicEndpoint bool + publicEndpointFn func(*http.Request) bool + + requestBytesCounter metric.Int64Counter + responseBytesCounter metric.Int64Counter + serverLatencyMeasure metric.Float64Histogram +} + +func defaultHandlerFormatter(operation string, _ *http.Request) string { + return operation +} + +// NewHandler wraps the passed handler in a span named after the operation and +// enriches it with metrics. +func NewHandler(handler http.Handler, operation string, opts ...Option) http.Handler { + return NewMiddleware(operation, opts...)(handler) +} + +// NewMiddleware returns a tracing and metrics instrumentation middleware. +// The handler returned by the middleware wraps a handler +// in a span named after the operation and enriches it with metrics. +func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Handler { + h := middleware{ + operation: operation, + } + + defaultOpts := []Option{ + WithSpanOptions(trace.WithSpanKind(trace.SpanKindServer)), + WithSpanNameFormatter(defaultHandlerFormatter), + } + + c := newConfig(append(defaultOpts, opts...)...) + h.configure(c) + h.createMeasures() + + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + h.serveHTTP(w, r, next) + }) + } +} + +func (h *middleware) configure(c *config) { + h.tracer = c.Tracer + h.meter = c.Meter + h.propagators = c.Propagators + h.spanStartOptions = c.SpanStartOptions + h.readEvent = c.ReadEvent + h.writeEvent = c.WriteEvent + h.filters = c.Filters + h.spanNameFormatter = c.SpanNameFormatter + h.publicEndpoint = c.PublicEndpoint + h.publicEndpointFn = c.PublicEndpointFn + h.server = c.ServerName +} + +func handleErr(err error) { + if err != nil { + otel.Handle(err) + } +} + +func (h *middleware) createMeasures() { + var err error + h.requestBytesCounter, err = h.meter.Int64Counter( + RequestContentLength, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP request content length (uncompressed)"), + ) + handleErr(err) + + h.responseBytesCounter, err = h.meter.Int64Counter( + ResponseContentLength, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP response content length (uncompressed)"), + ) + handleErr(err) + + h.serverLatencyMeasure, err = h.meter.Float64Histogram( + ServerLatency, + metric.WithUnit("ms"), + metric.WithDescription("Measures the duration of HTTP request handling"), + ) + handleErr(err) +} + +// serveHTTP sets up tracing and calls the given next http.Handler with the span +// context injected into the request context. +func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http.Handler) { + requestStartTime := time.Now() + for _, f := range h.filters { + if !f(r) { + // Simply pass through to the handler if a filter rejects the request + next.ServeHTTP(w, r) + return + } + } + + ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header)) + opts := []trace.SpanStartOption{ + trace.WithAttributes(semconvutil.HTTPServerRequest(h.server, r)...), + } + if h.server != "" { + hostAttr := semconv.NetHostName(h.server) + opts = append(opts, trace.WithAttributes(hostAttr)) + } + opts = append(opts, h.spanStartOptions...) 
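+	// For public endpoints, start a new root span and only link the incoming remote span context.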
+ if h.publicEndpoint || (h.publicEndpointFn != nil && h.publicEndpointFn(r.WithContext(ctx))) { + opts = append(opts, trace.WithNewRoot()) + // Linking incoming span context if any for public endpoint. + if s := trace.SpanContextFromContext(ctx); s.IsValid() && s.IsRemote() { + opts = append(opts, trace.WithLinks(trace.Link{SpanContext: s})) + } + } + + tracer := h.tracer + + if tracer == nil { + if span := trace.SpanFromContext(r.Context()); span.SpanContext().IsValid() { + tracer = newTracer(span.TracerProvider()) + } else { + tracer = newTracer(otel.GetTracerProvider()) + } + } + + ctx, span := tracer.Start(ctx, h.spanNameFormatter(h.operation, r), opts...) + defer span.End() + + readRecordFunc := func(int64) {} + if h.readEvent { + readRecordFunc = func(n int64) { + span.AddEvent("read", trace.WithAttributes(ReadBytesKey.Int64(n))) + } + } + + var bw bodyWrapper + // if request body is nil or NoBody, we don't want to mutate the body as it + // will affect the identity of it in an unforeseeable way because we assert + // ReadCloser fulfills a certain interface and it is indeed nil or NoBody. + if r.Body != nil && r.Body != http.NoBody { + bw.ReadCloser = r.Body + bw.record = readRecordFunc + r.Body = &bw + } + + writeRecordFunc := func(int64) {} + if h.writeEvent { + writeRecordFunc = func(n int64) { + span.AddEvent("write", trace.WithAttributes(WroteBytesKey.Int64(n))) + } + } + + rww := &respWriterWrapper{ + ResponseWriter: w, + record: writeRecordFunc, + ctx: ctx, + props: h.propagators, + statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything + } + + // Wrap w to use our ResponseWriter methods while also exposing + // other interfaces that w may implement (http.CloseNotifier, + // http.Flusher, http.Hijacker, http.Pusher, io.ReaderFrom). + + w = httpsnoop.Wrap(w, httpsnoop.Hooks{ + Header: func(httpsnoop.HeaderFunc) httpsnoop.HeaderFunc { + return rww.Header + }, + Write: func(httpsnoop.WriteFunc) httpsnoop.WriteFunc { + return rww.Write + }, + WriteHeader: func(httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc { + return rww.WriteHeader + }, + }) + + labeler := &Labeler{} + ctx = injectLabeler(ctx, labeler) + + next.ServeHTTP(w, r.WithContext(ctx)) + + setAfterServeAttributes(span, bw.read, rww.written, rww.statusCode, bw.err, rww.err) + + // Add metrics + attributes := append(labeler.Get(), semconvutil.HTTPServerRequestMetrics(h.server, r)...) + if rww.statusCode > 0 { + attributes = append(attributes, semconv.HTTPStatusCode(rww.statusCode)) + } + o := metric.WithAttributes(attributes...) + h.requestBytesCounter.Add(ctx, bw.read, o) + h.responseBytesCounter.Add(ctx, rww.written, o) + + // Use floating point division here for higher precision (instead of Millisecond method). + elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) + + h.serverLatencyMeasure.Record(ctx, elapsedTime, o) +} + +func setAfterServeAttributes(span trace.Span, read, wrote int64, statusCode int, rerr, werr error) { + attributes := []attribute.KeyValue{} + + // TODO: Consider adding an event after each read and write, possibly as an + // option (defaulting to off), so as to not create needlessly verbose spans. 
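+	// Only attach size and error attributes when there is something to report; io.EOF is expected and not recorded.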
+ if read > 0 { + attributes = append(attributes, ReadBytesKey.Int64(read)) + } + if rerr != nil && rerr != io.EOF { + attributes = append(attributes, ReadErrorKey.String(rerr.Error())) + } + if wrote > 0 { + attributes = append(attributes, WroteBytesKey.Int64(wrote)) + } + if statusCode > 0 { + attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) + } + span.SetStatus(semconvutil.HTTPServerStatus(statusCode)) + + if werr != nil && werr != io.EOF { + attributes = append(attributes, WriteErrorKey.String(werr.Error())) + } + span.SetAttributes(attributes...) +} + +// WithRouteTag annotates spans and metrics with the provided route name +// with HTTP route attribute. +func WithRouteTag(route string, h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + attr := semconv.HTTPRouteKey.String(route) + + span := trace.SpanFromContext(r.Context()) + span.SetAttributes(attr) + + labeler, _ := LabelerFromContext(r.Context()) + labeler.Add(attr) + + h.ServeHTTP(w, r) + }) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go new file mode 100644 index 00000000..edf4ce3d --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go @@ -0,0 +1,21 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + +// Generate semconvutil package: +//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv_test.go.tmpl "--data={}" --out=httpconv_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv.go.tmpl "--data={}" --out=httpconv.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv_test.go.tmpl "--data={}" --out=netconv_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv.go.tmpl "--data={}" --out=netconv.go diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go new file mode 100644 index 00000000..794d4c26 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go @@ -0,0 +1,602 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/semconvutil/httpconv.go.tmpl + +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
+
+import (
+	"fmt"
+	"net/http"
+	"strings"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
+)
+
+// HTTPClientResponse returns trace attributes for an HTTP response received by a
+// client from a server. It will return the following attributes if the related
+// values are defined in resp: "http.status_code",
+// "http.response_content_length".
+//
+// This does not add all OpenTelemetry required attributes for an HTTP event,
+// it assumes ClientRequest was used to create the span with a complete set of
+// attributes. A complete set of attributes can be generated using the
+// request contained in resp; for example:
+//
+//	append(HTTPClientResponse(resp), ClientRequest(resp.Request)...)
+func HTTPClientResponse(resp *http.Response) []attribute.KeyValue {
+	return hc.ClientResponse(resp)
+}
+
+// HTTPClientRequest returns trace attributes for an HTTP request made by a client.
+// The following attributes are always returned: "http.url", "http.flavor",
+// "http.method", "net.peer.name". The following attributes are returned if the
+// related values are defined in req: "net.peer.port", "http.user_agent",
+// "http.request_content_length", "enduser.id".
+func HTTPClientRequest(req *http.Request) []attribute.KeyValue {
+	return hc.ClientRequest(req)
+}
+
+// HTTPClientStatus returns a span status code and message for an HTTP status code
+// value received by a client.
+func HTTPClientStatus(code int) (codes.Code, string) {
+	return hc.ClientStatus(code)
+}
+
+// HTTPServerRequest returns trace attributes for an HTTP request received by a
+// server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "http.flavor", "http.target", "net.host.name". The following attributes are
+// returned if their related values are defined in req: "net.host.port",
+// "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id",
+// "http.client_ip".
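+//
+// A minimal usage sketch:
+//
+//	span.SetAttributes(HTTPServerRequest("", req)...)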
+func HTTPServerRequest(server string, req *http.Request) []attribute.KeyValue {
+	return hc.ServerRequest(server, req)
+}
+
+// HTTPServerRequestMetrics returns metric attributes for an HTTP request received by a
+// server.
+//
+// The server must be the primary server name if it is known. For example this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "http.flavor", "net.host.name". The following attributes are
+// returned if their related values are defined in req: "net.host.port".
+func HTTPServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue {
+	return hc.ServerRequestMetrics(server, req)
+}
+
+// HTTPServerStatus returns a span status code and message for an HTTP status code
+// value returned by a server. Status codes in the 400-499 range are not
+// returned as errors.
+func HTTPServerStatus(code int) (codes.Code, string) {
+	return hc.ServerStatus(code)
+}
+
+// HTTPRequestHeader returns the contents of h as attributes.
+//
+// Instrumentation should require an explicit configuration of which headers to
+// capture and then prune what they pass here. Including all headers can be a
+// security risk - explicit configuration helps avoid leaking sensitive
+// information.
+//
+// The User-Agent header is already captured in the http.user_agent attribute
+// from ClientRequest and ServerRequest. Instrumentation may provide an option
+// to capture that header here even though it is not recommended. Otherwise,
+// instrumentation should filter that out of what is passed.
+func HTTPRequestHeader(h http.Header) []attribute.KeyValue {
+	return hc.RequestHeader(h)
+}
+
+// HTTPResponseHeader returns the contents of h as attributes.
+//
+// Instrumentation should require an explicit configuration of which headers to
+// capture and then prune what they pass here. Including all headers can be a
+// security risk - explicit configuration helps avoid leaking sensitive
+// information.
+//
+// The User-Agent header is already captured in the http.user_agent attribute
+// from ClientRequest and ServerRequest. Instrumentation may provide an option
+// to capture that header here even though it is not recommended. Otherwise,
+// instrumentation should filter that out of what is passed.
+func HTTPResponseHeader(h http.Header) []attribute.KeyValue {
+	return hc.ResponseHeader(h)
+}
+
+// httpConv are the HTTP semantic convention attributes defined for a version
+// of the OpenTelemetry specification.
+// httpConv are the HTTP semantic convention attributes defined for a version
+// of the OpenTelemetry specification.
+type httpConv struct {
+	NetConv *netConv
+
+	HTTPClientIPKey              attribute.Key
+	HTTPMethodKey                attribute.Key
+	HTTPRequestContentLengthKey  attribute.Key
+	HTTPResponseContentLengthKey attribute.Key
+	HTTPRouteKey                 attribute.Key
+	HTTPSchemeHTTP               attribute.KeyValue
+	HTTPSchemeHTTPS              attribute.KeyValue
+	HTTPStatusCodeKey            attribute.Key
+	HTTPTargetKey                attribute.Key
+	HTTPURLKey                   attribute.Key
+	UserAgentOriginalKey         attribute.Key
+}
+
+var hc = &httpConv{
+	NetConv: nc,
+
+	HTTPClientIPKey:              semconv.HTTPClientIPKey,
+	HTTPMethodKey:                semconv.HTTPMethodKey,
+	HTTPRequestContentLengthKey:  semconv.HTTPRequestContentLengthKey,
+	HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey,
+	HTTPRouteKey:                 semconv.HTTPRouteKey,
+	HTTPSchemeHTTP:               semconv.HTTPSchemeHTTP,
+	HTTPSchemeHTTPS:              semconv.HTTPSchemeHTTPS,
+	HTTPStatusCodeKey:            semconv.HTTPStatusCodeKey,
+	HTTPTargetKey:                semconv.HTTPTargetKey,
+	HTTPURLKey:                   semconv.HTTPURLKey,
+	UserAgentOriginalKey:         semconv.UserAgentOriginalKey,
+}
+
+// ClientResponse returns attributes for an HTTP response received by a client
+// from a server. The following attributes are returned if the related values
+// are defined in resp: "http.status_code", "http.response_content_length".
+//
+// This does not add all OpenTelemetry required attributes for an HTTP event;
+// it assumes ClientRequest was used to create the span with a complete set of
+// attributes. A complete set of attributes can be generated using the
+// request contained in resp. For example:
+//
+//	append(ClientResponse(resp), ClientRequest(resp.Request)...)
+func (c *httpConv) ClientResponse(resp *http.Response) []attribute.KeyValue {
+	/* The following semantic conventions are returned if present:
+	http.status_code                int
+	http.response_content_length    int
+	*/
+	var n int
+	if resp.StatusCode > 0 {
+		n++
+	}
+	if resp.ContentLength > 0 {
+		n++
+	}
+
+	attrs := make([]attribute.KeyValue, 0, n)
+	if resp.StatusCode > 0 {
+		attrs = append(attrs, c.HTTPStatusCodeKey.Int(resp.StatusCode))
+	}
+	if resp.ContentLength > 0 {
+		attrs = append(attrs, c.HTTPResponseContentLengthKey.Int(int(resp.ContentLength)))
+	}
+	return attrs
+}
+
+// ClientRequest returns attributes for an HTTP request made by a client. The
+// following attributes are always returned: "http.url", "http.method",
+// "net.peer.name". The following attributes are returned if the related values
+// are defined in req: "net.peer.port", "user_agent.original",
+// "http.request_content_length".
+func (c *httpConv) ClientRequest(req *http.Request) []attribute.KeyValue {
+	/* The following semantic conventions are returned if present:
+	http.method                     string
+	user_agent.original             string
+	http.url                        string
+	net.peer.name                   string
+	net.peer.port                   int
+	http.request_content_length     int
+	*/
+
+	/* The following semantic conventions are not returned:
+	http.status_code                This requires the response. See ClientResponse.
+	http.response_content_length    This requires the response. See ClientResponse.
+	net.sock.family                 This requires the socket used.
+	net.sock.peer.addr              This requires the socket used.
+	net.sock.peer.name              This requires the socket used.
+	net.sock.peer.port              This requires the socket used.
+	http.resend_count               This is something outside of a single request.
+	net.protocol.name               The value in the Request is ignored, and the Go client will always use "http".
+	net.protocol.version            The value in the Request is ignored, and the Go client will always use 1.1 or 2.0.
+	*/
+	n := 3 // URL, peer name, and method.
+	var h string
+	if req.URL != nil {
+		h = req.URL.Host
+	}
+	peer, p := firstHostPort(h, req.Header.Get("Host"))
+	port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p)
+	if port > 0 {
+		n++
+	}
+	useragent := req.UserAgent()
+	if useragent != "" {
+		n++
+	}
+	if req.ContentLength > 0 {
+		n++
+	}
+
+	attrs := make([]attribute.KeyValue, 0, n)
+
+	attrs = append(attrs, c.method(req.Method))
+
+	var u string
+	if req.URL != nil {
+		// Remove any username/password info that may be in the URL.
+		userinfo := req.URL.User
+		req.URL.User = nil
+		u = req.URL.String()
+		// Restore any username/password info that was removed.
+		req.URL.User = userinfo
+	}
+	attrs = append(attrs, c.HTTPURLKey.String(u))
+
+	attrs = append(attrs, c.NetConv.PeerName(peer))
+	if port > 0 {
+		attrs = append(attrs, c.NetConv.PeerPort(port))
+	}
+
+	if useragent != "" {
+		attrs = append(attrs, c.UserAgentOriginalKey.String(useragent))
+	}
+
+	if l := req.ContentLength; l > 0 {
+		attrs = append(attrs, c.HTTPRequestContentLengthKey.Int64(l))
+	}
+
+	return attrs
+}
+
+// ServerRequest returns attributes for an HTTP request received by a server.
+//
+// The server must be the primary server name if it is known. For example, this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "http.target", "net.host.name". The following attributes are returned if the
+// related values are defined in req: "net.host.port", "net.sock.peer.addr",
+// "net.sock.peer.port", "user_agent.original", "http.client_ip",
+// "net.protocol.name", "net.protocol.version".
+func (c *httpConv) ServerRequest(server string, req *http.Request) []attribute.KeyValue {
+	/* The following semantic conventions are returned if present:
+	http.method             string
+	http.scheme             string
+	net.host.name           string
+	net.host.port           int
+	net.sock.peer.addr      string
+	net.sock.peer.port      int
+	user_agent.original     string
+	http.client_ip          string
+	net.protocol.name       string Note: not set if the value is "http".
+	net.protocol.version    string
+	http.target             string Note: doesn't include the query parameter.
+	*/
+
+	/* The following semantic conventions are not returned:
+	http.status_code                This requires the response.
+	http.request_content_length     This requires the len() of body, which can mutate it.
+	http.response_content_length    This requires the response.
+	http.route                      This is not available.
+	net.sock.peer.name              This would require a DNS lookup.
+	net.sock.host.addr              The request doesn't have access to the underlying socket.
+	net.sock.host.port              The request doesn't have access to the underlying socket.
+	*/
+	n := 3 // Method, scheme, and host name.
+	var host string
+	var p int
+	if server == "" {
+		host, p = splitHostPort(req.Host)
+	} else {
+		// Prioritize the primary server name.
+		host, p = splitHostPort(server)
+		if p < 0 {
+			_, p = splitHostPort(req.Host)
+		}
+	}
+	hostPort := requiredHTTPPort(req.TLS != nil, p)
+	if hostPort > 0 {
+		n++
+	}
+	peer, peerPort := splitHostPort(req.RemoteAddr)
+	if peer != "" {
+		n++
+		if peerPort > 0 {
+			n++
+		}
+	}
+	useragent := req.UserAgent()
+	if useragent != "" {
+		n++
+	}
+
+	clientIP := serverClientIP(req.Header.Get("X-Forwarded-For"))
+	if clientIP != "" {
+		n++
+	}
+
+	var target string
+	if req.URL != nil {
+		target = req.URL.Path
+		if target != "" {
+			n++
+		}
+	}
+	protoName, protoVersion := netProtocol(req.Proto)
+	if protoName != "" && protoName != "http" {
+		n++
+	}
+	if protoVersion != "" {
+		n++
+	}
+
+	attrs := make([]attribute.KeyValue, 0, n)
+
+	attrs = append(attrs, c.method(req.Method))
+	attrs = append(attrs, c.scheme(req.TLS != nil))
+	attrs = append(attrs, c.NetConv.HostName(host))
+
+	if hostPort > 0 {
+		attrs = append(attrs, c.NetConv.HostPort(hostPort))
+	}
+
+	if peer != "" {
+		// The Go HTTP server sets RemoteAddr to "IP:port"; this will not be a
+		// file-path that would be interpreted with a sock family.
+		attrs = append(attrs, c.NetConv.SockPeerAddr(peer))
+		if peerPort > 0 {
+			attrs = append(attrs, c.NetConv.SockPeerPort(peerPort))
+		}
+	}
+
+	if useragent != "" {
+		attrs = append(attrs, c.UserAgentOriginalKey.String(useragent))
+	}
+
+	if clientIP != "" {
+		attrs = append(attrs, c.HTTPClientIPKey.String(clientIP))
+	}
+
+	if target != "" {
+		attrs = append(attrs, c.HTTPTargetKey.String(target))
+	}
+
+	if protoName != "" && protoName != "http" {
+		attrs = append(attrs, c.NetConv.NetProtocolName.String(protoName))
+	}
+	if protoVersion != "" {
+		attrs = append(attrs, c.NetConv.NetProtocolVersion.String(protoVersion))
+	}
+
+	return attrs
+}
+
+// ServerRequestMetrics returns metric attributes for an HTTP request received
+// by a server.
+//
+// The server must be the primary server name if it is known. For example, this
+// would be the ServerName directive
+// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache
+// server, and the server_name directive
+// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an
+// nginx server. More generically, the primary server name would be the host
+// header value that matches the default virtual host of an HTTP server. It
+// should include the host identifier and if a port is used to route to the
+// server that port identifier should be included as an appropriate port
+// suffix.
+//
+// If the primary server name is not known, server should be an empty string.
+// The req Host will be used to determine the server instead.
+//
+// The following attributes are always returned: "http.method", "http.scheme",
+// "net.host.name". The following attributes are
+// returned if the related values are defined in req: "net.host.port".
+func (c *httpConv) ServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue {
+	/* The following semantic conventions are returned if present:
+	http.scheme             string
+	http.method             string
+	net.host.name           string
+	net.host.port           int
+	net.protocol.name       string Note: not set if the value is "http".
+	net.protocol.version    string
+	*/
+
+	n := 3 // Method, scheme, and host name.
+	var host string
+	var p int
+	if server == "" {
+		host, p = splitHostPort(req.Host)
+	} else {
+		// Prioritize the primary server name.
+		host, p = splitHostPort(server)
+		if p < 0 {
+			_, p = splitHostPort(req.Host)
+		}
+	}
+	hostPort := requiredHTTPPort(req.TLS != nil, p)
+	if hostPort > 0 {
+		n++
+	}
+	protoName, protoVersion := netProtocol(req.Proto)
+	if protoName != "" {
+		n++
+	}
+	if protoVersion != "" {
+		n++
+	}
+
+	attrs := make([]attribute.KeyValue, 0, n)
+
+	attrs = append(attrs, c.methodMetric(req.Method))
+	attrs = append(attrs, c.scheme(req.TLS != nil))
+	attrs = append(attrs, c.NetConv.HostName(host))
+
+	if hostPort > 0 {
+		attrs = append(attrs, c.NetConv.HostPort(hostPort))
+	}
+	if protoName != "" {
+		attrs = append(attrs, c.NetConv.NetProtocolName.String(protoName))
+	}
+	if protoVersion != "" {
+		attrs = append(attrs, c.NetConv.NetProtocolVersion.String(protoVersion))
+	}
+
+	return attrs
+}
+
+func (c *httpConv) method(method string) attribute.KeyValue {
+	if method == "" {
+		return c.HTTPMethodKey.String(http.MethodGet)
+	}
+	return c.HTTPMethodKey.String(method)
+}
+
+func (c *httpConv) methodMetric(method string) attribute.KeyValue {
+	method = strings.ToUpper(method)
+	switch method {
+	case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace:
+	default:
+		method = "_OTHER"
+	}
+	return c.HTTPMethodKey.String(method)
+}
+
+func (c *httpConv) scheme(https bool) attribute.KeyValue { // nolint:revive
+	if https {
+		return c.HTTPSchemeHTTPS
+	}
+	return c.HTTPSchemeHTTP
+}
+
+func serverClientIP(xForwardedFor string) string {
+	if idx := strings.Index(xForwardedFor, ","); idx >= 0 {
+		xForwardedFor = xForwardedFor[:idx]
+	}
+	return xForwardedFor
+}
+
+func requiredHTTPPort(https bool, port int) int { // nolint:revive
+	if https {
+		if port > 0 && port != 443 {
+			return port
+		}
+	} else {
+		if port > 0 && port != 80 {
+			return port
+		}
+	}
+	return -1
+}
+
+// firstHostPort returns the request host and port from the first non-empty
+// source.
+func firstHostPort(source ...string) (host string, port int) {
+	for _, hostport := range source {
+		host, port = splitHostPort(hostport)
+		if host != "" || port > 0 {
+			break
+		}
+	}
+	return
+}
+
+// RequestHeader returns the contents of h as OpenTelemetry attributes.
+func (c *httpConv) RequestHeader(h http.Header) []attribute.KeyValue {
+	return c.header("http.request.header", h)
+}
+
+// ResponseHeader returns the contents of h as OpenTelemetry attributes.
+func (c *httpConv) ResponseHeader(h http.Header) []attribute.KeyValue {
+	return c.header("http.response.header", h)
+}
+
+func (c *httpConv) header(prefix string, h http.Header) []attribute.KeyValue {
+	key := func(k string) attribute.Key {
+		k = strings.ToLower(k)
+		k = strings.ReplaceAll(k, "-", "_")
+		k = fmt.Sprintf("%s.%s", prefix, k)
+		return attribute.Key(k)
+	}
+
+	attrs := make([]attribute.KeyValue, 0, len(h))
+	for k, v := range h {
+		attrs = append(attrs, key(k).StringSlice(v))
+	}
+	return attrs
+}
+
+// ClientStatus returns a span status code and message for an HTTP status code
+// value received by a client.
+func (c *httpConv) ClientStatus(code int) (codes.Code, string) {
+	if code < 100 || code >= 600 {
+		return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
+	}
+	if code >= 400 {
+		return codes.Error, ""
+	}
+	return codes.Unset, ""
+}
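+// Example (hypothetical): the client and server status mappings differ for
+// 4xx responses, as ClientStatus above and ServerStatus below describe.
+//
+//	hc.ClientStatus(404) // codes.Error, "" (4xx is an error for clients)
+//	hc.ServerStatus(404) // codes.Unset, "" (4xx is not an error for servers)
+//	hc.ServerStatus(500) // codes.Error, ""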
+// ServerStatus returns a span status code and message for an HTTP status code
+// value returned by a server. Status codes in the 400-499 range are not
+// returned as errors.
+func (c *httpConv) ServerStatus(code int) (codes.Code, string) {
+	if code < 100 || code >= 600 {
+		return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
+	}
+	if code >= 500 {
+		return codes.Error, ""
+	}
+	return codes.Unset, ""
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go
new file mode 100644
index 00000000..cb4cb935
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go
@@ -0,0 +1,378 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/semconvutil/netconv.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
+
+import (
+	"net"
+	"strconv"
+	"strings"
+
+	"go.opentelemetry.io/otel/attribute"
+	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
+)
+
+// NetTransport returns a trace attribute describing the transport protocol of the
+// passed network. See net.Dial for information about acceptable network
+// values.
+func NetTransport(network string) attribute.KeyValue {
+	return nc.Transport(network)
+}
+
+// NetClient returns trace attributes for a client network connection to address.
+// See net.Dial for information about acceptable address values; address should
+// be the same as the one used to create conn. If conn is nil, only network
+// peer attributes will be returned that describe address. Otherwise, the
+// socket level information about conn will also be included.
+func NetClient(address string, conn net.Conn) []attribute.KeyValue {
+	return nc.Client(address, conn)
+}
+
+// NetServer returns trace attributes for a network listener listening at address.
+// See net.Listen for information about acceptable address values; address
+// should be the same as the one used to create ln. If ln is nil, only network
+// host attributes will be returned that describe address. Otherwise, the
+// socket level information about ln will also be included.
+func NetServer(address string, ln net.Listener) []attribute.KeyValue {
+	return nc.Server(address, ln)
+}
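+// Example (hypothetical client-side sketch): annotating a span for a dialed
+// connection, where span and address are assumed to exist in the caller.
+//
+//	conn, err := net.Dial("tcp", address)
+//	if err == nil {
+//		span.SetAttributes(NetClient(address, conn)...)
+//	}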
+// netConv are the network semantic convention attributes defined for a version
+// of the OpenTelemetry specification.
+type netConv struct {
+	NetHostNameKey     attribute.Key
+	NetHostPortKey     attribute.Key
+	NetPeerNameKey     attribute.Key
+	NetPeerPortKey     attribute.Key
+	NetProtocolName    attribute.Key
+	NetProtocolVersion attribute.Key
+	NetSockFamilyKey   attribute.Key
+	NetSockPeerAddrKey attribute.Key
+	NetSockPeerPortKey attribute.Key
+	NetSockHostAddrKey attribute.Key
+	NetSockHostPortKey attribute.Key
+	NetTransportOther  attribute.KeyValue
+	NetTransportTCP    attribute.KeyValue
+	NetTransportUDP    attribute.KeyValue
+	NetTransportInProc attribute.KeyValue
+}
+
+var nc = &netConv{
+	NetHostNameKey:     semconv.NetHostNameKey,
+	NetHostPortKey:     semconv.NetHostPortKey,
+	NetPeerNameKey:     semconv.NetPeerNameKey,
+	NetPeerPortKey:     semconv.NetPeerPortKey,
+	NetProtocolName:    semconv.NetProtocolNameKey,
+	NetProtocolVersion: semconv.NetProtocolVersionKey,
+	NetSockFamilyKey:   semconv.NetSockFamilyKey,
+	NetSockPeerAddrKey: semconv.NetSockPeerAddrKey,
+	NetSockPeerPortKey: semconv.NetSockPeerPortKey,
+	NetSockHostAddrKey: semconv.NetSockHostAddrKey,
+	NetSockHostPortKey: semconv.NetSockHostPortKey,
+	NetTransportOther:  semconv.NetTransportOther,
+	NetTransportTCP:    semconv.NetTransportTCP,
+	NetTransportUDP:    semconv.NetTransportUDP,
+	NetTransportInProc: semconv.NetTransportInProc,
+}
+
+func (c *netConv) Transport(network string) attribute.KeyValue {
+	switch network {
+	case "tcp", "tcp4", "tcp6":
+		return c.NetTransportTCP
+	case "udp", "udp4", "udp6":
+		return c.NetTransportUDP
+	case "unix", "unixgram", "unixpacket":
+		return c.NetTransportInProc
+	default:
+		// "ip:*", "ip4:*", and "ip6:*" all are considered other.
+		return c.NetTransportOther
+	}
+}
+
+// Host returns attributes for a network host address.
+func (c *netConv) Host(address string) []attribute.KeyValue {
+	h, p := splitHostPort(address)
+	var n int
+	if h != "" {
+		n++
+		if p > 0 {
+			n++
+		}
+	}
+
+	if n == 0 {
+		return nil
+	}
+
+	attrs := make([]attribute.KeyValue, 0, n)
+	attrs = append(attrs, c.HostName(h))
+	if p > 0 {
+		attrs = append(attrs, c.HostPort(p))
+	}
+	return attrs
+}
+
+// Server returns attributes for a network listener listening at address. See
+// net.Listen for information about acceptable address values; address should
+// be the same as the one used to create ln. If ln is nil, only network host
+// attributes will be returned that describe address. Otherwise, the socket
+// level information about ln will also be included.
+func (c *netConv) Server(address string, ln net.Listener) []attribute.KeyValue {
+	if ln == nil {
+		return c.Host(address)
+	}
+
+	lAddr := ln.Addr()
+	if lAddr == nil {
+		return c.Host(address)
+	}
+
+	hostName, hostPort := splitHostPort(address)
+	sockHostAddr, sockHostPort := splitHostPort(lAddr.String())
+	network := lAddr.Network()
+	sockFamily := family(network, sockHostAddr)
+
+	n := nonZeroStr(hostName, network, sockHostAddr, sockFamily)
+	n += positiveInt(hostPort, sockHostPort)
+	attr := make([]attribute.KeyValue, 0, n)
+	if hostName != "" {
+		attr = append(attr, c.HostName(hostName))
+		if hostPort > 0 {
+			// Only if net.host.name is set should net.host.port be.
+			attr = append(attr, c.HostPort(hostPort))
+		}
+	}
+	if network != "" {
+		attr = append(attr, c.Transport(network))
+	}
+	if sockFamily != "" {
+		attr = append(attr, c.NetSockFamilyKey.String(sockFamily))
+	}
+	if sockHostAddr != "" {
+		attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr))
+		if sockHostPort > 0 {
+			// Only if net.sock.host.addr is set should net.sock.host.port be.
+			attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort))
+		}
+	}
+	return attr
+}
+
+func (c *netConv) HostName(name string) attribute.KeyValue {
+	return c.NetHostNameKey.String(name)
+}
+
+func (c *netConv) HostPort(port int) attribute.KeyValue {
+	return c.NetHostPortKey.Int(port)
+}
+
+// Client returns attributes for a client network connection to address. See
+// net.Dial for information about acceptable address values; address should be
+// the same as the one used to create conn. If conn is nil, only network peer
+// attributes will be returned that describe address. Otherwise, the socket
+// level information about conn will also be included.
+func (c *netConv) Client(address string, conn net.Conn) []attribute.KeyValue {
+	if conn == nil {
+		return c.Peer(address)
+	}
+
+	lAddr, rAddr := conn.LocalAddr(), conn.RemoteAddr()
+
+	var network string
+	switch {
+	case lAddr != nil:
+		network = lAddr.Network()
+	case rAddr != nil:
+		network = rAddr.Network()
+	default:
+		return c.Peer(address)
+	}
+
+	peerName, peerPort := splitHostPort(address)
+	var (
+		sockFamily   string
+		sockPeerAddr string
+		sockPeerPort int
+		sockHostAddr string
+		sockHostPort int
+	)
+
+	if lAddr != nil {
+		sockHostAddr, sockHostPort = splitHostPort(lAddr.String())
+	}
+
+	if rAddr != nil {
+		sockPeerAddr, sockPeerPort = splitHostPort(rAddr.String())
+	}
+
+	switch {
+	case sockHostAddr != "":
+		sockFamily = family(network, sockHostAddr)
+	case sockPeerAddr != "":
+		sockFamily = family(network, sockPeerAddr)
+	}
+
+	n := nonZeroStr(peerName, network, sockPeerAddr, sockHostAddr, sockFamily)
+	n += positiveInt(peerPort, sockPeerPort, sockHostPort)
+	attr := make([]attribute.KeyValue, 0, n)
+	if peerName != "" {
+		attr = append(attr, c.PeerName(peerName))
+		if peerPort > 0 {
+			// Only if net.peer.name is set should net.peer.port be.
+			attr = append(attr, c.PeerPort(peerPort))
+		}
+	}
+	if network != "" {
+		attr = append(attr, c.Transport(network))
+	}
+	if sockFamily != "" {
+		attr = append(attr, c.NetSockFamilyKey.String(sockFamily))
+	}
+	if sockPeerAddr != "" {
+		attr = append(attr, c.NetSockPeerAddrKey.String(sockPeerAddr))
+		if sockPeerPort > 0 {
+			// Only if net.sock.peer.addr is set should net.sock.peer.port be.
+			attr = append(attr, c.NetSockPeerPortKey.Int(sockPeerPort))
+		}
+	}
+	if sockHostAddr != "" {
+		attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr))
+		if sockHostPort > 0 {
+			// Only if net.sock.host.addr is set should net.sock.host.port be.
+			attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort))
+		}
+	}
+	return attr
+}
+
+func family(network, address string) string {
+	switch network {
+	case "unix", "unixgram", "unixpacket":
+		return "unix"
+	default:
+		if ip := net.ParseIP(address); ip != nil {
+			if ip.To4() == nil {
+				return "inet6"
+			}
+			return "inet"
+		}
+	}
+	return ""
+}
+
+func nonZeroStr(strs ...string) int {
+	var n int
+	for _, str := range strs {
+		if str != "" {
+			n++
+		}
+	}
+	return n
+}
+
+func positiveInt(ints ...int) int {
+	var n int
+	for _, i := range ints {
+		if i > 0 {
+			n++
+		}
+	}
+	return n
+}
+
+// Peer returns attributes for a network peer address.
+func (c *netConv) Peer(address string) []attribute.KeyValue {
+	h, p := splitHostPort(address)
+	var n int
+	if h != "" {
+		n++
+		if p > 0 {
+			n++
+		}
+	}
+
+	if n == 0 {
+		return nil
+	}
+
+	attrs := make([]attribute.KeyValue, 0, n)
+	attrs = append(attrs, c.PeerName(h))
+	if p > 0 {
+		attrs = append(attrs, c.PeerPort(p))
+	}
+	return attrs
+}
+
+func (c *netConv) PeerName(name string) attribute.KeyValue {
+	return c.NetPeerNameKey.String(name)
+}
+
+func (c *netConv) PeerPort(port int) attribute.KeyValue {
+	return c.NetPeerPortKey.Int(port)
+}
+
+func (c *netConv) SockPeerAddr(addr string) attribute.KeyValue {
+	return c.NetSockPeerAddrKey.String(addr)
+}
+
+func (c *netConv) SockPeerPort(port int) attribute.KeyValue {
+	return c.NetSockPeerPortKey.Int(port)
+}
+
+// splitHostPort splits a network address hostport of the form "host",
+// "host%zone", "[host]", "[host%zone]", "host:port", "host%zone:port",
+// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and
+// port.
+//
+// An empty host is returned if it is not provided or unparsable. A negative
+// port is returned if it is not provided or unparsable.
+func splitHostPort(hostport string) (host string, port int) {
+	port = -1
+
+	if strings.HasPrefix(hostport, "[") {
+		addrEnd := strings.LastIndex(hostport, "]")
+		if addrEnd < 0 {
+			// Invalid hostport.
+			return
+		}
+		if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 {
+			host = hostport[1:addrEnd]
+			return
+		}
+	} else {
+		if i := strings.LastIndex(hostport, ":"); i < 0 {
+			host = hostport
+			return
+		}
+	}
+
+	host, pStr, err := net.SplitHostPort(hostport)
+	if err != nil {
+		return
+	}
+
+	p, err := strconv.ParseUint(pStr, 10, 16)
+	if err != nil {
+		return
+	}
+	return host, int(p)
+}
+
+func netProtocol(proto string) (name string, version string) {
+	name, version, _ = strings.Cut(proto, "/")
+	name = strings.ToLower(name)
+	return name, version
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go
new file mode 100644
index 00000000..26a51a18
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go
@@ -0,0 +1,65 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+	"context"
+	"sync"
+
+	"go.opentelemetry.io/otel/attribute"
+)
+
+// Labeler is used to allow instrumented HTTP handlers to add custom attributes to
+// the metrics recorded by the net/http instrumentation.
+type Labeler struct {
+	mu         sync.Mutex
+	attributes []attribute.KeyValue
+}
+
+// Add attributes to a Labeler.
+func (l *Labeler) Add(ls ...attribute.KeyValue) {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	l.attributes = append(l.attributes, ls...)
+}
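+// Example (hypothetical handler sketch): attaching a custom attribute to the
+// metrics recorded for the current request via the Labeler stored in the
+// request context (see LabelerFromContext below); attribute is the
+// go.opentelemetry.io/otel/attribute package.
+//
+//	labeler, _ := LabelerFromContext(r.Context())
+//	labeler.Add(attribute.String("user.tier", "gold"))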
+// Get returns a copy of the attributes added to the Labeler.
+func (l *Labeler) Get() []attribute.KeyValue {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	ret := make([]attribute.KeyValue, len(l.attributes))
+	copy(ret, l.attributes)
+	return ret
+}
+
+type labelerContextKeyType int
+
+const labelerContextKey labelerContextKeyType = 0
+
+func injectLabeler(ctx context.Context, l *Labeler) context.Context {
+	return context.WithValue(ctx, labelerContextKey, l)
+}
+
+// LabelerFromContext retrieves a Labeler instance from the provided context if
+// one is available. If no Labeler was found in the provided context a new, empty
+// Labeler is returned and the second return value is false. In this case it is
+// safe to use the Labeler but any attributes added to it will not be used.
+func LabelerFromContext(ctx context.Context) (*Labeler, bool) {
+	l, ok := ctx.Value(labelerContextKey).(*Labeler)
+	if !ok {
+		l = &Labeler{}
+	}
+	return l, ok
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
new file mode 100644
index 00000000..e835cac1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
@@ -0,0 +1,193 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"net/http/httptrace"
+
+	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/propagation"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// Transport implements the http.RoundTripper interface and wraps
+// outbound HTTP(S) requests with a span.
+type Transport struct {
+	rt http.RoundTripper
+
+	tracer            trace.Tracer
+	propagators       propagation.TextMapPropagator
+	spanStartOptions  []trace.SpanStartOption
+	filters           []Filter
+	spanNameFormatter func(string, *http.Request) string
+	clientTrace       func(context.Context) *httptrace.ClientTrace
+}
+
+var _ http.RoundTripper = &Transport{}
+
+// NewTransport wraps the provided http.RoundTripper with one that
+// starts a span and injects the span context into the outbound request headers.
+//
+// If the provided http.RoundTripper is nil, http.DefaultTransport will be used
+// as the base http.RoundTripper.
+func NewTransport(base http.RoundTripper, opts ...Option) *Transport {
+	if base == nil {
+		base = http.DefaultTransport
+	}
+
+	t := Transport{
+		rt: base,
+	}
+
+	defaultOpts := []Option{
+		WithSpanOptions(trace.WithSpanKind(trace.SpanKindClient)),
+		WithSpanNameFormatter(defaultTransportFormatter),
+	}
+
+	c := newConfig(append(defaultOpts, opts...)...)
+	t.applyConfig(c)
+
+	return &t
+}
+
+func (t *Transport) applyConfig(c *config) {
+	t.tracer = c.Tracer
+	t.propagators = c.Propagators
+	t.spanStartOptions = c.SpanStartOptions
+	t.filters = c.Filters
+	t.spanNameFormatter = c.SpanNameFormatter
+	t.clientTrace = c.ClientTrace
+}
+
+func defaultTransportFormatter(_ string, r *http.Request) string {
+	return "HTTP " + r.Method
+}
+
+// RoundTrip creates a Span and propagates its context via the provided request's headers
+// before handing the request to the configured base RoundTripper. The created span will
+// end when the response body is closed or when a read from the body returns io.EOF.
+func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) {
+	for _, f := range t.filters {
+		if !f(r) {
+			// Simply pass through to the base RoundTripper if a filter rejects the request.
+			return t.rt.RoundTrip(r)
+		}
+	}
+
+	tracer := t.tracer
+
+	if tracer == nil {
+		if span := trace.SpanFromContext(r.Context()); span.SpanContext().IsValid() {
+			tracer = newTracer(span.TracerProvider())
+		} else {
+			tracer = newTracer(otel.GetTracerProvider())
+		}
+	}
+
+	opts := append([]trace.SpanStartOption{}, t.spanStartOptions...) // start with the configured options
+
+	ctx, span := tracer.Start(r.Context(), t.spanNameFormatter("", r), opts...)
+
+	if t.clientTrace != nil {
+		ctx = httptrace.WithClientTrace(ctx, t.clientTrace(ctx))
+	}
+
+	r = r.Clone(ctx) // According to RoundTripper spec, we shouldn't modify the origin request.
+	span.SetAttributes(semconvutil.HTTPClientRequest(r)...)
+	t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header))
+
+	res, err := t.rt.RoundTrip(r)
+	if err != nil {
+		span.RecordError(err)
+		span.SetStatus(codes.Error, err.Error())
+		span.End()
+		return res, err
+	}
+
+	span.SetAttributes(semconvutil.HTTPClientResponse(res)...)
+	span.SetStatus(semconvutil.HTTPClientStatus(res.StatusCode))
+	res.Body = newWrappedBody(span, res.Body)
+
+	return res, err
+}
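+// Example (hypothetical usage sketch, from another package): wrapping the
+// default transport so every outbound request made with this client is traced.
+//
+//	client := http.Client{
+//		Transport: otelhttp.NewTransport(http.DefaultTransport),
+//	}
+//	resp, err := client.Get("https://example.com/")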
+// newWrappedBody returns a new and appropriately scoped *wrappedBody as an
+// io.ReadCloser. If the passed body implements io.Writer, the returned value
+// will implement io.ReadWriteCloser.
+func newWrappedBody(span trace.Span, body io.ReadCloser) io.ReadCloser {
+	// Successful protocol switch responses will have a body that
+	// implements an io.ReadWriteCloser. Ensure this interface type continues
+	// to be satisfied if that is the case.
+	if _, ok := body.(io.ReadWriteCloser); ok {
+		return &wrappedBody{span: span, body: body}
+	}
+
+	// Remove the implementation of the io.ReadWriteCloser and only implement
+	// the io.ReadCloser.
+	return struct{ io.ReadCloser }{&wrappedBody{span: span, body: body}}
+}
+
+// wrappedBody is the response body type returned by the transport
+// instrumentation to complete a span. Errors encountered when using the
+// response body are recorded in the span tracking the response.
+//
+// The span tracking the response is ended when this body is closed.
+//
+// If the response body implements the io.Writer interface (i.e. for
+// successful protocol switches), the wrapped body also will.
+type wrappedBody struct {
+	span trace.Span
+	body io.ReadCloser
+}
+
+var _ io.ReadWriteCloser = &wrappedBody{}
+
+func (wb *wrappedBody) Write(p []byte) (int, error) {
+	// This will not panic given the guard in newWrappedBody.
+	n, err := wb.body.(io.Writer).Write(p)
+	if err != nil {
+		wb.span.RecordError(err)
+		wb.span.SetStatus(codes.Error, err.Error())
+	}
+	return n, err
+}
+
+func (wb *wrappedBody) Read(b []byte) (int, error) {
+	n, err := wb.body.Read(b)
+
+	switch err {
+	case nil:
+		// nothing to do here but fall through to the return
+	case io.EOF:
+		wb.span.End()
+	default:
+		wb.span.RecordError(err)
+		wb.span.SetStatus(codes.Error, err.Error())
+	}
+	return n, err
+}
+
+func (wb *wrappedBody) Close() error {
+	wb.span.End()
+	if wb.body != nil {
+		return wb.body.Close()
+	}
+	return nil
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
new file mode 100644
index 00000000..9a4a0214
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
@@ -0,0 +1,28 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+// Version is the current release version of the otelhttp instrumentation.
+func Version() string {
+	return "0.47.0"
+	// This string is updated by the pre_release.sh script during release.
+}
+
+// SemVersion is the semantic version to be supplied to tracer/meter creation.
+//
+// Deprecated: Use [Version] instead.
+func SemVersion() string {
+	return Version()
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go
new file mode 100644
index 00000000..11a35ed1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go
@@ -0,0 +1,99 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+
+import (
+	"context"
+	"io"
+	"net/http"
+
+	"go.opentelemetry.io/otel/propagation"
+)
+
+var _ io.ReadCloser = &bodyWrapper{}
+
+// bodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number
+// of bytes read and the last error.
+type bodyWrapper struct {
+	io.ReadCloser
+	record func(n int64) // must not be nil
+
+	read int64
+	err  error
+}
+
+func (w *bodyWrapper) Read(b []byte) (int, error) {
+	n, err := w.ReadCloser.Read(b)
+	n1 := int64(n)
+	w.read += n1
+	w.err = err
+	w.record(n1)
+	return n, err
+}
+
+func (w *bodyWrapper) Close() error {
+	return w.ReadCloser.Close()
+}
+
+var _ http.ResponseWriter = &respWriterWrapper{}
+
+// respWriterWrapper wraps a http.ResponseWriter in order to track the number of
+// bytes written, the last error, and to catch the first written statusCode.
+// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional
+// types (http.Hijacker, http.Pusher, http.CloseNotifier, http.Flusher, etc)
+// that may be useful when using it in real life situations.
+type respWriterWrapper struct {
+	http.ResponseWriter
+	record func(n int64) // must not be nil
+
+	// used to inject the header
+	ctx context.Context
+
+	props propagation.TextMapPropagator
+
+	written     int64
+	statusCode  int
+	err         error
+	wroteHeader bool
+}
+
+func (w *respWriterWrapper) Header() http.Header {
+	return w.ResponseWriter.Header()
+}
+
+func (w *respWriterWrapper) Write(p []byte) (int, error) {
+	if !w.wroteHeader {
+		w.WriteHeader(http.StatusOK)
+	}
+	n, err := w.ResponseWriter.Write(p)
+	n1 := int64(n)
+	w.record(n1)
+	w.written += n1
+	w.err = err
+	return n, err
+}
+
+// WriteHeader persists the initial statusCode for span attribution.
+// All calls to WriteHeader are propagated to the underlying ResponseWriter,
+// but only the statusCode from the first call is persisted. Blocking
+// consecutive calls instead would alter the expected behavior and suppress
+// the warning logs net/http emits for duplicate WriteHeader calls, which
+// help developers notice incorrect handler implementations.
+func (w *respWriterWrapper) WriteHeader(statusCode int) {
+	if !w.wroteHeader {
+		w.wroteHeader = true
+		w.statusCode = statusCode
+	}
+	w.ResponseWriter.WriteHeader(statusCode)
+}
diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore
new file mode 100644
index 00000000..ae6a3bcf
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.codespellignore
@@ -0,0 +1,5 @@
+ot
+fo
+te
+collison
+consequentially
diff --git a/vendor/go.opentelemetry.io/otel/.codespellrc b/vendor/go.opentelemetry.io/otel/.codespellrc
new file mode 100644
index 00000000..4afbb1fb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.codespellrc
@@ -0,0 +1,10 @@
+# https://github.com/codespell-project/codespell
+[codespell]
+builtin = clear,rare,informal
+check-filenames =
+check-hidden =
+ignore-words = .codespellignore
+interactive = 1
+skip = .git,go.mod,go.sum,semconv,venv,.tools
+uri-ignore-words-list = *
+write =
diff --git a/vendor/go.opentelemetry.io/otel/.gitattributes b/vendor/go.opentelemetry.io/otel/.gitattributes
new file mode 100644
index 00000000..314766e9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.gitattributes
@@ -0,0 +1,3 @@
+* text=auto eol=lf
+*.{cmd,[cC][mM][dD]} text eol=crlf
+*.{bat,[bB][aA][tT]} text eol=crlf
diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore
new file mode 100644
index 00000000..895c7664
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.gitignore
@@ -0,0 +1,22 @@
+.DS_Store
+Thumbs.db
+
+.tools/
+venv/
+.idea/
+.vscode/
+*.iml
+*.so
+coverage.*
+go.work
+go.work.sum
+
+gen/
+
+/example/dice/dice
+/example/namedtracer/namedtracer
+/example/otel-collector/otel-collector
+/example/opencensus/opencensus
+/example/passthrough/passthrough
+/example/prometheus/prometheus
+/example/zipkin/zipkin
diff --git a/vendor/go.opentelemetry.io/otel/.gitmodules b/vendor/go.opentelemetry.io/otel/.gitmodules
new file mode 100644
index 00000000..38a1f569
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "opentelemetry-proto"]
+	path = exporters/otlp/internal/opentelemetry-proto
+	url = https://github.com/open-telemetry/opentelemetry-proto
diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml
new file mode 100644
index 00000000..a62511f3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.golangci.yml
@@ -0,0 +1,296 @@
+# See https://github.com/golangci/golangci-lint#config-file
+run:
+  issues-exit-code: 1 #Default
+  tests: true #Default
+
+linters:
+  # Disable everything by default so upgrades do not include new "default
+  # enabled" linters.
+  disable-all: true
+  # Specifically enable linters we want to use.
+  enable:
+    - depguard
+    - errcheck
+    - godot
+    - gofumpt
+    - goimports
+    - gosec
+    - gosimple
+    - govet
+    - ineffassign
+    - misspell
+    - revive
+    - staticcheck
+    - typecheck
+    - unused
+
+issues:
+  # Maximum issues count per one linter.
+  # Set to 0 to disable.
+  # Default: 50
+  # Setting to unlimited so the linter only is run once to debug all issues.
+  max-issues-per-linter: 0
+  # Maximum count of issues with the same text.
+  # Set to 0 to disable.
+  # Default: 3
+  # Setting to unlimited so the linter only is run once to debug all issues.
+  max-same-issues: 0
+  # Excluding configuration per-path, per-linter, per-text and per-source.
+  exclude-rules:
+    # TODO: Having appropriate comments for exported objects helps development,
+    # even for objects in internal packages. Appropriate comments for all
+    # exported objects should be added and this exclusion removed.
+    - path: '.*internal/.*'
+      text: "exported (method|function|type|const) (.+) should have comment or be unexported"
+      linters:
+        - revive
+    # Yes, they are, but it's okay in a test.
+    - path: _test\.go
+      text: "exported func.*returns unexported type.*which can be annoying to use"
+      linters:
+        - revive
+    # Example test functions should be treated like main.
+    - path: example.*_test\.go
+      text: "calls to (.+) only in main[(][)] or init[(][)] functions"
+      linters:
+        - revive
+    # It's okay to not run gosec in a test.
+    - path: _test\.go
+      linters:
+        - gosec
+    # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand)
+    # as we commonly use it in tests and examples.
+    - text: "G404:"
+      linters:
+        - gosec
+    # Ignoring gosec G402: TLS MinVersion too low
+    # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well.
+    - text: "G402: TLS MinVersion too low."
+      linters:
+        - gosec
+  include:
+    # revive exported should have comment or be unexported.
+    - EXC0012
+    # revive package comment should be of the form ...
+    - EXC0013
+
+linters-settings:
+  depguard:
+    rules:
+      non-tests:
+        files:
+          - "!$test"
+          - "!**/*test/*.go"
+          - "!**/internal/matchers/*.go"
+        deny:
+          - pkg: "testing"
+          - pkg: "github.com/stretchr/testify"
+          - pkg: "crypto/md5"
+          - pkg: "crypto/sha1"
+          - pkg: "crypto/**/pkix"
+      otlp-internal:
+        files:
+          - "!**/exporters/otlp/internal/**/*.go"
+        deny:
+          - pkg: "go.opentelemetry.io/otel/exporters/otlp/internal"
+            desc: Do not use cross-module internal packages.
+ otlptrace-internal: + files: + - "!**/exporters/otlp/otlptrace/*.go" + - "!**/exporters/otlp/otlptrace/internal/**.go" + deny: + - pkg: "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal" + desc: Do not use cross-module internal packages. + otlpmetric-internal: + files: + - "!**/exporters/otlp/otlpmetric/internal/*.go" + - "!**/exporters/otlp/otlpmetric/internal/**/*.go" + deny: + - pkg: "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal" + desc: Do not use cross-module internal packages. + otel-internal: + files: + - "**/sdk/*.go" + - "**/sdk/**/*.go" + - "**/exporters/*.go" + - "**/exporters/**/*.go" + - "**/schema/*.go" + - "**/schema/**/*.go" + - "**/metric/*.go" + - "**/metric/**/*.go" + - "**/bridge/*.go" + - "**/bridge/**/*.go" + - "**/example/*.go" + - "**/example/**/*.go" + - "**/trace/*.go" + - "**/trace/**/*.go" + deny: + - pkg: "go.opentelemetry.io/otel/internal$" + desc: Do not use cross-module internal packages. + - pkg: "go.opentelemetry.io/otel/internal/attribute" + desc: Do not use cross-module internal packages. + - pkg: "go.opentelemetry.io/otel/internal/internaltest" + desc: Do not use cross-module internal packages. + - pkg: "go.opentelemetry.io/otel/internal/matchers" + desc: Do not use cross-module internal packages. + godot: + exclude: + # Exclude links. + - '^ *\[[^]]+\]:' + # Exclude sentence fragments for lists. + - '^[ ]*[-•]' + # Exclude sentences prefixing a list. + - ':$' + goimports: + local-prefixes: go.opentelemetry.io + misspell: + locale: US + ignore-words: + - cancelled + revive: + # Sets the default failure confidence. + # This means that linting errors with less than 0.8 confidence will be ignored. + # Default: 0.8 + confidence: 0.01 + rules: + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#blank-imports + - name: blank-imports + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#bool-literal-in-expr + - name: bool-literal-in-expr + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#constant-logical-expr + - name: constant-logical-expr + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-as-argument + # TODO (#3372) re-enable linter when it is compatible. 
https://github.com/golangci/golangci-lint/issues/3280 + - name: context-as-argument + disabled: true + arguments: + allowTypesBefore: "*testing.T" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-keys-type + - name: context-keys-type + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#deep-exit + - name: deep-exit + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#defer + - name: defer + disabled: false + arguments: + - ["call-chain", "loop"] + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#dot-imports + - name: dot-imports + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#duplicated-imports + - name: duplicated-imports + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#early-return + - name: early-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-block + - name: empty-block + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-lines + - name: empty-lines + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-naming + - name: error-naming + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-return + - name: error-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-strings + - name: error-strings + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#errorf + - name: errorf + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#exported + - name: exported + disabled: false + arguments: + - "sayRepetitiveInsteadOfStutters" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#flag-parameter + - name: flag-parameter + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#identical-branches + - name: identical-branches + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#if-return + - name: if-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#increment-decrement + - name: increment-decrement + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#indent-error-flow + - name: indent-error-flow + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#import-shadowing + - name: import-shadowing + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#package-comments + - name: package-comments + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range + - name: range + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-in-closure + - name: range-val-in-closure + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-address + - name: range-val-address + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#redefines-builtin-id + - name: redefines-builtin-id + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#string-format + - name: string-format + disabled: false + arguments: + - - panic + - '/^[^\n]*$/' + - must not contain line breaks + # 
https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#struct-tag + - name: struct-tag + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#superfluous-else + - name: superfluous-else + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#time-equal + - name: time-equal + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-naming + - name: var-naming + disabled: false + arguments: + - ["ID"] # AllowList + - ["Otel", "Aws", "Gcp"] # DenyList + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-declaration + - name: var-declaration + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unconditional-recursion + - name: unconditional-recursion + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unexported-return + - name: unexported-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unhandled-error + - name: unhandled-error + disabled: false + arguments: + - "fmt.Fprint" + - "fmt.Fprintf" + - "fmt.Fprintln" + - "fmt.Print" + - "fmt.Printf" + - "fmt.Println" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unnecessary-stmt + - name: unnecessary-stmt + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#useless-break + - name: useless-break + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value + - name: waitgroup-by-value + disabled: false diff --git a/vendor/go.opentelemetry.io/otel/.lycheeignore b/vendor/go.opentelemetry.io/otel/.lycheeignore new file mode 100644 index 00000000..40d62fa2 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.lycheeignore @@ -0,0 +1,6 @@ +http://localhost +http://jaeger-collector +https://github.com/open-telemetry/opentelemetry-go/milestone/ +https://github.com/open-telemetry/opentelemetry-go/projects +file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries +file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual diff --git a/vendor/go.opentelemetry.io/otel/.markdownlint.yaml b/vendor/go.opentelemetry.io/otel/.markdownlint.yaml new file mode 100644 index 00000000..3202496c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.markdownlint.yaml @@ -0,0 +1,29 @@ +# Default state for all rules +default: true + +# ul-style +MD004: false + +# hard-tabs +MD010: false + +# line-length +MD013: false + +# no-duplicate-header +MD024: + siblings_only: true + +#single-title +MD025: false + +# ol-prefix +MD029: + style: ordered + +# no-inline-html +MD033: false + +# fenced-code-language +MD040: false + diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md new file mode 100644 index 00000000..fe670d79 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -0,0 +1,2859 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). + +This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +## [1.22.0/0.45.0] 2024-01-17 + +### Added + +- The `go.opentelemetry.io/otel/semconv/v1.22.0` package. + The package contains semantic conventions from the `v1.22.0` version of the OpenTelemetry Semantic Conventions. 
(#4735)
+- The `go.opentelemetry.io/otel/semconv/v1.23.0` package.
+  The package contains semantic conventions from the `v1.23.0` version of the OpenTelemetry Semantic Conventions. (#4746)
+- The `go.opentelemetry.io/otel/semconv/v1.23.1` package.
+  The package contains semantic conventions from the `v1.23.1` version of the OpenTelemetry Semantic Conventions. (#4749)
+- The `go.opentelemetry.io/otel/semconv/v1.24.0` package.
+  The package contains semantic conventions from the `v1.24.0` version of the OpenTelemetry Semantic Conventions. (#4770)
+- Add `WithResourceAsConstantLabels` option to apply resource attributes for every metric emitted by the Prometheus exporter. (#4733)
+- Experimental cardinality limiting is added to the metric SDK.
+  See [metric documentation](./sdk/metric/EXPERIMENTAL.md#cardinality-limit) for more information about this feature and how to enable it. (#4457)
+- Add `NewMemberRaw` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage`. (#4804)
+
+### Changed
+
+- Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.24.0`. (#4754)
+- Update transformations in `go.opentelemetry.io/otel/exporters/zipkin` to follow `v1.19.0` version of the OpenTelemetry specification. (#4754)
+- Record synchronous measurements when the passed context is canceled instead of dropping in `go.opentelemetry.io/otel/sdk/metric`.
+  If you do not want to make a measurement when the context is canceled, you need to handle it yourself (e.g. `if ctx.Err() != nil`). (#4671)
+- Improve `go.opentelemetry.io/otel/trace.TraceState`'s performance. (#4722)
+- Improve `go.opentelemetry.io/otel/propagation.TraceContext`'s performance. (#4721)
+- Improve `go.opentelemetry.io/otel/baggage` performance. (#4743)
+- Improve performance of the `(*Set).Filter` method in `go.opentelemetry.io/otel/attribute` when the passed filter does not filter out any attributes from the set. (#4774)
+- `Member.String` in `go.opentelemetry.io/otel/baggage` percent-encodes only when necessary. (#4775)
+- Improve `go.opentelemetry.io/otel/trace.Span`'s performance when adding multiple attributes. (#4818)
+- `Property.Value` in `go.opentelemetry.io/otel/baggage` now returns a raw string instead of a percent-encoded value. (#4804)
+
+### Fixed
+
+- Fix `Parse` in `go.opentelemetry.io/otel/baggage` to validate member value before percent-decoding. (#4755)
+- Fix whitespace encoding of `Member.String` in `go.opentelemetry.io/otel/baggage`. (#4756)
+- Fix observable not registered error when the asynchronous instrument has a drop aggregation in `go.opentelemetry.io/otel/sdk/metric`. (#4772)
+- Fix baggage item key so that it is not canonicalized in `go.opentelemetry.io/otel/bridge/opentracing`. (#4776)
+- Fix `go.opentelemetry.io/otel/bridge/opentracing` to properly handle baggage values that require escaping during propagation. (#4804)
+- Fix a bug where using multiple readers resulted in incorrect asynchronous counter values in `go.opentelemetry.io/otel/sdk/metric`. (#4742)
+
+## [1.21.0/0.44.0] 2023-11-16
+
+### Removed
+
+- Remove the deprecated `go.opentelemetry.io/otel/bridge/opencensus.NewTracer`. (#4706)
+- Remove the deprecated `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` module. (#4707)
+- Remove the deprecated `go.opentelemetry.io/otel/example/view` module. (#4708)
+- Remove the deprecated `go.opentelemetry.io/otel/example/fib` module. (#4723)
+
+### Fixed
+
+- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`.
(#4719)
+- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4719)
+
+## [1.20.0/0.43.0] 2023-11-10
+
+This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementors need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this.
+
+### Added
+
+- Add `go.opentelemetry.io/otel/bridge/opencensus.InstallTraceBridge`, which installs the OpenCensus trace bridge, and replaces `opencensus.NewTracer`. (#4567)
+- Add scope version to trace and metric bridges in `go.opentelemetry.io/otel/bridge/opencensus`. (#4584)
+- Add the `go.opentelemetry.io/otel/trace/embedded` package to be embedded in the exported trace API interfaces. (#4620)
+- Add the `go.opentelemetry.io/otel/trace/noop` package as a default no-op implementation of the trace API. (#4620)
+- Add context propagation in `go.opentelemetry.io/otel/example/dice`. (#4644)
+- Add view configuration to `go.opentelemetry.io/otel/example/prometheus`. (#4649)
+- Add `go.opentelemetry.io/otel/metric.WithExplicitBucketBoundaries`, which allows defining default explicit bucket boundaries when creating histogram instruments. (#4603)
+- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4660)
+- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4660)
+- Add Summary, SummaryDataPoint, and QuantileValue to `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4622)
+- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` now supports exemplars from OpenCensus. (#4585)
+- Add support for `WithExplicitBucketBoundaries` in `go.opentelemetry.io/otel/sdk/metric`. (#4605)
+- Add support for Summary metrics in `go.opentelemetry.io/otel/bridge/opencensus`. (#4668)
+
+### Deprecated
+
+- Deprecate `go.opentelemetry.io/otel/bridge/opencensus.NewTracer` in favor of `opencensus.InstallTraceBridge`. (#4567)
+- Deprecate `go.opentelemetry.io/otel/example/fib` package in favor of `go.opentelemetry.io/otel/example/dice`. (#4618)
+- Deprecate `go.opentelemetry.io/otel/trace.NewNoopTracerProvider`.
+  Use the added `NewTracerProvider` function in `go.opentelemetry.io/otel/trace/noop` instead. (#4620)
+- Deprecate `go.opentelemetry.io/otel/example/view` package in favor of `go.opentelemetry.io/otel/example/prometheus`. (#4649)
+- Deprecate `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4693)
+
+### Changed
+
+- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` returns a `*MetricProducer` struct instead of the metric.Producer interface. (#4583)
+- The `TracerProvider` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.TracerProvider` type.
+  This extends the `TracerProvider` interface and is a breaking change for any existing implementation.
+  Implementors need to update their implementations based on what they want the default behavior of the interface to be.
+  See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
+- The `Tracer` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Tracer` type.
+ +### Added + +- Add `go.opentelemetry.io/otel/bridge/opencensus.InstallTraceBridge`, which installs the OpenCensus trace bridge, and replaces `opencensus.NewTracer`. (#4567) +- Add scope version to trace and metric bridges in `go.opentelemetry.io/otel/bridge/opencensus`. (#4584) +- Add the `go.opentelemetry.io/otel/trace/embedded` package to be embedded in the exported trace API interfaces. (#4620) +- Add the `go.opentelemetry.io/otel/trace/noop` package as a default no-op implementation of the trace API. (#4620) +- Add context propagation in `go.opentelemetry.io/otel/example/dice`. (#4644) +- Add view configuration to `go.opentelemetry.io/otel/example/prometheus`. (#4649) +- Add `go.opentelemetry.io/otel/metric.WithExplicitBucketBoundaries`, which allows defining default explicit bucket boundaries when creating histogram instruments. (#4603) +- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4660) +- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4660) +- Add Summary, SummaryDataPoint, and QuantileValue to `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4622) +- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` now supports exemplars from OpenCensus. (#4585) +- Add support for `WithExplicitBucketBoundaries` in `go.opentelemetry.io/otel/sdk/metric`. (#4605) +- Add support for Summary metrics in `go.opentelemetry.io/otel/bridge/opencensus`. (#4668) + +### Deprecated + +- Deprecate `go.opentelemetry.io/otel/bridge/opencensus.NewTracer` in favor of `opencensus.InstallTraceBridge`. (#4567) +- Deprecate the `go.opentelemetry.io/otel/example/fib` package in favor of `go.opentelemetry.io/otel/example/dice`. (#4618) +- Deprecate `go.opentelemetry.io/otel/trace.NewNoopTracerProvider`. + Use the added `NewTracerProvider` function in `go.opentelemetry.io/otel/trace/noop` instead. (#4620) +- Deprecate the `go.opentelemetry.io/otel/example/view` package in favor of `go.opentelemetry.io/otel/example/prometheus`. (#4649) +- Deprecate `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4693) + +### Changed + +- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` returns a `*MetricProducer` struct instead of the metric.Producer interface. (#4583) +- The `TracerProvider` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.TracerProvider` type. + This extends the `TracerProvider` interface and is a breaking change for any existing implementation. + Implementors need to update their implementations based on what they want the default behavior of the interface to be. + See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) +- The `Tracer` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Tracer` type. + This extends the `Tracer` interface and is a breaking change for any existing implementation. + Implementors need to update their implementations based on what they want the default behavior of the interface to be. + See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) +- The `Span` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Span` type. + This extends the `Span` interface and is a breaking change for any existing implementation. + Implementors need to update their implementations based on what they want the default behavior of the interface to be. + See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660) +- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660) +- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4670) +- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4670) +- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4669) +- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4669) +- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4679) +- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4679) + +### Fixed + +- Fix improper parsing of characters such as `+` and `/` by `Parse` in `go.opentelemetry.io/otel/baggage` as they were rendered as whitespace. (#4667) +- Fix improper parsing of characters such as `+` and `/` passed via `OTEL_RESOURCE_ATTRIBUTES` in `go.opentelemetry.io/otel/sdk/resource` as they were rendered as whitespace. (#4699) +- Fix improper parsing of characters such as `+` and `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` as they were rendered as whitespace. (#4699) +- Fix improper parsing of characters such as `+` and `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` as they were rendered as whitespace. (#4699) +- Fix improper parsing of characters such as `+` and `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` as they were rendered as whitespace. (#4699) +- Fix improper parsing of characters such as `+` and `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` as they were rendered as whitespace. (#4699) +- In `go.opentelemetry.io/otel/exporters/prometheus`, the exporter no longer `Collect`s metrics after `Shutdown` is invoked. (#4648) +- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4695) +- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4695)
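+
+Illustrating the `WithExplicitBucketBoundaries` option added in this release (#4603, #4605); the instrument name and boundaries are hypothetical:
+
+```go
+package main
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric"
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+)
+
+func main() {
+	meter := sdkmetric.NewMeterProvider().Meter("example")
+
+	// Explicit boundaries set at instrument creation become the default
+	// for this histogram unless a view overrides them.
+	hist, err := meter.Float64Histogram(
+		"request.duration",
+		metric.WithUnit("ms"),
+		metric.WithExplicitBucketBoundaries(1, 5, 10, 50, 100, 500),
+	)
+	if err != nil {
+		panic(err)
+	}
+	hist.Record(context.Background(), 42)
+}
+```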
+ +## [1.19.0/0.42.0/0.0.7] 2023-09-28 + +This release contains the first stable release of the OpenTelemetry Go [metric SDK]. +Our project stability guarantees now apply to the `go.opentelemetry.io/otel/sdk/metric` package. +See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. + +### Added + +- Add the "Roll the dice" getting started application example in `go.opentelemetry.io/otel/example/dice`. (#4539) +- The `WithWriter` and `WithPrettyPrint` options to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` to set a custom `io.Writer`, and allow displaying the output in human-readable JSON. (#4507) + +### Changed + +- Allow '/' characters in metric instrument names. (#4501) +- The exporter in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` does not prettify its output by default anymore. (#4507) +- Upgrade `gopkg.in/yaml` from `v2` to `v3` in `go.opentelemetry.io/otel/schema`. (#4535) + +### Fixed + +- In `go.opentelemetry.io/otel/exporters/prometheus`, don't try to create the Prometheus metric on every `Collect` if we know the scope is invalid. (#4499) + +### Removed + +- Remove `"go.opentelemetry.io/otel/bridge/opencensus".NewMetricExporter`, which is replaced by `NewMetricProducer`. (#4566) + +## [1.19.0-rc.1/0.42.0-rc.1] 2023-09-14 + +This is a release candidate for the v1.19.0/v0.42.0 release. +That release is expected to include the `v1` release of the OpenTelemetry Go metric SDK and will provide stability guarantees of that SDK. +See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. + +### Changed + +- Allow '/' characters in metric instrument names. (#4501) + +### Fixed + +- In `go.opentelemetry.io/otel/exporters/prometheus`, don't try to create the Prometheus metric on every `Collect` if we know the scope is invalid. (#4499) + +## [1.18.0/0.41.0/0.0.6] 2023-09-12 + +This release drops the compatibility guarantee of [Go 1.19]. + +### Added + +- Add `WithProducer` option in `go.opentelemetry.io/otel/exporters/prometheus` to restore the ability to register producers on the Prometheus exporter's manual reader. (#4473) +- Add `IgnoreValue` option in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest` to allow ignoring values when comparing metrics. (#4447) + +### Changed + +- Use a `TestingT` interface instead of `*testing.T` struct in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest`. (#4483) + +### Deprecated + +- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` was deprecated in `v0.35.0` (#3541). + The deprecation notice format for the function has been corrected to trigger Go documentation and build tooling. (#4470) + +### Removed + +- Removed the deprecated `go.opentelemetry.io/otel/exporters/jaeger` package. (#4467) +- Removed the deprecated `go.opentelemetry.io/otel/example/jaeger` package. (#4467) +- Removed the deprecated `go.opentelemetry.io/otel/sdk/metric/aggregation` package. (#4468) +- Removed the deprecated internal packages in `go.opentelemetry.io/otel/exporters/otlp` and its sub-packages. (#4469) +- Dropped guaranteed support for versions of Go less than 1.20. (#4481)
+ +## [1.17.0/0.40.0/0.0.5] 2023-08-28 + +### Added + +- Export the `ManualReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244) +- Export the `PeriodicReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244) +- Add support for exponential histogram aggregations. + A histogram can be configured as an exponential histogram using a view with `"go.opentelemetry.io/otel/sdk/metric".ExponentialHistogram` as the aggregation. (#4245) +- Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4272) +- Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4272) +- The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE` environment variable. (#4287) +- Add `WithoutCounterSuffixes` option in `go.opentelemetry.io/otel/exporters/prometheus` to disable addition of `_total` suffixes. (#4306) +- Add info and debug logging to the metric SDK in `go.opentelemetry.io/otel/sdk/metric`. (#4315) +- The `go.opentelemetry.io/otel/semconv/v1.21.0` package. + The package contains semantic conventions from the `v1.21.0` version of the OpenTelemetry Semantic Conventions. (#4362) +- Accept 201 to 299 HTTP status codes as success in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4365) +- Document that the `Temporality` and `Aggregation` methods of the `"go.opentelemetry.io/otel/sdk/metric".Exporter` need to be concurrent safe. (#4381) +- Expand the set of units supported by the Prometheus exporter, and don't add unit suffixes if they are already present in `go.opentelemetry.io/otel/exporters/prometheus`. (#4374) +- Move the `Aggregation` interface and its implementations from `go.opentelemetry.io/otel/sdk/metric/aggregation` to `go.opentelemetry.io/otel/sdk/metric`. (#4435) +- The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION` environment variable. (#4437) +- Add the `NewAllowKeysFilter` and `NewDenyKeysFilter` functions to `go.opentelemetry.io/otel/attribute` to allow convenient creation of allow-keys and deny-keys filters. (#4444) +- Support Go 1.21. (#4463) + +### Changed + +- Starting from `v1.21.0` of semantic conventions, `go.opentelemetry.io/otel/semconv/{version}/httpconv` and `go.opentelemetry.io/otel/semconv/{version}/netconv` packages will no longer be published. (#4145) +- Log duplicate instrument conflict at a warning level instead of info in `go.opentelemetry.io/otel/sdk/metric`. (#4202) +- Return an error on the creation of new instruments in `go.opentelemetry.io/otel/sdk/metric` if their name doesn't pass regexp validation. (#4210) +- `NewManualReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*ManualReader` instead of `Reader`. (#4244) +- `NewPeriodicReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*PeriodicReader` instead of `Reader`. (#4244) +- Count the Collect time in the `PeriodicReader` timeout in `go.opentelemetry.io/otel/sdk/metric`. (#4221) +- The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272) +- The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272) +- If an attribute set is omitted from an async callback, the previous value will no longer be exported in `go.opentelemetry.io/otel/sdk/metric`. (#4290) +- If an attribute set is observed multiple times in an async callback in `go.opentelemetry.io/otel/sdk/metric`, the values will be summed instead of the last observation winning. (#4289) +- Allow the explicit bucket histogram aggregation to be used for the up-down counter, observable counter, observable up-down counter, and observable gauge in the `go.opentelemetry.io/otel/sdk/metric` package. (#4332) +- Restrict `Meter`s in `go.opentelemetry.io/otel/sdk/metric` to only register and collect instruments it created. (#4333) +- `PeriodicReader.Shutdown` and `PeriodicReader.ForceFlush` in `go.opentelemetry.io/otel/sdk/metric` now apply the periodic reader's timeout to the operation if the user provided context does not contain a deadline. (#4356, #4377) +- Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.21.0`. (#4408) +- Increase instrument name maximum length from 63 to 255 characters in `go.opentelemetry.io/otel/sdk/metric`. (#4434) +- Add `go.opentelemetry.io/otel/sdk/metric.WithProducer` as an `Option` for `"go.opentelemetry.io/otel/sdk/metric".NewManualReader` and `"go.opentelemetry.io/otel/sdk/metric".NewPeriodicReader`. (#4346)
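+
+A sketch combining two of the additions above: a view that selects the exponential histogram aggregation (#4245) and an allow-keys attribute filter (#4444), using the aggregation names after their move into `go.opentelemetry.io/otel/sdk/metric` (#4435); the instrument name and keys are illustrative:
+
+```go
+package main
+
+import (
+	"go.opentelemetry.io/otel/attribute"
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+)
+
+func main() {
+	// Match a histogram by name and replace its aggregation with a
+	// base-2 exponential histogram, keeping only the listed keys.
+	view := sdkmetric.NewView(
+		sdkmetric.Instrument{Name: "request.duration"},
+		sdkmetric.Stream{
+			Aggregation: sdkmetric.AggregationBase2ExponentialHistogram{
+				MaxSize:  160,
+				MaxScale: 20,
+			},
+			AttributeFilter: attribute.NewAllowKeysFilter("http.method", "http.route"),
+		},
+	)
+	provider := sdkmetric.NewMeterProvider(sdkmetric.WithView(view))
+	_ = provider
+}
+```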
+ +### Removed + +- Remove `Reader.RegisterProducer` in `go.opentelemetry.io/otel/sdk/metric`. + Use the added `WithProducer` option instead. (#4346) +- Remove `Reader.ForceFlush` in `go.opentelemetry.io/otel/sdk/metric`. + Notice that `PeriodicReader.ForceFlush` is still available. (#4375) + +### Fixed + +- Correctly format log messages from the `go.opentelemetry.io/otel/exporters/zipkin` exporter. (#4143) +- Log an error for calls to `NewView` in `go.opentelemetry.io/otel/sdk/metric` that have empty criteria. (#4307) +- Fix `"go.opentelemetry.io/otel/sdk/resource".WithHostID()` to not set an empty `host.id`. (#4317) +- Use the instrument identifying fields to cache aggregators and determine duplicate instrument registrations in `go.opentelemetry.io/otel/sdk/metric`. (#4337) +- Detect duplicate instruments for case-insensitive names in `go.opentelemetry.io/otel/sdk/metric`. (#4338) +- The `ManualReader` will not panic if `AggregationSelector` returns `nil` in `go.opentelemetry.io/otel/sdk/metric`. (#4350) +- If a `Reader`'s `AggregationSelector` returns `nil` or `DefaultAggregation` the pipeline will use the default aggregation. (#4350) +- Log a suggested view that fixes instrument conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4349) +- Fix possible panic, deadlock and race condition in batch span processor in `go.opentelemetry.io/otel/sdk/trace`. (#4353) +- Improve context cancellation handling in batch span processor's `ForceFlush` in `go.opentelemetry.io/otel/sdk/trace`. (#4369) +- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` using gotmpl. (#4397, #3846) +- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4404, #3846) +- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4407, #3846) +- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4400, #3846) +- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4401, #3846) +- Do not block the metric SDK when OTLP metric exports are blocked in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#3925, #4395) +- Do not append `_total` if the counter already has that suffix for the Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus`. (#4373) +- Fix resource detection data race in `go.opentelemetry.io/otel/sdk/resource`. (#4409) +- Use the first-seen instrument name during instrument name conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4428)
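+
+With `Reader.RegisterProducer` removed (#4346), external producers are now registered when a reader is constructed. A minimal sketch, assuming the OpenCensus bridge's `NewMetricProducer` as the external `Producer`:
+
+```go
+package main
+
+import (
+	"go.opentelemetry.io/otel/bridge/opencensus"
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+)
+
+func main() {
+	// WithProducer attaches the external Producer when the reader is
+	// built, replacing the removed Reader.RegisterProducer method.
+	reader := sdkmetric.NewManualReader(
+		sdkmetric.WithProducer(opencensus.NewMetricProducer()),
+	)
+	provider := sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader))
+	_ = provider
+}
+```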
+ +### Deprecated + +- The `go.opentelemetry.io/otel/exporters/jaeger` package is deprecated. + OpenTelemetry dropped support for the Jaeger exporter in July 2023. + Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` + or `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` instead. (#4423) +- The `go.opentelemetry.io/otel/example/jaeger` package is deprecated. (#4423) +- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` package is deprecated. (#4420) +- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf` package is deprecated. (#4420) +- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otest` package is deprecated. (#4420) +- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform` package is deprecated. (#4420) +- The `go.opentelemetry.io/otel/exporters/otlp/internal` package is deprecated. (#4421) +- The `go.opentelemetry.io/otel/exporters/otlp/internal/envconfig` package is deprecated. (#4421) +- The `go.opentelemetry.io/otel/exporters/otlp/internal/retry` package is deprecated. (#4421) +- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` package is deprecated. (#4425) +- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/envconfig` package is deprecated. (#4425) +- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig` package is deprecated. (#4425) +- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlptracetest` package is deprecated. (#4425) +- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/retry` package is deprecated. (#4425) +- The `go.opentelemetry.io/otel/sdk/metric/aggregation` package is deprecated. + Use the aggregation types added to `go.opentelemetry.io/otel/sdk/metric` instead. (#4435) + +## [1.16.0/0.39.0] 2023-05-18 + +This release contains the first stable release of the OpenTelemetry Go [metric API]. +Our project stability guarantees now apply to the `go.opentelemetry.io/otel/metric` package. +See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. + +### Added + +- The `go.opentelemetry.io/otel/semconv/v1.19.0` package. + The package contains semantic conventions from the `v1.19.0` version of the OpenTelemetry specification. (#3848) +- The `go.opentelemetry.io/otel/semconv/v1.20.0` package.
+ The package contains semantic conventions from the `v1.20.0` version of the OpenTelemetry specification. (#4078) +- The Exponential Histogram data types in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4165) +- OTLP metrics exporter now supports the Exponential Histogram Data Type. (#4222) +- Fix serialization of `time.Time` zero values in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` packages. (#4271) + +### Changed + +- Use `strings.Cut()` instead of `strings.SplitN()` for better readability and memory use. (#4049) +- `MeterProvider` returns noop meters once it has been shut down. (#4154) + +### Removed + +- The deprecated `go.opentelemetry.io/otel/metric/instrument` package is removed. + Use `go.opentelemetry.io/otel/metric` instead. (#4055) + +### Fixed + +- Fix build for BSD based systems in `go.opentelemetry.io/otel/sdk/resource`. (#4077) + +## [1.16.0-rc.1/0.39.0-rc.1] 2023-05-03 + +This is a release candidate for the v1.16.0/v0.39.0 release. +That release is expected to include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API. +See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. + +### Added + +- Support global `MeterProvider` in `go.opentelemetry.io/otel`. (#4039) + - Use `Meter` for a `metric.Meter` from the global `metric.MeterProvider`. + - Use `GetMeterProvider` for a global `metric.MeterProvider`. + - Use `SetMeterProvider` to set the global `metric.MeterProvider`. + +### Changed + +- Move the `go.opentelemetry.io/otel/metric` module to the `stable-v1` module set. + This stages the metric API to be released as a stable module. (#4038) + +### Removed + +- The `go.opentelemetry.io/otel/metric/global` package is removed. + Use `go.opentelemetry.io/otel` instead. (#4039) + +## [1.15.1/0.38.1] 2023-05-02 + +### Fixed + +- Remove unused imports from `sdk/resource/host_id_bsd.go` which caused build failures. (#4040, #4041) + +## [1.15.0/0.38.0] 2023-04-27 + +### Added + +- The `go.opentelemetry.io/otel/metric/embedded` package. (#3916) +- The `Version` function to `go.opentelemetry.io/otel/sdk` to return the SDK version. (#3949) +- Add a `WithNamespace` option to `go.opentelemetry.io/otel/exporters/prometheus` to allow users to prefix metrics with a namespace. (#3970) +- The following configuration types were added to `go.opentelemetry.io/otel/metric/instrument` to be used in the configuration of measurement methods. (#3971) + - The `AddConfig` used to hold configuration for addition measurements + - `NewAddConfig` used to create a new `AddConfig` + - `AddOption` used to configure an `AddConfig` + - The `RecordConfig` used to hold configuration for recorded measurements + - `NewRecordConfig` used to create a new `RecordConfig` + - `RecordOption` used to configure a `RecordConfig` + - The `ObserveConfig` used to hold configuration for observed measurements + - `NewObserveConfig` used to create a new `ObserveConfig` + - `ObserveOption` used to configure an `ObserveConfig` +- `WithAttributeSet` and `WithAttributes` are added to `go.opentelemetry.io/otel/metric/instrument`. + They return an option used during a measurement that defines the attribute Set associated with the measurement. (#3971) +- The `Version` function to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` to return the OTLP metrics client version. (#3956) +- The `Version` function to `go.opentelemetry.io/otel/exporters/otlp/otlptrace` to return the OTLP trace client version. (#3956)
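+
+A short sketch of the measurement-option style introduced by these configuration types (#3971), written against the options' current home in `go.opentelemetry.io/otel/metric`; the counter name and attribute are illustrative:
+
+```go
+package main
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+)
+
+func main() {
+	meter := sdkmetric.NewMeterProvider().Meter("example")
+	counter, err := meter.Int64Counter("requests.total")
+	if err != nil {
+		panic(err)
+	}
+	// Attributes are supplied through an AddOption rather than as a
+	// trailing list of attribute.KeyValue values.
+	counter.Add(context.Background(), 1,
+		metric.WithAttributes(attribute.String("http.route", "/home")),
+	)
+}
+```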
+ +### Changed + +- The `Extrema` in `go.opentelemetry.io/otel/sdk/metric/metricdata` is redefined with a generic argument of `[N int64 | float64]`. (#3870) +- Update all exported interfaces from `go.opentelemetry.io/otel/metric` to embed their corresponding interface from `go.opentelemetry.io/otel/metric/embedded`. + This adds an implementation requirement to set the interface default behavior for unimplemented methods. (#3916) +- Move No-Op implementation from `go.opentelemetry.io/otel/metric` into its own package `go.opentelemetry.io/otel/metric/noop`. (#3941) + - `metric.NewNoopMeterProvider` is replaced with `noop.NewMeterProvider` +- Add all the methods from `"go.opentelemetry.io/otel/trace".SpanContext` to `bridgeSpanContext` by embedding `otel.SpanContext` in `bridgeSpanContext`. (#3966) +- Wrap `UploadMetrics` error in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/` to improve error message when encountering generic grpc errors. (#3974) +- The measurement methods for all instruments in `go.opentelemetry.io/otel/metric/instrument` accept an option instead of the variadic `"go.opentelemetry.io/otel/attribute".KeyValue`. (#3971) + - The `Int64Counter.Add` method now accepts `...AddOption` + - The `Float64Counter.Add` method now accepts `...AddOption` + - The `Int64UpDownCounter.Add` method now accepts `...AddOption` + - The `Float64UpDownCounter.Add` method now accepts `...AddOption` + - The `Int64Histogram.Record` method now accepts `...RecordOption` + - The `Float64Histogram.Record` method now accepts `...RecordOption` + - The `Int64Observer.Observe` method now accepts `...ObserveOption` + - The `Float64Observer.Observe` method now accepts `...ObserveOption` +- The `Observer` methods in `go.opentelemetry.io/otel/metric` accept an option instead of the variadic `"go.opentelemetry.io/otel/attribute".KeyValue`. (#3971) + - The `Observer.ObserveInt64` method now accepts `...ObserveOption` + - The `Observer.ObserveFloat64` method now accepts `...ObserveOption` +- Move global metric back to `go.opentelemetry.io/otel/metric/global` from `go.opentelemetry.io/otel`. (#3986) + +### Fixed + +- `TracerProvider` allows calling `Tracer()` while it's shutting down. + It used to deadlock. (#3924) +- Use the SDK version for the Telemetry SDK resource detector in `go.opentelemetry.io/otel/sdk/resource`. (#3949) +- Fix a data race in `SpanProcessor` returned by `NewSimpleSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace`. (#3951) +- Automatically figure out the default aggregation with `aggregation.Default`. (#3967) + +### Deprecated + +- The `go.opentelemetry.io/otel/metric/instrument` package is deprecated. + Use the equivalent types added to `go.opentelemetry.io/otel/metric` instead. (#4018) + +## [1.15.0-rc.2/0.38.0-rc.2] 2023-03-23 + +This is a release candidate for the v1.15.0/v0.38.0 release. +That release will include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API. +See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. + +### Added + +- The `WithHostID` option to `go.opentelemetry.io/otel/sdk/resource`. (#3812) +- The `WithoutTimestamps` option to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` to set all timestamps to zero. (#3828) +- The new `Exemplar` type is added to `go.opentelemetry.io/otel/sdk/metric/metricdata`.
+ Both the `DataPoint` and `HistogramDataPoint` types from that package have a new field of `Exemplars` containing the sampled exemplars for their timeseries. (#3849) +- Configuration for each metric instrument in `go.opentelemetry.io/otel/sdk/metric/instrument`. (#3895) +- The internal logging introduces a warning level verbosity equal to `V(1)`. (#3900) +- Added a log message warning about usage of `SimpleSpanProcessor` in production environments. (#3854) + +### Changed + +- Optimize memory allocation when creating a new `Set` using `NewSet` or `NewSetWithFiltered` in `go.opentelemetry.io/otel/attribute`. (#3832) +- Optimize memory allocation when creating new metric instruments in `go.opentelemetry.io/otel/sdk/metric`. (#3832) +- Avoid creating new objects on all calls to `WithDeferredSetup` and `SkipContextSetup` in OpenTracing bridge. (#3833) +- The `New` and `Detect` functions from `go.opentelemetry.io/otel/sdk/resource` return errors that wrap underlying errors instead of just containing the underlying error strings. (#3844) +- Both the `Histogram` and `HistogramDataPoint` are redefined with a generic argument of `[N int64 | float64]` in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#3849) +- The metric `Export` interface from `go.opentelemetry.io/otel/sdk/metric` accepts a `*ResourceMetrics` instead of `ResourceMetrics`. (#3853) +- Rename `Asynchronous` to `Observable` in `go.opentelemetry.io/otel/metric/instrument`. (#3892) +- Rename `Int64ObserverOption` to `Int64ObservableOption` in `go.opentelemetry.io/otel/metric/instrument`. (#3895) +- Rename `Float64ObserverOption` to `Float64ObservableOption` in `go.opentelemetry.io/otel/metric/instrument`. (#3895) +- The internal logging changes the verbosity level of info to `V(4)`, the verbosity level of debug to `V(8)`. (#3900) + +### Fixed + +- `TracerProvider` consistently doesn't allow registering a `SpanProcessor` after shutdown. (#3845) + +### Removed + +- The deprecated `go.opentelemetry.io/otel/metric/global` package is removed. (#3829) +- The unneeded `Synchronous` interface in `go.opentelemetry.io/otel/metric/instrument` was removed. (#3892) +- The `Float64ObserverConfig` and `NewFloat64ObserverConfig` in `go.opentelemetry.io/otel/sdk/metric/instrument`. + Use the added `float64` instrument configuration instead. (#3895) +- The `Int64ObserverConfig` and `NewInt64ObserverConfig` in `go.opentelemetry.io/otel/sdk/metric/instrument`. + Use the added `int64` instrument configuration instead. (#3895) +- The `NewNoopMeter` function in `go.opentelemetry.io/otel/metric`, use `NewMeterProvider().Meter("")` instead. (#3893) + +## [1.15.0-rc.1/0.38.0-rc.1] 2023-03-01 + +This is a release candidate for the v1.15.0/v0.38.0 release. +That release will include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API. +See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. + +This release drops the compatibility guarantee of [Go 1.18]. + +### Added + +- Support global `MeterProvider` in `go.opentelemetry.io/otel`. (#3818) + - Use `Meter` for a `metric.Meter` from the global `metric.MeterProvider`. + - Use `GetMeterProvider` for a global `metric.MeterProvider`. + - Use `SetMeterProvider` to set the global `metric.MeterProvider`. + +### Changed + +- Dropped compatibility testing for [Go 1.18]. + The project no longer guarantees support for this version of Go. (#3813) + +### Fixed + +- Handle empty environment variables as if they were not set.
(#3764) +- Clarify the `httpconv` and `netconv` packages in `go.opentelemetry.io/otel/semconv/*` provide tracing semantic conventions. (#3823) +- Fix race conditions in `go.opentelemetry.io/otel/exporters/metric/prometheus` that could cause a panic. (#3899) +- Fix sending nil `scopeInfo` to metrics channel in `go.opentelemetry.io/otel/exporters/metric/prometheus` that could cause a panic in `github.com/prometheus/client_golang/prometheus`. (#3899) + +### Deprecated + +- The `go.opentelemetry.io/otel/metric/global` package is deprecated. + Use `go.opentelemetry.io/otel` instead. (#3818) + +### Removed + +- The deprecated `go.opentelemetry.io/otel/metric/unit` package is removed. (#3814) + +## [1.14.0/0.37.0/0.0.4] 2023-02-27 + +This release is the last to support [Go 1.18]. +The next release will require at least [Go 1.19]. + +### Added + +- The `event` type semantic conventions are added to `go.opentelemetry.io/otel/semconv/v1.17.0`. (#3697) +- Support [Go 1.20]. (#3693) +- The `go.opentelemetry.io/otel/semconv/v1.18.0` package. + The package contains semantic conventions from the `v1.18.0` version of the OpenTelemetry specification. (#3719) + - The following `const` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included: + - `OtelScopeNameKey` -> `OTelScopeNameKey` + - `OtelScopeVersionKey` -> `OTelScopeVersionKey` + - `OtelLibraryNameKey` -> `OTelLibraryNameKey` + - `OtelLibraryVersionKey` -> `OTelLibraryVersionKey` + - `OtelStatusCodeKey` -> `OTelStatusCodeKey` + - `OtelStatusDescriptionKey` -> `OTelStatusDescriptionKey` + - `OtelStatusCodeOk` -> `OTelStatusCodeOk` + - `OtelStatusCodeError` -> `OTelStatusCodeError` + - The following `func` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included: + - `OtelScopeName` -> `OTelScopeName` + - `OtelScopeVersion` -> `OTelScopeVersion` + - `OtelLibraryName` -> `OTelLibraryName` + - `OtelLibraryVersion` -> `OTelLibraryVersion` + - `OtelStatusDescription` -> `OTelStatusDescription` +- An `IsSampled` method is added to the `SpanContext` implementation in `go.opentelemetry.io/otel/bridge/opentracing` to expose the span sampled state. + See the [README](./bridge/opentracing/README.md) for more information. (#3570) +- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/metric`. (#3738) +- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/trace`. (#3739) +- The following environment variables are supported by the periodic `Reader` in `go.opentelemetry.io/otel/sdk/metric`. (#3763) + - `OTEL_METRIC_EXPORT_INTERVAL` sets the time between collections and exports. + - `OTEL_METRIC_EXPORT_TIMEOUT` sets the timeout for each export attempt. + +### Changed + +- Fall back to `TextMapCarrier` when the carrier is not `HttpHeader`s in `go.opentelemetry.io/otel/bridge/opentracing`. (#3679) +- The `Collect` method of the `"go.opentelemetry.io/otel/sdk/metric".Reader` interface is updated to accept the `metricdata.ResourceMetrics` value the collection will be made into. + This change is made to enable memory reuse by SDK users. (#3732) +- The `WithUnit` option in `go.opentelemetry.io/otel/sdk/metric/instrument` is updated to accept a `string` for the unit value. (#3776)
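+
+A sketch of the reworked `Collect` signature (#3732): the caller owns the `ResourceMetrics` value, so it can be reused across collections. Names are illustrative:
+
+```go
+package main
+
+import (
+	"context"
+
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+func main() {
+	reader := sdkmetric.NewManualReader()
+	provider := sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader))
+	defer func() { _ = provider.Shutdown(context.Background()) }()
+
+	// The caller owns rm, so the same value can be passed to Collect
+	// repeatedly to avoid reallocating the result on every collection.
+	var rm metricdata.ResourceMetrics
+	if err := reader.Collect(context.Background(), &rm); err != nil {
+		panic(err)
+	}
+	_ = rm.ScopeMetrics
+}
+```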
+ +### Fixed + +- Ensure `go.opentelemetry.io/otel` does not use generics. (#3723, #3725) +- Multi-reader `MeterProvider`s now export metrics for all readers, instead of just the first reader. (#3720, #3724) +- Remove use of deprecated `"math/rand".Seed` in `go.opentelemetry.io/otel/example/prometheus`. (#3733) +- Do not silently drop unknown schema data with `Parse` in `go.opentelemetry.io/otel/schema/v1.1`. (#3743) +- Data race issue in OTLP exporter retry mechanism. (#3755, #3756) +- Wrapping empty errors when exporting in `go.opentelemetry.io/otel/sdk/metric`. (#3698, #3772) +- Incorrect "all" and "resource" definition for schema files in `go.opentelemetry.io/otel/schema/v1.1`. (#3777) + +### Deprecated + +- The `go.opentelemetry.io/otel/metric/unit` package is deprecated. + Use the equivalent unit string instead. (#3776) + - Use `"1"` instead of `unit.Dimensionless` + - Use `"By"` instead of `unit.Bytes` + - Use `"ms"` instead of `unit.Milliseconds` + +## [1.13.0/0.36.0] 2023-02-07 + +### Added + +- Attribute `KeyValue` creation functions to `go.opentelemetry.io/otel/semconv/v1.17.0` for all non-enum semantic conventions. + These functions ensure semantic convention type correctness. (#3675) + +### Fixed + +- Removed the `http.target` attribute from being added by `ServerRequest` in the following packages. (#3687) + - `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv` + - `go.opentelemetry.io/otel/semconv/v1.14.0/httpconv` + - `go.opentelemetry.io/otel/semconv/v1.15.0/httpconv` + - `go.opentelemetry.io/otel/semconv/v1.16.0/httpconv` + - `go.opentelemetry.io/otel/semconv/v1.17.0/httpconv` + +### Removed + +- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is removed. (#3631) +- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is removed. (#3631) +- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is removed. (#3631) +- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncint64` package is removed. (#3631) + +## [1.12.0/0.35.0] 2023-01-28 + +### Added + +- The `WithInt64Callback` option to `go.opentelemetry.io/otel/metric/instrument`. + This option is used to configure `int64` Observer callbacks during their creation. (#3507) +- The `WithFloat64Callback` option to `go.opentelemetry.io/otel/metric/instrument`. + This option is used to configure `float64` Observer callbacks during their creation. (#3507) +- The `Producer` interface and `Reader.RegisterProducer(Producer)` to `go.opentelemetry.io/otel/sdk/metric`. + These additions are used to enable external metric Producers. (#3524) +- The `Callback` function type to `go.opentelemetry.io/otel/metric`. + This new named function type is registered with a `Meter`. (#3564) +- The `go.opentelemetry.io/otel/semconv/v1.13.0` package. + The package contains semantic conventions from the `v1.13.0` version of the OpenTelemetry specification. (#3499) + - The `EndUserAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientRequest` and `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `HTTPAttributesFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientResponse` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `HTTPClientAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `HTTPServerAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+ - The `HTTPServerMetricAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `NetAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `Transport` in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` and `ClientRequest` or `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `SpanStatusFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `SpanStatusFromHTTPStatusCodeAndSpanKind` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `ClientStatus` and `ServerStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `Client` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Conn`. + - The `Server` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Listener`. +- The `go.opentelemetry.io/otel/semconv/v1.14.0` package. + The package contains semantic conventions from the `v1.14.0` version of the OpenTelemetry specification. (#3566) +- The `go.opentelemetry.io/otel/semconv/v1.15.0` package. + The package contains semantic conventions from the `v1.15.0` version of the OpenTelemetry specification. (#3578) +- The `go.opentelemetry.io/otel/semconv/v1.16.0` package. + The package contains semantic conventions from the `v1.16.0` version of the OpenTelemetry specification. (#3579) +- Metric instruments to `go.opentelemetry.io/otel/metric/instrument`. + These instruments are used as replacements of the deprecated `go.opentelemetry.io/otel/metric/instrument/{asyncfloat64,asyncint64,syncfloat64,syncint64}` packages. (#3575, #3586) + - `Float64ObservableCounter` replaces the `asyncfloat64.Counter` + - `Float64ObservableUpDownCounter` replaces the `asyncfloat64.UpDownCounter` + - `Float64ObservableGauge` replaces the `asyncfloat64.Gauge` + - `Int64ObservableCounter` replaces the `asyncint64.Counter` + - `Int64ObservableUpDownCounter` replaces the `asyncint64.UpDownCounter` + - `Int64ObservableGauge` replaces the `asyncint64.Gauge` + - `Float64Counter` replaces the `syncfloat64.Counter` + - `Float64UpDownCounter` replaces the `syncfloat64.UpDownCounter` + - `Float64Histogram` replaces the `syncfloat64.Histogram` + - `Int64Counter` replaces the `syncint64.Counter` + - `Int64UpDownCounter` replaces the `syncint64.UpDownCounter` + - `Int64Histogram` replaces the `syncint64.Histogram` +- `NewTracerProvider` to `go.opentelemetry.io/otel/bridge/opentracing`. + This is used to create `WrapperTracer` instances from a `TracerProvider`. (#3116) +- The `Extrema` type to `go.opentelemetry.io/otel/sdk/metric/metricdata`. + This type is used to represent min/max values and still be able to distinguish unset and zero values. (#3487) +- The `go.opentelemetry.io/otel/semconv/v1.17.0` package. + The package contains semantic conventions from the `v1.17.0` version of the OpenTelemetry specification. (#3599) + +### Changed + +- Jaeger and Zipkin exporters use `github.com/go-logr/logr` as the logging interface, and add the `WithLogr` option. (#3497, #3500) +- Instrument configuration in `go.opentelemetry.io/otel/metric/instrument` is split into specific options and configuration based on the instrument type.
(#3507) + - Use the added `Int64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncint64`. + - Use the added `Float64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncfloat64`. + - Use the added `Int64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncint64`. + - Use the added `Float64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncfloat64`. +- Return a `Registration` from the `RegisterCallback` method of a `Meter` in the `go.opentelemetry.io/otel/metric` package. + This `Registration` can be used to unregister callbacks. (#3522) +- Global error handler uses an atomic value instead of a mutex. (#3543) +- Add `NewMetricProducer` to `go.opentelemetry.io/otel/bridge/opencensus`, which can be used to pass OpenCensus metrics to an OpenTelemetry Reader. (#3541) +- Global logger uses an atomic value instead of a mutex. (#3545) +- The `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` releases all computational resources when called the first time. (#3551) +- The `Sampler` returned from `TraceIDRatioBased` in `go.opentelemetry.io/otel/sdk/trace` now uses the rightmost bits for sampling decisions. + This fixes random sampling when using ID generators like `xray.IDGenerator` and increases parity with other language implementations. (#3557) +- Errors from `go.opentelemetry.io/otel/exporters/otlp/otlptrace` exporters are wrapped in errors identifying their signal name. + Existing users of the exporters attempting to identify specific errors will need to use `errors.Unwrap()` to get the underlying error. (#3516) +- Exporters from `go.opentelemetry.io/otel/exporters/otlp` will print the final retryable error message when attempts to retry time out. (#3514) +- The instrument kind names in `go.opentelemetry.io/otel/sdk/metric` are updated to match the API. (#3562) + - `InstrumentKindSyncCounter` is renamed to `InstrumentKindCounter` + - `InstrumentKindSyncUpDownCounter` is renamed to `InstrumentKindUpDownCounter` + - `InstrumentKindSyncHistogram` is renamed to `InstrumentKindHistogram` + - `InstrumentKindAsyncCounter` is renamed to `InstrumentKindObservableCounter` + - `InstrumentKindAsyncUpDownCounter` is renamed to `InstrumentKindObservableUpDownCounter` + - `InstrumentKindAsyncGauge` is renamed to `InstrumentKindObservableGauge` +- The `RegisterCallback` method of the `Meter` in `go.opentelemetry.io/otel/metric` changed. + - The named `Callback` replaces the inline function parameter. (#3564) + - `Callback` is required to return an error. (#3576) + - `Callback` accepts the added `Observer` parameter. + This new parameter is used by `Callback` implementations to observe values for asynchronous instruments instead of calling the `Observe` method of the instrument directly. (#3584) + - The slice of `instrument.Asynchronous` is now passed as a variadic argument. (#3587) +- The exporter from `go.opentelemetry.io/otel/exporters/zipkin` is updated to use the `v1.16.0` version of semantic conventions. + This means it no longer uses the removed `net.peer.ip` or `http.host` attributes to determine the remote endpoint. + Instead it uses the `net.sock.peer` attributes. (#3581) +- The `Min` and `Max` fields of the `HistogramDataPoint` in `go.opentelemetry.io/otel/sdk/metric/metricdata` are now defined with the added `Extrema` type instead of a `*float64`. (#3487)
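+
+A sketch of the reworked `RegisterCallback` flow described above (#3564, #3576, #3584, #3587), written against the stable metric API; `readQueueDepth` is a hypothetical stand-in for application state:
+
+```go
+package main
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric"
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+)
+
+// readQueueDepth is a hypothetical stand-in for application state.
+func readQueueDepth() int64 { return 0 }
+
+func main() {
+	meter := sdkmetric.NewMeterProvider().Meter("example")
+	depth, err := meter.Int64ObservableGauge("queue.depth")
+	if err != nil {
+		panic(err)
+	}
+	// The callback receives an Observer and reports values through it
+	// instead of calling Observe on the instrument directly.
+	reg, err := meter.RegisterCallback(
+		func(_ context.Context, o metric.Observer) error {
+			o.ObserveInt64(depth, readQueueDepth())
+			return nil
+		},
+		depth, // instruments are passed variadically
+	)
+	if err != nil {
+		panic(err)
+	}
+	defer func() { _ = reg.Unregister() }()
+}
+```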
+ +### Fixed + +- Asynchronous instruments that use sum aggregators and attribute filters correctly add values from equivalent attribute sets that have been filtered. (#3439, #3549) +- The `RegisterCallback` method of the `Meter` from `go.opentelemetry.io/otel/sdk/metric` only registers a callback for instruments created by that meter. + Trying to register a callback with instruments from a different meter will result in an error being returned. (#3584) + +### Deprecated + +- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` is deprecated. + Use `NewMetricProducer` instead. (#3541) +- The `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is deprecated. + Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) +- The `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is deprecated. + Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) +- The `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is deprecated. + Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) +- The `go.opentelemetry.io/otel/metric/instrument/syncint64` package is deprecated. + Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) +- The `NewWrappedTracerProvider` in `go.opentelemetry.io/otel/bridge/opentracing` is now deprecated. + Use `NewTracerProvider` instead. (#3116) + +### Removed + +- The deprecated `go.opentelemetry.io/otel/sdk/metric/view` package is removed. (#3520) +- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncint64` is removed. + Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) + - The `Counter` method is replaced by `Meter.Int64ObservableCounter` + - The `UpDownCounter` method is replaced by `Meter.Int64ObservableUpDownCounter` + - The `Gauge` method is replaced by `Meter.Int64ObservableGauge` +- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncfloat64` is removed. + Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) + - The `Counter` method is replaced by `Meter.Float64ObservableCounter` + - The `UpDownCounter` method is replaced by `Meter.Float64ObservableUpDownCounter` + - The `Gauge` method is replaced by `Meter.Float64ObservableGauge` +- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncint64` is removed. + Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) + - The `Counter` method is replaced by `Meter.Int64Counter` + - The `UpDownCounter` method is replaced by `Meter.Int64UpDownCounter` + - The `Histogram` method is replaced by `Meter.Int64Histogram` +- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncfloat64` is removed. + Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) + - The `Counter` method is replaced by `Meter.Float64Counter` + - The `UpDownCounter` method is replaced by `Meter.Float64UpDownCounter` + - The `Histogram` method is replaced by `Meter.Float64Histogram` + +## [1.11.2/0.34.0] 2022-12-05 + +### Added + +- The `WithView` `Option` is added to the `go.opentelemetry.io/otel/sdk/metric` package. + This option is used to configure the view(s) a `MeterProvider` will use for all `Reader`s that are registered with it. (#3387) +- Add Instrumentation Scope and Version as info metric and label in Prometheus exporter. + This can be disabled using the `WithoutScopeInfo()` option added to that package. (#3273, #3357) +- OTLP exporters now recognize: (#3363) + - `OTEL_EXPORTER_OTLP_INSECURE` + - `OTEL_EXPORTER_OTLP_TRACES_INSECURE` + - `OTEL_EXPORTER_OTLP_METRICS_INSECURE` + - `OTEL_EXPORTER_OTLP_CLIENT_KEY` + - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY` + - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY` + - `OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE` + - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE` + - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE` +- The `View` type and related `NewView` function to create a view according to the OpenTelemetry specification are added to `go.opentelemetry.io/otel/sdk/metric`. + These additions are replacements for the `View` type and `New` function from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459) +- The `Instrument` and `InstrumentKind` type are added to `go.opentelemetry.io/otel/sdk/metric`. + These additions are replacements for the `Instrument` and `InstrumentKind` types from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459) +- The `Stream` type is added to `go.opentelemetry.io/otel/sdk/metric` to define a metric data stream a view will produce. (#3459) +- The `AssertHasAttributes` function allows instrument authors to test that datapoints returned have appropriate attributes. (#3487)
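+
+A minimal sketch of registering a view through the new `WithView` option using the `Instrument` and `Stream` types added above (#3387, #3459); the instrument names are illustrative:
+
+```go
+package main
+
+import (
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+)
+
+func main() {
+	// Rename the stream produced by one instrument. Because the view is
+	// registered with the MeterProvider, it applies to every Reader.
+	view := sdkmetric.NewView(
+		sdkmetric.Instrument{Name: "latency"},
+		sdkmetric.Stream{Name: "request.latency"},
+	)
+	reader := sdkmetric.NewManualReader()
+	provider := sdkmetric.NewMeterProvider(
+		sdkmetric.WithReader(reader),
+		sdkmetric.WithView(view),
+	)
+	_ = provider
+}
+```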
+ +### Changed + +- The `"go.opentelemetry.io/otel/sdk/metric".WithReader` option no longer accepts views to associate with the `Reader`. + Instead, views are now registered directly with the `MeterProvider` via the new `WithView` option. + The views registered with the `MeterProvider` apply to all `Reader`s. (#3387) +- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/sdk/metric".Exporter` interface. (#3260) +- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric".Client` interface. (#3260) +- The `WithTemporalitySelector` and `WithAggregationSelector` `ReaderOption`s have been changed to `ManualReaderOption`s in the `go.opentelemetry.io/otel/sdk/metric` package. (#3260) +- The periodic reader in the `go.opentelemetry.io/otel/sdk/metric` package now uses the temporality and aggregation selectors from its configured exporter instead of accepting them as options. (#3260) + +### Fixed + +- The `go.opentelemetry.io/otel/exporters/prometheus` exporter fixes duplicated `_total` suffixes. (#3369) +- Remove comparable requirement for `Reader`s. (#3387) +- Cumulative metrics from the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) are defined as monotonic sums, instead of non-monotonic. (#3389) +- Asynchronous counters (`Counter` and `UpDownCounter`) from the metric SDK now produce delta sums when configured with delta temporality. (#3398) +- Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340) +- `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436) +- Re-enabled Attribute Filters in the Metric SDK. (#3396) +- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggregation.
(#3408) +- Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. (#3438, #3432) +- Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440) +- Prevent duplicate Prometheus description, unit, and type. (#3469) +- Prevent panic when using incorrect `attribute.Value.As[Type]Slice()`. (#3489) + +### Removed + +- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.Client` interface is removed. (#3486) +- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.New` function is removed. Use `otlpmetric[http|grpc].New` directly. (#3486) + +### Deprecated + +- The `go.opentelemetry.io/otel/sdk/metric/view` package is deprecated. + Use `Instrument`, `InstrumentKind`, `View`, and `NewView` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3476) + +## [1.11.1/0.33.0] 2022-10-19 + +### Added + +- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` registers with a Prometheus registerer on creation. + By default, it will register with the default Prometheus registerer. + A non-default registerer can be used by passing the `WithRegisterer` option. (#3239) +- Added the `WithAggregationSelector` option to the `go.opentelemetry.io/otel/exporters/prometheus` package to change the default `AggregationSelector` used. (#3341) +- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` converts the `Resource` associated with metric exports into a `target_info` metric. (#3285) + +### Changed + +- The `"go.opentelemetry.io/otel/exporters/prometheus".New` function is updated to return an error. + It will return an error if the exporter fails to register with Prometheus. (#3239) + +### Fixed + +- The URL-encoded values from the `OTEL_RESOURCE_ATTRIBUTES` environment variable are decoded. (#2963) +- The `baggage.NewMember` function decodes the `value` parameter instead of directly using it. + This fixes the implementation to be compliant with the W3C specification. (#3226) +- Slice attributes of the `attribute` package are now comparable based on their value, not instance. (#3108, #3252) +- The `Shutdown` and `ForceFlush` methods of the `"go.opentelemetry.io/otel/sdk/trace".TraceProvider` no longer return an error when no processor is registered. (#3268) +- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` cumulatively sums histogram buckets. (#3281) +- The sum of each histogram data point is now uniquely exported by the `go.opentelemetry.io/otel/exporters/otlpmetric` exporters. (#3284, #3293) +- Recorded values for asynchronous counters (`Counter` and `UpDownCounter`) are interpreted as exact, not incremental, sum values by the metric SDK. (#3350, #3278) +- `UpDownCounters` are now correctly output as Prometheus gauges in the `go.opentelemetry.io/otel/exporters/prometheus` exporter. (#3358) +- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` no longer describes the metrics it will send to Prometheus on startup. + Instead the exporter is defined as an "unchecked" collector for Prometheus. + This fixes the `reader is not registered` warning currently emitted on startup. (#3291, #3342) +- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now correctly adds `_total` suffixes to counter metrics. (#3360) +- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now adds a unit suffix to metric names. + This can be disabled using the `WithoutUnits()` option added to that package. (#3352)
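+
+A sketch of constructing the exporter against a non-default registerer via `WithRegisterer` (#3239), with unit suffixes disabled via `WithoutUnits` (#3352); the import alias is illustrative:
+
+```go
+package main
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+	otelprom "go.opentelemetry.io/otel/exporters/prometheus"
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+)
+
+func main() {
+	registry := prometheus.NewRegistry()
+	// New registers with the given registerer and therefore can fail,
+	// which is why it now returns an error.
+	exporter, err := otelprom.New(
+		otelprom.WithRegisterer(registry),
+		otelprom.WithoutUnits(),
+	)
+	if err != nil {
+		panic(err)
+	}
+	provider := sdkmetric.NewMeterProvider(sdkmetric.WithReader(exporter))
+	_ = provider
+}
+```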
+ +## [1.11.0/0.32.3] 2022-10-12 + +### Added + +- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlptrace/otlptracehttp`). (#3261) + +### Changed + +- `span.SetStatus` has been updated such that calls that lower the status are now no-ops. (#3214) +- Upgrade `golang.org/x/sys/unix` from `v0.0.0-20210423185535-09eb48e85fd7` to `v0.0.0-20220919091848-fb04ddd9f9c8`. + This addresses [GO-2022-0493](https://pkg.go.dev/vuln/GO-2022-0493). (#3235) + +## [0.32.2] Metric SDK (Alpha) - 2022-10-11 + +### Added + +- Added an example of using metric views to customize instruments. (#3177) +- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetrichttp`). (#3261) + +### Changed + +- Flush pending measurements with the `PeriodicReader` in the `go.opentelemetry.io/otel/sdk/metric` when `ForceFlush` or `Shutdown` are called. (#3220) +- Update histogram default bounds to match the requirements of the latest specification. (#3222) +- Encode the HTTP status code in the OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`) as an integer. (#3265) + +### Fixed + +- Use default view if instrument does not match any registered view of a reader. (#3224, #3237) +- Return the same instrument every time a user makes the exact same instrument creation call. (#3229, #3251) +- Return the existing instrument when a view transforms a creation call to match an existing instrument. (#3240, #3251) +- Log a warning when a conflicting instrument (e.g. description, unit, data-type) is created instead of returning an error. (#3251) +- The OpenCensus bridge no longer sends empty batches of metrics. (#3263) + +## [0.32.1] Metric SDK (Alpha) - 2022-09-22 + +### Changed + +- The Prometheus exporter sanitizes OpenTelemetry instrument names when exporting. + Invalid characters are replaced with `_`. (#3212) + +### Added + +- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been reintroduced. (#3192) +- The OpenCensus bridge example (`go.opentelemetry.io/otel/example/opencensus`) has been reintroduced. (#3206) + +### Fixed + +- Updated go.mods to point to valid versions of the SDK. (#3216) +- Set the `MeterProvider` resource on all exported metric data. (#3218) + +## [0.32.0] Revised Metric SDK (Alpha) - 2022-09-18 + +### Changed + +- The metric SDK in `go.opentelemetry.io/otel/sdk/metric` is completely refactored to comply with the OpenTelemetry specification. + Please see the package documentation for how the new SDK is initialized and configured. (#3175) +- Update the minimum supported go version to go1.18. Removes support for go1.17. (#3179) + +### Removed + +- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been removed. + A new bridge compliant with the revised metric SDK will be added back in a future release. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/aggregator/histogram` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/aggregator/sum` package is removed, see the new metric SDK.
(#3175) +- The `go.opentelemetry.io/otel/sdk/metric/aggregator` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/controller/basic` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/controller/controllertest` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/controller/time` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/export/aggregation` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/export` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/metrictest` package is removed. + A replacement package that supports the new metric SDK will be added back in a future release. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/number` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/processor/basic` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/processor/processortest` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/processor/reducer` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/registry` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/sdkapi` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/selector/simple` package is removed, see the new metric SDK. (#3175) +- The `"go.opentelemetry.io/otel/sdk/metric".ErrUninitializedInstrument` variable was removed. (#3175) +- The `"go.opentelemetry.io/otel/sdk/metric".ErrBadInstrument` variable was removed. (#3175) +- The `"go.opentelemetry.io/otel/sdk/metric".Accumulator` type was removed, see the `MeterProvider` in the new metric SDK. (#3175) +- The `"go.opentelemetry.io/otel/sdk/metric".NewAccumulator` function was removed, see `NewMeterProvider` in the new metric SDK. (#3175) +- The deprecated `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets` function was removed. (#3175) + +## [1.10.0] - 2022-09-09 + +### Added + +- Support Go 1.19. (#3077) + Include compatibility testing and document support. (#3077) +- Support the OTLP ExportTracePartialSuccess response; these are passed to the registered error handler. (#3106) +- Upgrade go.opentelemetry.io/proto/otlp from v0.18.0 to v0.19.0. (#3107) + +### Changed + +- Fix misidentification of OpenTelemetry `SpanKind` in OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`). (#3096) +- Attempting to start a span with a nil `context` will no longer cause a panic. (#3110) +- All exporters will be shut down even if one reports an error. (#3091) +- Ensure valid UTF-8 when truncating over-length attribute values. (#3156) + +## [1.9.0/0.0.3] - 2022-08-01 + +### Added + +- Add support for Schema Files format 1.1.x (metric "split" transform) with the new `go.opentelemetry.io/otel/schema/v1.1` package. (#2999) +- Add the `go.opentelemetry.io/otel/semconv/v1.11.0` package. + The package contains semantic conventions from the `v1.11.0` version of the OpenTelemetry specification. (#3009) +- Add the `go.opentelemetry.io/otel/semconv/v1.12.0` package. + The package contains semantic conventions from the `v1.12.0` version of the OpenTelemetry specification.
+
+### Fixed
+
+- Invalid warning for context setup being deferred in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#3029)
+
+## [1.8.0/0.31.0] - 2022-07-08
+
+### Added
+
+- Add support for `opentracing.TextMap` format in the `Inject` and `Extract` methods
+ of the `"go.opentelemetry.io/otel/bridge/opentracing".BridgeTracer` type. (#2911)
+
+### Changed
+
+- The `crosslink` make target has been updated to use the `go.opentelemetry.io/build-tools/crosslink` package. (#2886)
+- In the `go.opentelemetry.io/otel/sdk/instrumentation` package rename `Library` to `Scope` and alias `Library` as `Scope`. (#2976)
+- Move the metric no-op implementation from the `nonrecording` package to the `metric` package. (#2866)
+
+### Removed
+
+- Support for go1.16. Support is now only for go1.17 and go1.18. (#2917)
+
+### Deprecated
+
+- The `Library` struct in the `go.opentelemetry.io/otel/sdk/instrumentation` package is deprecated.
+ Use the equivalent `Scope` struct instead. (#2977)
+- The `ReadOnlySpan.InstrumentationLibrary` method from the `go.opentelemetry.io/otel/sdk/trace` package is deprecated.
+ Use the equivalent `ReadOnlySpan.InstrumentationScope` method instead. (#2977)
+
+## [1.7.0/0.30.0] - 2022-04-28
+
+### Added
+
+- Add the `go.opentelemetry.io/otel/semconv/v1.8.0` package.
+ The package contains semantic conventions from the `v1.8.0` version of the OpenTelemetry specification. (#2763)
+- Add the `go.opentelemetry.io/otel/semconv/v1.9.0` package.
+ The package contains semantic conventions from the `v1.9.0` version of the OpenTelemetry specification. (#2792)
+- Add the `go.opentelemetry.io/otel/semconv/v1.10.0` package.
+ The package contains semantic conventions from the `v1.10.0` version of the OpenTelemetry specification. (#2842)
+- Added an in-memory exporter to metrictest to aid testing with a full SDK. (#2776)
+
+### Fixed
+
+- Globally delegated instruments are unwrapped before delegating asynchronous callbacks. (#2784)
+- Remove import of the `testing` package in non-test builds of the `go.opentelemetry.io/otel` package. (#2786)
+
+### Changed
+
+- The `WithLabelEncoder` option from the `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` package is renamed to `WithAttributeEncoder`. (#2790)
+- The `LabelFilterSelector` interface from `go.opentelemetry.io/otel/sdk/metric/processor/reducer` is renamed to `AttributeFilterSelector`.
+ The method included in the renamed interface also changed from `LabelFilterFor` to `AttributeFilterFor`. (#2790)
+- The `Metadata.Labels` method from the `go.opentelemetry.io/otel/sdk/metric/export` package is renamed to `Metadata.Attributes`.
+ Consequently, the `Record` type from the same package also has had the embedded method renamed. (#2790)
+
+### Deprecated
+
+- The `Iterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
+ Use the equivalent `Iterator.Attribute` method instead. (#2790)
+- The `Iterator.IndexedLabel` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
+ Use the equivalent `Iterator.IndexedAttribute` method instead. (#2790)
+- The `MergeIterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
+ Use the equivalent `MergeIterator.Attribute` method instead. (#2790)
+
+### Removed
+
+- Removed the `Batch` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864)
+- Removed the `Measurement` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864)
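+
+A minimal sketch of the replacement accessors named in the deprecations above, using `Iterator.Attribute` in place of the deprecated `Iterator.Label`:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"go.opentelemetry.io/otel/attribute"
+)
+
+func main() {
+	set := attribute.NewSet(
+		attribute.String("service.name", "checkout"),
+		attribute.Int("retry.count", 2),
+	)
+
+	// Iterator.Attribute replaces the deprecated Iterator.Label.
+	iter := set.Iter()
+	for iter.Next() {
+		kv := iter.Attribute()
+		fmt.Printf("%s=%s\n", kv.Key, kv.Value.Emit())
+	}
+}
+```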
+
+## [0.29.0] - 2022-04-11
+
+### Added
+
+- The metrics global package was added back into several test files. (#2764)
+- The `Meter` function is added back to the `go.opentelemetry.io/otel/metric/global` package.
+ This function is a convenience function equivalent to calling `global.MeterProvider().Meter(...)`. (#2750)
+
+### Removed
+
+- Removed the `go.opentelemetry.io/otel/sdk/export/metric` module.
+ Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2720)
+
+### Changed
+
+- Don't panic anymore when setting a global `MeterProvider` to itself. (#2749)
+- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` from `v0.12.1` to `v0.15.0`.
+ This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibraryMetrics` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeMetrics`. (#2748)
+
+## [1.6.3] - 2022-04-07
+
+### Fixed
+
+- Allow non-comparable global `MeterProvider`, `TracerProvider`, and `TextMapPropagator` types to be set. (#2772, #2773)
+
+## [1.6.2] - 2022-04-06
+
+### Changed
+
+- Don't panic anymore when setting a global `TracerProvider` or `TextMapPropagator` to itself. (#2749)
+- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace` from `v0.12.1` to `v0.15.0`.
+ This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibrarySpans` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeSpans`. (#2748)
+
+## [1.6.1] - 2022-03-28
+
+### Fixed
+
+- The `go.opentelemetry.io/otel/schema/*` packages now use the correct schema URL for their `SchemaURL` constant.
+ Instead of using `"https://opentelemetry.io/schemas/v"` they now use the correct URL without a `v` prefix, `"https://opentelemetry.io/schemas/"`. (#2743, #2744)
+
+### Security
+
+- Upgrade `go.opentelemetry.io/proto/otlp` from `v0.12.0` to `v0.12.1`.
+ This includes an indirect upgrade of `github.com/grpc-ecosystem/grpc-gateway` which resolves [a vulnerability](https://nvd.nist.gov/vuln/detail/CVE-2019-11254) from `gopkg.in/yaml.v2` in version `v2.2.3`. (#2724, #2728)
+
+## [1.6.0/0.28.0] - 2022-03-23
+
+### ⚠️ Notice ⚠️
+
+This update is a breaking change of the unstable Metrics API.
+Code instrumented with the `go.opentelemetry.io/otel/metric` package will need to be modified.
+
+### Added
+
+- Add metrics exponential histogram support.
+ New mapping functions have been made available in `sdk/metric/aggregator/exponential/mapping` for other OpenTelemetry projects to take dependencies on. (#2502)
+- Add Go 1.18 to our compatibility tests. (#2679)
+- Allow configuring the Sampler with the `OTEL_TRACES_SAMPLER` and `OTEL_TRACES_SAMPLER_ARG` environment variables. (#2305, #2517)
+- Add the `metric/global` package for obtaining and setting the global `MeterProvider`. (#2660)
+
+### Changed
+
+- The metrics API has been significantly changed to match the revised OpenTelemetry specification.
+ High-level changes include:
+
+ - Synchronous and asynchronous instruments are now handled by independent `InstrumentProvider`s.
+ These `InstrumentProvider`s are managed with a `Meter`.
+ - Synchronous and asynchronous instruments are grouped into their own packages based on value types.
+ - Asynchronous callbacks can now be registered with a `Meter`.
+
+ Be sure to check out the metric module documentation for more information on how to use the revised API. (#2587, #2660)
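+
+As a hedged sketch only: the experimental API kept evolving after this release, but the shape it eventually stabilized on looks roughly like this, with instruments created from, and callbacks registered with, a `Meter`:
+
+```go
+package main
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/metric"
+)
+
+func main() {
+	ctx := context.Background()
+	meter := otel.Meter("example/meter")
+
+	// Synchronous instrument created from the Meter.
+	requests, err := meter.Int64Counter("requests")
+	if err != nil {
+		panic(err)
+	}
+	requests.Add(ctx, 1)
+
+	// Asynchronous instrument whose callback is registered with the Meter.
+	queueLen, err := meter.Int64ObservableGauge("queue.length")
+	if err != nil {
+		panic(err)
+	}
+	if _, err := meter.RegisterCallback(func(_ context.Context, o metric.Observer) error {
+		o.ObserveInt64(queueLen, 42)
+		return nil
+	}, queueLen); err != nil {
+		panic(err)
+	}
+}
+```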
+
+### Fixed
+
+- Fall back to general attribute limits when span-specific ones are not set in the environment. (#2675, #2677)
+
+## [1.5.0] - 2022-03-16
+
+### Added
+
+- Log the Exporters' configuration in the TracerProviders message. (#2578)
+- Added support to configure the span limits with environment variables.
+ The following environment variables are supported. (#2606, #2637)
+ - `OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT`
+ - `OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT`
+ - `OTEL_SPAN_EVENT_COUNT_LIMIT`
+ - `OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT`
+ - `OTEL_SPAN_LINK_COUNT_LIMIT`
+ - `OTEL_LINK_ATTRIBUTE_COUNT_LIMIT`
+
+ If the provided environment variables are invalid (negative), the default values will be used.
+- Rename the `gc` runtime name to `go`. (#2560)
+- Add resource container ID detection. (#2418)
+- Add span attribute value length limit.
+ The new `AttributeValueLengthLimit` field is added to the `"go.opentelemetry.io/otel/sdk/trace".SpanLimits` type to configure this limit for a `TracerProvider`.
+ The default limit for this resource is "unlimited". (#2637)
+- Add the `WithRawSpanLimits` option to `go.opentelemetry.io/otel/sdk/trace`.
+ This option replaces the `WithSpanLimits` option.
+ Zero or negative values will not be changed to the default value like `WithSpanLimits` does.
+ Setting a limit to zero will effectively disable the related resource it limits, and setting it to a negative value will mean that resource is unlimited.
+ Consequently, limits should be constructed using `NewSpanLimits` and updated accordingly. (#2637)
+
+### Changed
+
+- Drop the oldest tracestate `Member` when capacity is reached. (#2592)
+- Add event and link drop counts to the exported data from the `otlptrace` exporter. (#2601)
+- Unify path cleaning functionality in the `otlpmetric` and `otlptrace` configuration. (#2639)
+- Change the debug message from the `sdk/trace.BatchSpanProcessor` to reflect that the count is cumulative. (#2640)
+- Introduce a new internal `envconfig` package for OTLP exporters. (#2608)
+- If `http.Request.Host` is empty, fall back to using `URL.Host` when populating `http.host` in the `semconv` packages. (#2661)
+
+### Fixed
+
+- Remove the OTLP trace exporter limit of SpanEvents when exporting. (#2616)
+- Default to port `4318` instead of `4317` for the `otlpmetrichttp` and `otlptracehttp` clients. (#2614, #2625)
+- Unlimited span limits are now supported (negative values). (#2636, #2637)
+
+### Deprecated
+
+- Deprecated `"go.opentelemetry.io/otel/sdk/trace".WithSpanLimits`.
+ Use `WithRawSpanLimits` instead.
+ That option allows setting unlimited and zero limits; this option does not.
+ This option will be kept until the next major-version release. (#2637)
+
+## [1.4.1] - 2022-02-16
+
+### Fixed
+
+- Fix a race condition in reading the dropped spans number for the `BatchSpanProcessor`. (#2615)
+
+## [1.4.0] - 2022-02-11
+
+### Added
+
+- Use the `OTEL_EXPORTER_ZIPKIN_ENDPOINT` environment variable to specify the Zipkin collector endpoint. (#2490)
+- Log the configuration of `TracerProvider`s and `Tracer`s for debugging.
+ To enable, use a logger with verbosity (V level) `>=1`. (#2500)
+- Added support to configure the batch span processor with environment variables (a short usage sketch follows this list).
+ The following environment variables are used. (#2515)
+ - `OTEL_BSP_SCHEDULE_DELAY`
+ - `OTEL_BSP_EXPORT_TIMEOUT`
+ - `OTEL_BSP_MAX_QUEUE_SIZE`
+ - `OTEL_BSP_MAX_EXPORT_BATCH_SIZE`
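+
+A minimal sketch of this environment-variable configuration, assuming `stdouttrace` as a stand-in exporter (any `SpanExporter` works):
+
+```go
+package main
+
+import (
+	"context"
+	"os"
+
+	"go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+)
+
+func main() {
+	// Delay and timeout values are interpreted as milliseconds; they are
+	// read when the batch span processor is constructed.
+	os.Setenv("OTEL_BSP_SCHEDULE_DELAY", "2000")
+	os.Setenv("OTEL_BSP_MAX_QUEUE_SIZE", "4096")
+
+	exp, err := stdouttrace.New()
+	if err != nil {
+		panic(err)
+	}
+	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
+	defer func() { _ = tp.Shutdown(context.Background()) }()
+}
+```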
+
+### Changed
+
+- The Zipkin exporter exports `Resource` attributes in the `Tags` field. (#2589)
+
+### Deprecated
+
+- Deprecate the `go.opentelemetry.io/otel/sdk/export/metric` module.
+ Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2382)
+- Deprecate `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets`. (#2445)
+
+### Fixed
+
+- Fixed the instrument kind for noop async instruments to correctly report an implementation. (#2461)
+- Fix UDP packets overflowing with Jaeger payloads. (#2489, #2512)
+- Change the `otlpmetric.Client` interface's `UploadMetrics` method to accept a single `ResourceMetrics` instead of a slice of them. (#2491)
+- Specify explicit buckets in the Prometheus example, fixing an issue where the example only had a `+inf` bucket. (#2419, #2493)
+- W3C baggage will now decode URL-escaped values. (#2529)
+- Baggage members are now only validated once, when calling `NewMember`, and not also when adding them to the baggage itself. (#2522)
+- The order in which attributes are dropped from spans in the `go.opentelemetry.io/otel/sdk/trace` package when capacity is reached now complies with the OpenTelemetry specification.
+ Instead of dropping the least-recently-used attribute, the last-added attribute is dropped.
+ This drop order still only applies to attributes with unique keys not already contained in the span.
+ If an attribute is added with a key already contained in the span, that attribute is updated to the new value being added. (#2576)
+
+### Removed
+
+- Updated `go.opentelemetry.io/proto/otlp` from `v0.11.0` to `v0.12.0`. This version removes a number of deprecated methods. (#2546)
+ - [`Metric.GetIntGauge()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntGauge)
+ - [`Metric.GetIntHistogram()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntHistogram)
+ - [`Metric.GetIntSum()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntSum)
+
+## [1.3.0] - 2021-12-10
+
+### ⚠️ Notice ⚠️
+
+We have updated the project minimum supported Go version to 1.16.
+
+### Added
+
+- Added an internal Logger.
+ This can be used by the SDK and API to provide users with feedback about the internal state.
+ To enable verbose logs, configure the logger to print V(1) logs. For debugging information, configure it to print V(5) logs. (#2343)
+- Add the `WithRetry` `Option` and the `RetryConfig` type to the `go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp` package to specify retry behavior consistently. (#2425)
+- Add `SpanStatusFromHTTPStatusCodeAndSpanKind` to all `semconv` packages to return a span status code similar to `SpanStatusFromHTTPStatusCode`, but exclude `4XX` HTTP errors as span errors if the span is of server kind. (#2296)
+
+### Changed
+
+- The `"go.opentelemetry.io/otel/exporter/otel/otlptrace/otlptracegrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handles errors on established connections by re-resolving the name and reconnecting. (#2329)
+- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handles errors on established connections by re-resolving the name and reconnecting. (#2425)
+- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".RetrySettings` type is renamed to `RetryConfig`. (#2425)
+- The `go.opentelemetry.io/otel/exporter/otel/*` gRPC exporters now default to using the host's root CA set if none are provided by the user and `WithInsecure` is not specified. (#2432)
+- Change `resource.Default` to be evaluated the first time it is called, rather than on import. This allows the caller the option to update `OTEL_RESOURCE_ATTRIBUTES` first, such as with `os.Setenv`. (#2371)
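+
+A minimal sketch of the lazy `resource.Default` evaluation described above (the attribute values are illustrative only):
+
+```go
+package main
+
+import (
+	"fmt"
+	"os"
+
+	"go.opentelemetry.io/otel/sdk/resource"
+)
+
+func main() {
+	// Because resource.Default is evaluated on first call rather than on
+	// import, setting the variable beforehand takes effect.
+	os.Setenv("OTEL_RESOURCE_ATTRIBUTES", "service.name=checkout,deployment.environment=dev")
+	fmt.Println(resource.Default())
+}
+```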
+
+### Fixed
+
+- The `go.opentelemetry.io/otel/exporter/otel/*` exporters are updated to handle per-signal and universal endpoints according to the OpenTelemetry specification.
+ Any per-signal endpoint set via an `OTEL_EXPORTER_OTLP_<signal>_ENDPOINT` environment variable is now used without modification of the path.
+ When `OTEL_EXPORTER_OTLP_ENDPOINT` is set, if it contains a path, that path is used as a base path which per-signal paths are appended to. (#2433)
+- The basic metric controller is updated to use a `sync.Map` to avoid blocking calls. (#2381)
+- The `go.opentelemetry.io/otel/exporter/jaeger` exporter correctly sets the `otel.status_code` value to be a string of `ERROR` or `OK` instead of an integer code. (#2439, #2440)
+
+### Deprecated
+
+- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithMaxAttempts` `Option`; use the new `WithRetry` `Option` instead. (#2425)
+- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithBackoff` `Option`; use the new `WithRetry` `Option` instead. (#2425)
+
+### Removed
+
+- Remove the metric Processor's ability to convert cumulative to delta aggregation temporality. (#2350)
+- Remove the metric Bound Instruments interface and implementations. (#2399)
+- Remove the metric MinMaxSumCount kind aggregation and the corresponding OTLP export path. (#2423)
+- The Metric SDK removes the "exact" aggregator for histogram instruments, as it performed a non-standard aggregation for OTLP export (creating repeated Gauge points) and worked its way into a number of confusing examples. (#2348)
+
+## [1.2.0] - 2021-11-12
+
+### Changed
+
+- The Metric SDK `export.ExportKind` and `export.ExportKindSelector` types have been renamed to `aggregation.Temporality` and `aggregation.TemporalitySelector` respectively to keep in line with the current specification and protocol, along with built-in selectors (e.g., `aggregation.CumulativeTemporalitySelector`, ...). (#2274)
+- The Metric `Exporter` interface now requires a `TemporalitySelector` method instead of an `ExportKindSelector`. (#2274)
+- Metrics API cleanup. The `metric/sdkapi` package has been created to relocate the API-to-SDK interface:
+ - The following interface types simply moved from `metric` to `metric/sdkapi`: `Descriptor`, `MeterImpl`, `InstrumentImpl`, `SyncImpl`, `BoundSyncImpl`, `AsyncImpl`, `AsyncRunner`, `AsyncSingleRunner`, and `AsyncBatchRunner`.
+ - The following struct types moved and are replaced with type aliases, since they are exposed to the user: `Observation`, `Measurement`.
+ - The no-op implementations of sync and async instruments are no longer exported; new functions `sdkapi.NewNoopAsyncInstrument()` and `sdkapi.NewNoopSyncInstrument()` are provided instead. (#2271)
+- Update the SDK `BatchSpanProcessor` to export all queued spans when `ForceFlush` is called. (#2080, #2335)
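+
+A minimal sketch of draining queued spans with `ForceFlush` (the five-second timeout is an arbitrary illustrative choice):
+
+```go
+package main
+
+import (
+	"context"
+	"time"
+
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+)
+
+// flush drains every queued span from the provider's registered span
+// processors, e.g. before a short-lived process exits.
+func flush(tp *sdktrace.TracerProvider) error {
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	return tp.ForceFlush(ctx)
+}
+
+func main() {
+	tp := sdktrace.NewTracerProvider()
+	if err := flush(tp); err != nil {
+		panic(err)
+	}
+}
+```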
+
+### Added
+
+- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002)
+- Added a new `schema` module to help parse Schema Files in OTEP 0152 format. (#2267)
+- Added a new `MapCarrier` to the `go.opentelemetry.io/otel/propagation` package to hold propagated cross-cutting concerns as a `map[string]string` held in memory. (#2334)
+
+## [1.1.0] - 2021-10-27
+
+### Added
+
+- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002)
+- Add the `go.opentelemetry.io/otel/semconv/v1.7.0` package.
+ The package contains semantic conventions from the `v1.7.0` version of the OpenTelemetry specification. (#2320)
+- Add the `go.opentelemetry.io/otel/semconv/v1.6.1` package.
+ The package contains semantic conventions from the `v1.6.1` version of the OpenTelemetry specification. (#2321)
+- Add the `go.opentelemetry.io/otel/semconv/v1.5.0` package.
+ The package contains semantic conventions from the `v1.5.0` version of the OpenTelemetry specification. (#2322)
+ - When upgrading from the `semconv/v1.4.0` package note the following name changes:
+ - `K8SReplicasetUIDKey` -> `K8SReplicaSetUIDKey`
+ - `K8SReplicasetNameKey` -> `K8SReplicaSetNameKey`
+ - `K8SStatefulsetUIDKey` -> `K8SStatefulSetUIDKey`
+ - `K8SStatefulsetNameKey` -> `K8SStatefulSetNameKey`
+ - `K8SDaemonsetUIDKey` -> `K8SDaemonSetUIDKey`
+ - `K8SDaemonsetNameKey` -> `K8SDaemonSetNameKey`
+
+### Changed
+
+- Links added to a span will be dropped by the SDK if they contain an invalid span context. (#2275)
+
+### Fixed
+
+- The `"go.opentelemetry.io/otel/semconv/v1.4.0".HTTPServerAttributesFromHTTPRequest` function now correctly only sets the HTTP client IP attribute even if the connection was routed with proxies and there are multiple addresses in the `X-Forwarded-For` header. (#2282, #2284)
+- The `"go.opentelemetry.io/otel/semconv/v1.4.0".NetAttributesFromHTTPRequest` function correctly handles IPv6 addresses as IP addresses and sets the correct net peer IP instead of the net peer hostname attribute. (#2283, #2285)
+- The simple span processor shutdown method deterministically returns the exporter error status if it simultaneously finishes when the deadline is reached. (#2290, #2289)
+
+## [1.0.1] - 2021-10-01
+
+### Fixed
+
+- The JSON stdout exporter no longer crashes due to a concurrency bug. (#2265)
+
+## [Metrics 0.24.0] - 2021-10-01
+
+### Changed
+
+- `NoopMeterProvider` is now private and `NewNoopMeterProvider` must be used to obtain a `noopMeterProvider`. (#2237)
+- The Metric SDK `Export()` function takes a new two-level reader interface for iterating over results one instrumentation library at a time. (#2197)
+ - The former `"go.opentelemetry.io/otel/sdk/export/metric".CheckpointSet` is renamed `Reader`.
+ - The new interface is named `"go.opentelemetry.io/otel/sdk/export/metric".InstrumentationLibraryReader`.
+
+## [1.0.0] - 2021-09-20
+
+This is the first stable release for the project.
+This release includes an API and SDK for the tracing signal that will comply with the stability guarantees defined by the project's [versioning policy](./VERSIONING.md).
+
+### Added
+
+- The OTLP trace exporter now sets the `SchemaURL` field in the exported telemetry if the Tracer has the `WithSchemaURL` option set. (#2242)
+
+### Fixed
+
+- Slice-valued attributes can correctly be used as map keys. (#2223)
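+
+A minimal sketch of this fix in use, relying on `Set.Equivalent` returning a comparable `Distinct` value:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"go.opentelemetry.io/otel/attribute"
+)
+
+func main() {
+	set := attribute.NewSet(
+		attribute.StringSlice("feature.flags", []string{"a", "b"}),
+	)
+
+	// Set.Equivalent returns a comparable Distinct value, so sets holding
+	// slice-valued attributes can key a map.
+	counts := map[attribute.Distinct]int{}
+	counts[set.Equivalent()]++
+	fmt.Println(len(counts))
+}
+```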
+
+### Removed
+
+- Removed the `"go.opentelemetry.io/otel/exporters/zipkin".WithSDKOptions` function. (#2248)
+- Removed the deprecated package `go.opentelemetry.io/otel/oteltest`. (#2234)
+- Removed the deprecated package `go.opentelemetry.io/otel/bridge/opencensus/utils`. (#2233)
+- Removed deprecated functions, types, and methods from the `go.opentelemetry.io/otel/attribute` package.
+ Use the typed functions and methods added to the package instead. (#2235)
+ - The `Key.Array` method is removed.
+ - The `Array` function is removed.
+ - The `Any` function is removed.
+ - The `ArrayValue` function is removed.
+ - The `AsArray` function is removed.
+
+## [1.0.0-RC3] - 2021-09-02
+
+### Added
+
+- Added `ErrorHandlerFunc` to use a function as an `"go.opentelemetry.io/otel".ErrorHandler`. (#2149)
+- Added the `"go.opentelemetry.io/otel/trace".WithStackTrace` option to add a stack trace when using `span.RecordError` or when a panic is handled in `span.End`. (#2163)
+- Added typed slice attribute types and functionality to the `go.opentelemetry.io/otel/attribute` package to replace the existing array type and functions. (#2162)
+ - `BoolSlice`, `IntSlice`, `Int64Slice`, `Float64Slice`, and `StringSlice` replace the use of the `Array` function in the package.
+- Added the `go.opentelemetry.io/otel/example/fib` example package.
+ Included is an example application that computes Fibonacci numbers. (#2203)
+
+### Changed
+
+- Metric instruments have been renamed to match the (feature-frozen) metric API specification:
+ - ValueRecorder becomes Histogram
+ - ValueObserver becomes Gauge
+ - SumObserver becomes CounterObserver
+ - UpDownSumObserver becomes UpDownCounterObserver
+ The API exported from this project is still considered experimental. (#2202)
+- The Metric SDK/API implementation type `InstrumentKind` moves into the `sdkapi` sub-package. (#2091)
+- The Metrics SDK export record no longer contains a Resource pointer; the SDK `"go.opentelemetry.io/otel/sdk/trace/export/metric".Exporter.Export()` function for push-based exporters now takes a single Resource argument, and pull-based exporters use `"go.opentelemetry.io/otel/sdk/metric/controller/basic".Controller.Resource()`. (#2120)
+- The JSON output of the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter is now harmonized such that the output is "plain" JSON objects after each other of the form `{ ... } { ... } { ... }`. Earlier, the JSON objects describing a span were wrapped in a slice for each `Exporter.ExportSpans` call, like `[ { ... } ][ { ... } { ... } ]`. Outputting JSON objects directly after one another is consistent with JSON loggers, and a bit easier to parse and read. (#2196)
+- Update the `NewTracerConfig`, `NewSpanStartConfig`, `NewSpanEndConfig`, and `NewEventConfig` functions in the `go.opentelemetry.io/otel/trace` package to return their respective configurations as structs instead of pointers to the struct. (#2212)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/bridge/opencensus/utils` package is deprecated.
+ All functionality from this package now exists in the `go.opentelemetry.io/otel/bridge/opencensus` package.
+ The functions from that package should be used instead. (#2166)
+- The `"go.opentelemetry.io/otel/attribute".Array` function and the related `ARRAY` value type are deprecated.
+ Use the typed `*Slice` functions and types added to the package instead. (#2162)
+- The `"go.opentelemetry.io/otel/attribute".Any` function is deprecated.
+ Use the typed functions instead. (#2181)
+- The `go.opentelemetry.io/otel/oteltest` package is deprecated.
+ The `"go.opentelemetry.io/otel/sdk/trace/tracetest".SpanRecorder` can be registered with the default SDK (`go.opentelemetry.io/otel/sdk/trace`) as a `SpanProcessor` and used as a replacement for this deprecated package. (#2188)
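+
+A minimal sketch of the typed constructors that replace these deprecated helpers (the key names are illustrative only):
+
+```go
+package main
+
+import "go.opentelemetry.io/otel/attribute"
+
+func main() {
+	// Typed constructors replace the deprecated Any/Array helpers.
+	attrs := []attribute.KeyValue{
+		attribute.String("user.name", "alice"),
+		attribute.Int("retry.count", 3),
+		attribute.Float64Slice("scores", []float64{0.9, 0.7}),
+		attribute.BoolSlice("checks", []bool{true, false}),
+	}
+	_ = attrs
+}
+```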
+
+### Removed
+
+- Removed the metrics test package `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#2105)
+
+### Fixed
+
+- The `fromEnv` detector no longer throws an error when the `OTEL_RESOURCE_ATTRIBUTES` environment variable is not set or empty. (#2138)
+- Setting the global `ErrorHandler` with `"go.opentelemetry.io/otel".SetErrorHandler` multiple times is now supported. (#2160, #2140)
+- The `"go.opentelemetry.io/otel/attribute".Any` function now supports `int32` values. (#2169)
+- Multiple calls to `"go.opentelemetry.io/otel/sdk/metric/controller/basic".WithResource()` are handled correctly, and when no resources are provided `"go.opentelemetry.io/otel/sdk/resource".Default()` is used. (#2120)
+- The `WithoutTimestamps` option for the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter causes the exporter to correctly omit timestamps. (#2195)
+- Fixed typos in `resources.go`. (#2201)
+
+## [1.0.0-RC2] - 2021-07-26
+
+### Added
+
+- Added the `WithOSDescription` resource configuration option to set the OS (Operating System) description resource attribute (`os.description`). (#1840)
+- Added the `WithOS` resource configuration option to set all OS (Operating System) resource attributes at once. (#1840)
+- Added the `WithRetry` option to the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package.
+ This option is a replacement for the removed `WithMaxAttempts` and `WithBackoff` options. (#2095)
+- Added the `LinkFromContext` API to return a `Link` that encapsulates the `SpanContext` from the provided context along with attributes. (#2115)
+- Added a new `Link` type under the SDK `otel/sdk/trace` package that counts the number of attributes that were dropped for surpassing the `AttributePerLinkCountLimit` configured in the Span's `SpanLimits`.
+ This new type replaces the equal-named API `Link` type found in the `otel/trace` package for most usages within the SDK.
+ For example, instances of this type are now returned by the `Links()` function of `ReadOnlySpan`s provided in places like the `OnEnd` function of `SpanProcessor` implementations. (#2118)
+- Added the `SpanRecorder` type to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package.
+ This type can be used with the default SDK as a `SpanProcessor` during testing. (#2132)
+
+### Changed
+
+- The `SpanModels` function is now exported from the `go.opentelemetry.io/otel/exporters/zipkin` package to convert OpenTelemetry spans into Zipkin model spans. (#2027)
+- Rename the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".RetrySettings` to `RetryConfig`. (#2095)
+
+### Deprecated
+
+- The `TextMapCarrier` and `TextMapPropagator` from the `go.opentelemetry.io/otel/oteltest` package and their associated creation functions (`TextMapCarrier`, `NewTextMapPropagator`) are deprecated. (#2114)
+- The `Harness` type from the `go.opentelemetry.io/otel/oteltest` package and its associated creation function, `NewHarness`, are deprecated and will be removed in the next release. (#2123)
+- The `TraceStateFromKeyValues` function from the `go.opentelemetry.io/otel/oteltest` package is deprecated.
+ Use the `trace.ParseTraceState` function instead. (#2122)
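+
+A minimal sketch of the replacement `ParseTraceState` function (the vendor keys are illustrative only):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"go.opentelemetry.io/otel/trace"
+)
+
+func main() {
+	ts, err := trace.ParseTraceState("vendor1=value1,vendor2=value2")
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(ts.Len())          // number of list-members
+	fmt.Println(ts.Get("vendor1")) // "value1"
+}
+```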
+
+### Removed
+
+- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/jaeger`. (#2020)
+- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/zipkin`. (#2020)
+- Removed the `"go.opentelemetry.io/otel/sdk/resource".WithBuiltinDetectors` function.
+ The explicit `With*` options for every built-in detector should be used instead. (#2026, #2097)
+- Removed the `WithMaxAttempts` and `WithBackoff` options from the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package.
+ The retry logic of the package has been updated to match the `otlptracegrpc` package, and accordingly a `WithRetry` option is added that should be used instead. (#2095)
+- Removed the `DroppedAttributeCount` field from the `otel/trace.Link` struct. (#2118)
+
+### Fixed
+
+- When using `WithNewRoot`, don't use the parent context for making sampling decisions. (#2032)
+- `oteltest.Tracer` now creates a valid `SpanContext` when using `WithNewRoot`. (#2073)
+- The OS type detector now sets the correct `dragonflybsd` value for DragonFly BSD. (#2092)
+- The OTel span status is correctly transformed into the OTLP status in the `go.opentelemetry.io/otel/exporters/otlp/otlptrace` package.
+ This fix will by default set the status to `Unset` if it is not explicitly set to `Ok` or `Error`. (#2099, #2102)
+- The `Inject` method for the `"go.opentelemetry.io/otel/propagation".TraceContext` type no longer injects empty `tracestate` values. (#2108)
+- Use `6831` as the default Jaeger agent port instead of `6832`. (#2131)
+
+## [Experimental Metrics v0.22.0] - 2021-07-19
+
+### Added
+
+- Adds HTTP support for the OTLP metrics exporter. (#2022)
+
+### Removed
+
+- Removed the deprecated package `go.opentelemetry.io/otel/exporters/metric/prometheus`. (#2020)
+
+## [1.0.0-RC1] / 0.21.0 - 2021-06-18
+
+With this release we are introducing a split in module versions. The tracing API and SDK are entering the `v1.0.0` Release Candidate phase with `v1.0.0-RC1`
+while the experimental metrics API and SDK continue with `v0.x` releases at `v0.21.0`. Modules at major version 1 or greater will not depend on modules
+with major version 0.
+
+### Added
+
+- Adds the `otlpgrpc.WithRetry` option for configuring the retry policy for transient errors on the OTLP/gRPC exporter. (#1832)
+ - The following status codes are defined as transient errors:
+ | gRPC Status Code | Description |
+ | ---------------- | ----------- |
+ | 1 | Cancelled |
+ | 4 | Deadline Exceeded |
+ | 8 | Resource Exhausted |
+ | 10 | Aborted |
+ | 11 | Out of Range |
+ | 14 | Unavailable |
+ | 15 | Data Loss |
+- Added the `Status` type to the `go.opentelemetry.io/otel/sdk/trace` package to represent the status of a span. (#1874)
+- Added the `SpanStub` type and its associated functions to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package.
+ This type can be used as a testing replacement for the `SpanSnapshot` that was removed from the `go.opentelemetry.io/otel/sdk/trace` package. (#1873)
+- Adds support for a scheme in `OTEL_EXPORTER_OTLP_ENDPOINT` according to the spec. (#1886)
+- Adds the `trace.WithSchemaURL` option for configuring the tracer with a Schema URL. (#1889)
+- Added an example of using OpenTelemetry Go as a trace context forwarder. (#1912)
+- `ParseTraceState` is added to the `go.opentelemetry.io/otel/trace` package.
+ It can be used to decode a `TraceState` from a `tracestate` header string value. (#1937)
+- Added the `Len` method to the `TraceState` type in the `go.opentelemetry.io/otel/trace` package.
+ This method returns the number of list-members the `TraceState` holds. (#1937)
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace` that defines a trace exporter that uses an `otlptrace.Client` to send data.
+ Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` implementing a gRPC `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to set up and install an `otlptrace.Exporter` in tracing. (#1922)
+- Added `Baggage`, `Member`, and `Property` types to the `go.opentelemetry.io/otel/baggage` package along with their related functions. (#1967)
+- Added `ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext` functions to the `go.opentelemetry.io/otel/baggage` package.
+ These functions replace the `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions from that package and directly work with the new `Baggage` type. (#1967)
+- The `OTEL_SERVICE_NAME` environment variable is the preferred source for `service.name`, used by the environment resource detector if a service name is present both there and in `OTEL_RESOURCE_ATTRIBUTES`. (#1969)
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` implementing an HTTP `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to set up and install an `otlptrace.Exporter` in tracing. (#1963)
+- Changes `go.opentelemetry.io/otel/sdk/resource.NewWithAttributes` to require a schema URL. The old function is still available as `resource.NewSchemaless`. This is a breaking change. (#1938)
+- Several built-in resource detectors now correctly populate the schema URL. (#1938)
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` that defines a metrics exporter that uses an `otlpmetric.Client` to send data.
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` implementing a gRPC `otlpmetric.Client` and offers convenience functions, `New` and `NewUnstarted`, to create an `otlpmetric.Exporter`. (#1991)
+- Added the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter. (#2005)
+- Added the `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` exporter. (#2005)
+- Added a `TracerProvider()` method to the `"go.opentelemetry.io/otel/trace".Span` interface. This can be used to obtain a `TracerProvider` from a given span that utilizes the same trace processing pipeline. (#2009)
+
+### Changed
+
+- Make `NewSplitDriver` from `go.opentelemetry.io/otel/exporters/otlp` take variadic arguments instead of a `SplitConfig` item.
+ `NewSplitDriver` now automatically implements an internal `noopDriver` for `SplitConfig` fields that are not initialized. (#1798)
+- `resource.New()` now creates a Resource without builtin detectors. Previous behavior is now achieved by using the `WithBuiltinDetectors` Option. (#1810)
+- Move the `Event` type from the `go.opentelemetry.io/otel` package to the `go.opentelemetry.io/otel/sdk/trace` package. (#1846)
+- CI builds validate against the last two versions of Go, dropping 1.14 and adding 1.16. (#1865)
+- BatchSpanProcessor now reports export failures when calling the `ForceFlush()` method. (#1860)
+- `Set.Encoded(Encoder)` no longer caches the result of an encoding. (#1855)
+- Renamed `CloudZoneKey` to `CloudAvailabilityZoneKey` in Resource semantic conventions according to spec.
(#1871)
+- The `StatusCode` and `StatusMessage` methods of the `ReadOnlySpan` interface and the `Span` produced by the `go.opentelemetry.io/otel/sdk/trace` package have been replaced with a single `Status` method.
+ This method returns the status of a span using the new `Status` type. (#1874)
+- Updated the `ExportSpans` method of the `SpanExporter` interface type to accept `ReadOnlySpan`s instead of the removed `SpanSnapshot`.
+ This brings the export interface into compliance with the specification in that it now accepts an explicitly immutable type instead of just an implied one. (#1873)
+- Unembed `SpanContext` in `Link`. (#1877)
+- Generate semantic conventions from the specification YAML. (#1891)
+- Spans created by the global `Tracer` obtained from `go.opentelemetry.io/otel`, prior to a functioning `TracerProvider` being set, now propagate the span context from their parent if one exists. (#1901)
+- The `"go.opentelemetry.io/otel".Tracer` function now accepts tracer options. (#1902)
+- Move the `go.opentelemetry.io/otel/unit` package to `go.opentelemetry.io/otel/metric/unit`. (#1903)
+- Changed `go.opentelemetry.io/otel/trace.TracerConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Changed `go.opentelemetry.io/otel/trace.SpanConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Changed `span.End()` to only accept Options that are allowed at `End()`. (#1921)
+- Changed `go.opentelemetry.io/otel/metric.InstrumentConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Changed `go.opentelemetry.io/otel/metric.MeterConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Refactored option types according to the contribution style guide. (#1882)
+- Move the `go.opentelemetry.io/otel/trace.TraceStateFromKeyValues` function to the `go.opentelemetry.io/otel/oteltest` package.
+ This function is preserved for testing purposes where it may be useful to create a `TraceState` from `attribute.KeyValue`s, but it is not intended for production use.
+ The new `ParseTraceState` function should be used to create a `TraceState`. (#1931)
+- Updated the `MarshalJSON` method of the `go.opentelemetry.io/otel/trace.TraceState` type to marshal the type into the string representation of the `TraceState`. (#1931)
+- The `TraceState.Delete` method from the `go.opentelemetry.io/otel/trace` package no longer returns an error in addition to a `TraceState`. (#1931)
+- Updated the `Get` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931)
+- Updated the `Insert` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a pair of `string`s instead of an `attribute.KeyValue` type. (#1931)
+- Updated the `Delete` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/stdout` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/metric/prometheus` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/otlp` package.
(#1985)
+- Renamed `NewUnstartedExporter` to `NewUnstarted` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985)
+- The `go.opentelemetry.io/otel/semconv` package has been moved to `go.opentelemetry.io/otel/semconv/v1.4.0` to allow for multiple [telemetry schema](https://github.com/open-telemetry/oteps/blob/main/text/0152-telemetry-schemas.md) versions to be used concurrently. (#1987)
+- Metrics test helpers in `go.opentelemetry.io/otel/oteltest` have been moved to `go.opentelemetry.io/otel/metric/metrictest`. (#1988)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/exporters/metric/prometheus` package is deprecated; use `go.opentelemetry.io/otel/exporters/prometheus` instead. (#1993)
+- The `go.opentelemetry.io/otel/exporters/trace/jaeger` package is deprecated; use `go.opentelemetry.io/otel/exporters/jaeger` instead. (#1993)
+- The `go.opentelemetry.io/otel/exporters/trace/zipkin` package is deprecated; use `go.opentelemetry.io/otel/exporters/zipkin` instead. (#1993)
+
+### Removed
+
+- Removed `resource.WithoutBuiltin()`. Use `resource.New()`. (#1810)
+- Unexported the types `resource.FromEnv`, `resource.Host`, and `resource.TelemetrySDK`; use the corresponding `With*()` options to add them individually. (#1810)
+- Removed the `Tracer` and `IsRecording` methods from the `ReadOnlySpan` interface in the `go.opentelemetry.io/otel/sdk/trace` package.
+ The `Tracer` method is not required to be included in this interface, and given the mutable nature of the tracer that is associated with a span, this method is not appropriate.
+ The `IsRecording` method returns whether the span is recording or not.
+ A read-only span value does not need to know if updates to it will be recorded or not.
+ By definition, it cannot be updated, so there is no point in communicating if an update is recorded. (#1873)
+- Removed the `SpanSnapshot` type from the `go.opentelemetry.io/otel/sdk/trace` package.
+ The use of this type has been replaced with the use of the explicitly immutable `ReadOnlySpan` type.
+ When a concrete representation of a read-only span is needed for testing, the newly added `SpanStub` in the `go.opentelemetry.io/otel/sdk/trace/tracetest` package should be used. (#1873)
+- Removed the `Tracer` method from the `Span` interface in the `go.opentelemetry.io/otel/trace` package.
+ Using the same tracer that created a span introduces the error where an instrumentation library's `Tracer` is used by other code instead of its own.
+ The `"go.opentelemetry.io/otel".Tracer` function or a `TracerProvider` should be used to acquire a library-specific `Tracer` instead. (#1900)
+ - The `TracerProvider()` method on the `Span` interface may also be used to obtain a `TracerProvider` using the same trace processing pipeline. (#2009)
+- The `http.url` attribute generated by `HTTPClientAttributesFromHTTPRequest` will no longer include username or password information. (#1919)
+- Removed the `IsEmpty` method of the `TraceState` type in the `go.opentelemetry.io/otel/trace` package in favor of using the added `TraceState.Len` method. (#1931)
+- Removed the `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions in the `go.opentelemetry.io/otel/baggage` package.
+ Handling of baggage is now done using the added `Baggage` type and related context functions (`ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext`) in that package. (#1967)
+- The `InstallNewPipeline` and `NewExportPipeline` creation functions in all the exporters (prometheus, otlp, stdout, jaeger, and zipkin) have been removed.
+ These functions were deemed premature attempts to provide convenience that did not achieve this aim. (#1985)
+- The `go.opentelemetry.io/otel/exporters/otlp` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace` instead. (#1990)
+- The `go.opentelemetry.io/otel/exporters/stdout` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` or `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` instead. (#2005)
+
+### Fixed
+
+- Only report errors from the `"go.opentelemetry.io/otel/sdk/resource".Environment` function when they are not `nil`. (#1850, #1851)
+- The `Shutdown` method of the simple `SpanProcessor` in the `go.opentelemetry.io/otel/sdk/trace` package now honors the context deadline or cancellation. (#1616, #1856)
+- BatchSpanProcessor now drops span batches that failed to be exported. (#1860)
+- Use `http://localhost:14268/api/traces` as the default Jaeger collector endpoint instead of `http://localhost:14250`. (#1898)
+- Allow trailing and leading whitespace in the parsing of a `tracestate` header. (#1931)
+- Add logic to determine if the channel is closed, fixing a Jaeger exporter test panic from closing an already-closed channel. (#1870, #1973)
+- Avoid transport security when the OTLP endpoint is a Unix socket. (#2001)
+
+## [0.20.0] - 2021-04-23
+
+### Added
+
+- The OTLP exporter now has two new convenience functions, `NewExportPipeline` and `InstallNewPipeline`, which set up and install the exporter in tracing and metrics pipelines. (#1373)
+- Adds semantic conventions for exceptions. (#1492)
+- Added Jaeger Environment variables: `OTEL_EXPORTER_JAEGER_AGENT_HOST`, `OTEL_EXPORTER_JAEGER_AGENT_PORT`.
+ These environment variables can be used to override the Jaeger agent hostname and port. (#1752)
+- Option `ExportTimeout` was added to the batch span processor. (#1755)
+- `trace.TraceFlags` is now a defined type over `byte`, and `WithSampled(bool) TraceFlags` and `IsSampled() bool` methods have been added to it. (#1770)
+- The `Event` and `Link` struct types from the `go.opentelemetry.io/otel` package now include a `DroppedAttributeCount` field to record the number of attributes that were not recorded due to configured limits being reached. (#1771)
+- The Jaeger exporter now reports dropped attributes for a Span event in the exported log. (#1771)
+- Adds a test to check that the BatchSpanProcessor ignores `OnEnd` and `ForceFlush` post `Shutdown`. (#1772)
+- Extract resource attributes from the `OTEL_RESOURCE_ATTRIBUTES` environment variable and merge them with the `resource.Default` resource as well as resources provided to the `TracerProvider` and metric `Controller`. (#1785)
+- Added the `WithOSType` resource configuration option to set the OS (Operating System) type resource attribute (`os.type`). (#1788)
+- Added `WithProcess*` resource configuration options to set Process resource attributes. (#1788)
+ - `process.pid`
+ - `process.executable.name`
+ - `process.executable.path`
+ - `process.command_args`
+ - `process.owner`
+ - `process.runtime.name`
+ - `process.runtime.version`
+ - `process.runtime.description`
+- Adds `k8s.node.name` and `k8s.node.uid` attribute keys to the `semconv` package. (#1789)
+- Added support for configuring OTLP/HTTP and OTLP/gRPC Endpoints, TLS Certificates, Headers, Compression and Timeout via Environment Variables.
(#1758, #1769, #1811)
+ - `OTEL_EXPORTER_OTLP_ENDPOINT`
+ - `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT`
+ - `OTEL_EXPORTER_OTLP_METRICS_ENDPOINT`
+ - `OTEL_EXPORTER_OTLP_HEADERS`
+ - `OTEL_EXPORTER_OTLP_TRACES_HEADERS`
+ - `OTEL_EXPORTER_OTLP_METRICS_HEADERS`
+ - `OTEL_EXPORTER_OTLP_COMPRESSION`
+ - `OTEL_EXPORTER_OTLP_TRACES_COMPRESSION`
+ - `OTEL_EXPORTER_OTLP_METRICS_COMPRESSION`
+ - `OTEL_EXPORTER_OTLP_TIMEOUT`
+ - `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT`
+ - `OTEL_EXPORTER_OTLP_METRICS_TIMEOUT`
+ - `OTEL_EXPORTER_OTLP_CERTIFICATE`
+ - `OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE`
+ - `OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE`
+- Adds the `otlpgrpc.WithTimeout` option for configuring the timeout of the OTLP/gRPC exporter. (#1821)
+- Adds the `jaeger.WithMaxPacketSize` option for configuring the maximum UDP packet size used when connecting to the Jaeger agent. (#1853)
+
+### Fixed
+
+- The `Span.IsRecording` implementation from `go.opentelemetry.io/otel/sdk/trace` always returns false when the span is not being sampled. (#1750)
+- The Jaeger exporter now correctly sets tags for the Span status code and message.
+ This means it uses the correct tag keys (`"otel.status_code"`, `"otel.status_description"`) and does not set the status message as a tag unless it is set on the span. (#1761)
+- The Jaeger exporter now correctly records Span events' names using the `"event"` key for a tag.
+ Additionally, this tag is overridden, as specified in the OTel specification, if the event contains an attribute with that key. (#1768)
+- Zipkin Exporter: Ensure mapping between OTel and Zipkin span data complies with the specification. (#1688)
+- Fixed a typo in the default service name in the Jaeger Exporter. (#1797)
+- Fix flaky OTLP for the reconnection of the client connection. (#1527, #1814)
+- Fix the Jaeger exporter's dropping of span batches that exceed the UDP packet size limit.
+ Instead, the exporter now splits the batch into smaller sendable batches. (#1828)
+
+### Changed
+
+- Span `RecordError` now records an `exception` event to comply with the semantic convention specification. (#1492)
+- The Jaeger exporter was updated to use Thrift v0.14.1. (#1712)
+- Migrate from using an internally built and maintained version of OTLP to the one hosted at `go.opentelemetry.io/proto/otlp`. (#1713)
+- Migrate from using `github.com/gogo/protobuf` to `google.golang.org/protobuf` to match `go.opentelemetry.io/proto/otlp`. (#1713)
+- The storage of a local or remote Span in a `context.Context` using its SpanContext is unified to store just the current Span.
+ The Span's SpanContext can now self-identify as being remote or not.
+ This means that `"go.opentelemetry.io/otel/trace".ContextWithRemoteSpanContext` will now overwrite any existing current Span, not just existing remote Spans, and make it the current Span in a `context.Context`. (#1731)
+- Improve OTLP/gRPC exporter connection errors. (#1737)
+- Information about a parent span context in a `"go.opentelemetry.io/otel/export/trace".SpanSnapshot` is unified in a new `Parent` field.
+ The existing `ParentSpanID` and `HasRemoteParent` fields are removed in favor of this. (#1748)
+- The `ParentContext` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is updated to hold a `context.Context` containing the parent span.
+ This changes it to make `SamplingParameters` conform with the OpenTelemetry specification.
(#1749)
+- Updated Jaeger Environment Variables: `JAEGER_ENDPOINT`, `JAEGER_USER`, `JAEGER_PASSWORD`
+ to `OTEL_EXPORTER_JAEGER_ENDPOINT`, `OTEL_EXPORTER_JAEGER_USER`, `OTEL_EXPORTER_JAEGER_PASSWORD` in compliance with the OTel specification. (#1752)
+- Modify `BatchSpanProcessor.ForceFlush` to abort after timeout/cancellation. (#1757)
+- The `DroppedAttributeCount` field of the `Span` in the `go.opentelemetry.io/otel` package now only represents the number of attributes dropped for the span itself.
+ It is no longer a conglomerate of itself, events, and link attributes that have been dropped. (#1771)
+- Make `ExportSpans` in the Jaeger Exporter honor the context deadline. (#1773)
+- Modify the Zipkin Exporter's default service name to use the default resource's serviceName instead of an empty value. (#1777)
+- The `go.opentelemetry.io/otel/sdk/export/trace` package is merged into the `go.opentelemetry.io/otel/sdk/trace` package. (#1778)
+- The `prometheus.InstallNewPipeline` example is moved from a comment to an example test. (#1796)
+- The convenience functions for the stdout exporter have been updated to return the `TracerProvider` implementation and enable the shutdown of the exporter. (#1800)
+- Replace the flush function returned from the Jaeger exporter's convenience creation functions (`InstallNewPipeline` and `NewExportPipeline`) with the `TracerProvider` implementation they create.
+ This enables the caller to shut down and flush using the related `TracerProvider` methods. (#1822)
+- Updated the Jaeger exporter to have a default endpoint, `http://localhost:14250`, for the collector. (#1824)
+- Changed the function `WithCollectorEndpoint` in the Jaeger exporter to no longer accept an endpoint as an argument.
+ The endpoint can be passed with the `CollectorEndpointOption` using the `WithEndpoint` function or by setting the `OTEL_EXPORTER_JAEGER_ENDPOINT` environment variable value appropriately. (#1824)
+- The Jaeger exporter no longer batches exported spans itself; instead it relies on the SDK's `BatchSpanProcessor` for this functionality. (#1830)
+- The Jaeger exporter creation functions (`NewRawExporter`, `NewExportPipeline`, and `InstallNewPipeline`) no longer accept the removed `Option` type as a variadic argument. (#1830)
+
+### Removed
+
+- Removed Jaeger Environment variables: `JAEGER_SERVICE_NAME`, `JAEGER_DISABLED`, `JAEGER_TAGS`.
+ These environment variables will no longer be used to override values of the Jaeger exporter. (#1752)
+- No longer set the links for a `Span` in `go.opentelemetry.io/otel/sdk/trace` that is configured to be a new root.
+ This is unspecified behavior that the OpenTelemetry community plans to standardize in the future.
+ To prevent backwards-incompatible changes when it is specified, these links are removed. (#1726)
+- Setting the error status while recording an error with a Span from the oteltest package. (#1729)
+- The concept of a remote and local Span stored in a context is unified to just the current Span.
+ Because of this, `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed.
+ Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContext` can be used to return the current Span.
+ If needed, that Span's `SpanContext.IsRemote()` can then be used to determine if it is remote or not. (#1731)
+- The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed.
+ This field is redundant with the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field.
(#1749)
+- The `trace.FlagsDebug` and `trace.FlagsDeferred` constants have been removed and will be localized to the B3 propagator. (#1770)
+- Remove the `Process` configuration, the `WithProcessFromEnv` and `ProcessFromEnv` functions, and the `Process` type from the Jaeger exporter package.
+ The information that could be configured in the `Process` struct should be configured in a `Resource` instead. (#1776, #1804)
+- Remove the `WithDisabled` option from the Jaeger exporter.
+ To disable the exporter, unregister it from the `TracerProvider` or use a no-operation `TracerProvider`. (#1806)
+- Removed the functions `CollectorEndpointFromEnv` and `WithCollectorEndpointOptionFromEnv` from the Jaeger exporter.
+ These functions for retrieving specific environment variable values are redundant with other internal functions and
+ are not intended for end-user use. (#1824)
+- Removed the Jaeger exporter `WithSDKOptions` `Option`.
+ This option was used to set SDK options for the exporter creation convenience functions.
+ These functions are provided as a way to easily set up or install the exporter with what are deemed reasonable SDK settings for common use cases.
+ If the SDK needs to be configured differently, the `NewRawExporter` function and direct setup of the SDK with the desired settings should be used. (#1825)
+- The `WithBufferMaxCount` and `WithBatchMaxCount` `Option`s from the Jaeger exporter are removed.
+ The exporter no longer batches exports, instead relying on the SDK's `BatchSpanProcessor` for this functionality. (#1830)
+- The Jaeger exporter `Option` type is removed.
+ The type is no longer used by the exporter to configure anything.
+ All the previous configurations these options provided were duplicates of SDK configuration.
+ They have been removed in favor of the SDK configuration, focusing the exporter configuration on only the endpoints it will send telemetry to. (#1830)
+
+## [0.19.0] - 2021-03-18
+
+### Added
+
+- Added a `Marshaler` config option to `otlphttp` to enable OTLP over JSON or protobuf. (#1586)
+- A `ForceFlush` method to the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` to flush all registered `SpanProcessor`s. (#1608)
+- Added `WithSampler` and `WithSpanLimits` to the tracer provider. (#1633, #1702)
+- `"go.opentelemetry.io/otel/trace".SpanContext` now has a `remote` property, and an `IsRemote()` predicate, that is true when the `SpanContext` has been extracted from remote context data. (#1701)
+- A `Valid` method to the `"go.opentelemetry.io/otel/attribute".KeyValue` type. (#1703)
+
+### Changed
+
+- `trace.SpanContext` is now immutable and has no exported fields. (#1573)
+ - `trace.NewSpanContext()` can be used in conjunction with the `trace.SpanContextConfig` struct to initialize a new `SpanContext` where all values are known.
+- Update the `ForceFlush` method signature of the `"go.opentelemetry.io/otel/sdk/trace".SpanProcessor` to accept a `context.Context` and return an error. (#1608)
+- Update the `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` to return an error on shutdown failure. (#1608)
+- The SimpleSpanProcessor will now shut down the enclosed `SpanExporter` and gracefully ignore subsequent calls to `OnEnd` after `Shutdown` is called. (#1612)
+- `"go.opentelemetry.io/sdk/metric/controller.basic".WithPusher` is replaced with `WithExporter` to provide consistent naming across the project. (#1656)
+- Added a non-empty string check for trace `Attribute` keys. (#1659)
+- Add `description` to SpanStatus only when `StatusCode` is set to error.
(#1662)
+- The Jaeger exporter falls back to `resource.Default`'s `service.name` if the exported Span does not have one. (#1673)
+- The Jaeger exporter populates Jaeger's Span Process from Resource. (#1673)
+- Renamed the `LabelSet` method of `"go.opentelemetry.io/otel/sdk/resource".Resource` to `Set`. (#1692)
+- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in the `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1693)
+- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in the `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1693)
+
+### Removed
+
+- Removed the `serviceName` parameter from the Zipkin exporter; the resource is used instead. (#1549)
+- Removed `WithConfig` from the tracer provider to avoid overriding configuration. (#1633)
+- Removed the exported `SimpleSpanProcessor` and `BatchSpanProcessor` structs.
+ These are now returned as a `SpanProcessor` interface from their respective constructors. (#1638)
+- Removed `WithRecord()` from `trace.SpanOption` when creating a span. (#1660)
+- Removed setting status to `Error` while recording an error as a span event in `RecordError`. (#1663)
+- Removed the `jaeger.WithProcess` configuration option. (#1673)
+- Removed the `ApplyConfig` method from `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` and the now unneeded `Config` struct. (#1693)
+
+### Fixed
+
+- Jaeger Exporter: Ensure mapping between OTel and Jaeger span data complies with the specification. (#1626)
+- `SamplingResult.TraceState` is correctly propagated to a newly created span's `SpanContext`. (#1655)
+- The `otel-collector` example now correctly flushes metric events prior to shutting down the exporter. (#1678)
+- Do not set the span status message in `SpanStatusFromHTTPStatusCode` if it can be inferred from `http.status_code`. (#1681)
+- Fixed synchronization issues in the global trace delegate implementation. (#1686)
+- Reduced excess memory usage by the global `TracerProvider`. (#1687)
+
+## [0.18.0] - 2021-03-03
+
+### Added
+
+- Added `resource.Default()` for use with meter and tracer providers. (#1507)
+- `AttributePerEventCountLimit` and `AttributePerLinkCountLimit` for `SpanLimits`. (#1535)
+- Added the `Keys()` method to `propagation.TextMapCarrier` and `propagation.HeaderCarrier` to adapt `http.Header` to this interface. (#1544)
+- Added `code` attributes to the `go.opentelemetry.io/otel/semconv` package. (#1558)
+- A compatibility testing suite in the CI system for the following systems. (#1567)
+ | OS | Go Version | Architecture |
+ | ------- | ---------- | ------------ |
+ | Ubuntu | 1.15 | amd64 |
+ | Ubuntu | 1.14 | amd64 |
+ | Ubuntu | 1.15 | 386 |
+ | Ubuntu | 1.14 | 386 |
+ | MacOS | 1.15 | amd64 |
+ | MacOS | 1.14 | amd64 |
+ | Windows | 1.15 | amd64 |
+ | Windows | 1.14 | amd64 |
+ | Windows | 1.15 | 386 |
+ | Windows | 1.14 | 386 |
+
+### Changed
+
+- Replaced the `oteltest.SpanRecorder` interface with its existing implementation, `StandardSpanRecorder`. (#1542)
+- Default span limit values to 128. (#1535)
+- Rename `MaxEventsPerSpan`, `MaxAttributesPerSpan` and `MaxLinksPerSpan` to `EventCountLimit`, `AttributeCountLimit` and `LinkCountLimit`, and move these fields into `SpanLimits`. (#1535)
+- Renamed the `otel/label` package to `otel/attribute`. (#1541)
+- Vendor the Jaeger exporter's dependency on Apache Thrift. (#1551)
+- Parallelize the CI linting and testing. (#1567)
+- Stagger timestamps in exact aggregator tests.
+- Changed all examples to use `WithBatchTimeout(5 * time.Second)` rather than `WithBatchTimeout(5)`. (#1621) See the sketch below.
+- Prevent end-users from implementing some interfaces (#1575)
+
+  ```
+  "otel/exporters/otlp/otlphttp".Option
+  "otel/exporters/stdout".Option
+  "otel/oteltest".Option
+  "otel/trace".TracerOption
+  "otel/trace".SpanOption
+  "otel/trace".EventOption
+  "otel/trace".LifeCycleOption
+  "otel/trace".InstrumentationOption
+  "otel/sdk/resource".Option
+  "otel/sdk/trace".ParentBasedSamplerOption
+  "otel/sdk/trace".ReadOnlySpan
+  "otel/sdk/trace".ReadWriteSpan
+  ```
+
+### Removed
+
+- Removed attempt to resample spans upon changing the span name with `span.SetName()`. (#1545)
+- The `test-benchmark` is no longer a dependency of the `precommit` make target. (#1567)
+- Removed the `test-386` make target.
+  This was replaced with a full compatibility testing suite (i.e. multi OS/arch) in the CI system. (#1567)
+
+### Fixed
+
+- The sequential timing check of timestamps in the stdout exporter is now set up explicitly to be sequential (#1571). (#1572)
+- Windows build of Jaeger tests now compiles with OS-specific functions (#1576). (#1577)
+- The sequential timing check of timestamps of go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue is now set up explicitly to be sequential (#1578). (#1579)
+- Validate tracestate header keys with vendors according to the W3C TraceContext specification (#1475). (#1581)
+- The OTLP exporter includes related labels for translations of a GaugeArray (#1563). (#1570)
+
+## [0.17.0] - 2021-02-12
+
+### Changed
+
+- Rename project default branch from `master` to `main`. (#1505)
+- Reverse order in which `Resource` attributes are merged, per change in spec. (#1501)
+- Add tooling to maintain "replace" directives in go.mod files automatically. (#1528)
+- Create new modules: otel/metric, otel/trace, otel/oteltest, otel/sdk/export/metric, otel/sdk/metric (#1528)
+- Move metric-related public global APIs from otel to otel/metric/global. (#1528)
+
+### Fixed
+
+- Fixed otlpgrpc reconnection issue.
+- The example code in the README.md of `go.opentelemetry.io/otel/exporters/otlp` is moved to a compiled example test and uses the new `WithAddress` instead of `WithEndpoint`. (#1513)
+- The otel-collector example now uses the default OTLP receiver port of the collector.
+
+## [0.16.0] - 2021-01-13
+
+### Added
+
+- Add the `ReadOnlySpan` and `ReadWriteSpan` interfaces to provide better control for accessing span data. (#1360)
+- `NewGRPCDriver` function returns a `ProtocolDriver` that maintains a single gRPC connection to the collector. (#1369)
+- Added documentation about the project's versioning policy. (#1388)
+- Added `NewSplitDriver` for OTLP exporter that allows sending traces and metrics to different endpoints. (#1418)
+- Added CodeQL workflow to GitHub Actions (#1428)
+- Added Gosec workflow to GitHub Actions (#1429)
+- Add new HTTP driver for OTLP exporter in `exporters/otlp/otlphttp`. Currently, it only supports binary protobuf payloads. (#1420)
+- Add an OpenCensus exporter bridge. (#1444)
+
+### Changed
+
+- Rename `internal/testing` to `internal/internaltest`. (#1449)
+- Rename `export.SpanData` to `export.SpanSnapshot` and use it only for exporting spans. (#1360)
+- Store the parent's full `SpanContext` rather than just its span ID in the `span` struct. (#1360)
+- Improve span duration accuracy. (#1360)
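+
+For the `WithBatchTimeout` change noted in the 0.18.0 entries above (#1621), a minimal sketch of passing a `time.Duration` when wiring a batcher; `newTracerProvider` is a hypothetical helper and `exp` is any span exporter.
+
+```go
+package sketch
+
+import (
+	"time"
+
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+)
+
+// newTracerProvider batches exports with an explicit time.Duration timeout,
+// rather than the bare integer the old examples used (#1621).
+func newTracerProvider(exp sdktrace.SpanExporter) *sdktrace.TracerProvider {
+	return sdktrace.NewTracerProvider(
+		sdktrace.WithBatcher(exp, sdktrace.WithBatchTimeout(5*time.Second)),
+	)
+}
+```
+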
+- Migrated CI/CD from CircleCI to GitHub Actions (#1382)
+- Remove duplicate checkout from GitHub Actions workflow (#1407)
+- Metric `array` aggregator renamed `exact` to match its `aggregation.Kind` (#1412)
+- Metric `exact` aggregator includes per-point timestamps (#1412)
+- Metric stdout exporter uses MinMaxSumCount aggregator for ValueRecorder instruments (#1412)
+- `NewExporter` from `exporters/otlp` now takes a `ProtocolDriver` as a parameter. (#1369)
+- Many OTLP Exporter options became gRPC ProtocolDriver options. (#1369)
+- Unify the endpoint API related to the OTel exporter. (#1401)
+- Optimize metric histogram aggregator to re-use its slice of buckets. (#1435)
+- Metric aggregator Count() and histogram Bucket.Counts are consistently `uint64`. (#1430)
+- Histogram aggregator accepts functional options, uses default boundaries if none given. (#1434)
+- `SamplingResult` is now passed a `TraceState` from the parent `SpanContext` (#1432)
+- Moved gRPC driver for OTLP exporter to `exporters/otlp/otlpgrpc`. (#1420)
+- The `TraceContext` propagator now correctly propagates `TraceState` through the `SpanContext`. (#1447)
+- Metric Push and Pull Controller components are combined into a single "basic" Controller:
+  - `WithExporter()` and `Start()` to configure Push behavior
+  - `Start()` is optional; use `Collect()` and `ForEach()` for Pull behavior
+  - `Start()` and `Stop()` accept Context. (#1378)
+- The `Event` type is moved from the `otel/sdk/export/trace` package to the `otel/trace` API package. (#1452)
+
+### Removed
+
+- Remove `errUninitializedSpan` as its only usage is now obsolete. (#1360)
+- Remove Metric export functionality related to quantiles and summary data points: this is not specified (#1412)
+- Remove DDSketch metric aggregator; our intention is to re-introduce this as an option of the histogram aggregator after [new OTLP histogram data types](https://github.com/open-telemetry/opentelemetry-proto/pull/226) are released (#1412)
+
+### Fixed
+
+- `BatchSpanProcessor.Shutdown()` will now shut down the underlying `export.SpanExporter`. (#1443)
+
+## [0.15.0] - 2020-12-10
+
+### Added
+
+- The `WithIDGenerator` `TracerProviderOption` is added to the `go.opentelemetry.io/otel/trace` package to configure an `IDGenerator` for the `TracerProvider`. (#1363)
+
+### Changed
+
+- The Zipkin exporter now uses the Span status code to determine the span's error status. (#1328)
+- `NewExporter` and `Start` functions in `go.opentelemetry.io/otel/exporters/otlp` now receive `context.Context` as a first parameter. (#1357)
+- Move the OpenCensus example into `example` directory. (#1359)
+- Moved the SDK's `internal.IDGenerator` interface into the `sdk/trace` package to enable support for externally-defined ID generators. (#1363)
+- Bump `github.com/google/go-cmp` from 0.5.3 to 0.5.4 (#1374)
+- Bump `github.com/golangci/golangci-lint` in `/internal/tools` (#1375)
+
+### Fixed
+
+- Metric SDK `SumObserver` and `UpDownSumObserver` instruments correctness fixes. (#1381)
+
+## [0.14.0] - 2020-11-19
+
+### Added
+
+- An `EventOption` and the related `NewEventConfig` function are added to the `go.opentelemetry.io/otel` package to configure Span events. (#1254)
+- A `TextMapPropagator` and associated `TextMapCarrier` are added to the `go.opentelemetry.io/otel/oteltest` package to test `TextMap` type propagators and their use. (#1259)
+- `SpanContextFromContext` returns `SpanContext` from context. (#1255) See the sketch below.
+- `TraceState` has been added to `SpanContext`. (#1340)
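+
+A minimal sketch of the `SpanContextFromContext` addition (#1255) together with the new `TraceState` accessor (#1340); `inspect` is a hypothetical helper and assumes a span has already been started in `ctx`.
+
+```go
+package sketch
+
+import (
+	"context"
+	"fmt"
+
+	"go.opentelemetry.io/otel/trace"
+)
+
+// inspect prints identifying data for the span carried by ctx, if any.
+func inspect(ctx context.Context) {
+	sc := trace.SpanContextFromContext(ctx) // #1255
+	if sc.IsValid() {
+		fmt.Println(sc.TraceID(), sc.TraceState()) // TraceState added in #1340
+	}
+}
+```
+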
+- `DeploymentEnvironmentKey` added to `go.opentelemetry.io/otel/semconv` package. (#1323)
+- Add an OpenCensus to OpenTelemetry tracing bridge. (#1305)
+- Add a parent context argument to `SpanProcessor.OnStart` to follow the specification. (#1333)
+- Add missing tests for `sdk/trace/attributes_map.go`. (#1337)
+
+### Changed
+
+- Move the `go.opentelemetry.io/otel/api/trace` package into `go.opentelemetry.io/otel/trace` with the following changes. (#1229) (#1307)
+  - `ID` has been renamed to `TraceID`.
+  - `IDFromHex` has been renamed to `TraceIDFromHex`.
+  - `EmptySpanContext` is removed.
+- Move the `go.opentelemetry.io/otel/api/trace/tracetest` package into `go.opentelemetry.io/otel/oteltest`. (#1229)
+- OTLP Exporter updates:
+  - supports OTLP v0.6.0 (#1230, #1354)
+  - supports configurable aggregation temporality (default: Cumulative, optional: Stateless). (#1296)
+- The Sampler is now called on local child spans. (#1233)
+- The `Kind` type from the `go.opentelemetry.io/otel/api/metric` package was renamed to `InstrumentKind` to more specifically describe what it is and avoid semantic ambiguity. (#1240)
+- The `MetricKind` method of the `Descriptor` type in the `go.opentelemetry.io/otel/api/metric` package was renamed to `Descriptor.InstrumentKind`.
+  This matches the returned type and fixes misuse of the term metric. (#1240)
+- Move test harness from the `go.opentelemetry.io/otel/api/apitest` package into `go.opentelemetry.io/otel/oteltest`. (#1241)
+- Move the `go.opentelemetry.io/otel/api/metric/metrictest` package into `go.opentelemetry.io/otel/oteltest` as part of #964. (#1252)
+- Move the `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric` as part of #1303. (#1321)
+- Move the `go.opentelemetry.io/otel/api/metric/registry` package into `go.opentelemetry.io/otel/metric/registry` as a part of #1303. (#1316)
+- Move the `Number` type (together with related functions) from `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric/number` as a part of #1303. (#1316)
+- The function signature of the Span `AddEvent` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required name and a variable number of `EventOption`s. (#1254) See the sketch below.
+- The function signature of the Span `RecordError` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required error value and a variable number of `EventOption`s. (#1254)
+- Move the `go.opentelemetry.io/otel/api/global` package to `go.opentelemetry.io/otel`. (#1262) (#1330)
+- Move the `Version` function from `go.opentelemetry.io/otel/sdk` to `go.opentelemetry.io/otel`. (#1330)
+- Rename correlation context header from `"otcorrelations"` to `"baggage"` to match the OpenTelemetry specification. (#1267)
+- Fix `Code.UnmarshalJSON` to work with valid JSON only. (#1276)
+- The `resource.New()` method changes signature to support built-in attributes and functional options, including `telemetry.sdk.*` and
+  `host.name` semantic conventions; the former method is renamed `resource.NewWithAttributes`. (#1235)
+- The Prometheus exporter now exports non-monotonic counters (i.e. `UpDownCounter`s) as gauges. (#1210)
+- Correct the `Span.End` method documentation in the `otel` API to state updates are not allowed on a span after it has ended. (#1310)
+- Updated span collection limits for attribute, event and link counts to 1000 (#1318)
+- Renamed `semconv.HTTPUrlKey` to `semconv.HTTPURLKey`. (#1338)
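+
+A minimal sketch of the reworked `AddEvent` and `RecordError` signatures above (#1254), written against the current package naming (`attribute` rather than the older `label`); `record` is a hypothetical helper, and the event name, attribute, and error values are illustrative.
+
+```go
+package sketch
+
+import (
+	"context"
+	"errors"
+	"time"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// record annotates the span carried by ctx.
+func record(ctx context.Context) {
+	span := trace.SpanFromContext(ctx)
+
+	// AddEvent now takes a required name plus variadic EventOptions (#1254).
+	span.AddEvent("cache miss", trace.WithTimestamp(time.Now()))
+
+	// RecordError now takes a required error plus variadic EventOptions (#1254).
+	span.RecordError(errors.New("lookup failed"),
+		trace.WithAttributes(attribute.String("cache.key", "user:42")))
+}
+```
+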
+
+### Removed
+
+- The `ErrInvalidHexID`, `ErrInvalidTraceIDLength`, `ErrInvalidSpanIDLength`, and `ErrNilSpanID` errors from the `go.opentelemetry.io/otel` package are now unexported. (#1243)
+- The `AddEventWithTimestamp` method on the `Span` interface in `go.opentelemetry.io/otel` is removed due to its redundancy.
+  It is replaced by using the `AddEvent` method with a `WithTimestamp` option. (#1254)
+- The `MockSpan` and `MockTracer` types are removed from `go.opentelemetry.io/otel/oteltest`.
+  `Tracer` and `Span` from the same module should be used in their place instead. (#1306)
+- `WorkerCount` option is removed from `go.opentelemetry.io/otel/exporters/otlp`. (#1350)
+- Remove the following label types: INT32, UINT32, UINT64 and FLOAT32. (#1314)
+
+### Fixed
+
+- Rename `MergeItererator` to `MergeIterator` in the `go.opentelemetry.io/otel/label` package. (#1244)
+- The `go.opentelemetry.io/otel/api/global` package's global `TextMapPropagator` now delegates functionality to a globally set delegate for all previously returned propagators. (#1258)
+- Fix condition in `label.Any`. (#1299)
+- Fix global `TracerProvider` to pass options to its configured provider. (#1329)
+- Fix missing handler for `ExactKind` aggregator in OTLP metrics transformer (#1309)
+
+## [0.13.0] - 2020-10-08
+
+### Added
+
+- OTLP Metric exporter supports Histogram aggregation. (#1209)
+- The `Code` struct from the `go.opentelemetry.io/otel/codes` package now supports JSON marshaling and unmarshaling as well as implements the `Stringer` interface. (#1214)
+- A Baggage API to implement the OpenTelemetry specification. (#1217)
+- Add Shutdown method to sdk/trace/provider, which shuts down processors in the order they were registered. (#1227)
+
+### Changed
+
+- Set default propagator to no-op propagator. (#1184)
+- The `HTTPSupplier`, `HTTPExtractor`, `HTTPInjector`, and `HTTPPropagator` from the `go.opentelemetry.io/otel/api/propagation` package were replaced with unified `TextMapCarrier` and `TextMapPropagator` in the `go.opentelemetry.io/otel/propagation` package. (#1212) (#1325)
+- The `New` function from the `go.opentelemetry.io/otel/api/propagation` package was replaced with `NewCompositeTextMapPropagator` in the `go.opentelemetry.io/otel` package. (#1212)
+- The status codes of the `go.opentelemetry.io/otel/codes` package have been updated to match the latest OpenTelemetry specification.
+  They now are `Unset`, `Error`, and `Ok`.
+  They no longer track the gRPC codes. (#1214)
+- The `StatusCode` field of the `SpanData` struct in the `go.opentelemetry.io/otel/sdk/export/trace` package now uses the codes package from this package instead of the gRPC project. (#1214)
+- Move the `go.opentelemetry.io/otel/api/baggage` package into `go.opentelemetry.io/otel/baggage`. (#1217) (#1325)
+- A `Shutdown` method of `SpanProcessor` and all its implementations receives a context and returns an error. (#1264)
+
+### Fixed
+
+- Copies of data from arrays and slices passed to `go.opentelemetry.io/otel/label.ArrayValue()` are now used in the returned `Value` instead of using the mutable data itself. (#1226)
+
+### Removed
+
+- The `ExtractHTTP` and `InjectHTTP` functions from the `go.opentelemetry.io/otel/api/propagation` package were removed. (#1212)
+- The `Propagators` interface from the `go.opentelemetry.io/otel/api/propagation` package was removed to conform to the OpenTelemetry specification.
+  The explicit `TextMapPropagator` type can be used in its place as this is the `Propagator` type the specification defines. (#1212)
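+
+A minimal sketch of the replacement pattern (#1212), written against the current package layout where the composite constructor lives in `propagation` (the 0.13.0 release exposed it from the `otel` package):
+
+```go
+package sketch
+
+import (
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/propagation"
+)
+
+func init() {
+	// A composite TextMapPropagator replaces the removed Propagators
+	// interface (#1212); W3C trace context plus baggage is a common pairing.
+	otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(
+		propagation.TraceContext{},
+		propagation.Baggage{},
+	))
+}
+```
+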
+- The `SetAttribute` method of the `Span` from the `go.opentelemetry.io/otel/api/trace` package was removed given its redundancy with the `SetAttributes` method. (#1216)
+- The internal implementation of Baggage storage is removed in favor of using the new Baggage API functionality. (#1217)
+- Remove duplicate hostname key `HostHostNameKey` in Resource semantic conventions. (#1219)
+- Nested array/slice support has been removed. (#1226)
+
+## [0.12.0] - 2020-09-24
+
+### Added
+
+- A `SpanConfigure` function in `go.opentelemetry.io/otel/api/trace` to create a new `SpanConfig` from `SpanOption`s. (#1108)
+- In the `go.opentelemetry.io/otel/api/trace` package, `NewTracerConfig` was added to construct new `TracerConfig`s.
+  This addition was made to conform with our project option conventions. (#1155)
+- Instrumentation library information was added to the Zipkin exporter. (#1119)
+- The `SpanProcessor` interface now has a `ForceFlush()` method. (#1166)
+- More semantic conventions for k8s as resource attributes. (#1167)
+
+### Changed
+
+- Add reconnecting UDP connection type to Jaeger exporter.
+  This change adds a new optional implementation of the UDP conn interface used to detect changes to an agent's host DNS record.
+  It then adopts the new destination address to ensure the exporter doesn't get stuck. This change was ported from jaegertracing/jaeger-client-go#520. (#1063)
+- Replace `StartOption` and `EndOption` in `go.opentelemetry.io/otel/api/trace` with `SpanOption`.
+  This change is matched by replacing the `StartConfig` and `EndConfig` with a unified `SpanConfig`. (#1108)
+- Replace the `LinkedTo` span option in `go.opentelemetry.io/otel/api/trace` with `WithLinks`.
+  This is more consistent with our other option patterns, i.e. passing the item to be configured directly instead of its component parts, and provides a cleaner function signature. (#1108)
+- The `go.opentelemetry.io/otel/api/trace` `TracerOption` was changed to an interface to conform to project option conventions. (#1109)
+- Move the `B3` and `TraceContext` from within the `go.opentelemetry.io/otel/api/trace` package to their own `go.opentelemetry.io/otel/propagators` package.
+  This move reflects the OpenTelemetry specification for these propagators and cleans up the `go.opentelemetry.io/otel/api/trace` API. (#1118)
+- Rename Jaeger tags used for instrumentation library information to reflect changes in OpenTelemetry specification. (#1119)
+- Rename `ProbabilitySampler` to `TraceIDRatioBased` and change semantics to ignore parent span sampling status. (#1115)
+- Move `tools` package under `internal`. (#1141)
+- Move `go.opentelemetry.io/otel/api/correlation` package to `go.opentelemetry.io/otel/api/baggage`. (#1142)
+  The `correlation.CorrelationContext` propagator has been renamed `baggage.Baggage`. Other exported functions and types are unchanged.
+- Rename `ParentOrElse` sampler to `ParentBased` and allow setting samplers depending on parent span. (#1153) See the sampler sketch below.
+- In the `go.opentelemetry.io/otel/api/trace` package, `SpanConfigure` was renamed to `NewSpanConfig`. (#1155)
+- Change `dependabot.yml` to add a `Skip Changelog` label to dependabot-sourced PRs. (#1161)
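+
+A minimal sketch of the renamed samplers above, `TraceIDRatioBased` (#1115) composed with `ParentBased` (#1153), using the SDK constructor names this release also introduced; `newSampledProvider` is a hypothetical helper and the 0.25 ratio is illustrative.
+
+```go
+package sketch
+
+import (
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+)
+
+// newSampledProvider samples a quarter of root traces and otherwise
+// follows the parent span's sampling decision.
+func newSampledProvider() *sdktrace.TracerProvider {
+	sampler := sdktrace.ParentBased(sdktrace.TraceIDRatioBased(0.25))
+	return sdktrace.NewTracerProvider(sdktrace.WithSampler(sampler))
+}
+```
+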
+- The [configuration style guide](https://github.com/open-telemetry/opentelemetry-go/blob/master/CONTRIBUTING.md#config) has been updated to
+  recommend the use of `newConfig()` instead of `configure()`. (#1163)
+- The `otlp.Config` type has been unexported and changed to `otlp.config`, along with its initializer. (#1163)
+- Ensure exported interface types include parameter names and update the
+  Style Guide to reflect this styling rule. (#1172)
+- Don't consider an unset environment variable for resource detection to be an error. (#1170)
+- Rename `go.opentelemetry.io/otel/api/metric.ConfigureInstrument` to `NewInstrumentConfig` and
+  `go.opentelemetry.io/otel/api/metric.ConfigureMeter` to `NewMeterConfig`.
+- ValueObserver instruments use LastValue aggregator by default. (#1165)
+- OTLP Metric exporter supports LastValue aggregation. (#1165)
+- Move the `go.opentelemetry.io/otel/api/unit` package to `go.opentelemetry.io/otel/unit`. (#1185)
+- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190)
+- Rename `NoopProvider` to `NoopMeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190)
+- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/metrictest` package. (#1190)
+- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190)
+- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190)
+- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190)
+- Rename `NoopProvider` to `NoopTracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190)
+- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190)
+- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190)
+- Rename `WrapperProvider` to `WrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190)
+- Rename `NewWrapperProvider` to `NewWrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190)
+- Rename `Provider` method of the pull controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/pull` package. (#1190)
+- Rename `Provider` method of the push controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/push` package. (#1190)
+- Rename `ProviderOptions` to `TracerProviderConfig` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
+- Rename `ProviderOption` to `TracerProviderOption` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
+- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
+- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
+- Renamed `SamplingDecision` values to comply with OpenTelemetry specification change. (#1192)
+- Renamed Zipkin attribute names from `ot.status_code & ot.status_description` to `otel.status_code & otel.status_description`. (#1201)
+- The default SDK now invokes registered `SpanProcessor`s in the order they were registered with the `TracerProvider`. (#1195)
+- Add test of spans being processed by the `SpanProcessor`s in the order they were registered. (#1203)
+
+### Removed
+
+- Remove the B3 propagator from `go.opentelemetry.io/otel/propagators`. It is now located in the
+  `go.opentelemetry.io/contrib/propagators/` module. (#1191)
+- Remove the semantic convention for HTTP status text, `HTTPStatusTextKey` from package `go.opentelemetry.io/otel/semconv`. (#1194)
+
+### Fixed
+
+- Zipkin example no longer mentions `ParentSampler`, corrected to `ParentBased`. (#1171)
+- Fix missing shutdown processor in otel-collector example. (#1186)
+- Fix missing shutdown processor in basic and namedtracer examples. (#1197)
+
+## [0.11.0] - 2020-08-24
+
+### Added
+
+- Support for exporting array-valued attributes via OTLP. (#992)
+- `Noop` and `InMemory` `SpanBatcher` implementations to help with testing integrations. (#994)
+- Support for filtering metric label sets. (#1047)
+- A dimensionality-reducing metric Processor. (#1057)
+- Integration tests for more OTel Collector Attribute types. (#1062)
+- A new `WithSpanProcessor` `ProviderOption` is added to the `go.opentelemetry.io/otel/sdk/trace` package to create a `Provider` and automatically register the `SpanProcessor`. (#1078) See the sketch below.
+
+### Changed
+
+- Rename `sdk/metric/processor/test` to `sdk/metric/processor/processortest`. (#1049)
+- Rename `sdk/metric/controller/test` to `sdk/metric/controller/controllertest`. (#1049)
+- Rename `api/testharness` to `api/apitest`. (#1049)
+- Rename `api/trace/testtrace` to `api/trace/tracetest`. (#1049)
+- Change Metric Processor to merge multiple observations. (#1024)
+- The `go.opentelemetry.io/otel/bridge/opentracing` bridge package has been made into its own module.
+  This removes the package dependencies of this bridge from the rest of the OpenTelemetry-based project. (#1038)
+- Renamed `go.opentelemetry.io/otel/api/standard` package to `go.opentelemetry.io/otel/semconv` to avoid the ambiguous and generic name `standard` and better describe the package as containing OpenTelemetry semantic conventions. (#1016)
+- The environment variable used for resource detection has been changed from `OTEL_RESOURCE_LABELS` to `OTEL_RESOURCE_ATTRIBUTES` (#1042)
+- Replace `WithSyncer` with `WithBatcher` in examples. (#1044)
+- Replace the `google.golang.org/grpc/codes` dependency in the API with an equivalent `go.opentelemetry.io/otel/codes` package. (#1046)
+- Merge the `go.opentelemetry.io/otel/api/label` and `go.opentelemetry.io/otel/api/kv` into the new `go.opentelemetry.io/otel/label` package. (#1060)
+- Unify Callback Function Naming.
+  Rename `*Callback` to `*Func`. (#1061)
+- CI builds validate against last two versions of Go, dropping 1.13 and adding 1.15. (#1064)
+- The `go.opentelemetry.io/otel/sdk/export/trace` interfaces `SpanSyncer` and `SpanBatcher` have been replaced with a specification-compliant `Exporter` interface.
+  This interface still supports the export of `SpanData`, but only as a slice.
+  Implementations are now also required to return any error from `ExportSpans` if one occurs, as well as to implement a `Shutdown` method for exporter clean-up. (#1078)
+- The `go.opentelemetry.io/otel/sdk/trace` `NewBatchSpanProcessor` function no longer returns an error.
+  If a `nil` exporter is passed as an argument to this function, instead of it returning an error, it now returns a `BatchSpanProcessor` that handles the export of `SpanData` by not taking any action. (#1078)
+- The `go.opentelemetry.io/otel/sdk/trace` `NewProvider` function to create a `Provider` no longer returns an error, instead only a `*Provider`.
+  This change is related to `NewBatchSpanProcessor` not returning an error, which was the only error this function would return. (#1078)
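+
+A minimal sketch of the `WithSpanProcessor` option noted in the Added entries above (#1078), written with today's SDK type names (the 0.11.0 release still used `Provider`/`NewProvider`); `newProvider` is a hypothetical helper and `exp` is any span exporter.
+
+```go
+package sketch
+
+import (
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+)
+
+// newProvider registers the batch processor at construction time,
+// so no separate RegisterSpanProcessor call is needed (#1078).
+func newProvider(exp sdktrace.SpanExporter) *sdktrace.TracerProvider {
+	bsp := sdktrace.NewBatchSpanProcessor(exp)
+	return sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(bsp))
+}
+```
+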
+
+### Removed
+
+- Duplicate, unused API sampler interface. (#999)
+  Use the [`Sampler` interface](https://github.com/open-telemetry/opentelemetry-go/blob/v0.11.0/sdk/trace/sampling.go) provided by the SDK instead.
+- The `grpctrace` instrumentation was moved to the `go.opentelemetry.io/contrib` repository and out of this repository.
+  This move includes moving the `grpc` example to `go.opentelemetry.io/contrib` as well. (#1027)
+- The `WithSpan` method of the `Tracer` interface.
+  The functionality this method provided was limited compared to what a user can provide themselves.
+  It was removed with the understanding that if there is sufficient user need it can be added back based on actual user usage. (#1043)
+- The `RegisterSpanProcessor` and `UnregisterSpanProcessor` functions.
+  These were holdovers from an approach prior to the TracerProvider design. They were no longer used. (#1077)
+- The `oterror` package. (#1026)
+- The `othttp` and `httptrace` instrumentations were moved to `go.opentelemetry.io/contrib`. (#1032)
+
+### Fixed
+
+- The `semconv.HTTPServerMetricAttributesFromHTTPRequest()` function no longer generates the high-cardinality `http.request.content.length` label. (#1031)
+- Correct instrumentation version tag in Jaeger exporter. (#1037)
+- The SDK span will now set an error event if the `End` method is called during a panic (i.e. it was deferred). (#1043)
+- Move internally generated protobuf code from the `go.opentelemetry.io/otel` to the OTLP exporter to reduce dependency overhead. (#1050)
+- The `otel-collector` example referenced outdated collector processors. (#1006)
+
+## [0.10.0] - 2020-07-29
+
+This release migrates the default OpenTelemetry SDK into its own Go module, decoupling the SDK from the API and reducing dependencies for instrumentation packages.
+
+### Added
+
+- The Zipkin exporter now has `NewExportPipeline` and `InstallNewPipeline` constructor functions to match the common pattern.
+  These functions build a new exporter with default SDK options and register the exporter with the `global` package respectively. (#944)
+- Add propagator option for gRPC instrumentation. (#986)
+- The `testtrace` package now tracks the `trace.SpanKind` for each span. (#987)
+
+### Changed
+
+- Replace the `RegisterGlobal` `Option` in the Jaeger exporter with an `InstallNewPipeline` constructor function.
+  This matches the other exporter constructor patterns and will register a new exporter after building it with default configuration. (#944)
+- The trace (`go.opentelemetry.io/otel/exporters/trace/stdout`) and metric (`go.opentelemetry.io/otel/exporters/metric/stdout`) `stdout` exporters are now merged into a single exporter at `go.opentelemetry.io/otel/exporters/stdout`.
+  This new exporter was made into its own Go module to follow the pattern of all exporters and decouple it from the `go.opentelemetry.io/otel` module. (#956, #963)
+- Move the `go.opentelemetry.io/otel/exporters/test` test package to `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#962)
+- The `go.opentelemetry.io/otel/api/kv/value` package was merged into the parent `go.opentelemetry.io/otel/api/kv` package. (#968)
+  - `value.Bool` was replaced with `kv.BoolValue`.
+  - `value.Int64` was replaced with `kv.Int64Value`.
+  - `value.Uint64` was replaced with `kv.Uint64Value`.
+  - `value.Float64` was replaced with `kv.Float64Value`.
+  - `value.Int32` was replaced with `kv.Int32Value`.
+  - `value.Uint32` was replaced with `kv.Uint32Value`.
+  - `value.Float32` was replaced with `kv.Float32Value`.
+  - `value.String` was replaced with `kv.StringValue`.
+  - `value.Int` was replaced with `kv.IntValue`.
+  - `value.Uint` was replaced with `kv.UintValue`.
+  - `value.Array` was replaced with `kv.ArrayValue`.
+- Rename `Infer` to `Any` in the `go.opentelemetry.io/otel/api/kv` package. (#972)
+- Change `othttp` to use the `httpsnoop` package to wrap the `ResponseWriter` so that optional interfaces (`http.Hijacker`, `http.Flusher`, etc.) that are implemented by the original `ResponseWriter` are also implemented by the wrapped `ResponseWriter`. (#979)
+- Rename `go.opentelemetry.io/otel/sdk/metric/aggregator/test` package to `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest`. (#980)
+- Make the SDK into its own Go module called `go.opentelemetry.io/otel/sdk`. (#985)
+- Changed the default trace `Sampler` from `AlwaysOn` to `ParentOrElse(AlwaysOn)`. (#989)
+
+### Removed
+
+- The `IndexedAttribute` function from the `go.opentelemetry.io/otel/api/label` package was removed in favor of `IndexedLabel`, with which it was synonymous. (#970)
+
+### Fixed
+
+- Bump github.com/golangci/golangci-lint from 1.28.3 to 1.29.0 in /tools. (#953)
+- Bump github.com/google/go-cmp from 0.5.0 to 0.5.1. (#957)
+- Use `global.Handle` for span export errors in the OTLP exporter. (#946)
+- Correct Go language formatting in the README documentation. (#961)
+- Remove default SDK dependencies from the `go.opentelemetry.io/otel/api` package. (#977)
+- Remove default SDK dependencies from the `go.opentelemetry.io/otel/instrumentation` package. (#983)
+- Move documented examples for `go.opentelemetry.io/otel/instrumentation/grpctrace` interceptors into Go example tests. (#984)
+
+## [0.9.0] - 2020-07-20
+
+### Added
+
+- A new Resource Detector interface is included to allow resources to be automatically detected and included. (#939)
+- A Detector to automatically detect resources from an environment variable. (#939)
+- GitHub Action to generate protobuf Go bindings locally in `internal/opentelemetry-proto-gen`. (#938)
+- OTLP .proto files from `open-telemetry/opentelemetry-proto` imported as a git submodule under `internal/opentelemetry-proto`.
+  References to `github.com/open-telemetry/opentelemetry-proto` changed to `go.opentelemetry.io/otel/internal/opentelemetry-proto-gen`. (#942)
+
+### Changed
+
+- Non-nil value `struct`s for key-value pairs will be marshalled using JSON rather than `Sprintf`. (#948)
+
+### Removed
+
+- Removed dependency on `github.com/open-telemetry/opentelemetry-collector`. (#943)
+
+## [0.8.0] - 2020-07-09
+
+### Added
+
+- The `B3Encoding` type to represent the B3 encoding(s) the B3 propagator can inject.
+  Values for the supported HTTP encodings (Multiple Header: `MultipleHeader`, Single Header: `SingleHeader`) are included. (#882)
+- The `FlagsDeferred` trace flag to indicate if the trace sampling decision has been deferred. (#882)
+- The `FlagsDebug` trace flag to indicate if the trace is a debug trace. (#882)
+- Add `peer.service` semantic attribute. (#898)
+- Add database-specific semantic attributes. (#899)
+- Add semantic convention for `faas.coldstart` and `container.id`. (#909)
+- Add HTTP content size semantic conventions. (#905)
+- Include `http.request_content_length` in HTTP request basic attributes. (#905)
+- Add semantic conventions for operating system process resource attribute keys. (#919)
+- The Jaeger exporter now has a `WithBatchMaxCount` option to specify the maximum number of spans sent in a batch. (#931)
+
+### Changed
+
+- Update `CONTRIBUTING.md` to ask for updates to `CHANGELOG.md` with each pull request. (#879)
+- Use lowercase header names for B3 Multiple Headers. (#881)
+- The B3 propagator `SingleHeader` field has been replaced with `InjectEncoding`.
+  This new field can be set to combinations of the `B3Encoding` bitmasks and will inject trace information in these encodings.
+  If no encoding is set, the propagator will default to `MultipleHeader` encoding. (#882)
+- The B3 propagator now extracts from either HTTP encoding of B3 (Single Header or Multiple Header) based on what is contained in the header.
+  Preference is given to Single Header encoding with Multiple Header being the fallback if Single Header is not found or is invalid.
+  This behavior change is made to dynamically support all correctly encoded traces received instead of having to guess the expected encoding prior to receiving. (#882)
+- Extend semantic conventions for RPC. (#900)
+- To match constant naming conventions in the `api/standard` package, the `FaaS*` key names are appended with a suffix of `Key`. (#920)
+  - `"api/standard".FaaSName` -> `FaaSNameKey`
+  - `"api/standard".FaaSID` -> `FaaSIDKey`
+  - `"api/standard".FaaSVersion` -> `FaaSVersionKey`
+  - `"api/standard".FaaSInstance` -> `FaaSInstanceKey`
+
+### Removed
+
+- The `FlagsUnused` trace flag is removed.
+  The purpose of this flag was to act as the inverse of `FlagsSampled`; the inverse of `FlagsSampled` is used instead. (#882)
+- The B3 header constants (`B3SingleHeader`, `B3DebugFlagHeader`, `B3TraceIDHeader`, `B3SpanIDHeader`, `B3SampledHeader`, `B3ParentSpanIDHeader`) are removed.
+  If B3 header keys are needed, [the authoritative OpenZipkin package constants](https://pkg.go.dev/github.com/openzipkin/zipkin-go@v0.2.2/propagation/b3?tab=doc#pkg-constants) should be used instead. (#882)
+
+### Fixed
+
+- The B3 Single Header name is now correctly `b3` instead of the previous `X-B3`. (#881)
+- The B3 propagator now correctly supports sampling-only values (`b3: 0`, `b3: 1`, or `b3: d`) for a Single B3 Header. (#882)
+- The B3 propagator now propagates the debug flag.
+  This removes the behavior of changing the debug flag into a set sampling bit.
+  Instead, this now follows the B3 specification and omits the `X-B3-Sampled` header. (#882)
+- The B3 propagator now tracks "unset" sampling state (meaning "defer the decision") and does not set the `X-B3-Sampled` header when injecting. (#882)
+- Bump github.com/itchyny/gojq from 0.10.3 to 0.10.4 in /tools. (#883)
+- Bump github.com/opentracing/opentracing-go from v1.1.1-0.20190913142402-a7454ce5950e to v1.2.0. (#885)
+- The tracing time conversion for OTLP spans is now correctly set to `UnixNano`. (#896)
+- Ensure span status is not set to `Unknown` when no HTTP status code is provided as it is assumed to be `200 OK`. (#908)
+- Ensure `httptrace.clientTracer` closes `http.headers` span. (#912)
+- Prometheus exporter will not apply stale updates or forget inactive metrics. (#903)
+- Add test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905)
+- Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. (#901, #913)
+- Update otel-collector example to use the v0.5.0 collector. (#915)
+- The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922)
+- The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922)
+- The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists.
+  This is in accordance with OpenTelemetry semantic conventions. (#922)
+- Correlation Context extractor will no longer insert an empty map into the returned context when no valid values are extracted. (#923)
+- Bump google.golang.org/api from 0.28.0 to 0.29.0 in /exporters/trace/jaeger. (#925)
+- Bump github.com/itchyny/gojq from 0.10.4 to 0.11.0 in /tools. (#926)
+- Bump github.com/golangci/golangci-lint from 1.28.1 to 1.28.2 in /tools. (#930)
+
+## [0.7.0] - 2020-06-26
+
+This release implements the v0.5.0 version of the OpenTelemetry specification.
+
+### Added
+
+- The othttp instrumentation now includes default metrics. (#861)
+- This CHANGELOG file to track all changes in the project going forward.
+- Support for array-type attributes. (#798)
+- Apply transitive dependabot go.mod dependency updates as part of a new automatic GitHub workflow. (#844)
+- Timestamps are now passed to exporters for each export. (#835)
+- Add new `Accumulation` type to metric SDK to transport telemetry from `Accumulator`s to `Processor`s.
+  This replaces the prior `Record` `struct` use for this purpose. (#835)
+- New dependabot integration to automate package upgrades. (#814)
+- `Meter` and `Tracer` implementations accept instrumentation version as an optional argument.
+  This instrumentation version is passed on to exporters. (#811) (#805) (#802)
+- The OTLP exporter includes the instrumentation version in telemetry it exports. (#811)
+- Environment variables for Jaeger exporter are supported. (#796)
+- New `aggregation.Kind` in the export metric API. (#808)
+- New example that uses OTLP and the collector. (#790)
+- Handle errors in the span `SetName` during span initialization. (#791)
+- Default service config to enable retries for retry-able failed requests in the OTLP exporter and an option to override this default. (#777)
+- New `go.opentelemetry.io/otel/api/oterror` package to uniformly support error handling and definitions for the project. (#778)
+- New `global` default implementation of the `go.opentelemetry.io/otel/api/oterror.Handler` interface to be used to handle errors prior to a user-defined `Handler`.
+  There is also functionality for the user to register their `Handler` as well as a convenience function `Handle` to handle an error with this global `Handler`. (#778)
+- Options to specify propagators for httptrace and grpctrace instrumentation. (#784)
+- The required `application/json` header for the Zipkin exporter is included in all exports. (#774)
+- Integrate HTTP semantics helpers from the contrib repository into the `api/standard` package. (#769)
+
+### Changed
+
+- Rename `Integrator` to `Processor` in the metric SDK. (#863)
+- Rename `AggregationSelector` to `AggregatorSelector`. (#859)
+- Rename `SynchronizedCopy` to `SynchronizedMove`. (#858)
+- Rename `simple` integrator to `basic` integrator. (#857)
+- Merge otlp collector examples. (#841)
+- Change the metric SDK to support cumulative, delta, and pass-through exporters directly.
+  With these changes, cumulative- and delta-specific exporters are able to request the correct kind of aggregation from the SDK. (#840)
+- The `Aggregator.Checkpoint` API is renamed to `SynchronizedCopy` and adds an argument, a different `Aggregator` into which the copy is stored. (#812)
+- The `export.Aggregator` contract is that `Update()` and `SynchronizedCopy()` are synchronized with each other.
+  All the aggregation interfaces (`Sum`, `LastValue`, ...) are not meant to be synchronized, as the caller is expected to synchronize aggregators at a higher level after the `Accumulator`.
+  Some of the `Aggregators` used unnecessary locking and that has been cleaned up. (#812)
+- Use of `metric.Number` was replaced by `int64` now that we use `sync.Mutex` in the `MinMaxSumCount` and `Histogram` `Aggregators`. (#812)
+- Replace `AlwaysParentSample` with `ParentSample(fallback)` to match the OpenTelemetry v0.5.0 specification. (#810)
+- Rename `sdk/export/metric/aggregator` to `sdk/export/metric/aggregation`. (#808)
+- Send configured headers with every request in the OTLP exporter, instead of just on connection creation. (#806)
+- Update error handling for any one-off error handlers, replacing them with the `global.Handle` function. (#791)
+- Rename `plugin` directory to `instrumentation` to match the OpenTelemetry specification. (#779)
+- Make the argument order of the Histogram and DDSketch `New()` functions consistent. (#781)
+
+### Removed
+
+- `Uint64NumberKind` and related functions from the API. (#864)
+- Context arguments from `Aggregator.Checkpoint` and `Integrator.Process` as they were unused. (#803)
+- `SpanID` is no longer included in parameters for sampling decision to match the OpenTelemetry specification. (#775)
+
+### Fixed
+
+- Upgrade OTLP exporter to opentelemetry-proto matching the opentelemetry-collector v0.4.0 release. (#866)
+- Allow changes to `go.sum` and `go.mod` when running dependabot tidy-up. (#871)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1. (#824)
+- Bump github.com/prometheus/client_golang from 1.7.0 to 1.7.1 in /exporters/metric/prometheus. (#867)
+- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/jaeger. (#853)
+- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/zipkin. (#854)
+- Bump github.com/golang/protobuf from 1.3.2 to 1.4.2 (#848)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/otlp (#817)
+- Bump github.com/golangci/golangci-lint from 1.25.1 to 1.27.0 in /tools (#828)
+- Bump github.com/prometheus/client_golang from 1.5.0 to 1.7.0 in /exporters/metric/prometheus (#838)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/jaeger (#829)
+- Bump github.com/benbjohnson/clock from 1.0.0 to 1.0.3 (#815)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/zipkin (#823)
+- Bump github.com/itchyny/gojq from 0.10.1 to 0.10.3 in /tools (#830)
+- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/metric/prometheus (#822)
+- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/zipkin (#820)
+- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/jaeger (#831)
+- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 (#836)
+- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/trace/jaeger (#837)
+- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/otlp (#839)
+- Bump google.golang.org/api from 0.20.0 to 0.28.0 in /exporters/trace/jaeger (#843)
+- Set span status from HTTP status code in the othttp instrumentation. (#832)
+- Fixed typo in push controller comment. (#834)
+- The `Aggregator` testing has been updated and cleaned. (#812)
+- `metric.Number(0)` expressions are replaced by `0` where possible. (#812)
+- Fixed `global` `handler_test.go` test failure. (#804)
+- Fixed `BatchSpanProcessor.Shutdown` to wait until all spans are processed. (#766)
+- Fixed OTLP example's accidental early close of exporter. (#807)
+- Ensure zipkin exporter reads and closes response body. (#788)
+- Update instrumentation to use `api/standard` keys instead of custom keys. (#782)
+- Clean up tools and RELEASING documentation. (#762)
+
+## [0.6.0] - 2020-05-21
+
+### Added
+
+- Support for `Resource`s in the prometheus exporter. (#757)
+- New pull controller. (#751)
+- New `UpDownSumObserver` instrument. (#750)
+- OpenTelemetry collector demo. (#711)
+- New `SumObserver` instrument. (#747)
+- New `UpDownCounter` instrument. (#745)
+- New timeout `Option` and configuration function `WithTimeout` to the push controller. (#742)
+- New `api/standard` package to implement semantic conventions and standard key-value generation. (#731)
+
+### Changed
+
+- Rename `Register*` functions in the metric API to `New*` for all `Observer` instruments. (#761)
+- Use `[]float64` for histogram boundaries, not `[]metric.Number`. (#758)
+- Change OTLP example to use exporter as a trace `Syncer` instead of as an unneeded `Batcher`. (#756)
+- Replace `WithResourceAttributes()` with `WithResource()` in the trace SDK. (#754)
+- The prometheus exporter now uses the new pull controller. (#751)
+- Rename `ScheduleDelayMillis` to `BatchTimeout` in the trace `BatchSpanProcessor`. (#752)
+- Support use of synchronous instruments in asynchronous callbacks (#725)
+- Move `Resource` from the `Export` method parameter into the metric export `Record`. (#739)
+- Rename `Observer` instrument to `ValueObserver`. (#734)
+- The push controller now has a method (`Provider()`) to return a `metric.Provider` instead of the old `Meter` method that acted as a `metric.Provider`. (#738)
+- Replace `Measure` instrument by `ValueRecorder` instrument. (#732)
+- Rename correlation context header from `"Correlation-Context"` to `"otcorrelations"` to match the OpenTelemetry specification. (#727)
+
+### Fixed
+
+- Ensure gRPC `ClientStream` override methods do not panic in grpctrace package. (#755)
+- Disable parts of `BatchSpanProcessor` test until a fix is found. (#743)
+- Fix `string` case in `kv` `Infer` function. (#746)
+- Fix panic in grpctrace client interceptors. (#740)
+- Refactor the `api/metrics` push controller and add `CheckpointSet` synchronization. (#737)
+- Rewrite span batch process queue batching logic. (#719)
+- Remove the push controller named Meter map. (#738)
+- Fix Histogram aggregator initial state (fix #735). (#736)
+- Ensure golang alpine image is running `golang-1.14` for examples. (#733)
+- Added test for grpctrace `UnaryInterceptorClient`. (#695)
+- Rearrange `api/metric` code layout. (#724)
+
+## [0.5.0] - 2020-05-13
+
+### Added
+
+- Batch `Observer` callback support. (#717)
+- Alias `api` types to root package of project. (#696)
+- Create basic `othttp.Transport` for simple client instrumentation. (#678)
+- `SetAttribute(string, interface{})` to the trace API. (#674)
+- Jaeger exporter option that allows user to specify custom HTTP client. (#671)
+- `Stringer` and `Infer` methods to `key`s. (#662)
+
+### Changed
+
+- Rename `NewKey` in the `kv` package to just `Key`. (#721)
+- Move `core` and `key` to `kv` package. (#720)
+- Make the metric API `Meter` a `struct` so the abstract `MeterImpl` can be passed and simplify implementation. (#709)
+- Rename SDK `Batcher` to `Integrator` to match draft OpenTelemetry SDK specification. (#710)
+- Rename SDK `Ungrouped` integrator to `simple.Integrator` to match draft OpenTelemetry SDK specification. (#710)
+- Rename SDK `SDK` `struct` to `Accumulator` to match draft OpenTelemetry SDK specification. (#710)
+- Move `Number` from `core` to `api/metric` package. (#706)
+- Move `SpanContext` from `core` to `trace` package. (#692)
+- Change traceparent header from `Traceparent` to `traceparent` to implement the W3C specification. (#681)
+
+### Fixed
+
+- Update tooling to run generators in all submodules. (#705)
+- gRPC interceptor regexp to match methods without a service name. (#683)
+- Use a `const` for padding 64-bit B3 trace IDs. (#701)
+- Update `mockZipkin` listen address from `:0` to `127.0.0.1:0`. (#700)
+- Left-pad 64-bit B3 trace IDs with zero. (#698)
+- Propagate at least the first W3C tracestate header. (#694)
+- Remove internal `StateLocker` implementation. (#688)
+- Increase the instance size the CI system uses. (#690)
+- Add a `key` benchmark and use reflection in `key.Infer()`. (#679)
+- Fix internal `global` test by using `global.Meter` with `RecordBatch()`. (#680)
+- Reimplement histogram using mutex instead of `StateLocker`. (#669)
+- Switch `MinMaxSumCount` to a mutex lock implementation instead of `StateLocker`. (#667)
+- Update documentation to not include any references to `WithKeys`. (#672)
+- Correct misspelling. (#668)
+- Fix clobbering of the span context if extraction fails. (#656)
+- Bump `golangci-lint` and work around the corrupting bug. (#666) (#670)
+
+## [0.4.3] - 2020-04-24
+
+### Added
+
+- `Dockerfile` and `docker-compose.yml` to run example code. (#635)
+- New `grpctrace` package that provides gRPC client and server interceptors for both unary and stream connections. (#621)
+- New `api/label` package, providing common label set implementation. (#651)
+- Support for JSON marshaling of `Resources`. (#654)
+- `TraceID` and `SpanID` implementations for `Stringer` interface. (#642)
+- `RemoteAddrKey` in the othttp plugin to include the HTTP client address in top-level spans. (#627)
+- `WithSpanFormatter` option to the othttp plugin. (#617)
+- Updated README to include section for compatible libraries and include reference to the contrib repository. (#612)
+- The prometheus exporter now supports exporting histograms. (#601)
+- A `String` method to the `Resource` to return a hashable identifier for a now unique resource. (#613)
+- An `Iter` method to the `Resource` to return an array `AttributeIterator`. (#613)
+- An `Equal` method to the `Resource` to test the equivalence of resources. (#613)
+- An iterable structure (`AttributeIterator`) for `Resource` attributes.
+
+### Changed
+
+- The Zipkin exporter's `NewExporter` now requires a `serviceName` argument to ensure this needed value is provided. (#644)
+- Pass `Resources` through the metrics export pipeline. (#659)
+
+### Removed
+
+- `WithKeys` option from the metric API. (#639)
+
+### Fixed
+
+- Use the `label.Set.Equivalent` value instead of an encoding in the batcher. (#658)
+- Correct typo `trace.Exporter` to `trace.SpanSyncer` in comments. (#653)
+- Use type names for return values in jaeger exporter. (#648)
+- Increase the visibility of the `api/key` package by updating comments and fixing usages locally. (#650)
+- `Checkpoint` only after `Update`; Keep records in the `sync.Map` longer. (#647)
+- Do not cache `reflect.ValueOf()` in metric Labels. (#649)
+- Batch metrics exported from the OTLP exporter based on `Resource` and labels. (#626)
+- Add error wrapping to the prometheus exporter. (#631)
+- Update the OTLP exporter batching of traces to use a unique `string` representation of an associated `Resource` as the batching key. (#623)
+- Update OTLP `SpanData` transform to only include the `ParentSpanID` if one exists. (#614)
+- Update `Resource` internal representation to uniquely and reliably identify resources. (#613)
+- Check return value from `CheckpointSet.ForEach` in prometheus exporter. (#622)
+- Ensure spans created by httptrace client tracer reflect operation structure. (#618)
+- Create a new recorder rather than reusing one when there are multiple observations in the same epoch for asynchronous instruments. (#610)
+- The default port the OTLP exporter uses to connect to the OpenTelemetry collector is updated to match the one the collector listens on by default. (#611)
+
+## [0.4.2] - 2020-03-31
+
+### Fixed
+
+- Fix `pre_release.sh` to update version in `sdk/opentelemetry.go`. (#607)
+- Fix time conversion from internal to OTLP in OTLP exporter. (#606)
+
+## [0.4.1] - 2020-03-31
+
+### Fixed
+
+- Update `tag.sh` to create signed tags. (#604)
+
+## [0.4.0] - 2020-03-30
+
+### Added
+
+- New API package `api/metric/registry` that exposes a `MeterImpl` wrapper for use by SDKs to generate unique instruments. (#580)
+- Script to verify examples after a new release. (#579)
+
+### Removed
+
+- The dogstatsd exporter due to lack of support.
+  This additionally removes support for statsd. (#591)
+- `LabelSet` from the metric API.
+  This is replaced by a `[]core.KeyValue` slice. (#595)
+- `Labels` from the metric API's `Meter` interface. (#595)
+
+### Changed
+
+- The metric `export.Labels` became an interface which the SDK implements and the `export` package provides a simple, immutable implementation of this interface intended for testing purposes. (#574)
+- Renamed `internal/metric.Meter` to `MeterImpl`. (#580)
+- Renamed `api/global/internal.obsImpl` to `asyncImpl`. (#580)
+
+### Fixed
+
+- Corrected missing return in mock span. (#582)
+- Update License header for all source files to match CNCF guidelines and include a test to ensure it is present. (#586) (#596)
+- Update to v0.3.0 of OTLP in the OTLP exporter. (#588)
+- Update pre-release script to be compatible between GNU- and BSD-based systems. (#592)
+- Add a `RecordBatch` benchmark. (#594)
+- Moved span transforms of the OTLP exporter to the internal package. (#593)
+- Build both go-1.13 and go-1.14 in CircleCI to test for all supported versions of Go. (#569)
+- Removed unneeded allocation on empty labels in the OTLP exporter. (#597)
+- Update `BatchedSpanProcessor` to process the queue until no data but respect max batch size. (#599)
+- Update project documentation godoc.org links to pkg.go.dev. (#602)
+
+## [0.3.0] - 2020-03-21
+
+This is a first official beta release, which provides almost fully complete metrics, tracing, and context propagation functionality.
+There is still a possibility of breaking changes.
+
+### Added
+
+- Add `Observer` metric instrument. (#474)
+- Add global `Propagators` functionality to enable deferred initialization for propagators registered before the first Meter SDK is installed. (#494)
+- Simplified export setup pipeline for the jaeger exporter to match other exporters. (#459)
+- The zipkin trace exporter. (#495)
+- The OTLP exporter to export metric and trace telemetry to the OpenTelemetry collector. (#497) (#544) (#545)
+- Add `StatusMessage` field to the trace `Span`. (#524)
+- Context propagation in OpenTracing bridge in terms of OpenTelemetry context propagation. (#525)
+- The `Resource` type was added to the SDK. (#528)
+- The global API now supports a `Tracer` and `Meter` function as shortcuts to getting a global `*Provider` and calling these methods directly. (#538)
+- The metric API now defines a generic `MeterImpl` interface to support general purpose `Meter` construction.
+  Additionally, `SyncImpl` and `AsyncImpl` are added to support general purpose instrument construction. (#560)
+- A metric `Kind` is added to represent the `MeasureKind`, `ObserverKind`, and `CounterKind`. (#560)
+- Scripts to better automate the release process. (#576)
+
+### Changed
+
+- Default to using `AlwaysSampler` instead of `ProbabilitySampler` to match OpenTelemetry specification. (#506)
+- Renamed `AlwaysSampleSampler` to `AlwaysOnSampler` in the trace API. (#511)
+- Renamed `NeverSampleSampler` to `AlwaysOffSampler` in the trace API. (#511)
+- The `Status` field of the `Span` was changed to `StatusCode` to disambiguate with the added `StatusMessage`. (#524)
+- Updated the trace `Sampler` interface to conform to the OpenTelemetry specification. (#531)
+- Rename metric API `Options` to `Config`. (#541)
+- Rename metric `Counter` aggregator to be `Sum`. (#541)
+- Unify metric options into `Option` from instrument-specific options. (#541)
+- The trace API's `TraceProvider` now supports `Resource`s. (#545)
+- Correct error in zipkin module name. (#548)
+- The jaeger trace exporter now supports `Resource`s. (#551)
+- Metric SDK now supports `Resource`s.
+  The `WithResource` option was added to configure a `Resource` on creation and the `Resource` method was added to the metric `Descriptor` to return the associated `Resource`. (#552)
+- Replace `ErrNoLastValue` and `ErrEmptyDataSet` by `ErrNoData` in the metric SDK. (#557)
+- The stdout trace exporter now supports `Resource`s. (#558)
+- The metric `Descriptor` is now included at the API instead of the SDK. (#560)
+- Replace `Ordered` with an iterator in `export.Labels`. (#567)
+
+### Removed
+
+- The vendor-specific Stackdriver exporter. It is now hosted on 3rd-party vendor infrastructure. (#452)
+- The `Unregister` method for metric observers as it is not in the OpenTelemetry specification. (#560)
+- `GetDescriptor` from the metric SDK. (#575)
+- The `Gauge` instrument from the metric API. (#537)
+
+### Fixed
+
+- Make histogram aggregator checkpoint consistent. (#438)
+- Update README with import instructions and how to build and test. (#505)
+- The default label encoding was updated to be unique. (#508)
+- Use `NewRoot` in the othttp plugin for public endpoints. (#513)
+- Fix data race in `BatchedSpanProcessor`. (#518)
+- Skip test-386 for Mac OS 10.15.x (Catalina and upwards). (#521)
+- Use a variable-size array to represent ordered labels in maps. (#523)
+- Update the OTLP protobuf and update changed import path. (#532)
+- Use `StateLocker` implementation in `MinMaxSumCount`. (#546)
+- Eliminate goroutine leak in histogram stress test. (#547)
+- Update OTLP exporter with latest protobuf. (#550)
+- Add filters to the othttp plugin. (#556)
+- Provide an implementation of the `Header*` filters that do not depend on Go 1.14. (#565)
+- Encode labels once during checkpoint.
+  The checkpoint function is executed in a single thread so we can do the encoding lazily before passing the encoded version of labels to the exporter.
+  This is a cheap and quick way to avoid encoding the labels on every collection interval. (#572)
+- Run coverage over all packages in `COVERAGE_MOD_DIR`. (#573)
+
+## [0.2.3] - 2020-03-04
+
+### Added
+
+- `RecordError` method on `Span`s in the trace API to simplify adding error events to spans. (#473)
+- Configurable push frequency for exporters setup pipeline. (#504)
+
+### Changed
+
+- Rename the `exporter` directory to `exporters`.
+  The `go.opentelemetry.io/otel/exporter/trace/jaeger` package was mistakenly released with a `v1.0.0` tag instead of `v0.1.0`.
+  This resulted in all subsequent releases not becoming the default latest.
+  A consequence of this was that all `go get`s pulled in the incompatible `v0.1.0` release of that package when pulling in more recent packages from other otel packages.
+  Renaming the `exporter` directory to `exporters` fixes this issue by renaming the package and therefore clearing any existing dependency tags.
+  Consequentially, this action also renames *all* exporter packages. (#502)
+
+### Removed
+
+- The `CorrelationContextHeader` constant in the `correlation` package is no longer exported. (#503)
+
+## [0.2.2] - 2020-02-27
+
+### Added
+
+- `HTTPSupplier` interface in the propagation API to specify methods to retrieve and store a single value for a key to be associated with a carrier. (#467)
+- `HTTPExtractor` interface in the propagation API to extract information from an `HTTPSupplier` into a context. (#467)
+- `HTTPInjector` interface in the propagation API to inject information into an `HTTPSupplier`. (#467)
+- `Config` and configuring `Option` to the propagator API. (#467)
+- `Propagators` interface in the propagation API to contain the set of injectors and extractors for all supported carrier formats. (#467)
+- `HTTPPropagator` interface in the propagation API to inject and extract from an `HTTPSupplier`. (#467)
+- `WithInjectors` and `WithExtractors` functions to the propagator API to configure injectors and extractors to use. (#467)
+- `ExtractHTTP` and `InjectHTTP` functions to apply configured HTTP extractors and injectors to a passed context. (#467)
+- Histogram aggregator. (#433)
+- A `DefaultPropagator` function that returns `trace.TraceContext` as the default context propagator. (#456)
+- `AlwaysParentSample` sampler to the trace API. (#455)
+- `WithNewRoot` option function to the trace API to specify the created span should be considered a root span. (#451)
+
+### Changed
+
+- Renamed `WithMap` to `ContextWithMap` in the correlation package. (#481)
+- Renamed `FromContext` to `MapFromContext` in the correlation package. (#481)
+- Move correlation context propagation to correlation package. (#479)
+- Do not default to putting remote span context into links. (#480)
+- `Tracer.WithSpan` updated to accept `StartOptions`. (#472)
+- Renamed `MetricKind` to `Kind` to not stutter in the type usage. (#432)
+- Renamed the `export` package to `metric` to match directory structure. (#432)
+- Rename the `api/distributedcontext` package to `api/correlation`. (#444)
+- Rename the `api/propagators` package to `api/propagation`. (#444)
+- Move the propagators from the `propagators` package into the `trace` API package. (#444)
+- Update `Float64Gauge`, `Int64Gauge`, `Float64Counter`, `Int64Counter`, `Float64Measure`, and `Int64Measure` metric methods to use value receivers instead of pointers. (#462)
+- Moved all dependencies of tools package to a tools directory. (#466)
+
+### Removed
+
+- Binary propagators. (#467)
+- NOOP propagator. (#467)
+
+### Fixed
+
+- Upgraded `github.com/golangci/golangci-lint` from `v1.21.0` to `v1.23.6` in `tools/`. (#492)
+- Fix a possible nil-dereference crash. (#478)
+- Correct comments for `InstallNewPipeline` in the stdout exporter. (#483)
+- Correct comments for `InstallNewPipeline` in the dogstatsd exporter. (#484)
+- Correct comments for `InstallNewPipeline` in the prometheus exporter. (#482)
+- Initialize `onError` based on `Config` in prometheus exporter. (#486)
+- Correct module name in prometheus exporter README. (#475)
+- Removed tracer name prefix from span names. (#430)
+- Fix `aggregator_test.go` import package comment. (#431)
+- Improved detail in stdout exporter. (#436)
+- Fix a dependency issue (generate target should depend on stringer, not lint target) in Makefile. (#442)
+- Reorders the Makefile targets within the `precommit` target so we generate files and build the code before doing linting, so we can get much nicer errors about syntax errors from the compiler. (#442)
+- Reword function documentation in gRPC plugin. (#446)
+- Send the `span.kind` tag to Jaeger from the jaeger exporter. (#441)
+- Fix `metadataSupplier` in the jaeger exporter to overwrite the header if it exists instead of appending to it. (#441)
+- Upgraded to Go 1.13 in CI. (#465)
+- Correct opentelemetry.io URL in trace SDK documentation. (#464)
+- Refactored reference counting logic in SDK determination of stale records. (#468)
+- Add call to `runtime.Gosched` in instrument `acquireHandle` logic to not block the collector. (#469)
+
+## [0.2.1.1] - 2020-01-13
+
+### Fixed
+
+- Use stateful batcher on Prometheus exporter fixing regression introduced in #395. (#428)
+
+## [0.2.1] - 2020-01-08
+
+### Added
+
+- Global meter forwarding implementation.
+  This enables deferred initialization for metric instruments registered before the first Meter SDK is installed. (#392)
+- Global trace forwarding implementation.
+  This enables deferred initialization for tracers registered before the first Trace SDK is installed. (#406)
+- Standardize export pipeline creation in all exporters. (#395)
+- Testing, organization, and comments for 64-bit field alignment. (#418)
+- Script to tag all modules in the project. (#414)
+
+### Changed
+
+- Renamed `propagation` package to `propagators`. (#362)
+- Renamed `B3Propagator` propagator to `B3`. (#362)
+- Renamed `TextFormatPropagator` propagator to `TextFormat`. (#362)
+- Renamed `BinaryPropagator` propagator to `Binary`. (#362)
+- Renamed `BinaryFormatPropagator` propagator to `BinaryFormat`. (#362)
+- Renamed `NoopTextFormatPropagator` propagator to `NoopTextFormat`. (#362)
+- Renamed `TraceContextPropagator` propagator to `TraceContext`. (#362)
+- Renamed `SpanOption` to `StartOption` in the trace API. (#369)
+- Renamed `StartOptions` to `StartConfig` in the trace API. (#369)
+- Renamed `EndOptions` to `EndConfig` in the trace API. (#369)
+- `Number` now has a pointer receiver for its methods. (#375)
+- Renamed `CurrentSpan` to `SpanFromContext` in the trace API. (#379)
+- Renamed `SetCurrentSpan` to `ContextWithSpan` in the trace API. (#379)
+- Renamed `Message` in Event to `Name` in the trace API. (#389)
+- Prometheus exporter no longer aggregates metrics, instead it only exports them. (#385)
+- Renamed `HandleImpl` to `BoundInstrumentImpl` in the metric API. (#400)
+- Renamed `Float64CounterHandle` to `Float64CounterBoundInstrument` in the metric API. (#400)
+- Renamed `Int64CounterHandle` to `Int64CounterBoundInstrument` in the metric API. (#400)
+- Renamed `Float64GaugeHandle` to `Float64GaugeBoundInstrument` in the metric API. (#400)
+- Renamed `Int64GaugeHandle` to `Int64GaugeBoundInstrument` in the metric API. (#400)
+- Renamed `Float64MeasureHandle` to `Float64MeasureBoundInstrument` in the metric API. (#400)
+- Renamed `Int64MeasureHandle` to `Int64MeasureBoundInstrument` in the metric API. (#400)
+- Renamed `Release` method for bound instruments in the metric API to `Unbind`. (#400)
+- Renamed `AcquireHandle` method for bound instruments in the metric API to `Bind`. (#400)
+- Renamed the `File` option in the stdout exporter to `Writer`. (#404)
+- Renamed all `Options` to `Config` for all metric exports where this wasn't already the case.
+
+### Fixed
+
+- Aggregator import path corrected. (#421)
+- Correct links in README. (#368)
+- The README was updated to match latest code changes in its examples. (#374)
+- Don't capitalize error statements. (#375)
+- Fix ignored errors. (#375)
+- Fix ambiguous variable naming. (#375)
+- Removed unnecessary type casting. (#375)
+- Use named parameters. (#375)
+- Updated release schedule. (#378)
+- Correct http-stackdriver example module name. (#394)
+- Removed the `http.request` span in `httptrace` package. (#397)
+- Add comments in the metrics SDK. (#399)
+- Initialize checkpoint when creating ddsketch aggregator to prevent panic when merging into an empty one. (#402) (#403)
+- Add documentation of compatible exporters in the README. (#405)
+- Typo fix. (#408)
+- Simplify span check logic in SDK tracer implementation. (#419)
+
+## [0.2.0] - 2019-12-03
+
+### Added
+
+- Unary gRPC tracing example. (#351)
+- Prometheus exporter. (#334)
+- Dogstatsd metrics exporter. (#326)
+
+### Changed
+
+- Rename `MaxSumCount` aggregation to `MinMaxSumCount` and add the `Min` interface for this aggregation. (#352)
+- Rename `GetMeter` to `Meter`. (#357)
+- Rename `HTTPTraceContextPropagator` to `TraceContextPropagator`. (#355)
+- Rename `HTTPB3Propagator` to `B3Propagator`. (#355)
+- Move `/global` package to `/api/global`. (#356)
+- Rename `GetTracer` to `Tracer`. (#347)
+
+### Removed
+
+- `SetAttribute` from the `Span` interface in the trace API. (#361)
+- `AddLink` from the `Span` interface in the trace API. (#349)
+- `Link` from the `Span` interface in the trace API. (#349)
+
+### Fixed
+
+- Exclude example directories from coverage report. (#365)
+- Lint make target now implements automatic fixes with `golangci-lint` before a second run to report the remaining issues. (#360)
+- Drop `GO111MODULE` environment variable in Makefile as Go 1.13 is the project's specified minimum version and this environment variable is not needed for that version of Go. (#359)
+- Run the race checker for all tests. (#354)
+- Redundant commands in the Makefile are removed. (#354)
+- Split the `generate` and `lint` targets of the Makefile. (#354)
+- Renames `circle-ci` target to the more generic `ci` in Makefile. (#354)
+- Add example Prometheus binary to gitignore. (#358)
+- Support negative numbers with the `MaxSumCount`. (#335)
+- Resolve race conditions in `push_test.go` identified in #339. (#340)
+- Use `/usr/bin/env bash` as a shebang in scripts rather than `/bin/bash`. (#336)
+- Trace benchmark now tests both `AlwaysSample` and `NeverSample`.
+  Previously it was testing `AlwaysSample` twice. (#325)
+- Trace benchmark now uses a `[]byte` for `TraceID` to fix failing test. (#325)
+- Added a trace benchmark to test variadic functions in `setAttribute` vs `setAttributes`. (#325)
+- The `defaultkeys` batcher was only using the encoded label set as its map key while building a checkpoint.
+  This allowed distinct label sets through, but any metrics sharing a label set could be overwritten or merged incorrectly.
+  This was corrected. (#333)
+
+## [0.1.2] - 2019-11-18
+
+### Fixed
+
+- Optimized the `simplelru` map for attributes to reduce the number of allocations. (#328)
+- Removed unnecessary unslicing of parameters that are already a slice. (#324)
+
+## [0.1.1] - 2019-11-18
+
+This release contains a Metrics SDK with a stdout exporter and supports basic aggregations such as counter, gauge, array, maxsumcount, and ddsketch.
+
+### Added
+
+- Metrics stdout export pipeline. (#265)
+- Array aggregation for raw measure metrics. (#282)
+- `core.Value` now has a `MarshalJSON` method. (#281)
+
+### Removed
+
+- `WithService`, `WithResources`, and `WithComponent` methods of tracers. (#314)
+- Prefix slash in `Tracer.Start()` for the Jaeger example. (#292)
+
+### Changed
+
+- Allocation in LabelSet construction to reduce GC overhead. (#318)
+- `trace.WithAttributes` to append values instead of replacing them. (#315)
+- Use a formula for tolerance in sampling tests. (#298)
+- Move export types into trace and metric-specific sub-directories. (#289)
+- `SpanKind` back to being based on an `int` type. (#288)
+
+### Fixed
+
+- URL to OpenTelemetry website in README. (#323)
+- Name of othttp default tracer. (#321)
+- `ExportSpans` for the stackdriver exporter now handles `nil` context. (#294)
+- CI modules cache to correctly restore/save from/to the cache. (#316)
+- Fix metric SDK race condition between `LoadOrStore` and the assignment `rec.recorder = i.meter.exporter.AggregatorFor(rec)`. (#293)
+- README now reflects the new code structure introduced with these changes. (#291)
+- Make the basic example work. (#279)
+
+## [0.1.0] - 2019-11-04
+
+This is the first release of the open-telemetry go library.
+It contains the API and SDK for trace and meter signals.
+
+### Added
+
+- Initial OpenTelemetry trace and metric API prototypes.
+- Initial OpenTelemetry trace, metric, and export SDK packages.
+- A wireframe bridge to support compatibility with OpenTracing.
+- Example code for a basic, http-stackdriver, http, jaeger, and named tracer setup.
+- Exporters for Jaeger, Stackdriver, and stdout.
+- Propagators for binary, B3, and trace-context protocols.
+- Project information and guidelines in the form of a README and CONTRIBUTING.
+- Tools to build the project and a Makefile to automate the process.
+- Apache-2.0 license.
+- CircleCI build CI manifest files.
+- CODEOWNERS file to track owners of this project.
+ +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.22.0...HEAD +[1.22.0/0.45.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.22.0 +[1.21.0/0.44.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.21.0 +[1.20.0/0.43.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.20.0 +[1.19.0/0.42.0/0.0.7]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0 +[1.19.0-rc.1/0.42.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0-rc.1 +[1.18.0/0.41.0/0.0.6]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.18.0 +[1.17.0/0.40.0/0.0.5]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.17.0 +[1.16.0/0.39.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0 +[1.16.0-rc.1/0.39.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0-rc.1 +[1.15.1/0.38.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.1 +[1.15.0/0.38.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0 +[1.15.0-rc.2/0.38.0-rc.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.2 +[1.15.0-rc.1/0.38.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.1 +[1.14.0/0.37.0/0.0.4]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.14.0 +[1.13.0/0.36.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.13.0 +[1.12.0/0.35.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.12.0 +[1.11.2/0.34.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.2 +[1.11.1/0.33.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.1 +[1.11.0/0.32.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.0 +[0.32.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.2 +[0.32.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.1 +[0.32.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.0 +[1.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.10.0 +[1.9.0/0.0.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.9.0 +[1.8.0/0.31.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.8.0 +[1.7.0/0.30.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.7.0 +[0.29.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.29.0 +[1.6.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.3 +[1.6.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.2 +[1.6.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.1 +[1.6.0/0.28.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.0 +[1.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.5.0 +[1.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.1 +[1.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.0 +[1.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.3.0 +[1.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.2.0 +[1.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.1.0 +[1.0.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.1 +[Metrics 0.24.0]: 
https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.24.0 +[1.0.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0 +[1.0.0-RC3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC3 +[1.0.0-RC2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC2 +[Experimental Metrics v0.22.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.22.0 +[1.0.0-RC1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC1 +[0.20.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.20.0 +[0.19.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.19.0 +[0.18.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.18.0 +[0.17.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.17.0 +[0.16.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.16.0 +[0.15.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.15.0 +[0.14.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.14.0 +[0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.13.0 +[0.12.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.12.0 +[0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.11.0 +[0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.10.0 +[0.9.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.9.0 +[0.8.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.8.0 +[0.7.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.7.0 +[0.6.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.6.0 +[0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.5.0 +[0.4.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.3 +[0.4.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.2 +[0.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.1 +[0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.0 +[0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.3.0 +[0.2.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.3 +[0.2.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.2 +[0.2.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1.1 +[0.2.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1 +[0.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.0 +[0.1.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.2 +[0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1 +[0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0 + +[Go 1.20]: https://go.dev/doc/go1.20 +[Go 1.19]: https://go.dev/doc/go1.19 +[Go 1.18]: https://go.dev/doc/go1.18 + +[metric API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric +[metric SDK]:https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric +[trace API]:https://pkg.go.dev/go.opentelemetry.io/otel/trace diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS new file mode 100644 index 00000000..62374000 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -0,0 +1,17 @@ +##################################################### +# +# List of approvers for this repository 
+#
+#####################################################
+#
+# Learn about membership in OpenTelemetry community:
+#  https://github.com/open-telemetry/community/blob/main/community-membership.md
+#
+#
+# Learn about CODEOWNERS file format:
+#  https://help.github.com/en/articles/about-code-owners
+#
+
+* @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu
+
+CODEOWNERS @MrAlias @MadVikingGod @pellared
\ No newline at end of file
diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
new file mode 100644
index 00000000..31857a61
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
@@ -0,0 +1,645 @@
+# Contributing to opentelemetry-go
+
+The Go special interest group (SIG) meets regularly. See the
+OpenTelemetry
+[community](https://github.com/open-telemetry/community#golang-sdk)
+repo for information on this and other language SIGs.
+
+See the [public meeting
+notes](https://docs.google.com/document/d/1E5e7Ld0NuU1iVvf-42tOBpu2VBBLYnh73GJuITGJTTU/edit)
+for a summary description of past meetings. To request edit access,
+join the meeting or get in touch on
+[Slack](https://cloud-native.slack.com/archives/C01NPAXACKT).
+
+## Development
+
+You can view and edit the source code by cloning this repository:
+
+```sh
+git clone https://github.com/open-telemetry/opentelemetry-go.git
+```
+
+Run `make test` to run the tests instead of `go test`.
+
+There are some generated files checked into the repo. To make sure
+that the generated files are up-to-date, run `make` (or `make
+precommit` - the `precommit` target is the default).
+
+The `precommit` target also fixes the formatting of the code and
+checks the status of the go module files.
+
+Additionally, there is a `codespell` target that checks for common
+typos in the code. It is not run by default, but you can run it
+manually with `make codespell`. It will set up a virtual environment
+in `venv` and install `codespell` there.
+
+If after running `make precommit` the output of `git status` contains
+`nothing to commit, working tree clean` then it means that everything
+is up-to-date and properly formatted.
+
+## Pull Requests
+
+### How to Send Pull Requests
+
+Everyone is welcome to contribute code to `opentelemetry-go` via
+GitHub pull requests (PRs).
+
+To create a new PR, fork the project in GitHub and clone the upstream
+repo:
+
+```sh
+go get -d go.opentelemetry.io/otel
+```
+
+(This may print some warning about "build constraints exclude all Go
+files", just ignore it.)
+
+This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. You
+can alternatively use `git` directly with:
+
+```sh
+git clone https://github.com/open-telemetry/opentelemetry-go
+```
+
+(Note that `git clone` is *not* using the `go.opentelemetry.io/otel` name -
+that name is a kind of redirector to GitHub that `go get` can
+understand, but `git` does not.)
+
+This would put the project in the `opentelemetry-go` directory in the
+current working directory.
+
+Enter the newly created directory and add your fork as a new remote:
+
+```sh
+git remote add <YOUR_FORK> git@github.com:<YOUR_GITHUB_USERNAME>/opentelemetry-go
+```
+
+Check out a new branch, make modifications, run linters and tests, update
+`CHANGELOG.md`, and push the branch to your fork:
+
+```sh
+git checkout -b <YOUR_BRANCH_NAME>
+# edit files
+# update changelog
+make precommit
+git add -p
+git commit
+git push <YOUR_FORK> <YOUR_BRANCH_NAME>
+```
+
+Open a pull request against the main `opentelemetry-go` repo.
+Be sure to add the pull request ID to the entry you added to `CHANGELOG.md`.
+
+Avoid rebasing and force-pushing to your branch to facilitate reviewing the pull request.
+Rewriting Git history makes it difficult to keep track of iterations during code review.
+All pull requests are squashed to a single commit upon merge to `main`.
+
+### How to Receive Comments
+
+* If the PR is not ready for review, please put `[WIP]` in the title,
+  tag it as `work-in-progress`, or mark it as
+  [`draft`](https://github.blog/2019-02-14-introducing-draft-pull-requests/).
+* Make sure the CLA is signed and CI is clear.
+
+### How to Get PRs Merged
+
+A PR is considered **ready to merge** when:
+
+* It has received two qualified approvals[^1].
+
+  This is not enforced through automation, but needs to be validated by the
+  maintainer merging.
+  * The qualified approvals need to be from [Approver]s/[Maintainer]s
+    affiliated with different companies. Two qualified approvals from
+    [Approver]s or [Maintainer]s affiliated with the same company counts as a
+    single qualified approval.
+  * PRs introducing changes that have already been discussed and consensus
+    reached only need one qualified approval. The discussion and resolution
+    needs to be linked to the PR.
+  * Trivial changes[^2] only need one qualified approval.
+
+* All feedback has been addressed.
+  * All PR comments and suggestions are resolved.
+  * All GitHub Pull Request reviews with a status of "Request changes" have
+    been addressed. Another review by the objecting reviewer with a different
+    status can be submitted to clear the original review, or the review can be
+    dismissed by a [Maintainer] when the issues from the original review have
+    been addressed.
+  * Any comments or reviews that cannot be resolved between the PR author and
+    reviewers can be submitted to the community [Approver]s and [Maintainer]s
+    during the weekly SIG meeting. If consensus is reached among the
+    [Approver]s and [Maintainer]s during the SIG meeting the objections to the
+    PR may be dismissed or resolved or the PR closed by a [Maintainer].
+  * Any substantive changes to the PR require existing Approval reviews be
+    cleared unless the approver explicitly states that their approval persists
+    across changes. This includes changes resulting from other feedback.
+    [Approver]s and [Maintainer]s can help in clearing reviews and they should
+    be consulted if there are any questions.
+
+* The PR branch is up to date with the base branch it is merging into.
+  * To ensure this does not block the PR, it should be configured to allow
+    maintainers to update it.
+
+* It has been open for review for at least one working day. This gives people
+  reasonable time to review.
+  * Trivial changes[^2] do not have to wait for one day and may be merged with
+    a single [Maintainer]'s approval.
+
+* All required GitHub workflows have succeeded.
+* An urgent fix can take exception as long as it has been actively communicated
+  among [Maintainer]s.
+
+Any [Maintainer] can merge the PR once the above criteria have been met.
+
+[^1]: A qualified approval is a GitHub Pull Request review with "Approve"
+  status from an OpenTelemetry Go [Approver] or [Maintainer].
+[^2]: Trivial changes include: typo corrections, cosmetic non-substantive
+  changes, documentation corrections or updates, dependency updates, etc.
+
+## Design Choices
+
+As with other OpenTelemetry clients, opentelemetry-go follows the
+[OpenTelemetry Specification](https://opentelemetry.io/docs/specs/otel).
+
+It's especially valuable to read through the [library
+guidelines](https://opentelemetry.io/docs/specs/otel/library-guidelines).
+
+### Focus on Capabilities, Not Structure Compliance
+
+OpenTelemetry is an evolving specification, one where the desires and
+use cases are clear, but the methods to satisfy those use cases are
+not.
+
+As such, contributions should provide functionality and behavior that
+conforms to the specification, but the interface and structure are
+flexible.
+
+It is preferable to have contributions follow the idioms of the
+language rather than conform to specific API names or argument
+patterns in the spec.
+
+For a deeper discussion, see
+[this](https://github.com/open-telemetry/opentelemetry-specification/issues/165).
+
+## Documentation
+
+Each (non-internal, non-test) package must be documented using
+[Go Doc Comments](https://go.dev/doc/comment),
+preferably in a `doc.go` file.
+
+Prefer using [Examples](https://pkg.go.dev/testing#hdr-Examples)
+instead of putting code snippets in Go doc comments.
+In some cases, you can even create [Testable Examples](https://go.dev/blog/examples).
+
+You can install and run a "local Go Doc site" in the following way:
+
+```sh
+go install golang.org/x/pkgsite/cmd/pkgsite@latest
+pkgsite
+```
+
+[`go.opentelemetry.io/otel/metric`](https://pkg.go.dev/go.opentelemetry.io/otel/metric)
+is an example of a very well-documented package.
+
+## Style Guide
+
+One of the primary goals of this project is that it is actually used by
+developers. With this goal in mind the project strives to build
+user-friendly and idiomatic Go code adhering to the Go community's best
+practices.
+
+For a non-comprehensive but foundational overview of these best practices
+the [Effective Go](https://golang.org/doc/effective_go.html) documentation
+is an excellent starting place.
+
+As a convenience for developers building this project the `make precommit`
+target will format, lint, validate, and in some cases fix the changes you
+plan to submit. This check will need to pass for your changes to be able
+to be merged.
+
+In addition to idiomatic Go, the project has adopted certain standards for
+implementations of common patterns. These standards should be followed as a
+default, and if they are not followed documentation needs to be included as
+to the reasons why.
+
+### Configuration
+
+When creating an instantiation function for a complex `type T struct`, it is
+useful to allow a variable number of options to be applied. However, the strong
+type system of Go restricts the function design options. There are a few ways
+to solve this problem, but we have landed on the following design.
+
+#### `config`
+
+Configuration should be held in a `struct` named `config`, or prefixed with
+the specific type name this configuration applies to if there are multiple
+`config` in the package. This type must contain configuration options.
+
+```go
+// config contains configuration options for a thing.
+type config struct {
+	// options ...
+}
+```
+
+In general the `config` type will not need to be used externally to the
+package and should be unexported. If, however, it is expected that the user
+will likely want to build custom options for the configuration, the `config`
+should be exported. Please include in the documentation for the `config`
+how the user can extend the configuration.
+
+It is important that internal `config` are not shared across package boundaries,
+meaning a `config` from one package should not be used directly by another. The
+one exception is the API packages. The configs from the base API, e.g.
+`go.opentelemetry.io/otel/trace.TracerConfig` and
+`go.opentelemetry.io/otel/metric.InstrumentConfig`, are intended to be consumed
+by the SDK, therefore it is expected that these are exported.
+
+When a config is exported we want to maintain forward and backward
+compatibility; to achieve this no fields should be exported but should
+instead be accessed by methods.
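+For illustration, a minimal sketch of this accessor pattern (the field and
+method shown here are hypothetical, not the actual `TracerConfig` API):
+
+```go
+// TracerConfig is an exported config whose fields stay unexported.
+type TracerConfig struct {
+	instrumentationVersion string
+}
+
+// InstrumentationVersion returns the configured instrumentation version.
+// Accessing fields only through methods keeps the struct layout free to
+// change without breaking users.
+func (c TracerConfig) InstrumentationVersion() string {
+	return c.instrumentationVersion
+}
+```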
+
+Optionally, it is common to include a `newConfig` function (with the same
+naming scheme). This function applies any default setting and loops over
+all options to create a configured `config`.
+
+```go
+// newConfig returns an appropriately configured config.
+func newConfig(options ...Option) config {
+	// Set default values for config.
+	config := config{/* […] */}
+	for _, option := range options {
+		config = option.apply(config)
+	}
+	// Perform any validation here.
+	return config
+}
+```
+
+If validation of the `config` options is also performed, this function can
+additionally return an error that is expected to be handled by the
+instantiation function or propagated to the user.
+
+Given the design goal of not having the user need to work with the `config`,
+the `newConfig` function should also be unexported.
+
+#### `Option`
+
+To set the value of the options a `config` contains, a corresponding
+`Option` interface type should be used.
+
+```go
+type Option interface {
+	apply(config) config
+}
+```
+
+Having `apply` unexported makes sure that it will not be used externally.
+Moreover, the interface becomes sealed so the user cannot easily implement
+the interface on their own.
+
+The `apply` method should return a modified version of the passed config.
+This approach, instead of passing a pointer, is used to prevent the config from being allocated to the heap.
+
+The name of the interface should be prefixed in the same way the
+corresponding `config` is (if at all).
+
+#### Options
+
+All user-configurable options for a `config` must have a related unexported
+implementation of the `Option` interface and an exported configuration
+function that wraps this implementation.
+
+The wrapping function name should be prefixed with `With*` (or in the
+special case of boolean options `Without*`) and should have the following
+function signature.
+
+```go
+func With*(…) Option { … }
+```
+
+##### `bool` Options
+
+```go
+type defaultFalseOption bool
+
+func (o defaultFalseOption) apply(c config) config {
+	c.Bool = bool(o)
+	return c
+}
+
+// WithOption sets a T to have an option included.
+func WithOption() Option {
+	return defaultFalseOption(true)
+}
+```
+
+```go
+type defaultTrueOption bool
+
+func (o defaultTrueOption) apply(c config) config {
+	c.Bool = bool(o)
+	return c
+}
+
+// WithoutOption sets a T to have the Bool option excluded.
+func WithoutOption() Option {
+	return defaultTrueOption(false)
+}
+```
+
+##### Declared Type Options
+
+```go
+type myTypeOption struct {
+	MyType MyType
+}
+
+func (o myTypeOption) apply(c config) config {
+	c.MyType = o.MyType
+	return c
+}
+
+// WithMyType sets T to include MyType.
+func WithMyType(t MyType) Option {
+	return myTypeOption{t}
+}
+```
+
+##### Functional Options
+
+```go
+type optionFunc func(config) config
+
+func (fn optionFunc) apply(c config) config {
+	return fn(c)
+}
+
+// WithMyType sets t as MyType.
+func WithMyType(t MyType) Option {
+	return optionFunc(func(c config) config {
+		c.MyType = t
+		return c
+	})
+}
+```
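+As noted above, `newConfig` can also validate options; a sketch, assuming a
+hypothetical `config` with a `Weight` field that must stay positive (using
+the standard `errors` package):
+
+```go
+// newConfig returns a configured config, rejecting invalid option values.
+func newConfig(options ...Option) (config, error) {
+	cfg := config{Weight: 1.0} // defaults
+	for _, option := range options {
+		cfg = option.apply(cfg)
+	}
+	if cfg.Weight <= 0 {
+		return config{}, errors.New("config: weight must be positive")
+	}
+	return cfg, nil
+}
```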
+
+#### Instantiation
+
+This configuration pattern is used to configure instantiation with a `NewT`
+function.
+
+```go
+func NewT(options ...Option) T {…}
+```
+
+Any required parameters can be declared before the variadic `options`.
+
+#### Dealing with Overlap
+
+Sometimes there are multiple complex `struct` types that share common
+configuration and also have distinct configuration. To avoid repeated
+portions of `config`s, a common `config` can be used with the union of
+options being handled with the `Option` interface.
+
+For example:
+
+```go
+// config holds options for all animals.
+type config struct {
+	Weight      float64
+	Color       string
+	MaxAltitude float64
+}
+
+// DogOption applies Dog-specific options.
+type DogOption interface {
+	applyDog(config) config
+}
+
+// BirdOption applies Bird-specific options.
+type BirdOption interface {
+	applyBird(config) config
+}
+
+// Option applies options for all animals.
+type Option interface {
+	BirdOption
+	DogOption
+}
+
+type weightOption float64
+
+func (o weightOption) applyDog(c config) config {
+	c.Weight = float64(o)
+	return c
+}
+
+func (o weightOption) applyBird(c config) config {
+	c.Weight = float64(o)
+	return c
+}
+
+func WithWeight(w float64) Option { return weightOption(w) }
+
+type furColorOption string
+
+func (o furColorOption) applyDog(c config) config {
+	c.Color = string(o)
+	return c
+}
+
+func WithFurColor(c string) DogOption { return furColorOption(c) }
+
+type maxAltitudeOption float64
+
+func (o maxAltitudeOption) applyBird(c config) config {
+	c.MaxAltitude = float64(o)
+	return c
+}
+
+func WithMaxAltitude(a float64) BirdOption { return maxAltitudeOption(a) }
+
+func NewDog(name string, o ...DogOption) Dog {…}
+func NewBird(name string, o ...BirdOption) Bird {…}
+```
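+A hypothetical caller can then mix shared and animal-specific options (the
+`Dog` and `Bird` types are assumed to exist):
+
+```go
+func exampleOptions() {
+	// WithWeight returns an Option, which satisfies both DogOption
+	// and BirdOption, so it can configure either animal.
+	dog := NewDog("Rex", WithWeight(12.5), WithFurColor("brown"))
+	bird := NewBird("Robin", WithWeight(0.02), WithMaxAltitude(1500))
+	_, _ = dog, bird
+}
+```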
+
+### Interfaces
+
+To allow other developers to better comprehend the code, it is important
+to ensure it is sufficiently documented. One simple measure that contributes
+to this aim is self-documenting by naming method parameters. Therefore,
+where appropriate, methods of every exported interface type should have
+their parameters appropriately named.
+
+#### Interface Stability
+
+All exported stable interfaces that include the following warning in their
+documentation are allowed to be extended with additional methods.
+
+> Warning: methods may be added to this interface in minor releases.
+
+These interfaces are defined by the OpenTelemetry specification and will be
+updated as the specification evolves.
+
+Otherwise, stable interfaces MUST NOT be modified.
+
+#### How to Change Specification Interfaces
+
+When an API change must be made, we will update the SDK with the new method one
+release before the API change. This will allow the SDK one version before the
+API change to work seamlessly with the new API.
+
+If an incompatible version of the SDK is used with the new API the application
+will fail to compile.
+
+#### How Not to Change Specification Interfaces
+
+We have explored using a v2 of the API to change interfaces and found that there
+was no way to introduce a v2 and have it work seamlessly with the v1 of the API.
+Problems happened with libraries that upgraded to v2 when an application did not,
+and would not produce any telemetry.
+
+More detail of the approaches considered and their limitations can be found in
+the [Use a V2 API to evolve interfaces](https://github.com/open-telemetry/opentelemetry-go/issues/3920)
+issue.
+
+#### How to Change Other Interfaces
+
+If new functionality is needed for an interface that cannot be changed it MUST
+be added by including an additional interface. That added interface can be a
+simple interface for the specific functionality that you want to add or it can
+be a super-set of the original interface. For example, if you wanted to add a
+`Close` method to the `Exporter` interface:
+
+```go
+type Exporter interface {
+	Export()
+}
+```
+
+A new interface, `Closer`, can be added:
+
+```go
+type Closer interface {
+	Close()
+}
+```
+
+Code that is passed the `Exporter` interface can now check to see if the passed
+value also satisfies the new interface. E.g.
+
+```go
+func caller(e Exporter) {
+	/* ... */
+	if c, ok := e.(Closer); ok {
+		c.Close()
+	}
+	/* ... */
+}
+```
+
+Alternatively, a new interface type that is the super-set of an `Exporter` can be created.
+
+```go
+type ClosingExporter interface {
+	Exporter
+	Closer
+}
+```
+
+This new type can be used similar to the simple interface above in that a
+passed `Exporter` type can be asserted to satisfy the `ClosingExporter` type
+and the `Close` method called.
+
+This super-set approach can be useful if there is explicit behavior that needs
+to be coupled with the original type and passed as a unified type to a new
+function, but, because of this coupling, it also limits the applicability of
+the added functionality. If there exist other interfaces where this
+functionality should be added, each one will need its own super-set
+interface and will duplicate the pattern. For this reason, the simple targeted
+interface that defines the specific functionality should be preferred.
+
+### Testing
+
+The tests should never leak goroutines.
+
+Use the term `ConcurrentSafe` in the test name when it aims to verify the
+absence of race conditions.
+
+### Internal packages
+
+The use of internal packages should be scoped to a single module. A sub-module
+should never import from a parent internal package. This creates a coupling
+between the two modules where a user can upgrade the parent without the child,
+and if the internal package API has changed it will fail to upgrade[^3].
+
+There are two known exceptions to this rule:
+
+- `go.opentelemetry.io/otel/internal/global`
+  - This package manages global state for all of opentelemetry-go. It needs to
+    be a single package in order to ensure the uniqueness of the global state.
+- `go.opentelemetry.io/otel/internal/baggage`
+  - This package provides values in a `context.Context` that need to be
+    recognized by `go.opentelemetry.io/otel/baggage` and
+    `go.opentelemetry.io/otel/bridge/opentracing` but remain private.
+
+If you have duplicate code in multiple modules, make that code into a Go
+template stored in `go.opentelemetry.io/otel/internal/shared` and use [gotmpl]
+to render the templates in the desired locations. See [#4404] for an example of
+this.
+
+[^3]: https://github.com/open-telemetry/opentelemetry-go/issues/3548
+
+### Ignoring context cancellation
+
+OpenTelemetry API implementations need to ignore the cancellation of the context that is
+passed when recording a value (e.g. starting a span, recording a measurement, emitting a log).
+Recording methods should not return an error describing the cancellation state of the context
+when they complete, nor should they abort any work.
+
+This rule may not apply if the OpenTelemetry specification defines a timeout mechanism for
+the method. In that case the context cancellation can be used for the timeout with the
+restriction that this behavior is documented for the method. Otherwise, timeouts
+are expected to be handled by the user calling the API, not the implementation.
+
+Stoppage of the telemetry pipeline is handled by calling the appropriate `Shutdown` method
+of a provider. It is assumed the context passed from a user is not used for this purpose.
+
+Outside of the direct recording of telemetry from the API (e.g. exporting telemetry,
+force flushing telemetry, shutting down a signal provider) the context cancellation
+should be honored. This means all work done on behalf of the user-provided context
+should be canceled.
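+As a sketch of this rule (the `span` type and its fields here are
+illustrative, not the SDK's actual implementation):
+
+```go
+// AddEvent records an event even when ctx is already canceled.
+func (s *span) AddEvent(ctx context.Context, name string) {
+	// Deliberately no ctx.Err() check: recording must neither be
+	// aborted nor report an error due to context cancellation.
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.events = append(s.events, event{name: name, time: time.Now()})
+}
+```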
+
+## Approvers and Maintainers
+
+### Approvers
+
+- [Evan Torrie](https://github.com/evantorrie), Verizon Media
+- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics
+- [David Ashpole](https://github.com/dashpole), Google
+- [Chester Cheung](https://github.com/hanyuancheung), Tencent
+- [Damien Mathieu](https://github.com/dmathieu), Elastic
+- [Anthony Mirabella](https://github.com/Aneurysm9), AWS
+
+### Maintainers
+
+- [Aaron Clawson](https://github.com/MadVikingGod), LightStep
+- [Robert Pająk](https://github.com/pellared), Splunk
+- [Tyler Yahn](https://github.com/MrAlias), Splunk
+
+### Emeritus
+
+- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb
+- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep
+- [Josh MacDonald](https://github.com/jmacd), LightStep
+
+### Become an Approver or a Maintainer
+
+See the [community membership document in OpenTelemetry community
+repo](https://github.com/open-telemetry/community/blob/main/community-membership.md).
+
+[Approver]: #approvers
+[Maintainer]: #maintainers
+[gotmpl]: https://pkg.go.dev/go.opentelemetry.io/build-tools/gotmpl
+[#4404]: https://github.com/open-telemetry/opentelemetry-go/pull/4404
diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile
new file mode 100644
index 00000000..35fc1899
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/Makefile
@@ -0,0 +1,318 @@
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+TOOLS_MOD_DIR := ./internal/tools
+
+ALL_DOCS := $(shell find . -name '*.md' -type f | sort)
+ALL_GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort)
+OTEL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(ALL_GO_MOD_DIRS))
+ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | grep -E -v '^./example|^$(TOOLS_MOD_DIR)' | sort)
+
+GO = go
+TIMEOUT = 60
+
+.DEFAULT_GOAL := precommit
+
+.PHONY: precommit ci
+precommit: generate dependabot-generate license-check misspell go-mod-tidy golangci-lint-fix test-default
+ci: generate dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage
+
+# Tools
+
+TOOLS = $(CURDIR)/.tools
+
+$(TOOLS):
+	@mkdir -p $@
+$(TOOLS)/%: | $(TOOLS)
+	cd $(TOOLS_MOD_DIR) && \
+	$(GO) build -o $@ $(PACKAGE)
+
+MULTIMOD = $(TOOLS)/multimod
+$(TOOLS)/multimod: PACKAGE=go.opentelemetry.io/build-tools/multimod
+
+SEMCONVGEN = $(TOOLS)/semconvgen
+$(TOOLS)/semconvgen: PACKAGE=go.opentelemetry.io/build-tools/semconvgen
+
+CROSSLINK = $(TOOLS)/crosslink
+$(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink
+
+SEMCONVKIT = $(TOOLS)/semconvkit
+$(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit
+
+DBOTCONF = $(TOOLS)/dbotconf
+$(TOOLS)/dbotconf: PACKAGE=go.opentelemetry.io/build-tools/dbotconf
+
+GOLANGCI_LINT = $(TOOLS)/golangci-lint
+$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint
+
+MISSPELL = $(TOOLS)/misspell
+$(TOOLS)/misspell: PACKAGE=github.com/client9/misspell/cmd/misspell
+
+GOCOVMERGE = $(TOOLS)/gocovmerge
+$(TOOLS)/gocovmerge: PACKAGE=github.com/wadey/gocovmerge
+
+STRINGER = $(TOOLS)/stringer
+$(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer
+
+PORTO = $(TOOLS)/porto
+$(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto
+
+GOJQ = $(TOOLS)/gojq
+$(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq
+
+GOTMPL = $(TOOLS)/gotmpl
+$(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl
+
+GORELEASE = $(TOOLS)/gorelease
+$(GORELEASE): PACKAGE=golang.org/x/exp/cmd/gorelease
+
+GOVULNCHECK = $(TOOLS)/govulncheck
+$(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck
+
+.PHONY: tools
+tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE)
+
+# Virtualized python tools via docker
+
+# The directory where the virtual environment is created.
+VENVDIR := venv
+
+# The directory where the python tools are installed.
+PYTOOLS := $(VENVDIR)/bin
+
+# The pip executable in the virtual environment.
+PIP := $(PYTOOLS)/pip
+
+# The directory in the docker image where the current directory is mounted.
+WORKDIR := /workdir
+
+# The python image to use for the virtual environment.
+PYTHONIMAGE := python:3.11.3-slim-bullseye
+
+# Run the python image with the current directory mounted.
+DOCKERPY := docker run --rm -v "$(CURDIR):$(WORKDIR)" -w $(WORKDIR) $(PYTHONIMAGE)
+
+# Create a virtual environment for Python tools.
+$(PYTOOLS):
+# The `--upgrade` flag is needed to ensure that the virtual environment is
+# created with the latest pip version.
+	@$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade pip"
+
+# Install python packages into the virtual environment.
+$(PYTOOLS)/%: | $(PYTOOLS)
+	@$(DOCKERPY) $(PIP) install -r requirements.txt
+
+CODESPELL = $(PYTOOLS)/codespell
+$(CODESPELL): PACKAGE=codespell
+
+# Generate
+
+.PHONY: generate
+generate: go-generate vanity-import-fix
+
+.PHONY: go-generate
+go-generate: $(OTEL_GO_MOD_DIRS:%=go-generate/%)
+go-generate/%: DIR=$*
+go-generate/%: | $(STRINGER) $(GOTMPL)
+	@echo "$(GO) generate $(DIR)/..." \
+		&& cd $(DIR) \
+		&& PATH="$(TOOLS):$${PATH}" $(GO) generate ./...
+
+.PHONY: vanity-import-fix
+vanity-import-fix: | $(PORTO)
+	@$(PORTO) --include-internal -w .
+
+# Generate go.work file for local development.
+.PHONY: go-work
+go-work: | $(CROSSLINK)
+	$(CROSSLINK) work --root=$(shell pwd)
+
+# Build
+
+.PHONY: build
+
+build: $(OTEL_GO_MOD_DIRS:%=build/%) $(OTEL_GO_MOD_DIRS:%=build-tests/%)
+build/%: DIR=$*
+build/%:
+	@echo "$(GO) build $(DIR)/..." \
+		&& cd $(DIR) \
+		&& $(GO) build ./...
+
+build-tests/%: DIR=$*
+build-tests/%:
+	@echo "$(GO) build tests $(DIR)/..." \
+		&& cd $(DIR) \
+		&& $(GO) list ./... \
+		| grep -v third_party \
+		| xargs $(GO) test -vet=off -run xxxxxMatchNothingxxxxx >/dev/null
+
+# Tests
+
+TEST_TARGETS := test-default test-bench test-short test-verbose test-race
+.PHONY: $(TEST_TARGETS) test
+test-default test-race: ARGS=-race
+test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=.
+test-short: ARGS=-short
+test-verbose: ARGS=-v -race
+$(TEST_TARGETS): test
+test: $(OTEL_GO_MOD_DIRS:%=test/%)
+test/%: DIR=$*
+test/%:
+	@echo "$(GO) test -timeout $(TIMEOUT)s $(ARGS) $(DIR)/..." \
+		&& cd $(DIR) \
+		&& $(GO) list ./... \
+		| grep -v third_party \
+		| xargs $(GO) test -timeout $(TIMEOUT)s $(ARGS)
+
+COVERAGE_MODE = atomic
+COVERAGE_PROFILE = coverage.out
+.PHONY: test-coverage
+test-coverage: | $(GOCOVMERGE)
+	@set -e; \
+	printf "" > coverage.txt; \
+	for dir in $(ALL_COVERAGE_MOD_DIRS); do \
+	  echo "$(GO) test -coverpkg=go.opentelemetry.io/otel/... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" $${dir}/..."; \
+	  (cd "$${dir}" && \
+	    $(GO) list ./... \
+	    | grep -v third_party \
+	    | grep -v 'semconv/v.*' \
+	    | xargs $(GO) test -coverpkg=./... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" && \
+	  $(GO) tool cover -html=coverage.out -o coverage.html); \
+	done; \
+	$(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt
+
+# Adding a directory will include all benchmarks in that directory if a filter is not specified.
+BENCHMARK_TARGETS := sdk/trace
+.PHONY: benchmark
+benchmark: $(BENCHMARK_TARGETS:%=benchmark/%)
+BENCHMARK_FILTER = .
+# You can override the filter for a particular directory by adding a rule here.
+benchmark/sdk/trace: BENCHMARK_FILTER = SpanWithAttributes_8/AlwaysSample
+benchmark/%:
+	@echo "$(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(BENCHMARK_FILTER) $*..." \
+		&& cd $* \
+		$(foreach filter, $(BENCHMARK_FILTER), && $(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(filter))
+
+.PHONY: golangci-lint golangci-lint-fix
+golangci-lint-fix: ARGS=--fix
+golangci-lint-fix: golangci-lint
+golangci-lint: $(OTEL_GO_MOD_DIRS:%=golangci-lint/%)
+golangci-lint/%: DIR=$*
+golangci-lint/%: | $(GOLANGCI_LINT)
+	@echo 'golangci-lint $(if $(ARGS),$(ARGS) ,)$(DIR)' \
+		&& cd $(DIR) \
+		&& $(GOLANGCI_LINT) run --allow-serial-runners $(ARGS)
+
+.PHONY: crosslink
+crosslink: | $(CROSSLINK)
+	@echo "Updating intra-repository dependencies in all go modules" \
+		&& $(CROSSLINK) --root=$(shell pwd) --prune
+
+.PHONY: go-mod-tidy
+go-mod-tidy: $(ALL_GO_MOD_DIRS:%=go-mod-tidy/%)
+go-mod-tidy/%: DIR=$*
+go-mod-tidy/%: | crosslink
+	@echo "$(GO) mod tidy in $(DIR)" \
+		&& cd $(DIR) \
+		&& $(GO) mod tidy -compat=1.20
+
+.PHONY: lint-modules
+lint-modules: go-mod-tidy
+
+.PHONY: lint
+lint: misspell lint-modules golangci-lint govulncheck
+
+.PHONY: vanity-import-check
+vanity-import-check: | $(PORTO)
+	@$(PORTO) --include-internal -l . || ( echo "(run: make vanity-import-fix)"; exit 1 )
+
+.PHONY: misspell
+misspell: | $(MISSPELL)
+	@$(MISSPELL) -w $(ALL_DOCS)
+
+.PHONY: govulncheck
+govulncheck: $(OTEL_GO_MOD_DIRS:%=govulncheck/%)
+govulncheck/%: DIR=$*
+govulncheck/%: | $(GOVULNCHECK)
+	@echo "govulncheck ./... in $(DIR)" \
+		&& cd $(DIR) \
+		&& $(GOVULNCHECK) ./...
+
+.PHONY: codespell
+codespell: | $(CODESPELL)
+	@$(DOCKERPY) $(CODESPELL)
+
+.PHONY: license-check
+license-check:
+	@licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! -path './.git/*' ) ; do \
+	           awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=4 { found=1; next } END { if (!found) print FILENAME }' $$f; \
+	   done); \
+	   if [ -n "$${licRes}" ]; then \
+	           echo "license header checking failed:"; echo "$${licRes}"; \
+	           exit 1; \
+	   fi
+
+DEPENDABOT_CONFIG = .github/dependabot.yml
+.PHONY: dependabot-check
+dependabot-check: | $(DBOTCONF)
+	@$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || ( echo "(run: make dependabot-generate)"; exit 1 )
+
+.PHONY: dependabot-generate
+dependabot-generate: | $(DBOTCONF)
+	@$(DBOTCONF) generate > $(DEPENDABOT_CONFIG)
+
+.PHONY: check-clean-work-tree
+check-clean-work-tree:
+	@if ! git diff --quiet; then \
+	  echo; \
+	  echo 'Working tree is not clean, did you forget to run "make precommit"?'; \
+	  echo; \
+	  git status; \
+	  exit 1; \
+	fi
+
+SEMCONVPKG ?= "semconv/"
+.PHONY: semconv-generate
+semconv-generate: | $(SEMCONVGEN) $(SEMCONVKIT)
+	[ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 )
+	[ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 )
+	$(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=span -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
+	$(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
+	$(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=event -p conventionType=event -f event.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
+	$(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
+	$(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)"
+
+.PHONY: gorelease
+gorelease: $(OTEL_GO_MOD_DIRS:%=gorelease/%)
+gorelease/%: DIR=$*
+gorelease/%: | $(GORELEASE)
+	@echo "gorelease in $(DIR):" \
+		&& cd $(DIR) \
+		&& $(GORELEASE) \
+		|| echo ""
+
+.PHONY: prerelease
+prerelease: | $(MULTIMOD)
+	@[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 )
+	$(MULTIMOD) verify && $(MULTIMOD) prerelease -m ${MODSET}
+
+COMMIT ?= "HEAD"
+.PHONY: add-tags
+add-tags: | $(MULTIMOD)
+	@[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 )
+	$(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT}
+
+.PHONY: lint-markdown
+lint-markdown:
+	docker run -v "$(CURDIR):$(WORKDIR)" docker://avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md
diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md
new file mode 100644
index 00000000..44e1bfc9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/README.md
@@ -0,0 +1,108 @@
+# OpenTelemetry-Go
+
+[![CI](https://github.com/open-telemetry/opentelemetry-go/workflows/ci/badge.svg)](https://github.com/open-telemetry/opentelemetry-go/actions?query=workflow%3Aci+branch%3Amain)
+[![codecov.io](https://codecov.io/gh/open-telemetry/opentelemetry-go/coverage.svg?branch=main)](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main)
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel)](https://pkg.go.dev/go.opentelemetry.io/otel)
+[![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel)
+[![Slack](https://img.shields.io/badge/slack-@cncf/otel--go-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01NPAXACKT)
+
+OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/).
+It provides a set of APIs to directly measure performance and behavior of your software and send this data to observability platforms.
+
+## Project Status
+
+| Signal  | Status     |
+|---------|------------|
+| Traces  | Stable     |
+| Metrics | Stable     |
+| Logs    | Design [1] |
+
+- [1]: Currently the logs signal development is in a design phase ([#4696](https://github.com/open-telemetry/opentelemetry-go/issues/4696)).
+  No Logs Pull Requests are currently being accepted.
+
+Progress and status specific to this repository are tracked in our
+[project boards](https://github.com/open-telemetry/opentelemetry-go/projects)
+and
+[milestones](https://github.com/open-telemetry/opentelemetry-go/milestones).
+
+Project versioning information and stability guarantees can be found in the
+[versioning documentation](VERSIONING.md).
+
+### Compatibility
+
+OpenTelemetry-Go ensures compatibility with the current supported versions of
+the [Go language](https://golang.org/doc/devel/release#policy):
+
+> Each major Go release is supported until there are two newer major releases.
+> For example, Go 1.5 was supported until the Go 1.7 release, and Go 1.6 was supported until the Go 1.8 release.
+
+For versions of Go that are no longer supported upstream, opentelemetry-go will
+stop ensuring compatibility with these versions in the following manner:
+
+- A minor release of opentelemetry-go will be made to add support for the new
+  supported release of Go.
+- The following minor release of opentelemetry-go will remove compatibility
+  testing for the oldest (now archived upstream) version of Go. This, and
+  future, releases of opentelemetry-go may include features only supported by
+  the currently supported versions of Go.
+
+Currently, this project supports the following environments.
+
+| OS      | Go Version | Architecture |
+|---------|------------|--------------|
+| Ubuntu  | 1.21       | amd64        |
+| Ubuntu  | 1.20       | amd64        |
+| Ubuntu  | 1.21       | 386          |
+| Ubuntu  | 1.20       | 386          |
+| MacOS   | 1.21       | amd64        |
+| MacOS   | 1.20       | amd64        |
+| Windows | 1.21       | amd64        |
+| Windows | 1.20       | amd64        |
+| Windows | 1.21       | 386          |
+| Windows | 1.20       | 386          |
+
+While this project should work for other systems, no compatibility guarantees
+are made for those systems currently.
+
+## Getting Started
+
+You can find a getting started guide on [opentelemetry.io](https://opentelemetry.io/docs/languages/go/getting-started/).
+
+OpenTelemetry's goal is to provide a single set of APIs to capture distributed
+traces and metrics from your application and send them to an observability
+platform. This project allows you to do just that for applications written in
+Go. There are two steps to this process: instrument your application, and
+configure an exporter.
+
+### Instrumentation
+
+To start capturing distributed traces and metric events from your application
+it first needs to be instrumented. The easiest way to do this is by using an
+instrumentation library for your code. Be sure to check out [the officially
+supported instrumentation
+libraries](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/instrumentation).
+
+If you need to extend the telemetry an instrumentation library provides or want
+to build your own instrumentation for your application directly, you will need
+to use the
+[Go otel](https://pkg.go.dev/go.opentelemetry.io/otel)
+package. The included [examples](./example/) are a good way to see some
+practical uses of this process.
+
+### Export
+
+Now that your application is instrumented to collect telemetry, it needs an
+export pipeline to send that telemetry to an observability platform.
+
+All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters).
+
+| Exporter                              | Metrics | Traces |
+|---------------------------------------|:-------:|:------:|
+| [OTLP](./exporters/otlp/)             |    ✓    |   ✓    |
+| [Prometheus](./exporters/prometheus/) |    ✓    |        |
+| [stdout](./exporters/stdout/)         |    ✓    |   ✓    |
+| [Zipkin](./exporters/zipkin/)         |         |   ✓    |
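+For instance, a minimal trace export pipeline using the stdout exporter could
+be wired up as follows (a sketch, with error handling reduced to panics):
+
+```go
+package main
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+)
+
+func main() {
+	// Create an exporter that writes spans to stdout.
+	exp, err := stdouttrace.New(stdouttrace.WithPrettyPrint())
+	if err != nil {
+		panic(err)
+	}
+	// Install a TracerProvider that batches spans to the exporter.
+	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
+	defer func() { _ = tp.Shutdown(context.Background()) }()
+	otel.SetTracerProvider(tp)
+
+	// Record one example span.
+	_, span := otel.Tracer("example").Start(context.Background(), "hello")
+	span.End()
+}
+```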
+
+## Contributing
+
+See the [contributing documentation](CONTRIBUTING.md).
diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md
new file mode 100644
index 00000000..d2691d0b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/RELEASING.md
@@ -0,0 +1,139 @@
+# Release Process
+
+## Semantic Convention Generation
+
+New versions of the [OpenTelemetry Semantic Conventions] mean new versions of the `semconv` package need to be generated.
+The `semconv-generate` make target is used for this.
+
+1. Check out a local copy of the [OpenTelemetry Semantic Conventions] to the desired release tag.
+2. Pull the latest `otel/semconvgen` image: `docker pull otel/semconvgen:latest`
+3. Run the `make semconv-generate ...` target from this repository.
+
+For example,
+
+```sh
+export TAG="v1.21.0" # Change to the release version you are generating.
+export OTEL_SEMCONV_REPO="/absolute/path/to/opentelemetry/semantic-conventions"
+docker pull otel/semconvgen:latest
+make semconv-generate # Uses the exported TAG and OTEL_SEMCONV_REPO.
+```
+
+This should create a new sub-package of [`semconv`](./semconv).
+Ensure things look correct before submitting a pull request to include the addition.
+
+## Breaking changes validation
+
+You can run `make gorelease` that runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes done in the public API.
+
+You can check/report problems with `gorelease` [here](https://golang.org/issues/26420).
+
+## Pre-Release
+
+First, decide which module sets will be released and update their versions
+in `versions.yaml`. Commit this change to a new branch.
+
+Update go.mod for submodules to depend on the new release which will happen in the next step.
+
+1. Run the `prerelease` make target. It creates a branch
+   `prerelease_<module set>_<new tag>` that will contain all release changes.
+
+   ```
+   make prerelease MODSET=<module set>
+   ```
+
+2. Verify the changes.
+
+   ```
+   git diff ...prerelease_<module set>_<new tag>
+   ```
+
+   This should have changed the version for all modules to be `<new tag>`.
+   If these changes look correct, merge them into your pre-release branch:
+
+   ```
+   git merge prerelease_<module set>_<new tag>
+   ```
+
+3. Update the [Changelog](./CHANGELOG.md).
+   - Make sure all relevant changes for this release are included and are in language that non-contributors to the project can understand.
+     To verify this, you can look directly at the commits since the `<last tag>`.
+
+     ```
+     git --no-pager log --pretty=oneline "<last tag>..HEAD"
+     ```
+
+   - Move all the `Unreleased` changes into a new section following the title scheme (`[<new tag>] - <date of release>`).
+   - Update all the appropriate links at the bottom.
+
+4. Push the changes to upstream and create a Pull Request on GitHub.
+   Be sure to include the curated changes from the [Changelog](./CHANGELOG.md) in the description.
+
+## Tag
+
+Once the Pull Request with all the version changes has been approved and merged it is time to tag the merged commit.
+
+***IMPORTANT***: It is critical you use the same tag that you used in the Pre-Release step!
+Failure to do so will leave things in a broken state. As long as you do not
+change `versions.yaml` between pre-release and this step, things should be fine.
+
+***IMPORTANT***: [There is currently no way to remove an incorrectly tagged version of a Go module](https://github.com/golang/go/issues/34189).
+It is critical you make sure the version you push upstream is correct.
+[Failure to do so will lead to minor emergencies and tough to work around](https://github.com/open-telemetry/opentelemetry-go/issues/331).
+
+1. For each module set that will be released, run the `add-tags` make target
+   using the `<commit hash>` of the commit on the main branch for the merged Pull Request.
+
+   ```
+   make add-tags MODSET=<module set> COMMIT=<commit hash>
+   ```
+
+   It should only be necessary to provide an explicit `COMMIT` value if the
+   current `HEAD` of your working directory is not the correct commit.
+
+2. Push tags to the upstream remote (not your fork: `github.com/open-telemetry/opentelemetry-go.git`).
+   Make sure you push all sub-modules as well.
+
+   ```
+   git push upstream <new tag>
+   git push upstream <submodules-path/new tag>
+   ...
+   ```
+
+## Release
+
+Finally create a Release for the new `<new tag>` on GitHub.
+The release body should include all the release notes from the Changelog for this release.
+
+## Verify Examples
+
+After releasing, verify that examples build outside of the repository.
+
+## Verify Examples
+
+After releasing, verify that the examples build outside of the repository.
+
+```
+./verify_examples.sh
+```
+
+The script copies the examples into a different directory, removes any `replace` declarations in `go.mod`, and builds them.
+This ensures they build with the published release, not the local copy.
+
+## Post-Release
+
+### Contrib Repository
+
+Once verified, be sure to [make a release for the `contrib` repository](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md) that uses this release.
+
+### Website Documentation
+
+Update the [Go instrumentation documentation] in the OpenTelemetry website under [content/en/docs/languages/go].
+Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate.
+
+[OpenTelemetry Semantic Conventions]: https://github.com/open-telemetry/semantic-conventions
+[Go instrumentation documentation]: https://opentelemetry.io/docs/languages/go/
+[content/en/docs/languages/go]: https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/languages/go
+
+### Demo Repository
+
+Bump the dependencies in the following Go services:
+
+- [`accountingservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accountingservice)
+- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkoutservice)
+- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/productcatalogservice)
diff --git a/vendor/go.opentelemetry.io/otel/VERSIONING.md b/vendor/go.opentelemetry.io/otel/VERSIONING.md
new file mode 100644
index 00000000..412f1e36
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/VERSIONING.md
@@ -0,0 +1,224 @@
+# Versioning
+
+This document describes the versioning policy for this repository. This policy
+is designed so the following goals can be achieved.
+
+**Users are provided a codebase of value that is stable and secure.**
+
+## Policy
+
+* Versioning of this project will be idiomatic of a Go project using [Go
+  modules](https://github.com/golang/go/wiki/Modules).
+  * [Semantic import
+    versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning)
+    will be used.
+  * Versions will comply with [semver
+    2.0](https://semver.org/spec/v2.0.0.html) with the following exceptions.
+    * New methods may be added to exported API interfaces. All exported
+      interfaces that fall within this exception will include the following
+      paragraph in their public documentation.
+
+      > Warning: methods may be added to this interface in minor releases.
+
+    * If a module is version `v2` or higher, the major version of the module
+      must be included as a `/vN` at the end of the module paths used in
+      `go.mod` files (e.g., `module go.opentelemetry.io/otel/v2`, `require
+      go.opentelemetry.io/otel/v2 v2.0.1`) and in the package import path
+      (e.g., `import "go.opentelemetry.io/otel/v2/trace"`). This includes the
+      paths used in `go get` commands (e.g., `go get
+      go.opentelemetry.io/otel/v2@v2.0.1`. Note there is both a `/v2` and a
+      `@v2.0.1` in that example. One way to think about it is that the module
+      name now includes the `/v2`, so include `/v2` whenever you are using the
+      module name).
+    * If a module is version `v0` or `v1`, do not include the major version in
+      either the module path or the import path.
+  * Modules will be used to encapsulate signals and components.
+  * Experimental modules still under active development will be versioned at
+    `v0` to imply the stability guarantee defined by
+    [semver](https://semver.org/spec/v2.0.0.html#spec-item-4).
+
+    > Major version zero (0.y.z) is for initial development. Anything MAY
+    > change at any time. The public API SHOULD NOT be considered stable.
+
+  * Mature modules for which we guarantee a stable public API will be versioned
+    with a major version greater than `v0`.
+    * The decision to make a module stable will be made on a case-by-case
+      basis by the maintainers of this project.
+  * Experimental modules will start their versioning at `v0.0.0` and will
+    increment their minor version when backwards incompatible changes are
+    released and increment their patch version when backwards compatible
+    changes are released.
+  * All stable modules that use the same major version number will use the
+    same entire version number.
+    * Stable modules may be released with an incremented minor or patch
+      version even though that module has not been changed, but rather so
+      that it will remain at the same version as other stable modules that
+      did undergo change.
+    * When an experimental module becomes stable, a new stable module version
+      will be released and will include this now stable module. The new
+      stable module version will be an increment of the minor version number
+      and will be applied to all existing stable modules as well as the newly
+      stable module being released.
+* Versioning of the associated [contrib
+  repository](https://github.com/open-telemetry/opentelemetry-go-contrib) of
+  this project will be idiomatic of a Go project using [Go
+  modules](https://github.com/golang/go/wiki/Modules).
+  * [Semantic import
+    versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning)
+    will be used.
+  * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html).
+    * If a module is version `v2` or higher, the
+      major version of the module must be included as a `/vN` at the end of the
+      module paths used in `go.mod` files (e.g., `module
+      go.opentelemetry.io/contrib/instrumentation/host/v2`, `require
+      go.opentelemetry.io/contrib/instrumentation/host/v2 v2.0.1`) and in the
+      package import path (e.g., `import
+      "go.opentelemetry.io/contrib/instrumentation/host/v2"`). This includes
+      the paths used in `go get` commands (e.g., `go get
+      go.opentelemetry.io/contrib/instrumentation/host/v2@v2.0.1`. Note there
+      is both a `/v2` and a `@v2.0.1` in that example. One way to think about
+      it is that the module name now includes the `/v2`, so include `/v2`
+      whenever you are using the module name).
+    * If a module is version `v0` or `v1`, do not include the major version
+      in either the module path or the import path.
+  * In addition to public APIs, telemetry produced by stable instrumentation
+    will remain stable and backwards compatible. This is to avoid breaking
+    alerts and dashboards.
+  * Modules will be used to encapsulate instrumentation, detectors, exporters,
+    propagators, and any other independent sets of related components.
+  * Experimental modules still under active development will be versioned at
+    `v0` to imply the stability guarantee defined by
+    [semver](https://semver.org/spec/v2.0.0.html#spec-item-4).
+
+    > Major version zero (0.y.z) is for initial development. Anything MAY
+    > change at any time. The public API SHOULD NOT be considered stable.
+
+  * Mature modules for which we guarantee a stable public API and telemetry will
+    be versioned with a major version greater than `v0`.
+  * Experimental modules will start their versioning at `v0.0.0` and will
+    increment their minor version when backwards incompatible changes are
+    released and increment their patch version when backwards compatible
+    changes are released.
+  * Stable contrib modules cannot depend on experimental modules from this
+    project.
+  * All stable contrib modules that share a major version with this project
+    will use the same entire version as this project.
+    * Stable modules may be released with an incremented minor or patch
+      version even though that module's code has not been changed. Instead,
+      the only change that will have been included is an update to that
+      module's dependency on this project's stable APIs.
+    * When an experimental module in contrib becomes stable, a new stable
+      module version will be released and will include this now stable
+      module. The new stable module version will be an increment of the minor
+      version number and will be applied to all existing stable contrib
+      modules, this project's modules, and the newly stable module being
+      released.
+  * Contrib modules will be kept up to date with this project's releases.
+    * Due to the dependency contrib modules have on this project's modules,
+      the release of stable contrib modules matching the released version
+      number will be staggered after this project's release. There is no
+      explicit time guarantee for how long after this project's release the
+      contrib release will be. Effort should be made to keep them as close in
+      time as possible.
+    * No additional stable release in this project can be made until the
+      contrib repository has a matching stable release.
+    * No release can be made in the contrib repository after this project's
+      stable release except for a stable release of the contrib repository.
+* GitHub releases will be made for all releases.
+* Go modules will be made available at Go package mirrors.
+
+## Example Versioning Lifecycle
+
+To better understand the implementation of the above policy, the following
+example is provided. This project is simplified to include only the following
+modules and their versions:
+
+* `otel`: `v0.14.0`
+* `otel/trace`: `v0.14.0`
+* `otel/metric`: `v0.14.0`
+* `otel/baggage`: `v0.14.0`
+* `otel/sdk/trace`: `v0.14.0`
+* `otel/sdk/metric`: `v0.14.0`
+
+The `otel/trace`, `otel/baggage`, and `otel/sdk/trace` modules have been
+developed to the point where they should be considered for a stable release.
+The `otel/metric` and `otel/sdk/metric` modules are still under active
+development and the `otel` module depends on both `otel/trace` and
+`otel/metric`.
+
+The `otel` package is refactored to remove its dependencies on `otel/metric` so
+it can be released as stable as well. With that done, the following release
+candidates are made:
+
+* `otel`: `v1.0.0-RC1`
+* `otel/trace`: `v1.0.0-RC1`
+* `otel/baggage`: `v1.0.0-RC1`
+* `otel/sdk/trace`: `v1.0.0-RC1`
+
+The `otel/metric` and `otel/sdk/metric` modules remain at `v0.14.0`.
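+
+As a rough illustration (the module path is real, but the version is from this
+example only), a consumer could opt in to such a release candidate explicitly
+with the `go` tool:
+
+```
+go get go.opentelemetry.io/otel@v1.0.0-RC1
+```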
+
+A few minor issues are discovered in the `otel/trace` package. These issues are
+resolved with some minor, but backwards incompatible, changes and are released
+as a second release candidate:
+
+* `otel`: `v1.0.0-RC2`
+* `otel/trace`: `v1.0.0-RC2`
+* `otel/baggage`: `v1.0.0-RC2`
+* `otel/sdk/trace`: `v1.0.0-RC2`
+
+Notice that all module version numbers are incremented to adhere to our
+versioning policy.
+
+After these release candidates have been evaluated to satisfaction, they are
+released as version `v1.0.0`.
+
+* `otel`: `v1.0.0`
+* `otel/trace`: `v1.0.0`
+* `otel/baggage`: `v1.0.0`
+* `otel/sdk/trace`: `v1.0.0`
+
+Since both the `go` utility and the Go module system support [the semantic
+versioning definition of
+precedence](https://semver.org/spec/v2.0.0.html#spec-item-11), this release
+will correctly be interpreted as the successor to the previous release
+candidates.
+
+Active development of this project continues. The `otel/metric` module now has
+backwards incompatible changes to its API that need to be released and the
+`otel/baggage` module has a minor bug fix that needs to be released. The
+following release is made:
+
+* `otel`: `v1.0.1`
+* `otel/trace`: `v1.0.1`
+* `otel/metric`: `v0.15.0`
+* `otel/baggage`: `v1.0.1`
+* `otel/sdk/trace`: `v1.0.1`
+* `otel/sdk/metric`: `v0.15.0`
+
+Notice that, again, all stable module versions are incremented in unison and
+the `otel/sdk/metric` package, which depends on the `otel/metric` package, also
+bumped its version. This bump of the `otel/sdk/metric` package makes sense
+given their coupling, though it is not explicitly required by our versioning
+policy.
+
+As we progress, the `otel/metric` and `otel/sdk/metric` packages have reached a
+point where they should be evaluated for stability. The `otel` module is
+reintegrated with the `otel/metric` package and the following release is made:
+
+* `otel`: `v1.1.0-RC1`
+* `otel/trace`: `v1.1.0-RC1`
+* `otel/metric`: `v1.1.0-RC1`
+* `otel/baggage`: `v1.1.0-RC1`
+* `otel/sdk/trace`: `v1.1.0-RC1`
+* `otel/sdk/metric`: `v1.1.0-RC1`
+
+All the modules are evaluated and determined to be viable for a stable release.
+They are then released as version `v1.1.0` (the minor version is incremented to
+indicate the addition of the new signal).
+
+* `otel`: `v1.1.0`
+* `otel/trace`: `v1.1.0`
+* `otel/metric`: `v1.1.0`
+* `otel/baggage`: `v1.1.0`
+* `otel/sdk/trace`: `v1.1.0`
+* `otel/sdk/metric`: `v1.1.0`
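+
+As a sketch of what this means for a consumer (versions taken from this
+example only), all the now-stable modules can be moved to the new release in a
+single step because they share one version number:
+
+```
+go get go.opentelemetry.io/otel@v1.1.0 go.opentelemetry.io/otel/sdk/metric@v1.1.0
+```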
diff --git a/vendor/go.opentelemetry.io/otel/attribute/filter.go b/vendor/go.opentelemetry.io/otel/attribute/filter.go
new file mode 100644
index 00000000..638c213d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/filter.go
@@ -0,0 +1,60 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+// Filter supports removing certain attributes from attribute sets. When
+// the filter returns true, the attribute will be kept in the filtered
+// attribute set. When the filter returns false, the attribute is excluded
+// from the filtered attribute set, and instead appears in the returned
+// list of removed attributes.
+type Filter func(KeyValue) bool
+
+// NewAllowKeysFilter returns a Filter that only allows attributes with one of
+// the provided keys.
+//
+// If keys is empty a deny-all filter is returned.
+func NewAllowKeysFilter(keys ...Key) Filter {
+	if len(keys) <= 0 {
+		return func(kv KeyValue) bool { return false }
+	}
+
+	allowed := make(map[Key]struct{})
+	for _, k := range keys {
+		allowed[k] = struct{}{}
+	}
+	return func(kv KeyValue) bool {
+		_, ok := allowed[kv.Key]
+		return ok
+	}
+}
+
+// NewDenyKeysFilter returns a Filter that only allows attributes
+// that do not have one of the provided keys.
+//
+// If keys is empty an allow-all filter is returned.
+func NewDenyKeysFilter(keys ...Key) Filter {
+	if len(keys) <= 0 {
+		return func(kv KeyValue) bool { return true }
+	}
+
+	forbid := make(map[Key]struct{})
+	for _, k := range keys {
+		forbid[k] = struct{}{}
+	}
+	return func(kv KeyValue) bool {
+		_, ok := forbid[kv.Key]
+		return !ok
+	}
+}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go
index 26be5983..7e6765b0 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/set.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/set.go
@@ -18,6 +18,7 @@ import (
 	"encoding/json"
 	"reflect"
 	"sort"
+	"sync"
 )
 
 type (
@@ -38,13 +39,6 @@ type (
 		iface interface{}
 	}
 
-	// Filter supports removing certain attributes from attribute sets. When
-	// the filter returns true, the attribute will be kept in the filtered
-	// attribute set. When the filter returns false, the attribute is excluded
-	// from the filtered attribute set, and the attribute instead appears in
-	// the removed list of excluded attributes.
-	Filter func(KeyValue) bool
-
 	// Sortable implements sort.Interface, used for sorting KeyValue. This is
 	// an exported type to support a memory optimization. A pointer to one of
 	// these is needed for the call to sort.Stable(), which the caller may
@@ -62,6 +56,12 @@ var (
 			iface: [0]KeyValue{},
 		},
 	}
+
+	// sortables is a pool of Sortables used to create Sets when a user does
+	// not provide one.
+	sortables = sync.Pool{
+		New: func() interface{} { return new(Sortable) },
+	}
 )
 
 // EmptySet returns a reference to a Set with no elements.
@@ -91,7 +91,7 @@ func (l *Set) Len() int {
 
 // Get returns the KeyValue at ordered position idx in this set.
 func (l *Set) Get(idx int) (KeyValue, bool) {
-	if l == nil {
+	if l == nil || !l.equivalent.Valid() {
 		return KeyValue{}, false
 	}
 	value := l.equivalent.reflectValue()
@@ -107,7 +107,7 @@ func (l *Set) Get(idx int) (KeyValue, bool) {
 
 // Value returns the value of a specified key in this set.
func (l *Set) Value(k Key) (Value, bool) { - if l == nil { + if l == nil || !l.equivalent.Valid() { return Value{}, false } rValue := l.equivalent.reflectValue() @@ -191,7 +191,9 @@ func NewSet(kvs ...KeyValue) Set { if len(kvs) == 0 { return empty() } - s, _ := NewSetWithSortableFiltered(kvs, new(Sortable), nil) + srt := sortables.Get().(*Sortable) + s, _ := NewSetWithSortableFiltered(kvs, srt, nil) + sortables.Put(srt) return s } @@ -218,7 +220,10 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { if len(kvs) == 0 { return empty(), nil } - return NewSetWithSortableFiltered(kvs, new(Sortable), filter) + srt := sortables.Get().(*Sortable) + s, filtered := NewSetWithSortableFiltered(kvs, srt, filter) + sortables.Put(srt) + return s, filtered } // NewSetWithSortableFiltered returns a new Set. @@ -274,52 +279,75 @@ func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (S position-- kvs[offset], kvs[position] = kvs[position], kvs[offset] } + kvs = kvs[position:] + if filter != nil { - return filterSet(kvs[position:], filter) + if div := filteredToFront(kvs, filter); div != 0 { + return Set{equivalent: computeDistinct(kvs[div:])}, kvs[:div] + } } - return Set{ - equivalent: computeDistinct(kvs[position:]), - }, nil + return Set{equivalent: computeDistinct(kvs)}, nil } -// filterSet reorders kvs so that included keys are contiguous at the end of -// the slice, while excluded keys precede the included keys. -func filterSet(kvs []KeyValue, filter Filter) (Set, []KeyValue) { - var excluded []KeyValue - - // Move attributes that do not match the filter so they're adjacent before - // calling computeDistinct(). - distinctPosition := len(kvs) - - // Swap indistinct keys forward and distinct keys toward the - // end of the slice. - offset := len(kvs) - 1 - for ; offset >= 0; offset-- { - if filter(kvs[offset]) { - distinctPosition-- - kvs[offset], kvs[distinctPosition] = kvs[distinctPosition], kvs[offset] - continue +// filteredToFront filters slice in-place using keep function. All KeyValues that need to +// be removed are moved to the front. All KeyValues that need to be kept are +// moved (in-order) to the back. The index for the first KeyValue to be kept is +// returned. +func filteredToFront(slice []KeyValue, keep Filter) int { + n := len(slice) + j := n + for i := n - 1; i >= 0; i-- { + if keep(slice[i]) { + j-- + slice[i], slice[j] = slice[j], slice[i] } } - excluded = kvs[:distinctPosition] - - return Set{ - equivalent: computeDistinct(kvs[distinctPosition:]), - }, excluded + return j } // Filter returns a filtered copy of this Set. See the documentation for // NewSetWithSortableFiltered for more details. func (l *Set) Filter(re Filter) (Set, []KeyValue) { if re == nil { - return Set{ - equivalent: l.equivalent, - }, nil + return *l, nil + } + + // Iterate in reverse to the first attribute that will be filtered out. + n := l.Len() + first := n - 1 + for ; first >= 0; first-- { + kv, _ := l.Get(first) + if !re(kv) { + break + } } - // Note: This could be refactored to avoid the temporary slice - // allocation, if it proves to be expensive. - return filterSet(l.ToSlice(), re) + // No attributes will be dropped, return the immutable Set l and nil. + if first < 0 { + return *l, nil + } + + // Copy now that we know we need to return a modified set. + // + // Do not do this in-place on the underlying storage of *Set l. Sets are + // immutable and filtering should not change this. 
+ slice := l.ToSlice() + + // Don't re-iterate the slice if only slice[0] is filtered. + if first == 0 { + // It is safe to assume len(slice) >= 1 given we found at least one + // attribute above that needs to be filtered out. + return Set{equivalent: computeDistinct(slice[1:])}, slice[:1] + } + + // Move the filtered slice[first] to the front (preserving order). + kv := slice[first] + copy(slice[1:first+1], slice[:first]) + slice[0] = kv + + // Do not re-evaluate re(slice[first+1:]). + div := filteredToFront(slice[1:first+1], re) + 1 + return Set{equivalent: computeDistinct(slice[div:])}, slice[:div] } // computeDistinct returns a Distinct using either the fixed- or diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go index 34a4e548..cb21dd5c 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/value.go +++ b/vendor/go.opentelemetry.io/otel/attribute/value.go @@ -68,7 +68,7 @@ func BoolValue(v bool) Value { // BoolSliceValue creates a BOOLSLICE Value. func BoolSliceValue(v []bool) Value { - return Value{vtype: BOOLSLICE, slice: attribute.SliceValue(v)} + return Value{vtype: BOOLSLICE, slice: attribute.BoolSliceValue(v)} } // IntValue creates an INT64 Value. @@ -99,7 +99,7 @@ func Int64Value(v int64) Value { // Int64SliceValue creates an INT64SLICE Value. func Int64SliceValue(v []int64) Value { - return Value{vtype: INT64SLICE, slice: attribute.SliceValue(v)} + return Value{vtype: INT64SLICE, slice: attribute.Int64SliceValue(v)} } // Float64Value creates a FLOAT64 Value. @@ -112,7 +112,7 @@ func Float64Value(v float64) Value { // Float64SliceValue creates a FLOAT64SLICE Value. func Float64SliceValue(v []float64) Value { - return Value{vtype: FLOAT64SLICE, slice: attribute.SliceValue(v)} + return Value{vtype: FLOAT64SLICE, slice: attribute.Float64SliceValue(v)} } // StringValue creates a STRING Value. @@ -125,7 +125,7 @@ func StringValue(v string) Value { // StringSliceValue creates a STRINGSLICE Value. func StringSliceValue(v []string) Value { - return Value{vtype: STRINGSLICE, slice: attribute.SliceValue(v)} + return Value{vtype: STRINGSLICE, slice: attribute.StringSliceValue(v)} } // Type returns a type of the Value. @@ -149,7 +149,7 @@ func (v Value) AsBoolSlice() []bool { } func (v Value) asBoolSlice() []bool { - return attribute.AsSlice[bool](v.slice) + return attribute.AsBoolSlice(v.slice) } // AsInt64 returns the int64 value. Make sure that the Value's type is @@ -168,7 +168,7 @@ func (v Value) AsInt64Slice() []int64 { } func (v Value) asInt64Slice() []int64 { - return attribute.AsSlice[int64](v.slice) + return attribute.AsInt64Slice(v.slice) } // AsFloat64 returns the float64 value. Make sure that the Value's @@ -187,7 +187,7 @@ func (v Value) AsFloat64Slice() []float64 { } func (v Value) asFloat64Slice() []float64 { - return attribute.AsSlice[float64](v.slice) + return attribute.AsFloat64Slice(v.slice) } // AsString returns the string value. 
Make sure that the Value's type
@@ -206,7 +206,7 @@ func (v Value) AsStringSlice() []string {
 }
 
 func (v Value) asStringSlice() []string {
-	return attribute.AsSlice[string](v.slice)
+	return attribute.AsStringSlice(v.slice)
 }
 
 type unknownValueType struct{}
diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
new file mode 100644
index 00000000..7d27cf77
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
@@ -0,0 +1,744 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package baggage // import "go.opentelemetry.io/otel/baggage"
+
+import (
+	"errors"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"go.opentelemetry.io/otel/internal/baggage"
+)
+
+const (
+	maxMembers               = 180
+	maxBytesPerMembers       = 4096
+	maxBytesPerBaggageString = 8192
+
+	listDelimiter     = ","
+	keyValueDelimiter = "="
+	propertyDelimiter = ";"
+)
+
+var (
+	errInvalidKey      = errors.New("invalid key")
+	errInvalidValue    = errors.New("invalid value")
+	errInvalidProperty = errors.New("invalid baggage list-member property")
+	errInvalidMember   = errors.New("invalid baggage list-member")
+	errMemberNumber    = errors.New("too many list-members in baggage-string")
+	errMemberBytes     = errors.New("list-member too large")
+	errBaggageBytes    = errors.New("baggage-string too large")
+)
+
+// Property is an additional metadata entry for a baggage list-member.
+type Property struct {
+	key, value string
+
+	// hasValue indicates whether the property has a value; it distinguishes
+	// an unset value from one explicitly set to the zero-value.
+	hasValue bool
+}
+
+// NewKeyProperty returns a new Property for key.
+//
+// If key is invalid, an error will be returned.
+func NewKeyProperty(key string) (Property, error) {
+	if !validateKey(key) {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
+	}
+
+	p := Property{key: key}
+	return p, nil
+}
+
+// NewKeyValueProperty returns a new Property for key with value.
+//
+// The passed key must be compliant with the W3C Baggage specification.
+// The passed value must be percent-encoded as defined in the W3C Baggage specification.
+//
+// Notice: Consider using [NewKeyValuePropertyRaw] instead
+// that does not require percent-encoding of the value.
+func NewKeyValueProperty(key, value string) (Property, error) {
+	if !validateValue(value) {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
+	}
+	decodedValue, err := url.PathUnescape(value)
+	if err != nil {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
+	}
+	return NewKeyValuePropertyRaw(key, decodedValue)
+}
+
+// NewKeyValuePropertyRaw returns a new Property for key with value.
+//
+// The passed key must be compliant with the W3C Baggage specification. 
+func NewKeyValuePropertyRaw(key, value string) (Property, error) {
+	if !validateKey(key) {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
+	}
+
+	p := Property{
+		key:      key,
+		value:    value,
+		hasValue: true,
+	}
+	return p, nil
+}
+
+func newInvalidProperty() Property {
+	return Property{}
+}
+
+// parseProperty attempts to decode a Property from the passed string. It
+// returns an error if the input is invalid according to the W3C Baggage
+// specification.
+func parseProperty(property string) (Property, error) {
+	if property == "" {
+		return newInvalidProperty(), nil
+	}
+
+	p, ok := parsePropertyInternal(property)
+	if !ok {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidProperty, property)
+	}
+
+	return p, nil
+}
+
+// validate ensures p conforms to the W3C Baggage specification, returning an
+// error otherwise.
+func (p Property) validate() error {
+	errFunc := func(err error) error {
+		return fmt.Errorf("invalid property: %w", err)
+	}
+
+	if !validateKey(p.key) {
+		return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key))
+	}
+	if !p.hasValue && p.value != "" {
+		return errFunc(errors.New("inconsistent value"))
+	}
+	return nil
+}
+
+// Key returns the Property key.
+func (p Property) Key() string {
+	return p.key
+}
+
+// Value returns the Property value. Additionally, a boolean value is returned
+// indicating whether the Property has its value set; the returned string is
+// empty both when the value is empty and when it is not set, so use the
+// boolean to tell these cases apart.
+func (p Property) Value() (string, bool) {
+	return p.value, p.hasValue
+}
+
+// String encodes Property into a header string compliant with the W3C Baggage
+// specification.
+func (p Property) String() string {
+	if p.hasValue {
+		return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, valueEscape(p.value))
+	}
+	return p.key
+}
+
+type properties []Property
+
+func fromInternalProperties(iProps []baggage.Property) properties {
+	if len(iProps) == 0 {
+		return nil
+	}
+
+	props := make(properties, len(iProps))
+	for i, p := range iProps {
+		props[i] = Property{
+			key:      p.Key,
+			value:    p.Value,
+			hasValue: p.HasValue,
+		}
+	}
+	return props
+}
+
+func (p properties) asInternal() []baggage.Property {
+	if len(p) == 0 {
+		return nil
+	}
+
+	iProps := make([]baggage.Property, len(p))
+	for i, prop := range p {
+		iProps[i] = baggage.Property{
+			Key:      prop.key,
+			Value:    prop.value,
+			HasValue: prop.hasValue,
+		}
+	}
+	return iProps
+}
+
+func (p properties) Copy() properties {
+	if len(p) == 0 {
+		return nil
+	}
+
+	props := make(properties, len(p))
+	copy(props, p)
+	return props
+}
+
+// validate ensures each Property in p conforms to the W3C Baggage
+// specification, returning an error otherwise.
+func (p properties) validate() error {
+	for _, prop := range p {
+		if err := prop.validate(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// String encodes properties into a header string compliant with the W3C Baggage
+// specification.
+func (p properties) String() string {
+	props := make([]string, len(p))
+	for i, prop := range p {
+		props[i] = prop.String()
+	}
+	return strings.Join(props, propertyDelimiter)
+}
+
+// Member is a list-member of a baggage-string as defined by the W3C Baggage
+// specification.
+type Member struct {
+	key, value string
+	properties properties
+
+	// hasData indicates whether the created member contains data or not.
+	// Members that do not contain data are invalid with no other check
+	// required.
+	hasData bool
+}
+
+// NewMember returns a new Member from the passed arguments. 
+//
+// The passed key must be compliant with the W3C Baggage specification.
+// The passed value must be percent-encoded as defined in the W3C Baggage specification.
+//
+// Notice: Consider using [NewMemberRaw] instead
+// that does not require percent-encoding of the value.
+func NewMember(key, value string, props ...Property) (Member, error) {
+	if !validateValue(value) {
+		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
+	}
+	decodedValue, err := url.PathUnescape(value)
+	if err != nil {
+		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
+	}
+	return NewMemberRaw(key, decodedValue, props...)
+}
+
+// NewMemberRaw returns a new Member from the passed arguments.
+//
+// The passed key must be compliant with the W3C Baggage specification.
+func NewMemberRaw(key, value string, props ...Property) (Member, error) {
+	m := Member{
+		key:        key,
+		value:      value,
+		properties: properties(props).Copy(),
+		hasData:    true,
+	}
+	if err := m.validate(); err != nil {
+		return newInvalidMember(), err
+	}
+	return m, nil
+}
+
+func newInvalidMember() Member {
+	return Member{}
+}
+
+// parseMember attempts to decode a Member from the passed string. It returns
+// an error if the input is invalid according to the W3C Baggage
+// specification.
+func parseMember(member string) (Member, error) {
+	if n := len(member); n > maxBytesPerMembers {
+		return newInvalidMember(), fmt.Errorf("%w: %d", errMemberBytes, n)
+	}
+
+	var props properties
+	keyValue, properties, found := strings.Cut(member, propertyDelimiter)
+	if found {
+		// Parse the member properties.
+		for _, pStr := range strings.Split(properties, propertyDelimiter) {
+			p, err := parseProperty(pStr)
+			if err != nil {
+				return newInvalidMember(), err
+			}
+			props = append(props, p)
+		}
+	}
+	// Parse the member key/value pair.
+
+	// Take into account that a value can contain equal signs (=).
+	k, v, found := strings.Cut(keyValue, keyValueDelimiter)
+	if !found {
+		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidMember, member)
+	}
+	// "Leading and trailing whitespaces are allowed but MUST be trimmed
+	// when converting the header into a data structure."
+	key := strings.TrimSpace(k)
+	if !validateKey(key) {
+		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
+	}
+
+	val := strings.TrimSpace(v)
+	if !validateValue(val) {
+		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v)
+	}
+
+	// Decode a percent-encoded value.
+	value, err := url.PathUnescape(val)
+	if err != nil {
+		return newInvalidMember(), fmt.Errorf("%w: %v", errInvalidValue, err)
+	}
+	return Member{key: key, value: value, properties: props, hasData: true}, nil
+}
+
+// validate ensures m conforms to the W3C Baggage specification, returning an
+// error otherwise. A key must be an ASCII string.
+func (m Member) validate() error {
+	if !m.hasData {
+		return fmt.Errorf("%w: %q", errInvalidMember, m)
+	}
+
+	if !validateKey(m.key) {
+		return fmt.Errorf("%w: %q", errInvalidKey, m.key)
+	}
+	return m.properties.validate()
+}
+
+// Key returns the Member key.
+func (m Member) Key() string { return m.key }
+
+// Value returns the Member value.
+func (m Member) Value() string { return m.value }
+
+// Properties returns a copy of the Member properties.
+func (m Member) Properties() []Property { return m.properties.Copy() }
+
+// String encodes Member into a header string compliant with the W3C Baggage
+// specification.
+func (m Member) String() string {
+	// A key is just an ASCII string. 
A value is restricted to be + // US-ASCII characters excluding CTLs, whitespace, + // DQUOTE, comma, semicolon, and backslash. + s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, valueEscape(m.value)) + if len(m.properties) > 0 { + s = fmt.Sprintf("%s%s%s", s, propertyDelimiter, m.properties.String()) + } + return s +} + +// Baggage is a list of baggage members representing the baggage-string as +// defined by the W3C Baggage specification. +type Baggage struct { //nolint:golint + list baggage.List +} + +// New returns a new valid Baggage. It returns an error if it results in a +// Baggage exceeding limits set in that specification. +// +// It expects all the provided members to have already been validated. +func New(members ...Member) (Baggage, error) { + if len(members) == 0 { + return Baggage{}, nil + } + + b := make(baggage.List) + for _, m := range members { + if !m.hasData { + return Baggage{}, errInvalidMember + } + + // OpenTelemetry resolves duplicates by last-one-wins. + b[m.key] = baggage.Item{ + Value: m.value, + Properties: m.properties.asInternal(), + } + } + + // Check member numbers after deduplication. + if len(b) > maxMembers { + return Baggage{}, errMemberNumber + } + + bag := Baggage{b} + if n := len(bag.String()); n > maxBytesPerBaggageString { + return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n) + } + + return bag, nil +} + +// Parse attempts to decode a baggage-string from the passed string. It +// returns an error if the input is invalid according to the W3C Baggage +// specification. +// +// If there are duplicate list-members contained in baggage, the last one +// defined (reading left-to-right) will be the only one kept. This diverges +// from the W3C Baggage specification which allows duplicate list-members, but +// conforms to the OpenTelemetry Baggage specification. +func Parse(bStr string) (Baggage, error) { + if bStr == "" { + return Baggage{}, nil + } + + if n := len(bStr); n > maxBytesPerBaggageString { + return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n) + } + + b := make(baggage.List) + for _, memberStr := range strings.Split(bStr, listDelimiter) { + m, err := parseMember(memberStr) + if err != nil { + return Baggage{}, err + } + // OpenTelemetry resolves duplicates by last-one-wins. + b[m.key] = baggage.Item{ + Value: m.value, + Properties: m.properties.asInternal(), + } + } + + // OpenTelemetry does not allow for duplicate list-members, but the W3C + // specification does. Now that we have deduplicated, ensure the baggage + // does not exceed list-member limits. + if len(b) > maxMembers { + return Baggage{}, errMemberNumber + } + + return Baggage{b}, nil +} + +// Member returns the baggage list-member identified by key. +// +// If there is no list-member matching the passed key the returned Member will +// be a zero-value Member. +// The returned member is not validated, as we assume the validation happened +// when it was added to the Baggage. +func (b Baggage) Member(key string) Member { + v, ok := b.list[key] + if !ok { + // We do not need to worry about distinguishing between the situation + // where a zero-valued Member is included in the Baggage because a + // zero-valued Member is invalid according to the W3C Baggage + // specification (it has an empty key). + return newInvalidMember() + } + + return Member{ + key: key, + value: v.Value, + properties: fromInternalProperties(v.Properties), + hasData: true, + } +} + +// Members returns all the baggage list-members. 
+
+// The order of the returned list-members does not have significance.
+//
+// The returned members are not validated, as we assume the validation happened
+// when they were added to the Baggage.
+func (b Baggage) Members() []Member {
+	if len(b.list) == 0 {
+		return nil
+	}
+
+	members := make([]Member, 0, len(b.list))
+	for k, v := range b.list {
+		members = append(members, Member{
+			key:        k,
+			value:      v.Value,
+			properties: fromInternalProperties(v.Properties),
+			hasData:    true,
+		})
+	}
+	return members
+}
+
+// SetMember returns a copy of the Baggage with the member included. If the
+// baggage contains a Member with the same key, the existing Member is
+// replaced.
+//
+// If member is invalid according to the W3C Baggage specification, an error
+// is returned with the original Baggage.
+func (b Baggage) SetMember(member Member) (Baggage, error) {
+	if !member.hasData {
+		return b, errInvalidMember
+	}
+
+	n := len(b.list)
+	if _, ok := b.list[member.key]; !ok {
+		n++
+	}
+	list := make(baggage.List, n)
+
+	for k, v := range b.list {
+		// Do not copy if we are just going to overwrite.
+		if k == member.key {
+			continue
+		}
+		list[k] = v
+	}
+
+	list[member.key] = baggage.Item{
+		Value:      member.value,
+		Properties: member.properties.asInternal(),
+	}
+
+	return Baggage{list: list}, nil
+}
+
+// DeleteMember returns a copy of the Baggage with the list-member identified
+// by key removed.
+func (b Baggage) DeleteMember(key string) Baggage {
+	n := len(b.list)
+	if _, ok := b.list[key]; ok {
+		n--
+	}
+	list := make(baggage.List, n)
+
+	for k, v := range b.list {
+		if k == key {
+			continue
+		}
+		list[k] = v
+	}
+
+	return Baggage{list: list}
+}
+
+// Len returns the number of list-members in the Baggage.
+func (b Baggage) Len() int {
+	return len(b.list)
+}
+
+// String encodes Baggage into a header string compliant with the W3C Baggage
+// specification.
+func (b Baggage) String() string {
+	members := make([]string, 0, len(b.list))
+	for k, v := range b.list {
+		members = append(members, Member{
+			key:        k,
+			value:      v.Value,
+			properties: fromInternalProperties(v.Properties),
+		}.String())
+	}
+	return strings.Join(members, listDelimiter)
+}
+
+// parsePropertyInternal attempts to decode a Property from the passed string.
+// It follows the spec at https://www.w3.org/TR/baggage/#definition.
+func parsePropertyInternal(s string) (p Property, ok bool) {
+	// For the entire function we will use " key = value " as an example.
+	// Attempting to parse the key.
+	// First skip spaces at the beginning "< >key = value " (they could be empty).
+	index := skipSpace(s, 0)
+
+	// Parse the key: " <key> = value ".
+	keyStart := index
+	keyEnd := index
+	for _, c := range s[keyStart:] {
+		if !validateKeyChar(c) {
+			break
+		}
+		keyEnd++
+	}
+
+	// If we couldn't find any valid key character,
+	// it means the key is either empty or invalid.
+	if keyStart == keyEnd {
+		return
+	}
+
+	// Skip spaces after the key: " key< >= value ".
+	index = skipSpace(s, keyEnd)
+
+	if index == len(s) {
+		// A key can have no value, like: " key ".
+		ok = true
+		p.key = s[keyStart:keyEnd]
+		return
+	}
+
+	// If we have not reached the end and we can't find the '=' delimiter,
+	// it means the property is invalid.
+	if s[index] != keyValueDelimiter[0] {
+		return
+	}
+
+	// Attempting to parse the value.
+	// Match: " key =< >value ".
+	index = skipSpace(s, index+1)
+
+	// Match the value string: " key = <value> ".
+	// A valid property can be: " key =".
+	// Therefore, we don't have to check if the value is empty. 
+
+	valueStart := index
+	valueEnd := index
+	for _, c := range s[valueStart:] {
+		if !validateValueChar(c) {
+			break
+		}
+		valueEnd++
+	}
+
+	// Skip all trailing whitespaces: " key = value< >".
+	index = skipSpace(s, valueEnd)
+
+	// If after looking for the value and skipping whitespaces
+	// we have not reached the end, it means the property is
+	// invalid, something like: " key = value value1".
+	if index != len(s) {
+		return
+	}
+
+	// Decode a percent-encoded value.
+	value, err := url.PathUnescape(s[valueStart:valueEnd])
+	if err != nil {
+		return
+	}
+
+	ok = true
+	p.key = s[keyStart:keyEnd]
+	p.hasValue = true
+
+	p.value = value
+	return
+}
+
+func skipSpace(s string, offset int) int {
+	i := offset
+	for ; i < len(s); i++ {
+		c := s[i]
+		if c != ' ' && c != '\t' {
+			break
+		}
+	}
+	return i
+}
+
+func validateKey(s string) bool {
+	if len(s) == 0 {
+		return false
+	}
+
+	for _, c := range s {
+		if !validateKeyChar(c) {
+			return false
+		}
+	}
+
+	return true
+}
+
+func validateKeyChar(c int32) bool {
+	return (c >= 0x23 && c <= 0x27) ||
+		(c >= 0x30 && c <= 0x39) ||
+		(c >= 0x41 && c <= 0x5a) ||
+		(c >= 0x5e && c <= 0x7a) ||
+		c == 0x21 ||
+		c == 0x2a ||
+		c == 0x2b ||
+		c == 0x2d ||
+		c == 0x2e ||
+		c == 0x7c ||
+		c == 0x7e
+}
+
+func validateValue(s string) bool {
+	for _, c := range s {
+		if !validateValueChar(c) {
+			return false
+		}
+	}
+
+	return true
+}
+
+func validateValueChar(c int32) bool {
+	return c == 0x21 ||
+		(c >= 0x23 && c <= 0x2b) ||
+		(c >= 0x2d && c <= 0x3a) ||
+		(c >= 0x3c && c <= 0x5b) ||
+		(c >= 0x5d && c <= 0x7e)
+}
+
+// valueEscape escapes the string so it can be safely placed inside a baggage value,
+// replacing special characters with %XX sequences as needed.
+//
+// The implementation is based on:
+// https://github.com/golang/go/blob/f6509cf5cdbb5787061b784973782933c47f1782/src/net/url/url.go#L285.
+func valueEscape(s string) string {
+	hexCount := 0
+	for i := 0; i < len(s); i++ {
+		c := s[i]
+		if shouldEscape(c) {
+			hexCount++
+		}
+	}
+
+	if hexCount == 0 {
+		return s
+	}
+
+	var buf [64]byte
+	var t []byte
+
+	required := len(s) + 2*hexCount
+	if required <= len(buf) {
+		t = buf[:required]
+	} else {
+		t = make([]byte, required)
+	}
+
+	j := 0
+	for i := 0; i < len(s); i++ {
+		c := s[i]
+		if shouldEscape(s[i]) {
+			const upperhex = "0123456789ABCDEF"
+			t[j] = '%'
+			t[j+1] = upperhex[c>>4]
+			t[j+2] = upperhex[c&15]
+			j += 3
+		} else {
+			t[j] = c
+			j++
+		}
+	}
+
+	return string(t)
+}
+
+// shouldEscape returns true if the specified byte should be escaped when
+// appearing in a baggage value string.
+func shouldEscape(c byte) bool {
+	if c == '%' {
+		// The percent character must be encoded so that percent-encoding can work.
+		return true
+	}
+	return !validateValueChar(int32(c))
+}
diff --git a/vendor/go.opentelemetry.io/otel/baggage/context.go b/vendor/go.opentelemetry.io/otel/baggage/context.go
new file mode 100644
index 00000000..24b34b75
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/baggage/context.go
@@ -0,0 +1,39 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package baggage // import "go.opentelemetry.io/otel/baggage" + +import ( + "context" + + "go.opentelemetry.io/otel/internal/baggage" +) + +// ContextWithBaggage returns a copy of parent with baggage. +func ContextWithBaggage(parent context.Context, b Baggage) context.Context { + // Delegate so any hooks for the OpenTracing bridge are handled. + return baggage.ContextWithList(parent, b.list) +} + +// ContextWithoutBaggage returns a copy of parent with no baggage. +func ContextWithoutBaggage(parent context.Context) context.Context { + // Delegate so any hooks for the OpenTracing bridge are handled. + return baggage.ContextWithList(parent, nil) +} + +// FromContext returns the baggage contained in ctx. +func FromContext(ctx context.Context) Baggage { + // Delegate so any hooks for the OpenTracing bridge are handled. + return Baggage{list: baggage.ListFromContext(ctx)} +} diff --git a/vendor/go.opentelemetry.io/otel/baggage/doc.go b/vendor/go.opentelemetry.io/otel/baggage/doc.go new file mode 100644 index 00000000..4545100d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/baggage/doc.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package baggage provides functionality for storing and retrieving +baggage items in Go context. For propagating the baggage, see the +go.opentelemetry.io/otel/propagation package. +*/ +package baggage // import "go.opentelemetry.io/otel/baggage" diff --git a/vendor/go.opentelemetry.io/otel/codes/doc.go b/vendor/go.opentelemetry.io/otel/codes/doc.go index df3e0f1b..4e328fbb 100644 --- a/vendor/go.opentelemetry.io/otel/codes/doc.go +++ b/vendor/go.opentelemetry.io/otel/codes/doc.go @@ -16,6 +16,6 @@ Package codes defines the canonical error codes used by OpenTelemetry. It conforms to [the OpenTelemetry -specification](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#statuscanonicalcode). +specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/trace/api.md#set-status). */ package codes // import "go.opentelemetry.io/otel/codes" diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go new file mode 100644 index 00000000..36d7c24e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/doc.go @@ -0,0 +1,34 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package otel provides global access to the OpenTelemetry API. The subpackages of +the otel package provide an implementation of the OpenTelemetry API. + +The provided API is used to instrument code and measure data about that code's +performance and operation. The measured data, by default, is not processed or +transmitted anywhere. An implementation of the OpenTelemetry SDK, like the +default SDK implementation (go.opentelemetry.io/otel/sdk), and associated +exporters are used to process and transport this data. + +To read the getting started guide, see https://opentelemetry.io/docs/languages/go/getting-started/. + +To read more about tracing, see go.opentelemetry.io/otel/trace. + +To read more about metrics, see go.opentelemetry.io/otel/metric. + +To read more about propagation, see go.opentelemetry.io/otel/propagation and +go.opentelemetry.io/otel/baggage. +*/ +package otel // import "go.opentelemetry.io/otel" diff --git a/vendor/go.opentelemetry.io/otel/error_handler.go b/vendor/go.opentelemetry.io/otel/error_handler.go new file mode 100644 index 00000000..72fad854 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/error_handler.go @@ -0,0 +1,38 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otel // import "go.opentelemetry.io/otel" + +// ErrorHandler handles irremediable events. +type ErrorHandler interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Handle handles any error deemed irremediable by an OpenTelemetry + // component. + Handle(error) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} + +// ErrorHandlerFunc is a convenience adapter to allow the use of a function +// as an ErrorHandler. +type ErrorHandlerFunc func(error) + +var _ ErrorHandler = ErrorHandlerFunc(nil) + +// Handle handles the irremediable error by calling the ErrorHandlerFunc itself. +func (f ErrorHandlerFunc) Handle(err error) { + f(err) +} diff --git a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh new file mode 100644 index 00000000..9a58fb1d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh @@ -0,0 +1,41 @@ +#!/usr/bin/env bash + +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +set -euo pipefail + +top_dir='.' +if [[ $# -gt 0 ]]; then + top_dir="${1}" +fi + +p=$(pwd) +mod_dirs=() + +# Note `mapfile` does not exist in older bash versions: +# https://stackoverflow.com/questions/41475261/need-alternative-to-readarray-mapfile-for-script-on-older-version-of-bash + +while IFS= read -r line; do + mod_dirs+=("$line") +done < <(find "${top_dir}" -type f -name 'go.mod' -exec dirname {} \; | sort) + +for mod_dir in "${mod_dirs[@]}"; do + cd "${mod_dir}" + + while IFS= read -r line; do + echo ".${line#${p}}" + done < <(go list --find -f '{{.Name}}|{{.Dir}}' ./... | grep '^main|' | cut -f 2- -d '|') + cd "${p}" +done diff --git a/vendor/go.opentelemetry.io/otel/handler.go b/vendor/go.opentelemetry.io/otel/handler.go new file mode 100644 index 00000000..4115fe3b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/handler.go @@ -0,0 +1,48 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otel // import "go.opentelemetry.io/otel" + +import ( + "go.opentelemetry.io/otel/internal/global" +) + +var ( + // Compile-time check global.ErrDelegator implements ErrorHandler. + _ ErrorHandler = (*global.ErrDelegator)(nil) + // Compile-time check global.ErrLogger implements ErrorHandler. + _ ErrorHandler = (*global.ErrLogger)(nil) +) + +// GetErrorHandler returns the global ErrorHandler instance. +// +// The default ErrorHandler instance returned will log all errors to STDERR +// until an override ErrorHandler is set with SetErrorHandler. All +// ErrorHandler returned prior to this will automatically forward errors to +// the set instance instead of logging. +// +// Subsequent calls to SetErrorHandler after the first will not forward errors +// to the new ErrorHandler for prior returned instances. +func GetErrorHandler() ErrorHandler { return global.GetErrorHandler() } + +// SetErrorHandler sets the global ErrorHandler to h. +// +// The first time this is called all ErrorHandler previously returned from +// GetErrorHandler will send errors to h instead of the default logging +// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not +// delegate errors to h. +func SetErrorHandler(h ErrorHandler) { global.SetErrorHandler(h) } + +// Handle is a convenience function for ErrorHandler().Handle(err). +func Handle(err error) { global.Handle(err) } diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go index 22034894..622c3ee3 100644 --- a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go +++ b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go @@ -22,24 +22,90 @@ import ( "reflect" ) -// SliceValue convert a slice into an array with same elements as slice. 
-func SliceValue[T bool | int64 | float64 | string](v []T) any {
-	var zero T
+// BoolSliceValue converts a bool slice into an array with the same elements as the slice.
+func BoolSliceValue(v []bool) interface{} {
+	var zero bool
 	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
-	copy(cp.Elem().Slice(0, len(v)).Interface().([]T), v)
+	copy(cp.Elem().Slice(0, len(v)).Interface().([]bool), v)
 	return cp.Elem().Interface()
 }
 
-// AsSlice convert an array into a slice into with same elements as array.
-func AsSlice[T bool | int64 | float64 | string](v any) []T {
+// Int64SliceValue converts an int64 slice into an array with the same elements as the slice.
+func Int64SliceValue(v []int64) interface{} {
+	var zero int64
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
+	copy(cp.Elem().Slice(0, len(v)).Interface().([]int64), v)
+	return cp.Elem().Interface()
+}
+
+// Float64SliceValue converts a float64 slice into an array with the same elements as the slice.
+func Float64SliceValue(v []float64) interface{} {
+	var zero float64
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
+	copy(cp.Elem().Slice(0, len(v)).Interface().([]float64), v)
+	return cp.Elem().Interface()
+}
+
+// StringSliceValue converts a string slice into an array with the same elements as the slice.
+func StringSliceValue(v []string) interface{} {
+	var zero string
+	cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero)))
+	copy(cp.Elem().Slice(0, len(v)).Interface().([]string), v)
+	return cp.Elem().Interface()
+}
+
+// AsBoolSlice converts a bool array into a slice with the same elements as the array.
+func AsBoolSlice(v interface{}) []bool {
+	rv := reflect.ValueOf(v)
+	if rv.Type().Kind() != reflect.Array {
+		return nil
+	}
+	var zero bool
+	correctLen := rv.Len()
+	correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
+	cpy := reflect.New(correctType)
+	_ = reflect.Copy(cpy.Elem(), rv)
+	return cpy.Elem().Slice(0, correctLen).Interface().([]bool)
+}
+
+// AsInt64Slice converts an int64 array into a slice with the same elements as the array.
+func AsInt64Slice(v interface{}) []int64 {
+	rv := reflect.ValueOf(v)
+	if rv.Type().Kind() != reflect.Array {
+		return nil
+	}
+	var zero int64
+	correctLen := rv.Len()
+	correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
+	cpy := reflect.New(correctType)
+	_ = reflect.Copy(cpy.Elem(), rv)
+	return cpy.Elem().Slice(0, correctLen).Interface().([]int64)
+}
+
+// AsFloat64Slice converts a float64 array into a slice with the same elements as the array.
+func AsFloat64Slice(v interface{}) []float64 {
+	rv := reflect.ValueOf(v)
+	if rv.Type().Kind() != reflect.Array {
+		return nil
+	}
+	var zero float64
+	correctLen := rv.Len()
+	correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero))
+	cpy := reflect.New(correctType)
+	_ = reflect.Copy(cpy.Elem(), rv)
+	return cpy.Elem().Slice(0, correctLen).Interface().([]float64)
+}
+
+// AsStringSlice converts a string array into a slice with the same elements as the array. 
+func AsStringSlice(v interface{}) []string { rv := reflect.ValueOf(v) if rv.Type().Kind() != reflect.Array { return nil } - var zero T + var zero string correctLen := rv.Len() correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) cpy := reflect.New(correctType) _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]T) + return cpy.Elem().Slice(0, correctLen).Interface().([]string) } diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go new file mode 100644 index 00000000..b96e5408 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go @@ -0,0 +1,43 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package baggage provides base types and functionality to store and retrieve +baggage in Go context. This package exists because the OpenTracing bridge to +OpenTelemetry needs to synchronize state whenever baggage for a context is +modified and that context contains an OpenTracing span. If it were not for +this need this package would not need to exist and the +`go.opentelemetry.io/otel/baggage` package would be the singular place where +W3C baggage is handled. +*/ +package baggage // import "go.opentelemetry.io/otel/internal/baggage" + +// List is the collection of baggage members. The W3C allows for duplicates, +// but OpenTelemetry does not, therefore, this is represented as a map. +type List map[string]Item + +// Item is the value and metadata properties part of a list-member. +type Item struct { + Value string + Properties []Property +} + +// Property is a metadata entry for a list-member. +type Property struct { + Key, Value string + + // HasValue indicates if a zero-value value means the property does not + // have a value or if it was the zero-value. + HasValue bool +} diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go new file mode 100644 index 00000000..4469700d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go @@ -0,0 +1,92 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package baggage // import "go.opentelemetry.io/otel/internal/baggage" + +import "context" + +type baggageContextKeyType int + +const baggageKey baggageContextKeyType = iota + +// SetHookFunc is a callback called when storing baggage in the context. 
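+//
+// An illustrative sketch (not part of the vendored file): a hook that
+// observes every store, using only names defined in this file:
+//
+//	var stores int
+//	hook := SetHookFunc(func(ctx context.Context, l List) context.Context {
+//		stores++ // runs on every ContextWithList call
+//		return ctx
+//	})
+//	ctx := ContextWithSetHook(context.Background(), hook)
+//	ctx = ContextWithList(ctx, List{"user": {Value: "alice"}})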
+type SetHookFunc func(context.Context, List) context.Context + +// GetHookFunc is a callback called when getting baggage from the context. +type GetHookFunc func(context.Context, List) List + +type baggageState struct { + list List + + setHook SetHookFunc + getHook GetHookFunc +} + +// ContextWithSetHook returns a copy of parent with hook configured to be +// invoked every time ContextWithBaggage is called. +// +// Passing nil SetHookFunc creates a context with no set hook to call. +func ContextWithSetHook(parent context.Context, hook SetHookFunc) context.Context { + var s baggageState + if v, ok := parent.Value(baggageKey).(baggageState); ok { + s = v + } + + s.setHook = hook + return context.WithValue(parent, baggageKey, s) +} + +// ContextWithGetHook returns a copy of parent with hook configured to be +// invoked every time FromContext is called. +// +// Passing nil GetHookFunc creates a context with no get hook to call. +func ContextWithGetHook(parent context.Context, hook GetHookFunc) context.Context { + var s baggageState + if v, ok := parent.Value(baggageKey).(baggageState); ok { + s = v + } + + s.getHook = hook + return context.WithValue(parent, baggageKey, s) +} + +// ContextWithList returns a copy of parent with baggage. Passing nil list +// returns a context without any baggage. +func ContextWithList(parent context.Context, list List) context.Context { + var s baggageState + if v, ok := parent.Value(baggageKey).(baggageState); ok { + s = v + } + + s.list = list + ctx := context.WithValue(parent, baggageKey, s) + if s.setHook != nil { + ctx = s.setHook(ctx, list) + } + + return ctx +} + +// ListFromContext returns the baggage contained in ctx. +func ListFromContext(ctx context.Context) List { + switch v := ctx.Value(baggageKey).(type) { + case baggageState: + if v.getHook != nil { + return v.getHook(ctx, v.list) + } + return v.list + default: + return nil + } +} diff --git a/vendor/go.opentelemetry.io/otel/internal/gen.go b/vendor/go.opentelemetry.io/otel/internal/gen.go new file mode 100644 index 00000000..f532f07e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/gen.go @@ -0,0 +1,29 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package internal // import "go.opentelemetry.io/otel/internal"
+
+//go:generate gotmpl --body=./shared/matchers/expectation.go.tmpl "--data={}" --out=matchers/expectation.go
+//go:generate gotmpl --body=./shared/matchers/expecter.go.tmpl "--data={}" --out=matchers/expecter.go
+//go:generate gotmpl --body=./shared/matchers/temporal_matcher.go.tmpl "--data={}" --out=matchers/temporal_matcher.go
+
+//go:generate gotmpl --body=./shared/internaltest/alignment.go.tmpl "--data={}" --out=internaltest/alignment.go
+//go:generate gotmpl --body=./shared/internaltest/env.go.tmpl "--data={}" --out=internaltest/env.go
+//go:generate gotmpl --body=./shared/internaltest/env_test.go.tmpl "--data={}" --out=internaltest/env_test.go
+//go:generate gotmpl --body=./shared/internaltest/errors.go.tmpl "--data={}" --out=internaltest/errors.go
+//go:generate gotmpl --body=./shared/internaltest/harness.go.tmpl "--data={\"matchersImportPath\": \"go.opentelemetry.io/otel/internal/matchers\"}" --out=internaltest/harness.go
+//go:generate gotmpl --body=./shared/internaltest/text_map_carrier.go.tmpl "--data={}" --out=internaltest/text_map_carrier.go
+//go:generate gotmpl --body=./shared/internaltest/text_map_carrier_test.go.tmpl "--data={}" --out=internaltest/text_map_carrier_test.go
+//go:generate gotmpl --body=./shared/internaltest/text_map_propagator.go.tmpl "--data={}" --out=internaltest/text_map_propagator.go
+//go:generate gotmpl --body=./shared/internaltest/text_map_propagator_test.go.tmpl "--data={}" --out=internaltest/text_map_propagator_test.go
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/handler.go b/vendor/go.opentelemetry.io/otel/internal/global/handler.go
new file mode 100644
index 00000000..5e9b8304
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/handler.go
@@ -0,0 +1,102 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+	"log"
+	"os"
+	"sync/atomic"
+)
+
+var (
+	// GlobalErrorHandler provides an ErrorHandler that can be used
+	// throughout an OpenTelemetry instrumented project. When a
+	// user-specified ErrorHandler is registered (`SetErrorHandler`), all
+	// calls to `Handle` will be delegated to the registered ErrorHandler.
+	GlobalErrorHandler = defaultErrorHandler()
+
+	// Compile-time check that delegator implements ErrorHandler.
+	_ ErrorHandler = (*ErrDelegator)(nil)
+	// Compile-time check that errLogger implements ErrorHandler.
+	_ ErrorHandler = (*ErrLogger)(nil)
+)
+
+// ErrorHandler handles irremediable events.
+type ErrorHandler interface {
+	// Handle handles any error deemed irremediable by an OpenTelemetry
+	// component.
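+	//
+	// An illustrative sketch (not part of the vendored file): a handler
+	// satisfying this interface that stops logging after max errors
+	// (capped is a hypothetical type):
+	//
+	//	type capped struct{ n, max int }
+	//
+	//	func (c *capped) Handle(err error) {
+	//		if c.n < c.max {
+	//			c.n++
+	//			log.Print(err)
+	//		}
+	//	}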
+ Handle(error) +} + +type ErrDelegator struct { + delegate atomic.Pointer[ErrorHandler] +} + +func (d *ErrDelegator) Handle(err error) { + d.getDelegate().Handle(err) +} + +func (d *ErrDelegator) getDelegate() ErrorHandler { + return *d.delegate.Load() +} + +// setDelegate sets the ErrorHandler delegate. +func (d *ErrDelegator) setDelegate(eh ErrorHandler) { + d.delegate.Store(&eh) +} + +func defaultErrorHandler() *ErrDelegator { + d := &ErrDelegator{} + d.setDelegate(&ErrLogger{l: log.New(os.Stderr, "", log.LstdFlags)}) + return d +} + +// ErrLogger logs errors if no delegate is set, otherwise they are delegated. +type ErrLogger struct { + l *log.Logger +} + +// Handle logs err if no delegate is set, otherwise it is delegated. +func (h *ErrLogger) Handle(err error) { + h.l.Print(err) +} + +// GetErrorHandler returns the global ErrorHandler instance. +// +// The default ErrorHandler instance returned will log all errors to STDERR +// until an override ErrorHandler is set with SetErrorHandler. All +// ErrorHandler returned prior to this will automatically forward errors to +// the set instance instead of logging. +// +// Subsequent calls to SetErrorHandler after the first will not forward errors +// to the new ErrorHandler for prior returned instances. +func GetErrorHandler() ErrorHandler { + return GlobalErrorHandler +} + +// SetErrorHandler sets the global ErrorHandler to h. +// +// The first time this is called all ErrorHandler previously returned from +// GetErrorHandler will send errors to h instead of the default logging +// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not +// delegate errors to h. +func SetErrorHandler(h ErrorHandler) { + GlobalErrorHandler.setDelegate(h) +} + +// Handle is a convenience function for ErrorHandler().Handle(err). +func Handle(err error) { + GetErrorHandler().Handle(err) +} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go new file mode 100644 index 00000000..ebb13c20 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go @@ -0,0 +1,371 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package global // import "go.opentelemetry.io/otel/internal/global" + +import ( + "context" + "sync/atomic" + + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/embedded" +) + +// unwrapper unwraps to return the underlying instrument implementation. +type unwrapper interface { + Unwrap() metric.Observable +} + +type afCounter struct { + embedded.Float64ObservableCounter + metric.Float64Observable + + name string + opts []metric.Float64ObservableCounterOption + + delegate atomic.Value // metric.Float64ObservableCounter +} + +var ( + _ unwrapper = (*afCounter)(nil) + _ metric.Float64ObservableCounter = (*afCounter)(nil) +) + +func (i *afCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64ObservableCounter(i.name, i.opts...) 
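+	// A creation failure is reported to the global ErrorHandler below and
+	// no delegate is stored, so the instrument keeps its no-op behavior.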
+ if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *afCounter) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Float64ObservableCounter) + } + return nil +} + +type afUpDownCounter struct { + embedded.Float64ObservableUpDownCounter + metric.Float64Observable + + name string + opts []metric.Float64ObservableUpDownCounterOption + + delegate atomic.Value // metric.Float64ObservableUpDownCounter +} + +var ( + _ unwrapper = (*afUpDownCounter)(nil) + _ metric.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil) +) + +func (i *afUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64ObservableUpDownCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *afUpDownCounter) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Float64ObservableUpDownCounter) + } + return nil +} + +type afGauge struct { + embedded.Float64ObservableGauge + metric.Float64Observable + + name string + opts []metric.Float64ObservableGaugeOption + + delegate atomic.Value // metric.Float64ObservableGauge +} + +var ( + _ unwrapper = (*afGauge)(nil) + _ metric.Float64ObservableGauge = (*afGauge)(nil) +) + +func (i *afGauge) setDelegate(m metric.Meter) { + ctr, err := m.Float64ObservableGauge(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *afGauge) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Float64ObservableGauge) + } + return nil +} + +type aiCounter struct { + embedded.Int64ObservableCounter + metric.Int64Observable + + name string + opts []metric.Int64ObservableCounterOption + + delegate atomic.Value // metric.Int64ObservableCounter +} + +var ( + _ unwrapper = (*aiCounter)(nil) + _ metric.Int64ObservableCounter = (*aiCounter)(nil) +) + +func (i *aiCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64ObservableCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *aiCounter) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Int64ObservableCounter) + } + return nil +} + +type aiUpDownCounter struct { + embedded.Int64ObservableUpDownCounter + metric.Int64Observable + + name string + opts []metric.Int64ObservableUpDownCounterOption + + delegate atomic.Value // metric.Int64ObservableUpDownCounter +} + +var ( + _ unwrapper = (*aiUpDownCounter)(nil) + _ metric.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil) +) + +func (i *aiUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64ObservableUpDownCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *aiUpDownCounter) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Int64ObservableUpDownCounter) + } + return nil +} + +type aiGauge struct { + embedded.Int64ObservableGauge + metric.Int64Observable + + name string + opts []metric.Int64ObservableGaugeOption + + delegate atomic.Value // metric.Int64ObservableGauge +} + +var ( + _ unwrapper = (*aiGauge)(nil) + _ metric.Int64ObservableGauge = (*aiGauge)(nil) +) + +func (i *aiGauge) setDelegate(m metric.Meter) { + ctr, err := m.Int64ObservableGauge(i.name, i.opts...) 
+ if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *aiGauge) Unwrap() metric.Observable { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(metric.Int64ObservableGauge) + } + return nil +} + +// Sync Instruments. +type sfCounter struct { + embedded.Float64Counter + + name string + opts []metric.Float64CounterOption + + delegate atomic.Value // metric.Float64Counter +} + +var _ metric.Float64Counter = (*sfCounter)(nil) + +func (i *sfCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64Counter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Float64Counter).Add(ctx, incr, opts...) + } +} + +type sfUpDownCounter struct { + embedded.Float64UpDownCounter + + name string + opts []metric.Float64UpDownCounterOption + + delegate atomic.Value // metric.Float64UpDownCounter +} + +var _ metric.Float64UpDownCounter = (*sfUpDownCounter)(nil) + +func (i *sfUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Float64UpDownCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfUpDownCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Float64UpDownCounter).Add(ctx, incr, opts...) + } +} + +type sfHistogram struct { + embedded.Float64Histogram + + name string + opts []metric.Float64HistogramOption + + delegate atomic.Value // metric.Float64Histogram +} + +var _ metric.Float64Histogram = (*sfHistogram)(nil) + +func (i *sfHistogram) setDelegate(m metric.Meter) { + ctr, err := m.Float64Histogram(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfHistogram) Record(ctx context.Context, x float64, opts ...metric.RecordOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Float64Histogram).Record(ctx, x, opts...) + } +} + +type siCounter struct { + embedded.Int64Counter + + name string + opts []metric.Int64CounterOption + + delegate atomic.Value // metric.Int64Counter +} + +var _ metric.Int64Counter = (*siCounter)(nil) + +func (i *siCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64Counter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Int64Counter).Add(ctx, x, opts...) + } +} + +type siUpDownCounter struct { + embedded.Int64UpDownCounter + + name string + opts []metric.Int64UpDownCounterOption + + delegate atomic.Value // metric.Int64UpDownCounter +} + +var _ metric.Int64UpDownCounter = (*siUpDownCounter)(nil) + +func (i *siUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.Int64UpDownCounter(i.name, i.opts...) + if err != nil { + GetErrorHandler().Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siUpDownCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(metric.Int64UpDownCounter).Add(ctx, x, opts...) 
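+		// Only the concrete SDK instrument is ever stored by setDelegate,
+		// so this type assertion cannot fail; with no delegate set, the
+		// measurement is silently dropped.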
+	}
+}
+
+type siHistogram struct {
+	embedded.Int64Histogram
+
+	name string
+	opts []metric.Int64HistogramOption
+
+	delegate atomic.Value // metric.Int64Histogram
+}
+
+var _ metric.Int64Histogram = (*siHistogram)(nil)
+
+func (i *siHistogram) setDelegate(m metric.Meter) {
+	ctr, err := m.Int64Histogram(i.name, i.opts...)
+	if err != nil {
+		GetErrorHandler().Handle(err)
+		return
+	}
+	i.delegate.Store(ctr)
+}
+
+func (i *siHistogram) Record(ctx context.Context, x int64, opts ...metric.RecordOption) {
+	if ctr := i.delegate.Load(); ctr != nil {
+		ctr.(metric.Int64Histogram).Record(ctx, x, opts...)
+	}
+}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
new file mode 100644
index 00000000..c6f305a2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
@@ -0,0 +1,69 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+	"log"
+	"os"
+	"sync/atomic"
+
+	"github.com/go-logr/logr"
+	"github.com/go-logr/stdr"
+)
+
+// globalLogger is the logging interface used within the otel api and sdk to provide details of the internals.
+//
+// The default logger uses stdr which is backed by the standard `log.Logger`
+// interface. This logger will only show messages at the Error Level.
+var globalLogger atomic.Pointer[logr.Logger]
+
+func init() {
+	SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)))
+}
+
+// SetLogger overrides the globalLogger with l.
+//
+// To see Warn messages use a logger with `l.V(1).Enabled() == true`
+// To see Info messages use a logger with `l.V(4).Enabled() == true`
+// To see Debug messages use a logger with `l.V(8).Enabled() == true`.
+func SetLogger(l logr.Logger) {
+	globalLogger.Store(&l)
+}
+
+func getLogger() logr.Logger {
+	return *globalLogger.Load()
+}
+
+// Info prints messages about the general state of the API or SDK.
+// This should usually be less than 5 messages a minute.
+func Info(msg string, keysAndValues ...interface{}) {
+	getLogger().V(4).Info(msg, keysAndValues...)
+}
+
+// Error prints messages about exceptional states of the API or SDK.
+func Error(err error, msg string, keysAndValues ...interface{}) {
+	getLogger().Error(err, msg, keysAndValues...)
+}
+
+// Debug prints messages about all internal changes in the API or SDK.
+func Debug(msg string, keysAndValues ...interface{}) {
+	getLogger().V(8).Info(msg, keysAndValues...)
+}
+
+// Warn prints messages about warnings in the API or SDK.
+// Not an error but is likely more important than an informational event.
+func Warn(msg string, keysAndValues ...interface{}) {
+	getLogger().V(1).Info(msg, keysAndValues...)
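+	// Illustrative note (not part of the vendored file): with the default
+	// stdr logger installed in init above, Warn/Info/Debug output only
+	// appears after raising stdr's verbosity, e.g.:
+	//
+	//	stdr.SetVerbosity(8) // 1 shows Warn, 4 adds Info, 8 adds Debug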
+} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go new file mode 100644 index 00000000..0097db47 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -0,0 +1,354 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package global // import "go.opentelemetry.io/otel/internal/global" + +import ( + "container/list" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/embedded" +) + +// meterProvider is a placeholder for a configured SDK MeterProvider. +// +// All MeterProvider functionality is forwarded to a delegate once +// configured. +type meterProvider struct { + embedded.MeterProvider + + mtx sync.Mutex + meters map[il]*meter + + delegate metric.MeterProvider +} + +// setDelegate configures p to delegate all MeterProvider functionality to +// provider. +// +// All Meters provided prior to this function call are switched out to be +// Meters provided by provider. All instruments and callbacks are recreated and +// delegated. +// +// It is guaranteed by the caller that this happens only once. +func (p *meterProvider) setDelegate(provider metric.MeterProvider) { + p.mtx.Lock() + defer p.mtx.Unlock() + + p.delegate = provider + + if len(p.meters) == 0 { + return + } + + for _, meter := range p.meters { + meter.setDelegate(provider) + } + + p.meters = nil +} + +// Meter implements MeterProvider. +func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Meter { + p.mtx.Lock() + defer p.mtx.Unlock() + + if p.delegate != nil { + return p.delegate.Meter(name, opts...) + } + + // At this moment it is guaranteed that no sdk is installed, save the meter in the meters map. + + c := metric.NewMeterConfig(opts...) + key := il{ + name: name, + version: c.InstrumentationVersion(), + } + + if p.meters == nil { + p.meters = make(map[il]*meter) + } + + if val, ok := p.meters[key]; ok { + return val + } + + t := &meter{name: name, opts: opts} + p.meters[key] = t + return t +} + +// meter is a placeholder for a metric.Meter. +// +// All Meter functionality is forwarded to a delegate once configured. +// Otherwise, all functionality is forwarded to a NoopMeter. +type meter struct { + embedded.Meter + + name string + opts []metric.MeterOption + + mtx sync.Mutex + instruments []delegatedInstrument + + registry list.List + + delegate atomic.Value // metric.Meter +} + +type delegatedInstrument interface { + setDelegate(metric.Meter) +} + +// setDelegate configures m to delegate all Meter functionality to Meters +// created by provider. +// +// All subsequent calls to the Meter methods will be passed to the delegate. +// +// It is guaranteed by the caller that this happens only once. +func (m *meter) setDelegate(provider metric.MeterProvider) { + meter := provider.Meter(m.name, m.opts...) 
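+	// Publish the SDK Meter before replaying: new instrument calls start
+	// delegating immediately, while the placeholders captured so far are
+	// re-created under the lock below.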
+ m.delegate.Store(meter) + + m.mtx.Lock() + defer m.mtx.Unlock() + + for _, inst := range m.instruments { + inst.setDelegate(meter) + } + + for e := m.registry.Front(); e != nil; e = e.Next() { + r := e.Value.(*registration) + r.setDelegate(meter) + m.registry.Remove(e) + } + + m.instruments = nil + m.registry.Init() +} + +func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64Counter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &siCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64UpDownCounter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &siUpDownCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64Histogram(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &siHistogram{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64ObservableCounter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &aiCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64ObservableUpDownCounter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &aiUpDownCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Int64ObservableGauge(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &aiGauge{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64Counter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &sfCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64UpDownCounter(name, options...) 
+ } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &sfUpDownCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64Histogram(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &sfHistogram{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64ObservableCounter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &afCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64ObservableUpDownCounter(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &afUpDownCounter{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.Float64ObservableGauge(name, options...) + } + m.mtx.Lock() + defer m.mtx.Unlock() + i := &afGauge{name: name, opts: options} + m.instruments = append(m.instruments, i) + return i, nil +} + +// RegisterCallback captures the function that will be called during Collect. +func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) { + if del, ok := m.delegate.Load().(metric.Meter); ok { + insts = unwrapInstruments(insts) + return del.RegisterCallback(f, insts...) + } + + m.mtx.Lock() + defer m.mtx.Unlock() + + reg := ®istration{instruments: insts, function: f} + e := m.registry.PushBack(reg) + reg.unreg = func() error { + m.mtx.Lock() + _ = m.registry.Remove(e) + m.mtx.Unlock() + return nil + } + return reg, nil +} + +type wrapped interface { + unwrap() metric.Observable +} + +func unwrapInstruments(instruments []metric.Observable) []metric.Observable { + out := make([]metric.Observable, 0, len(instruments)) + + for _, inst := range instruments { + if in, ok := inst.(wrapped); ok { + out = append(out, in.unwrap()) + } else { + out = append(out, inst) + } + } + + return out +} + +type registration struct { + embedded.Registration + + instruments []metric.Observable + function metric.Callback + + unreg func() error + unregMu sync.Mutex +} + +func (c *registration) setDelegate(m metric.Meter) { + insts := unwrapInstruments(c.instruments) + + c.unregMu.Lock() + defer c.unregMu.Unlock() + + if c.unreg == nil { + // Unregister already called. + return + } + + reg, err := m.RegisterCallback(c.function, insts...) + if err != nil { + GetErrorHandler().Handle(err) + } + + c.unreg = reg.Unregister +} + +func (c *registration) Unregister() error { + c.unregMu.Lock() + defer c.unregMu.Unlock() + if c.unreg == nil { + // Unregister already called. 
+		return nil
+	}
+
+	var err error
+	err, c.unreg = c.unreg(), nil
+	return err
+}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/propagator.go b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go
new file mode 100644
index 00000000..06bac35c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go
@@ -0,0 +1,82 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+	"context"
+	"sync"
+
+	"go.opentelemetry.io/otel/propagation"
+)
+
+// textMapPropagator is a default TextMapPropagator that delegates calls to a
+// registered delegate if one is set, otherwise it delegates the calls to the
+// default no-op propagation.TextMapPropagator.
+type textMapPropagator struct {
+	mtx      sync.Mutex
+	once     sync.Once
+	delegate propagation.TextMapPropagator
+	noop     propagation.TextMapPropagator
+}
+
+// Compile-time guarantee that textMapPropagator implements the
+// propagation.TextMapPropagator interface.
+var _ propagation.TextMapPropagator = (*textMapPropagator)(nil)
+
+func newTextMapPropagator() *textMapPropagator {
+	return &textMapPropagator{
+		noop: propagation.NewCompositeTextMapPropagator(),
+	}
+}
+
+// SetDelegate sets a delegate propagation.TextMapPropagator that all calls are
+// forwarded to. Delegation can only be performed once; all subsequent calls
+// perform no delegation.
+func (p *textMapPropagator) SetDelegate(delegate propagation.TextMapPropagator) {
+	if delegate == nil {
+		return
+	}
+
+	p.mtx.Lock()
+	p.once.Do(func() { p.delegate = delegate })
+	p.mtx.Unlock()
+}
+
+// effectiveDelegate returns the current delegate of p if one is set,
+// otherwise the default noop TextMapPropagator is returned. This method
+// can be called concurrently.
+func (p *textMapPropagator) effectiveDelegate() propagation.TextMapPropagator {
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+	if p.delegate != nil {
+		return p.delegate
+	}
+	return p.noop
+}
+
+// Inject sets cross-cutting concerns from the Context into the carrier.
+func (p *textMapPropagator) Inject(ctx context.Context, carrier propagation.TextMapCarrier) {
+	p.effectiveDelegate().Inject(ctx, carrier)
+}
+
+// Extract reads cross-cutting concerns from the carrier into a Context.
+func (p *textMapPropagator) Extract(ctx context.Context, carrier propagation.TextMapCarrier) context.Context {
+	return p.effectiveDelegate().Extract(ctx, carrier)
+}
+
+// Fields returns the keys whose values are set with Inject.
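+// Until a delegate is set, this is the empty field list of the no-op
+// composite propagator.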
+func (p *textMapPropagator) Fields() []string { + return p.effectiveDelegate().Fields() +} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/state.go b/vendor/go.opentelemetry.io/otel/internal/global/state.go new file mode 100644 index 00000000..7985005b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/global/state.go @@ -0,0 +1,156 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package global // import "go.opentelemetry.io/otel/internal/global" + +import ( + "errors" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" +) + +type ( + tracerProviderHolder struct { + tp trace.TracerProvider + } + + propagatorsHolder struct { + tm propagation.TextMapPropagator + } + + meterProviderHolder struct { + mp metric.MeterProvider + } +) + +var ( + globalTracer = defaultTracerValue() + globalPropagators = defaultPropagatorsValue() + globalMeterProvider = defaultMeterProvider() + + delegateTraceOnce sync.Once + delegateTextMapPropagatorOnce sync.Once + delegateMeterOnce sync.Once +) + +// TracerProvider is the internal implementation for global.TracerProvider. +func TracerProvider() trace.TracerProvider { + return globalTracer.Load().(tracerProviderHolder).tp +} + +// SetTracerProvider is the internal implementation for global.SetTracerProvider. +func SetTracerProvider(tp trace.TracerProvider) { + current := TracerProvider() + + if _, cOk := current.(*tracerProvider); cOk { + if _, tpOk := tp.(*tracerProvider); tpOk && current == tp { + // Do not assign the default delegating TracerProvider to delegate + // to itself. + Error( + errors.New("no delegate configured in tracer provider"), + "Setting tracer provider to it's current value. No delegate will be configured", + ) + return + } + } + + delegateTraceOnce.Do(func() { + if def, ok := current.(*tracerProvider); ok { + def.setDelegate(tp) + } + }) + globalTracer.Store(tracerProviderHolder{tp: tp}) +} + +// TextMapPropagator is the internal implementation for global.TextMapPropagator. +func TextMapPropagator() propagation.TextMapPropagator { + return globalPropagators.Load().(propagatorsHolder).tm +} + +// SetTextMapPropagator is the internal implementation for global.SetTextMapPropagator. +func SetTextMapPropagator(p propagation.TextMapPropagator) { + current := TextMapPropagator() + + if _, cOk := current.(*textMapPropagator); cOk { + if _, pOk := p.(*textMapPropagator); pOk && current == p { + // Do not assign the default delegating TextMapPropagator to + // delegate to itself. + Error( + errors.New("no delegate configured in text map propagator"), + "Setting text map propagator to it's current value. No delegate will be configured", + ) + return + } + } + + // For the textMapPropagator already returned by TextMapPropagator + // delegate to p. 
+ delegateTextMapPropagatorOnce.Do(func() { + if def, ok := current.(*textMapPropagator); ok { + def.SetDelegate(p) + } + }) + // Return p when subsequent calls to TextMapPropagator are made. + globalPropagators.Store(propagatorsHolder{tm: p}) +} + +// MeterProvider is the internal implementation for global.MeterProvider. +func MeterProvider() metric.MeterProvider { + return globalMeterProvider.Load().(meterProviderHolder).mp +} + +// SetMeterProvider is the internal implementation for global.SetMeterProvider. +func SetMeterProvider(mp metric.MeterProvider) { + current := MeterProvider() + if _, cOk := current.(*meterProvider); cOk { + if _, mpOk := mp.(*meterProvider); mpOk && current == mp { + // Do not assign the default delegating MeterProvider to delegate + // to itself. + Error( + errors.New("no delegate configured in meter provider"), + "Setting meter provider to it's current value. No delegate will be configured", + ) + return + } + } + + delegateMeterOnce.Do(func() { + if def, ok := current.(*meterProvider); ok { + def.setDelegate(mp) + } + }) + globalMeterProvider.Store(meterProviderHolder{mp: mp}) +} + +func defaultTracerValue() *atomic.Value { + v := &atomic.Value{} + v.Store(tracerProviderHolder{tp: &tracerProvider{}}) + return v +} + +func defaultPropagatorsValue() *atomic.Value { + v := &atomic.Value{} + v.Store(propagatorsHolder{tm: newTextMapPropagator()}) + return v +} + +func defaultMeterProvider() *atomic.Value { + v := &atomic.Value{} + v.Store(meterProviderHolder{mp: &meterProvider{}}) + return v +} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go new file mode 100644 index 00000000..3f61ec12 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go @@ -0,0 +1,199 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package global // import "go.opentelemetry.io/otel/internal/global" + +/* +This file contains the forwarding implementation of the TracerProvider used as +the default global instance. Prior to initialization of an SDK, Tracers +returned by the global TracerProvider will provide no-op functionality. This +means that all Span created prior to initialization are no-op Spans. + +Once an SDK has been initialized, all provided no-op Tracers are swapped for +Tracers provided by the SDK defined TracerProvider. However, any Span started +prior to this initialization does not change its behavior. Meaning, the Span +remains a no-op Span. + +The implementation to track and swap Tracers locks all new Tracer creation +until the swap is complete. This assumes that this operation is not +performance-critical. If that assumption is incorrect, be sure to configure an +SDK prior to any Tracer creation. 
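+
+An illustrative sketch (not part of the vendored file) of the swap described
+above, assuming a context ctx and an SDK provider sdktp:
+
+	tr := otel.Tracer("app")           // placeholder Tracer for now
+	_, early := tr.Start(ctx, "early") // no-op Span; stays no-op forever
+	otel.SetTracerProvider(sdktp)      // tr begins delegating to the SDK
+	_, late := tr.Start(ctx, "late")   // recorded by the SDK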
+*/ + +import ( + "context" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/embedded" +) + +// tracerProvider is a placeholder for a configured SDK TracerProvider. +// +// All TracerProvider functionality is forwarded to a delegate once +// configured. +type tracerProvider struct { + embedded.TracerProvider + + mtx sync.Mutex + tracers map[il]*tracer + delegate trace.TracerProvider +} + +// Compile-time guarantee that tracerProvider implements the TracerProvider +// interface. +var _ trace.TracerProvider = &tracerProvider{} + +// setDelegate configures p to delegate all TracerProvider functionality to +// provider. +// +// All Tracers provided prior to this function call are switched out to be +// Tracers provided by provider. +// +// It is guaranteed by the caller that this happens only once. +func (p *tracerProvider) setDelegate(provider trace.TracerProvider) { + p.mtx.Lock() + defer p.mtx.Unlock() + + p.delegate = provider + + if len(p.tracers) == 0 { + return + } + + for _, t := range p.tracers { + t.setDelegate(provider) + } + + p.tracers = nil +} + +// Tracer implements TracerProvider. +func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { + p.mtx.Lock() + defer p.mtx.Unlock() + + if p.delegate != nil { + return p.delegate.Tracer(name, opts...) + } + + // At this moment it is guaranteed that no sdk is installed, save the tracer in the tracers map. + + c := trace.NewTracerConfig(opts...) + key := il{ + name: name, + version: c.InstrumentationVersion(), + } + + if p.tracers == nil { + p.tracers = make(map[il]*tracer) + } + + if val, ok := p.tracers[key]; ok { + return val + } + + t := &tracer{name: name, opts: opts, provider: p} + p.tracers[key] = t + return t +} + +type il struct { + name string + version string +} + +// tracer is a placeholder for a trace.Tracer. +// +// All Tracer functionality is forwarded to a delegate once configured. +// Otherwise, all functionality is forwarded to a NoopTracer. +type tracer struct { + embedded.Tracer + + name string + opts []trace.TracerOption + provider *tracerProvider + + delegate atomic.Value +} + +// Compile-time guarantee that tracer implements the trace.Tracer interface. +var _ trace.Tracer = &tracer{} + +// setDelegate configures t to delegate all Tracer functionality to Tracers +// created by provider. +// +// All subsequent calls to the Tracer methods will be passed to the delegate. +// +// It is guaranteed by the caller that this happens only once. +func (t *tracer) setDelegate(provider trace.TracerProvider) { + t.delegate.Store(provider.Tracer(t.name, t.opts...)) +} + +// Start implements trace.Tracer by forwarding the call to t.delegate if +// set, otherwise it forwards the call to a NoopTracer. +func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { + delegate := t.delegate.Load() + if delegate != nil { + return delegate.(trace.Tracer).Start(ctx, name, opts...) + } + + s := nonRecordingSpan{sc: trace.SpanContextFromContext(ctx), tracer: t} + ctx = trace.ContextWithSpan(ctx, s) + return ctx, s +} + +// nonRecordingSpan is a minimal implementation of a Span that wraps a +// SpanContext. It performs no operations other than to return the wrapped +// SpanContext. 
+type nonRecordingSpan struct {
+	embedded.Span
+
+	sc     trace.SpanContext
+	tracer *tracer
+}
+
+var _ trace.Span = nonRecordingSpan{}
+
+// SpanContext returns the wrapped SpanContext.
+func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc }
+
+// IsRecording always returns false.
+func (nonRecordingSpan) IsRecording() bool { return false }
+
+// SetStatus does nothing.
+func (nonRecordingSpan) SetStatus(codes.Code, string) {}
+
+// SetError does nothing.
+func (nonRecordingSpan) SetError(bool) {}
+
+// SetAttributes does nothing.
+func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {}
+
+// End does nothing.
+func (nonRecordingSpan) End(...trace.SpanEndOption) {}
+
+// RecordError does nothing.
+func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {}
+
+// AddEvent does nothing.
+func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {}
+
+// SetName does nothing.
+func (nonRecordingSpan) SetName(string) {}
+
+func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider }
diff --git a/vendor/go.opentelemetry.io/otel/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal_logging.go
new file mode 100644
index 00000000..c4f8acd5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal_logging.go
@@ -0,0 +1,26 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otel // import "go.opentelemetry.io/otel"
+
+import (
+	"github.com/go-logr/logr"
+
+	"go.opentelemetry.io/otel/internal/global"
+)
+
+// SetLogger configures the logger used internally to opentelemetry.
+func SetLogger(logger logr.Logger) {
+	global.SetLogger(logger)
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric.go b/vendor/go.opentelemetry.io/otel/metric.go
new file mode 100644
index 00000000..f9551719
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric.go
@@ -0,0 +1,53 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otel // import "go.opentelemetry.io/otel"
+
+import (
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/metric"
+)
+
+// Meter returns a Meter from the global MeterProvider. The name must be the
+// name of the library providing instrumentation. This name may be the same as
+// the instrumented code only if that code provides built-in instrumentation.
+// If the name is empty, then an implementation-defined default name will be
+// used instead.
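+//
+// An illustrative sketch (not part of the vendored file), with a
+// hypothetical instrumentation name:
+//
+//	meter := otel.Meter("example.com/myapp")
+//	counter, err := meter.Int64Counter("requests.total")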
+// +// If this is called before a global MeterProvider is registered the returned +// Meter will be a No-op implementation of a Meter. When a global MeterProvider +// is registered for the first time, the returned Meter, and all the +// instruments it has created or will create, are recreated automatically from +// the new MeterProvider. +// +// This is short for GetMeterProvider().Meter(name). +func Meter(name string, opts ...metric.MeterOption) metric.Meter { + return GetMeterProvider().Meter(name, opts...) +} + +// GetMeterProvider returns the registered global meter provider. +// +// If no global GetMeterProvider has been registered, a No-op GetMeterProvider +// implementation is returned. When a global GetMeterProvider is registered for +// the first time, the returned GetMeterProvider, and all the Meters it has +// created or will create, are recreated automatically from the new +// GetMeterProvider. +func GetMeterProvider() metric.MeterProvider { + return global.MeterProvider() +} + +// SetMeterProvider registers mp as the global MeterProvider. +func SetMeterProvider(mp metric.MeterProvider) { + global.SetMeterProvider(mp) +} diff --git a/vendor/github.com/apache/thrift/LICENSE b/vendor/go.opentelemetry.io/otel/metric/LICENSE similarity index 73% rename from vendor/github.com/apache/thrift/LICENSE rename to vendor/go.opentelemetry.io/otel/metric/LICENSE index 2bc6fbbf..261eeb9e 100644 --- a/vendor/github.com/apache/thrift/LICENSE +++ b/vendor/go.opentelemetry.io/otel/metric/LICENSE @@ -1,4 +1,3 @@ - Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -200,107 +199,3 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - --------------------------------------------------- -SOFTWARE DISTRIBUTED WITH THRIFT: - -The Apache Thrift software includes a number of subcomponents with -separate copyright notices and license terms. Your use of the source -code for the these subcomponents is subject to the terms and -conditions of the following licenses. - --------------------------------------------------- -Portions of the following files are licensed under the MIT License: - - lib/erl/src/Makefile.am - -Please see doc/otp-base-license.txt for the full terms of this license. - --------------------------------------------------- -For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components: - -# Copyright (c) 2007 Thomas Porschberg -# -# Copying and distribution of this file, with or without -# modification, are permitted in any medium without royalty provided -# the copyright notice and this notice are preserved. - --------------------------------------------------- -For the lib/nodejs/lib/thrift/json_parse.js: - -/* - json_parse.js - 2015-05-02 - Public Domain. - NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. - -*/ -(By Douglas Crockford ) - --------------------------------------------------- -For lib/cpp/src/thrift/windows/SocketPair.cpp - -/* socketpair.c - * Copyright 2007 by Nathan C. Myers ; some rights reserved. - * This code is Free Software. It may be copied freely, in original or - * modified form, subject only to the restrictions that (1) the author is - * relieved from all responsibilities for any use for any purpose, and (2) - * this copyright notice must be retained, unchanged, in its entirety. 
If - * for any reason the author might be held responsible for any consequences - * of copying or use, license is withheld. - */ - - --------------------------------------------------- -For lib/py/compat/win32/stdint.h - -// ISO C9x compliant stdint.h for Microsoft Visual Studio -// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 -// -// Copyright (c) 2006-2008 Alexander Chemeris -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// -// 3. The name of the author may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED -// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO -// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; -// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR -// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -/////////////////////////////////////////////////////////////////////////////// - - --------------------------------------------------- -Codegen template in t_html_generator.h - -* Bootstrap v2.0.3 -* -* Copyright 2012 Twitter, Inc -* Licensed under the Apache License v2.0 -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Designed and built with all the love in the world @twitter by @mdo and @fat. - ---------------------------------------------------- -For t_cl_generator.cc - - * Copyright (c) 2008- Patrick Collison - * Copyright (c) 2006- Facebook - ---------------------------------------------------- diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go new file mode 100644 index 00000000..072baa8e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go @@ -0,0 +1,271 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric/embedded"
+)
+
+// Float64Observable describes a set of instruments used asynchronously to
+// record float64 measurements once per collection cycle. Observations of
+// these instruments are only made within a callback.
+//
+// Warning: Methods may be added to this interface in minor releases.
+type Float64Observable interface {
+	Observable
+
+	float64Observable()
+}
+
+// Float64ObservableCounter is an instrument used to asynchronously record
+// increasing float64 measurements once per collection cycle. Observations are
+// only made within a callback for this instrument. The value observed is
+// assumed to be the cumulative sum of the count.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64ObservableCounter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Float64ObservableCounter
+
+	Float64Observable
+}
+
+// Float64ObservableCounterConfig contains options for asynchronous counter
+// instruments that record float64 values.
+type Float64ObservableCounterConfig struct {
+	description string
+	unit        string
+	callbacks   []Float64Callback
+}
+
+// NewFloat64ObservableCounterConfig returns a new
+// [Float64ObservableCounterConfig] with all opts applied.
+func NewFloat64ObservableCounterConfig(opts ...Float64ObservableCounterOption) Float64ObservableCounterConfig {
+	var config Float64ObservableCounterConfig
+	for _, o := range opts {
+		config = o.applyFloat64ObservableCounter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Float64ObservableCounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64ObservableCounterConfig) Unit() string {
+	return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Float64ObservableCounterConfig) Callbacks() []Float64Callback {
+	return c.callbacks
+}
+
+// Float64ObservableCounterOption applies options to a
+// [Float64ObservableCounterConfig]. See [Float64ObservableOption] and
+// [InstrumentOption] for other options that can be used as a
+// Float64ObservableCounterOption.
+type Float64ObservableCounterOption interface {
+	applyFloat64ObservableCounter(Float64ObservableCounterConfig) Float64ObservableCounterConfig
+}
+
+// Float64ObservableUpDownCounter is an instrument used to asynchronously
+// record float64 measurements once per collection cycle. Observations are only
+// made within a callback for this instrument. The value observed is assumed
+// to be the cumulative sum of the count.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64ObservableUpDownCounter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Float64ObservableUpDownCounter
+
+	Float64Observable
+}
+
+// Float64ObservableUpDownCounterConfig contains options for asynchronous
+// counter instruments that record float64 values.
+type Float64ObservableUpDownCounterConfig struct {
+	description string
+	unit        string
+	callbacks   []Float64Callback
+}
+
+// NewFloat64ObservableUpDownCounterConfig returns a new
+// [Float64ObservableUpDownCounterConfig] with all opts applied.
+func NewFloat64ObservableUpDownCounterConfig(opts ...Float64ObservableUpDownCounterOption) Float64ObservableUpDownCounterConfig {
+	var config Float64ObservableUpDownCounterConfig
+	for _, o := range opts {
+		config = o.applyFloat64ObservableUpDownCounter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Float64ObservableUpDownCounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64ObservableUpDownCounterConfig) Unit() string {
+	return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Float64ObservableUpDownCounterConfig) Callbacks() []Float64Callback {
+	return c.callbacks
+}
+
+// Float64ObservableUpDownCounterOption applies options to a
+// [Float64ObservableUpDownCounterConfig]. See [Float64ObservableOption] and
+// [InstrumentOption] for other options that can be used as a
+// Float64ObservableUpDownCounterOption.
+type Float64ObservableUpDownCounterOption interface {
+	applyFloat64ObservableUpDownCounter(Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig
+}
+
+// Float64ObservableGauge is an instrument used to asynchronously record
+// instantaneous float64 measurements once per collection cycle. Observations
+// are only made within a callback for this instrument.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64ObservableGauge interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Float64ObservableGauge
+
+	Float64Observable
+}
+
+// Float64ObservableGaugeConfig contains options for asynchronous gauge
+// instruments that record float64 values.
+type Float64ObservableGaugeConfig struct {
+	description string
+	unit        string
+	callbacks   []Float64Callback
+}
+
+// NewFloat64ObservableGaugeConfig returns a new [Float64ObservableGaugeConfig]
+// with all opts applied.
+func NewFloat64ObservableGaugeConfig(opts ...Float64ObservableGaugeOption) Float64ObservableGaugeConfig {
+	var config Float64ObservableGaugeConfig
+	for _, o := range opts {
+		config = o.applyFloat64ObservableGauge(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Float64ObservableGaugeConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64ObservableGaugeConfig) Unit() string {
+	return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Float64ObservableGaugeConfig) Callbacks() []Float64Callback {
+	return c.callbacks
+}
+
+// Float64ObservableGaugeOption applies options to a
+// [Float64ObservableGaugeConfig]. See [Float64ObservableOption] and
+// [InstrumentOption] for other options that can be used as a
+// Float64ObservableGaugeOption.
+type Float64ObservableGaugeOption interface {
+	applyFloat64ObservableGauge(Float64ObservableGaugeConfig) Float64ObservableGaugeConfig
+}
+
+// Float64Observer is a recorder of float64 measurements.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64Observer interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Float64Observer
+
+	// Observe records the float64 value.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Observe(value float64, options ...ObserveOption)
+}
+
+// Float64Callback is a function registered with a Meter that makes
+// observations for a Float64Observable instrument it is registered with.
+// Calls to the Float64Observer record measurement values for the
+// Float64Observable.
+//
+// The function needs to complete in a finite amount of time and the deadline
+// of the passed context is expected to be honored.
+//
+// The function needs to make unique observations across all registered
+// Float64Callbacks. Meaning, it should not report measurements with the same
+// attributes as another Float64Callback also registered for the same
+// instrument.
+//
+// The function needs to be concurrent safe.
+type Float64Callback func(context.Context, Float64Observer) error
+
+// Float64ObservableOption applies options to float64 Observer instruments.
+type Float64ObservableOption interface {
+	Float64ObservableCounterOption
+	Float64ObservableUpDownCounterOption
+	Float64ObservableGaugeOption
+}
+
+type float64CallbackOpt struct {
+	cback Float64Callback
+}
+
+func (o float64CallbackOpt) applyFloat64ObservableCounter(cfg Float64ObservableCounterConfig) Float64ObservableCounterConfig {
+	cfg.callbacks = append(cfg.callbacks, o.cback)
+	return cfg
+}
+
+func (o float64CallbackOpt) applyFloat64ObservableUpDownCounter(cfg Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig {
+	cfg.callbacks = append(cfg.callbacks, o.cback)
+	return cfg
+}
+
+func (o float64CallbackOpt) applyFloat64ObservableGauge(cfg Float64ObservableGaugeConfig) Float64ObservableGaugeConfig {
+	cfg.callbacks = append(cfg.callbacks, o.cback)
+	return cfg
+}
+
+// WithFloat64Callback adds the callback to be called for an instrument.
+func WithFloat64Callback(callback Float64Callback) Float64ObservableOption {
+	return float64CallbackOpt{callback}
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go
new file mode 100644
index 00000000..9bd6ebf0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go
@@ -0,0 +1,269 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric/embedded"
+)
+
+// Int64Observable describes a set of instruments used asynchronously to record
+// int64 measurements once per collection cycle. Observations of these
+// instruments are only made within a callback.
+//
+// Warning: Methods may be added to this interface in minor releases.
+type Int64Observable interface {
+	Observable
+
+	int64Observable()
+}
+
+// Int64ObservableCounter is an instrument used to asynchronously record
+// increasing int64 measurements once per collection cycle. Observations are
+// only made within a callback for this instrument. The value observed is
+// assumed to be the cumulative sum of the count.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64ObservableCounter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64ObservableCounter
+
+	Int64Observable
+}
+
+// Int64ObservableCounterConfig contains options for asynchronous counter
+// instruments that record int64 values.
+type Int64ObservableCounterConfig struct {
+	description string
+	unit        string
+	callbacks   []Int64Callback
+}
+
+// NewInt64ObservableCounterConfig returns a new [Int64ObservableCounterConfig]
+// with all opts applied.
+func NewInt64ObservableCounterConfig(opts ...Int64ObservableCounterOption) Int64ObservableCounterConfig {
+	var config Int64ObservableCounterConfig
+	for _, o := range opts {
+		config = o.applyInt64ObservableCounter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Int64ObservableCounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64ObservableCounterConfig) Unit() string {
+	return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Int64ObservableCounterConfig) Callbacks() []Int64Callback {
+	return c.callbacks
+}
+
+// Int64ObservableCounterOption applies options to a
+// [Int64ObservableCounterConfig]. See [Int64ObservableOption] and
+// [InstrumentOption] for other options that can be used as an
+// Int64ObservableCounterOption.
+type Int64ObservableCounterOption interface {
+	applyInt64ObservableCounter(Int64ObservableCounterConfig) Int64ObservableCounterConfig
+}
+
+// Int64ObservableUpDownCounter is an instrument used to asynchronously record
+// int64 measurements once per collection cycle. Observations are only made
+// within a callback for this instrument. The value observed is assumed to
+// be the cumulative sum of the count.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64ObservableUpDownCounter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64ObservableUpDownCounter
+
+	Int64Observable
+}
+
+// Int64ObservableUpDownCounterConfig contains options for asynchronous counter
+// instruments that record int64 values.
+type Int64ObservableUpDownCounterConfig struct {
+	description string
+	unit        string
+	callbacks   []Int64Callback
+}
+
+// NewInt64ObservableUpDownCounterConfig returns a new
+// [Int64ObservableUpDownCounterConfig] with all opts applied.
+func NewInt64ObservableUpDownCounterConfig(opts ...Int64ObservableUpDownCounterOption) Int64ObservableUpDownCounterConfig {
+	var config Int64ObservableUpDownCounterConfig
+	for _, o := range opts {
+		config = o.applyInt64ObservableUpDownCounter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Int64ObservableUpDownCounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64ObservableUpDownCounterConfig) Unit() string {
+	return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Int64ObservableUpDownCounterConfig) Callbacks() []Int64Callback {
+	return c.callbacks
+}
+
+// Int64ObservableUpDownCounterOption applies options to a
+// [Int64ObservableUpDownCounterConfig]. See [Int64ObservableOption] and
+// [InstrumentOption] for other options that can be used as an
+// Int64ObservableUpDownCounterOption.
+type Int64ObservableUpDownCounterOption interface {
+	applyInt64ObservableUpDownCounter(Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig
+}
+
+// Int64ObservableGauge is an instrument used to asynchronously record
+// instantaneous int64 measurements once per collection cycle. Observations are
+// only made within a callback for this instrument.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64ObservableGauge interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64ObservableGauge
+
+	Int64Observable
+}
+
+// Int64ObservableGaugeConfig contains options for asynchronous gauge
+// instruments that record int64 values.
+type Int64ObservableGaugeConfig struct {
+	description string
+	unit        string
+	callbacks   []Int64Callback
+}
+
+// NewInt64ObservableGaugeConfig returns a new [Int64ObservableGaugeConfig]
+// with all opts applied.
+func NewInt64ObservableGaugeConfig(opts ...Int64ObservableGaugeOption) Int64ObservableGaugeConfig {
+	var config Int64ObservableGaugeConfig
+	for _, o := range opts {
+		config = o.applyInt64ObservableGauge(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Int64ObservableGaugeConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64ObservableGaugeConfig) Unit() string {
+	return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Int64ObservableGaugeConfig) Callbacks() []Int64Callback {
+	return c.callbacks
+}
+
+// Int64ObservableGaugeOption applies options to a
+// [Int64ObservableGaugeConfig]. See [Int64ObservableOption] and
+// [InstrumentOption] for other options that can be used as an
+// Int64ObservableGaugeOption.
+type Int64ObservableGaugeOption interface {
+	applyInt64ObservableGauge(Int64ObservableGaugeConfig) Int64ObservableGaugeConfig
+}
+
+// Int64Observer is a recorder of int64 measurements.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64Observer interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64Observer
+
+	// Observe records the int64 value.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Observe(value int64, options ...ObserveOption)
+}
+
+// Int64Callback is a function registered with a Meter that makes observations
+// for an Int64Observable instrument it is registered with. Calls to the
+// Int64Observer record measurement values for the Int64Observable.
+//
+// The function needs to complete in a finite amount of time and the deadline
+// of the passed context is expected to be honored.
+//
+// The function needs to make unique observations across all registered
+// Int64Callbacks. Meaning, it should not report measurements with the same
+// attributes as another Int64Callback also registered for the same
+// instrument.
+//
+// The function needs to be concurrent safe.
+type Int64Callback func(context.Context, Int64Observer) error
+
+// Int64ObservableOption applies options to int64 Observer instruments.
+type Int64ObservableOption interface {
+	Int64ObservableCounterOption
+	Int64ObservableUpDownCounterOption
+	Int64ObservableGaugeOption
+}
+
+type int64CallbackOpt struct {
+	cback Int64Callback
+}
+
+func (o int64CallbackOpt) applyInt64ObservableCounter(cfg Int64ObservableCounterConfig) Int64ObservableCounterConfig {
+	cfg.callbacks = append(cfg.callbacks, o.cback)
+	return cfg
+}
+
+func (o int64CallbackOpt) applyInt64ObservableUpDownCounter(cfg Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig {
+	cfg.callbacks = append(cfg.callbacks, o.cback)
+	return cfg
+}
+
+func (o int64CallbackOpt) applyInt64ObservableGauge(cfg Int64ObservableGaugeConfig) Int64ObservableGaugeConfig {
+	cfg.callbacks = append(cfg.callbacks, o.cback)
+	return cfg
+}
+
+// WithInt64Callback adds the callback to be called for an instrument.
+func WithInt64Callback(callback Int64Callback) Int64ObservableOption {
+	return int64CallbackOpt{callback}
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/config.go b/vendor/go.opentelemetry.io/otel/metric/config.go
new file mode 100644
index 00000000..778ad2d7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/config.go
@@ -0,0 +1,92 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package metric // import "go.opentelemetry.io/otel/metric" + +import "go.opentelemetry.io/otel/attribute" + +// MeterConfig contains options for Meters. +type MeterConfig struct { + instrumentationVersion string + schemaURL string + attrs attribute.Set + + // Ensure forward compatibility by explicitly making this not comparable. + noCmp [0]func() //nolint: unused // This is indeed used. +} + +// InstrumentationVersion returns the version of the library providing +// instrumentation. +func (cfg MeterConfig) InstrumentationVersion() string { + return cfg.instrumentationVersion +} + +// InstrumentationAttributes returns the attributes associated with the library +// providing instrumentation. +func (cfg MeterConfig) InstrumentationAttributes() attribute.Set { + return cfg.attrs +} + +// SchemaURL is the schema_url of the library providing instrumentation. +func (cfg MeterConfig) SchemaURL() string { + return cfg.schemaURL +} + +// MeterOption is an interface for applying Meter options. +type MeterOption interface { + // applyMeter is used to set a MeterOption value of a MeterConfig. + applyMeter(MeterConfig) MeterConfig +} + +// NewMeterConfig creates a new MeterConfig and applies +// all the given options. +func NewMeterConfig(opts ...MeterOption) MeterConfig { + var config MeterConfig + for _, o := range opts { + config = o.applyMeter(config) + } + return config +} + +type meterOptionFunc func(MeterConfig) MeterConfig + +func (fn meterOptionFunc) applyMeter(cfg MeterConfig) MeterConfig { + return fn(cfg) +} + +// WithInstrumentationVersion sets the instrumentation version. +func WithInstrumentationVersion(version string) MeterOption { + return meterOptionFunc(func(config MeterConfig) MeterConfig { + config.instrumentationVersion = version + return config + }) +} + +// WithInstrumentationAttributes sets the instrumentation attributes. +// +// The passed attributes will be de-duplicated. +func WithInstrumentationAttributes(attr ...attribute.KeyValue) MeterOption { + return meterOptionFunc(func(config MeterConfig) MeterConfig { + config.attrs = attribute.NewSet(attr...) + return config + }) +} + +// WithSchemaURL sets the schema URL. +func WithSchemaURL(schemaURL string) MeterOption { + return meterOptionFunc(func(config MeterConfig) MeterConfig { + config.schemaURL = schemaURL + return config + }) +} diff --git a/vendor/go.opentelemetry.io/otel/metric/doc.go b/vendor/go.opentelemetry.io/otel/metric/doc.go new file mode 100644 index 00000000..54716e13 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/doc.go @@ -0,0 +1,170 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package metric provides the OpenTelemetry API used to measure metrics about +source code operation. + +This API is separate from its implementation so the instrumentation built from +it is reusable. See [go.opentelemetry.io/otel/sdk/metric] for the official +OpenTelemetry implementation of this API. 
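+
+For example, instrumentation can be written against this API alone and wired
+to any conforming implementation later. The following is a minimal sketch;
+the package, meter, and instrument names are hypothetical:
+
+	import "go.opentelemetry.io/otel/metric"
+
+	type Cache struct {
+		hits metric.Int64Counter
+	}
+
+	// NewCache accepts a MeterProvider so the caller decides which
+	// implementation ultimately records the measurements.
+	func NewCache(mp metric.MeterProvider) (*Cache, error) {
+		meter := mp.Meter("example.com/hypothetical/cache")
+		hits, err := meter.Int64Counter("cache.hits")
+		if err != nil {
+			return nil, err
+		}
+		return &Cache{hits: hits}, nil
+	}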
+ +All measurements made with this package are made via instruments. These +instruments are created by a [Meter] which itself is created by a +[MeterProvider]. Applications need to accept a [MeterProvider] implementation +as a starting point when instrumenting. This can be done directly, or by using +the OpenTelemetry global MeterProvider via [GetMeterProvider]. Using an +appropriately named [Meter] from the accepted [MeterProvider], instrumentation +can then be built from the [Meter]'s instruments. + +# Instruments + +Each instrument is designed to make measurements of a particular type. Broadly, +all instruments fall into two overlapping logical categories: asynchronous or +synchronous, and int64 or float64. + +All synchronous instruments ([Int64Counter], [Int64UpDownCounter], +[Int64Histogram], [Float64Counter], [Float64UpDownCounter], and +[Float64Histogram]) are used to measure the operation and performance of source +code during the source code execution. These instruments only make measurements +when the source code they instrument is run. + +All asynchronous instruments ([Int64ObservableCounter], +[Int64ObservableUpDownCounter], [Int64ObservableGauge], +[Float64ObservableCounter], [Float64ObservableUpDownCounter], and +[Float64ObservableGauge]) are used to measure metrics outside of the execution +of source code. They are said to make "observations" via a callback function +called once every measurement collection cycle. + +Each instrument is also grouped by the value type it measures. Either int64 or +float64. The value being measured will dictate which instrument in these +categories to use. + +Outside of these two broad categories, instruments are described by the +function they are designed to serve. All Counters ([Int64Counter], +[Float64Counter], [Int64ObservableCounter], and [Float64ObservableCounter]) are +designed to measure values that never decrease in value, but instead only +incrementally increase in value. UpDownCounters ([Int64UpDownCounter], +[Float64UpDownCounter], [Int64ObservableUpDownCounter], and +[Float64ObservableUpDownCounter]) on the other hand, are designed to measure +values that can increase and decrease. When more information needs to be +conveyed about all the synchronous measurements made during a collection cycle, +a Histogram ([Int64Histogram] and [Float64Histogram]) should be used. Finally, +when just the most recent measurement needs to be conveyed about an +asynchronous measurement, a Gauge ([Int64ObservableGauge] and +[Float64ObservableGauge]) should be used. + +See the [OpenTelemetry documentation] for more information about instruments +and their intended use. + +# Measurements + +Measurements are made by recording values and information about the values with +an instrument. How these measurements are recorded depends on the instrument. + +Measurements for synchronous instruments ([Int64Counter], [Int64UpDownCounter], +[Int64Histogram], [Float64Counter], [Float64UpDownCounter], and +[Float64Histogram]) are recorded using the instrument methods directly. All +counter instruments have an Add method that is used to measure an increment +value, and all histogram instruments have a Record method to measure a data +point. + +Asynchronous instruments ([Int64ObservableCounter], +[Int64ObservableUpDownCounter], [Int64ObservableGauge], +[Float64ObservableCounter], [Float64ObservableUpDownCounter], and +[Float64ObservableGauge]) record measurements within a callback function. 
The
+callback is registered with the Meter, which ensures the callback is called
+once per collection cycle. A callback can be registered in two ways: during
+the instrument's creation using an option, or later using the
+RegisterCallback method of the [Meter] that created the instrument.
+
+If the following criteria are met, an option ([WithInt64Callback] or
+[WithFloat64Callback]) can be used during the asynchronous instrument's
+creation to register a callback ([Int64Callback] or [Float64Callback],
+respectively):
+
+  - The measurement process is known when the instrument is created
+  - Only that instrument will make a measurement within the callback
+  - The callback never needs to be unregistered
+
+If the criteria are not met, use the RegisterCallback method of the [Meter]
+that created the instrument to register a [Callback].
+
+# API Implementations
+
+This package does not conform to the standard Go versioning policy; all of its
+interfaces may have methods added to them without a package major version bump.
+This non-standard API evolution could surprise an uninformed implementation
+author. They could unknowingly build their implementation in a way that would
+result in a runtime panic for their users that update to the new API.
+
+The API is designed to help inform an instrumentation author about this
+non-standard API evolution. It requires them to choose a default behavior for
+unimplemented interface methods. There are three behavior choices they can
+make:
+
+  - Compilation failure
+  - Panic
+  - Default to another implementation
+
+All interfaces in this API embed a corresponding interface from
+[go.opentelemetry.io/otel/metric/embedded]. If an author wants the default
+behavior of their implementations to be a compilation failure, signaling to
+their users they need to update to the latest version of that implementation,
+they need to embed the corresponding interface from
+[go.opentelemetry.io/otel/metric/embedded] in their implementation. For
+example,
+
+	import "go.opentelemetry.io/otel/metric/embedded"
+
+	type MeterProvider struct {
+		embedded.MeterProvider
+		// ...
+	}
+
+If an author wants the default behavior of their implementations to be a
+panic, they need to embed the API interface directly.
+
+	import "go.opentelemetry.io/otel/metric"
+
+	type MeterProvider struct {
+		metric.MeterProvider
+		// ...
+	}
+
+This is not a recommended behavior as it could lead to publishing packages that
+contain runtime panics when users update other packages that use newer versions
+of [go.opentelemetry.io/otel/metric].
+
+Finally, an author can embed another implementation in theirs. The embedded
+implementation will be used for methods not defined by the author. For example,
+an author who wants to default to silently dropping the call can use
+[go.opentelemetry.io/otel/metric/noop]:
+
+	import "go.opentelemetry.io/otel/metric/noop"
+
+	type MeterProvider struct {
+		noop.MeterProvider
+		// ...
+	}
+
+It is strongly recommended that authors only embed
+[go.opentelemetry.io/otel/metric/noop] if they choose this default behavior.
+That implementation is the only one OpenTelemetry authors can guarantee will
+fully implement all the API interfaces when a user updates their API.
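+
+To tie these pieces together, here is a minimal sketch of both measurement
+styles; the instrument names are hypothetical and the observed value is a
+placeholder:
+
+	import (
+		"context"
+
+		"go.opentelemetry.io/otel/metric"
+	)
+
+	func instrument(ctx context.Context, meter metric.Meter) error {
+		// Synchronous: Add records an increment as the code runs.
+		requests, err := meter.Int64Counter("hypothetical.requests")
+		if err != nil {
+			return err
+		}
+		requests.Add(ctx, 1)
+
+		// Asynchronous: the callback is invoked once per collection cycle.
+		_, err = meter.Int64ObservableGauge(
+			"hypothetical.queue.length",
+			metric.WithInt64Callback(func(_ context.Context, o metric.Int64Observer) error {
+				o.Observe(0) // Placeholder; observe the real value here.
+				return nil
+			}),
+		)
+		return err
+	}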
+ +[OpenTelemetry documentation]: https://opentelemetry.io/docs/concepts/signals/metrics/ +[GetMeterProvider]: https://pkg.go.dev/go.opentelemetry.io/otel#GetMeterProvider +*/ +package metric // import "go.opentelemetry.io/otel/metric" diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go new file mode 100644 index 00000000..ae0bdbd2 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go @@ -0,0 +1,234 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package embedded provides interfaces embedded within the [OpenTelemetry +// metric API]. +// +// Implementers of the [OpenTelemetry metric API] can embed the relevant type +// from this package into their implementation directly. Doing so will result +// in a compilation error for users when the [OpenTelemetry metric API] is +// extended (which is something that can happen without a major version bump of +// the API package). +// +// [OpenTelemetry metric API]: https://pkg.go.dev/go.opentelemetry.io/otel/metric +package embedded // import "go.opentelemetry.io/otel/metric/embedded" + +// MeterProvider is embedded in +// [go.opentelemetry.io/otel/metric.MeterProvider]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.MeterProvider] if you want users to +// experience a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.MeterProvider] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type MeterProvider interface{ meterProvider() } + +// Meter is embedded in [go.opentelemetry.io/otel/metric.Meter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Meter] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Meter] interface +// is extended (which is something that can happen without a major version bump +// of the API package). +type Meter interface{ meter() } + +// Float64Observer is embedded in +// [go.opentelemetry.io/otel/metric.Float64Observer]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Observer] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64Observer] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Float64Observer interface{ float64Observer() } + +// Int64Observer is embedded in +// [go.opentelemetry.io/otel/metric.Int64Observer]. 
+// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Observer] if you want users +// to experience a compilation error, signaling they need to update to your +// latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64Observer] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Int64Observer interface{ int64Observer() } + +// Observer is embedded in [go.opentelemetry.io/otel/metric.Observer]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Observer] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Observer] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Observer interface{ observer() } + +// Registration is embedded in [go.opentelemetry.io/otel/metric.Registration]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Registration] if you want users to +// experience a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/metric.Registration] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Registration interface{ registration() } + +// Float64Counter is embedded in +// [go.opentelemetry.io/otel/metric.Float64Counter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Counter] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64Counter] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Float64Counter interface{ float64Counter() } + +// Float64Histogram is embedded in +// [go.opentelemetry.io/otel/metric.Float64Histogram]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64Histogram] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64Histogram] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Float64Histogram interface{ float64Histogram() } + +// Float64ObservableCounter is embedded in +// [go.opentelemetry.io/otel/metric.Float64ObservableCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64ObservableCounter] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64ObservableCounter] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64ObservableCounter interface{ float64ObservableCounter() } + +// Float64ObservableGauge is embedded in +// [go.opentelemetry.io/otel/metric.Float64ObservableGauge]. 
+// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64ObservableGauge] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64ObservableGauge] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64ObservableGauge interface{ float64ObservableGauge() } + +// Float64ObservableUpDownCounter is embedded in +// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter] +// if you want users to experience a compilation error, signaling they need to +// update to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Float64ObservableUpDownCounter interface{ float64ObservableUpDownCounter() } + +// Float64UpDownCounter is embedded in +// [go.opentelemetry.io/otel/metric.Float64UpDownCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Float64UpDownCounter] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Float64UpDownCounter] interface +// is extended (which is something that can happen without a major version bump +// of the API package). +type Float64UpDownCounter interface{ float64UpDownCounter() } + +// Int64Counter is embedded in +// [go.opentelemetry.io/otel/metric.Int64Counter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Counter] if you want users +// to experience a compilation error, signaling they need to update to your +// latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64Counter] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Int64Counter interface{ int64Counter() } + +// Int64Histogram is embedded in +// [go.opentelemetry.io/otel/metric.Int64Histogram]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64Histogram] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64Histogram] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Int64Histogram interface{ int64Histogram() } + +// Int64ObservableCounter is embedded in +// [go.opentelemetry.io/otel/metric.Int64ObservableCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64ObservableCounter] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64ObservableCounter] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Int64ObservableCounter interface{ int64ObservableCounter() } + +// Int64ObservableGauge is embedded in +// [go.opentelemetry.io/otel/metric.Int64ObservableGauge]. 
+// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64ObservableGauge] if you +// want users to experience a compilation error, signaling they need to update +// to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64ObservableGauge] interface +// is extended (which is something that can happen without a major version bump +// of the API package). +type Int64ObservableGauge interface{ int64ObservableGauge() } + +// Int64ObservableUpDownCounter is embedded in +// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter] if +// you want users to experience a compilation error, signaling they need to +// update to your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type Int64ObservableUpDownCounter interface{ int64ObservableUpDownCounter() } + +// Int64UpDownCounter is embedded in +// [go.opentelemetry.io/otel/metric.Int64UpDownCounter]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/metric.Int64UpDownCounter] if you want +// users to experience a compilation error, signaling they need to update to +// your latest implementation, when the +// [go.opentelemetry.io/otel/metric.Int64UpDownCounter] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Int64UpDownCounter interface{ int64UpDownCounter() } diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go new file mode 100644 index 00000000..be89cd53 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go @@ -0,0 +1,357 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metric // import "go.opentelemetry.io/otel/metric" + +import "go.opentelemetry.io/otel/attribute" + +// Observable is used as a grouping mechanism for all instruments that are +// updated within a Callback. +type Observable interface { + observable() +} + +// InstrumentOption applies options to all instruments. +type InstrumentOption interface { + Int64CounterOption + Int64UpDownCounterOption + Int64HistogramOption + Int64ObservableCounterOption + Int64ObservableUpDownCounterOption + Int64ObservableGaugeOption + + Float64CounterOption + Float64UpDownCounterOption + Float64HistogramOption + Float64ObservableCounterOption + Float64ObservableUpDownCounterOption + Float64ObservableGaugeOption +} + +// HistogramOption applies options to histogram instruments. 
+type HistogramOption interface { + Int64HistogramOption + Float64HistogramOption +} + +type descOpt string + +func (o descOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64UpDownCounter(c Float64UpDownCounterConfig) Float64UpDownCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyFloat64ObservableGauge(c Float64ObservableGaugeConfig) Float64ObservableGaugeConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64Counter(c Int64CounterConfig) Int64CounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64UpDownCounter(c Int64UpDownCounterConfig) Int64UpDownCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { + c.description = string(o) + return c +} + +func (o descOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64ObservableGaugeConfig { + c.description = string(o) + return c +} + +// WithDescription sets the instrument description. 
+func WithDescription(desc string) InstrumentOption { return descOpt(desc) }
+
+type unitOpt string
+
+func (o unitOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyFloat64UpDownCounter(c Float64UpDownCounterConfig) Float64UpDownCounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyFloat64ObservableGauge(c Float64ObservableGaugeConfig) Float64ObservableGaugeConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64Counter(c Int64CounterConfig) Int64CounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64UpDownCounter(c Int64UpDownCounterConfig) Int64UpDownCounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig {
+	c.unit = string(o)
+	return c
+}
+
+func (o unitOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64ObservableGaugeConfig {
+	c.unit = string(o)
+	return c
+}
+
+// WithUnit sets the instrument unit.
+//
+// The unit u should be defined using the appropriate [UCUM](https://ucum.org) case-sensitive code.
+func WithUnit(u string) InstrumentOption { return unitOpt(u) }
+
+// WithExplicitBucketBoundaries sets the instrument explicit bucket boundaries.
+//
+// This option is considered "advisory", and may be ignored by API implementations.
+func WithExplicitBucketBoundaries(bounds ...float64) HistogramOption { return bucketOpt(bounds) }
+
+type bucketOpt []float64
+
+func (o bucketOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig {
+	c.explicitBucketBoundaries = o
+	return c
+}
+
+func (o bucketOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig {
+	c.explicitBucketBoundaries = o
+	return c
+}
+
+// AddOption applies options to an addition measurement. See
+// [MeasurementOption] for other options that can be used as an AddOption.
+type AddOption interface {
+	applyAdd(AddConfig) AddConfig
+}
+
+// AddConfig contains options for an addition measurement.
+type AddConfig struct {
+	attrs attribute.Set
+}
+
+// NewAddConfig returns a new [AddConfig] with all opts applied.
+func NewAddConfig(opts []AddOption) AddConfig {
+	config := AddConfig{attrs: *attribute.EmptySet()}
+	for _, o := range opts {
+		config = o.applyAdd(config)
+	}
+	return config
+}
+
+// Attributes returns the configured attribute set.
+func (c AddConfig) Attributes() attribute.Set {
+	return c.attrs
+}
+
+// RecordOption applies options to a recorded measurement. See
+// [MeasurementOption] for other options that can be used as a RecordOption.
+type RecordOption interface {
+	applyRecord(RecordConfig) RecordConfig
+}
+
+// RecordConfig contains options for a recorded measurement.
+type RecordConfig struct {
+	attrs attribute.Set
+}
+
+// NewRecordConfig returns a new [RecordConfig] with all opts applied.
+func NewRecordConfig(opts []RecordOption) RecordConfig {
+	config := RecordConfig{attrs: *attribute.EmptySet()}
+	for _, o := range opts {
+		config = o.applyRecord(config)
+	}
+	return config
+}
+
+// Attributes returns the configured attribute set.
+func (c RecordConfig) Attributes() attribute.Set {
+	return c.attrs
+}
+
+// ObserveOption applies options to an observed measurement. See
+// [MeasurementOption] for other options that can be used as an ObserveOption.
+type ObserveOption interface {
+	applyObserve(ObserveConfig) ObserveConfig
+}
+
+// ObserveConfig contains options for an observed measurement.
+type ObserveConfig struct {
+	attrs attribute.Set
+}
+
+// NewObserveConfig returns a new [ObserveConfig] with all opts applied.
+func NewObserveConfig(opts []ObserveOption) ObserveConfig {
+	config := ObserveConfig{attrs: *attribute.EmptySet()}
+	for _, o := range opts {
+		config = o.applyObserve(config)
+	}
+	return config
+}
+
+// Attributes returns the configured attribute set.
+func (c ObserveConfig) Attributes() attribute.Set {
+	return c.attrs
+}
+
+// MeasurementOption applies options to all instrument measurements.
+type MeasurementOption interface {
+	AddOption
+	RecordOption
+	ObserveOption
+}
+
+type attrOpt struct {
+	set attribute.Set
+}
+
+// mergeSets returns the union of keys between a and b. Any duplicate keys will
+// use the value associated with b.
+func mergeSets(a, b attribute.Set) attribute.Set {
+	// NewMergeIterator uses the first value for any duplicates.
+	iter := attribute.NewMergeIterator(&b, &a)
+	merged := make([]attribute.KeyValue, 0, a.Len()+b.Len())
+	for iter.Next() {
+		merged = append(merged, iter.Attribute())
+	}
+	return attribute.NewSet(merged...)
+}
+
+func (o attrOpt) applyAdd(c AddConfig) AddConfig {
+	switch {
+	case o.set.Len() == 0:
+	case c.attrs.Len() == 0:
+		c.attrs = o.set
+	default:
+		c.attrs = mergeSets(c.attrs, o.set)
+	}
+	return c
+}
+
+func (o attrOpt) applyRecord(c RecordConfig) RecordConfig {
+	switch {
+	case o.set.Len() == 0:
+	case c.attrs.Len() == 0:
+		c.attrs = o.set
+	default:
+		c.attrs = mergeSets(c.attrs, o.set)
+	}
+	return c
+}
+
+func (o attrOpt) applyObserve(c ObserveConfig) ObserveConfig {
+	switch {
+	case o.set.Len() == 0:
+	case c.attrs.Len() == 0:
+		c.attrs = o.set
+	default:
+		c.attrs = mergeSets(c.attrs, o.set)
+	}
+	return c
+}
+
+// WithAttributeSet sets the attribute Set a measurement is made with.
+//
+// If multiple WithAttributeSet or WithAttributes options are passed the
+// attributes will be merged together in the order they are passed. Attributes
+// with duplicate keys will use the last value passed.
+func WithAttributeSet(attributes attribute.Set) MeasurementOption {
+	return attrOpt{set: attributes}
+}
+
+// WithAttributes converts attributes into an attribute Set and sets the Set to
+// be associated with a measurement. This is shorthand for:
+//
+//	cp := make([]attribute.KeyValue, len(attributes))
+//	copy(cp, attributes)
+//	WithAttributeSet(attribute.NewSet(cp...))
+//
+// [attribute.NewSet] may modify the passed attributes so this will make a copy
+// of attributes before creating a set in order to ensure this function is
+// concurrent safe. This makes this option function less optimized in
+// comparison to [WithAttributeSet]. Therefore, [WithAttributeSet] should be
+// preferred for performance-sensitive code.
+//
+// See [WithAttributeSet] for information about how multiple WithAttributes are
+// merged.
+func WithAttributes(attributes ...attribute.KeyValue) MeasurementOption {
+	cp := make([]attribute.KeyValue, len(attributes))
+	copy(cp, attributes)
+	return attrOpt{set: attribute.NewSet(cp...)}
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go
new file mode 100644
index 00000000..2520bc74
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/meter.go
@@ -0,0 +1,212 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric/embedded"
+)
+
+// MeterProvider provides access to named Meter instances, for instrumenting
+// an application or package.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type MeterProvider interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.MeterProvider
+
+	// Meter returns a new Meter with the provided name and configuration.
+	//
+	// A Meter should be scoped at most to a single package. The name needs to
+	// be unique so it does not collide with other names used by an
+	// application, nor with names used by other applications. To achieve
+	// this, the import path of the instrumentation package is recommended to
+	// be used as the name.
+	//
+	// If the name is empty, an implementation-defined default name will be
+	// used instead.
+	Meter(name string, opts ...MeterOption) Meter
+}
+
+// Meter provides access to instrument instances for recording metrics.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Meter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Meter
+
+	// Int64Counter returns a new Int64Counter instrument identified by name
+	// and configured with options. The instrument is used to synchronously
+	// record increasing int64 measurements during a computational operation.
+	Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error)
+	// Int64UpDownCounter returns a new Int64UpDownCounter instrument
+	// identified by name and configured with options. The instrument is used
+	// to synchronously record int64 measurements during a computational
+	// operation.
+	Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error)
+	// Int64Histogram returns a new Int64Histogram instrument identified by
+	// name and configured with options. The instrument is used to
+	// synchronously record the distribution of int64 measurements during a
+	// computational operation.
+	Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error)
+	// Int64ObservableCounter returns a new Int64ObservableCounter identified
+	// by name and configured with options. The instrument is used to
+	// asynchronously record increasing int64 measurements once per
+	// measurement collection cycle.
+	//
+	// Measurements for the returned instrument are made via a callback. Use
+	// the WithInt64Callback option to register the callback here, or use the
+	// RegisterCallback method of this Meter to register one later. See the
+	// Measurements section of the package documentation for more information.
+	Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error)
+	// Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter
+	// instrument identified by name and configured with options. The
+	// instrument is used to asynchronously record int64 measurements once
+	// per measurement collection cycle.
+	//
+	// Measurements for the returned instrument are made via a callback. Use
+	// the WithInt64Callback option to register the callback here, or use the
+	// RegisterCallback method of this Meter to register one later. See the
+	// Measurements section of the package documentation for more information.
+	Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error)
+	// Int64ObservableGauge returns a new Int64ObservableGauge instrument
+	// identified by name and configured with options. The instrument is used
+	// to asynchronously record instantaneous int64 measurements once per
+	// measurement collection cycle.
+	//
+	// Measurements for the returned instrument are made via a callback. Use
+	// the WithInt64Callback option to register the callback here, or use the
+	// RegisterCallback method of this Meter to register one later. See the
+	// Measurements section of the package documentation for more information.
+	Int64ObservableGauge(name string, options ...Int64ObservableGaugeOption) (Int64ObservableGauge, error)
+
+	// Float64Counter returns a new Float64Counter instrument identified by
+	// name and configured with options. The instrument is used to
+	// synchronously record increasing float64 measurements during a
+	// computational operation.
+	Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error)
+	// Float64UpDownCounter returns a new Float64UpDownCounter instrument
+	// identified by name and configured with options. The instrument is used
+	// to synchronously record float64 measurements during a computational
+	// operation.
+	Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error)
+	// Float64Histogram returns a new Float64Histogram instrument identified by
+	// name and configured with options. The instrument is used to
+	// synchronously record the distribution of float64 measurements during a
+	// computational operation.
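+	// For example (a hedged sketch; the instrument and ctx are illustrative):
+	//
+	//	latency.Record(ctx, 0.172) // seconds, assuming the unit is "s"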
+	Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error)
+	// Float64ObservableCounter returns a new Float64ObservableCounter
+	// instrument identified by name and configured with options. The
+	// instrument is used to asynchronously record increasing float64
+	// measurements once per measurement collection cycle.
+	//
+	// Measurements for the returned instrument are made via a callback. Use
+	// the WithFloat64Callback option to register the callback here, or use the
+	// RegisterCallback method of this Meter to register one later. See the
+	// Measurements section of the package documentation for more information.
+	Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error)
+	// Float64ObservableUpDownCounter returns a new
+	// Float64ObservableUpDownCounter instrument identified by name and
+	// configured with options. The instrument is used to asynchronously record
+	// float64 measurements once per measurement collection cycle.
+	//
+	// Measurements for the returned instrument are made via a callback. Use
+	// the WithFloat64Callback option to register the callback here, or use the
+	// RegisterCallback method of this Meter to register one later. See the
+	// Measurements section of the package documentation for more information.
+	Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error)
+	// Float64ObservableGauge returns a new Float64ObservableGauge instrument
+	// identified by name and configured with options. The instrument is used
+	// to asynchronously record instantaneous float64 measurements once per
+	// measurement collection cycle.
+	//
+	// Measurements for the returned instrument are made via a callback. Use
+	// the WithFloat64Callback option to register the callback here, or use the
+	// RegisterCallback method of this Meter to register one later. See the
+	// Measurements section of the package documentation for more information.
+	Float64ObservableGauge(name string, options ...Float64ObservableGaugeOption) (Float64ObservableGauge, error)
+
+	// RegisterCallback registers f to be called during the collection of a
+	// measurement cycle.
+	//
+	// If Unregister of the returned Registration is called, f needs to be
+	// unregistered and not called during collection.
+	//
+	// The instruments f is registered with are the only instruments that f may
+	// observe values for.
+	//
+	// If no instruments are passed, f should not be registered nor called
+	// during collection.
+	//
+	// The function f needs to be concurrent safe.
+	RegisterCallback(f Callback, instruments ...Observable) (Registration, error)
+}
+
+// Callback is a function registered with a Meter that makes observations for
+// the set of instruments it is registered with. The Observer parameter is used
+// to record measurement observations for these instruments.
+//
+// The function needs to complete in a finite amount of time and the deadline
+// of the passed context is expected to be honored.
+//
+// The function needs to make unique observations across all registered
+// Callbacks. That is, it should not report measurements for an instrument with
+// the same attributes as another Callback will report.
+//
+// The function needs to be concurrent safe.
+type Callback func(context.Context, Observer) error
+
+// Observer records measurements for multiple instruments in a Callback.
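+//
+// A hedged sketch of an Observer used from RegisterCallback (the meter,
+// gauge, and readQueueLen helper are illustrative):
+//
+//	reg, err := meter.RegisterCallback(
+//		func(_ context.Context, o Observer) error {
+//			o.ObserveInt64(queueLen, readQueueLen())
+//			return nil
+//		},
+//		queueLen,
+//	)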
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Observer interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Observer
+
+	// ObserveFloat64 records the float64 value for obsrv.
+	ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption)
+	// ObserveInt64 records the int64 value for obsrv.
+	ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption)
+}
+
+// Registration is a token representing the unique registration of a callback
+// for a set of instruments with a Meter.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Registration interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Registration
+
+	// Unregister removes the callback registration from a Meter.
+	//
+	// This method needs to be idempotent and concurrent safe.
+	Unregister() error
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go
new file mode 100644
index 00000000..0a4825ae
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go
@@ -0,0 +1,185 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric/embedded"
+)
+
+// Float64Counter is an instrument that records increasing float64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64Counter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Float64Counter
+
+	// Add records a change to the counter.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Add(ctx context.Context, incr float64, options ...AddOption)
+}
+
+// Float64CounterConfig contains options for synchronous counter instruments that
+// record float64 values.
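+//
+// A hedged sketch of how options fold into this config (WithDescription and
+// WithUnit are assumed to be this package's instrument options):
+//
+//	cfg := NewFloat64CounterConfig(WithDescription("bytes sent"), WithUnit("By"))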
+type Float64CounterConfig struct {
+	description string
+	unit        string
+}
+
+// NewFloat64CounterConfig returns a new [Float64CounterConfig] with all opts
+// applied.
+func NewFloat64CounterConfig(opts ...Float64CounterOption) Float64CounterConfig {
+	var config Float64CounterConfig
+	for _, o := range opts {
+		config = o.applyFloat64Counter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Float64CounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64CounterConfig) Unit() string {
+	return c.unit
+}
+
+// Float64CounterOption applies options to a [Float64CounterConfig]. See
+// [InstrumentOption] for other options that can be used as a
+// Float64CounterOption.
+type Float64CounterOption interface {
+	applyFloat64Counter(Float64CounterConfig) Float64CounterConfig
+}
+
+// Float64UpDownCounter is an instrument that records increasing or decreasing
+// float64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64UpDownCounter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Float64UpDownCounter
+
+	// Add records a change to the counter.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Add(ctx context.Context, incr float64, options ...AddOption)
+}
+
+// Float64UpDownCounterConfig contains options for synchronous counter
+// instruments that record float64 values.
+type Float64UpDownCounterConfig struct {
+	description string
+	unit        string
+}
+
+// NewFloat64UpDownCounterConfig returns a new [Float64UpDownCounterConfig]
+// with all opts applied.
+func NewFloat64UpDownCounterConfig(opts ...Float64UpDownCounterOption) Float64UpDownCounterConfig {
+	var config Float64UpDownCounterConfig
+	for _, o := range opts {
+		config = o.applyFloat64UpDownCounter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Float64UpDownCounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64UpDownCounterConfig) Unit() string {
+	return c.unit
+}
+
+// Float64UpDownCounterOption applies options to a
+// [Float64UpDownCounterConfig]. See [InstrumentOption] for other options that
+// can be used as a Float64UpDownCounterOption.
+type Float64UpDownCounterOption interface {
+	applyFloat64UpDownCounter(Float64UpDownCounterConfig) Float64UpDownCounterConfig
+}
+
+// Float64Histogram is an instrument that records a distribution of float64
+// values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64Histogram interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Float64Histogram
+
+	// Record adds an additional value to the distribution.
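+	// For example (a hedged sketch; the instrument, ctx, and body are
+	// illustrative):
+	//
+	//	respSize.Record(ctx, float64(len(body)))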
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Record(ctx context.Context, incr float64, options ...RecordOption)
+}
+
+// Float64HistogramConfig contains options for synchronous histogram
+// instruments that record float64 values.
+type Float64HistogramConfig struct {
+	description              string
+	unit                     string
+	explicitBucketBoundaries []float64
+}
+
+// NewFloat64HistogramConfig returns a new [Float64HistogramConfig] with all
+// opts applied.
+func NewFloat64HistogramConfig(opts ...Float64HistogramOption) Float64HistogramConfig {
+	var config Float64HistogramConfig
+	for _, o := range opts {
+		config = o.applyFloat64Histogram(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Float64HistogramConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64HistogramConfig) Unit() string {
+	return c.unit
+}
+
+// ExplicitBucketBoundaries returns the configured explicit bucket boundaries.
+func (c Float64HistogramConfig) ExplicitBucketBoundaries() []float64 {
+	return c.explicitBucketBoundaries
+}
+
+// Float64HistogramOption applies options to a [Float64HistogramConfig]. See
+// [InstrumentOption] for other options that can be used as a
+// Float64HistogramOption.
+type Float64HistogramOption interface {
+	applyFloat64Histogram(Float64HistogramConfig) Float64HistogramConfig
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/syncint64.go
new file mode 100644
index 00000000..56667d32
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/syncint64.go
@@ -0,0 +1,185 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric/embedded"
+)
+
+// Int64Counter is an instrument that records increasing int64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64Counter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64Counter
+
+	// Add records a change to the counter.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Add(ctx context.Context, incr int64, options ...AddOption)
+}
+
+// Int64CounterConfig contains options for synchronous counter instruments that
+// record int64 values.
+type Int64CounterConfig struct {
+	description string
+	unit        string
+}
+
+// NewInt64CounterConfig returns a new [Int64CounterConfig] with all opts
+// applied.
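+//
+// A hedged sketch (WithDescription is assumed to be this package's
+// instrument option):
+//
+//	cfg := NewInt64CounterConfig(WithDescription("requests served"))
+//	_ = cfg.Description() // "requests served"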
+func NewInt64CounterConfig(opts ...Int64CounterOption) Int64CounterConfig {
+	var config Int64CounterConfig
+	for _, o := range opts {
+		config = o.applyInt64Counter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Int64CounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64CounterConfig) Unit() string {
+	return c.unit
+}
+
+// Int64CounterOption applies options to an [Int64CounterConfig]. See
+// [InstrumentOption] for other options that can be used as an
+// Int64CounterOption.
+type Int64CounterOption interface {
+	applyInt64Counter(Int64CounterConfig) Int64CounterConfig
+}
+
+// Int64UpDownCounter is an instrument that records increasing or decreasing
+// int64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64UpDownCounter interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64UpDownCounter
+
+	// Add records a change to the counter.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Add(ctx context.Context, incr int64, options ...AddOption)
+}
+
+// Int64UpDownCounterConfig contains options for synchronous counter
+// instruments that record int64 values.
+type Int64UpDownCounterConfig struct {
+	description string
+	unit        string
+}
+
+// NewInt64UpDownCounterConfig returns a new [Int64UpDownCounterConfig] with
+// all opts applied.
+func NewInt64UpDownCounterConfig(opts ...Int64UpDownCounterOption) Int64UpDownCounterConfig {
+	var config Int64UpDownCounterConfig
+	for _, o := range opts {
+		config = o.applyInt64UpDownCounter(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Int64UpDownCounterConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64UpDownCounterConfig) Unit() string {
+	return c.unit
+}
+
+// Int64UpDownCounterOption applies options to an [Int64UpDownCounterConfig].
+// See [InstrumentOption] for other options that can be used as an
+// Int64UpDownCounterOption.
+type Int64UpDownCounterOption interface {
+	applyInt64UpDownCounter(Int64UpDownCounterConfig) Int64UpDownCounterConfig
+}
+
+// Int64Histogram is an instrument that records a distribution of int64
+// values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64Histogram interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Int64Histogram
+
+	// Record adds an additional value to the distribution.
+	//
+	// Use the WithAttributeSet (or, if performance is not a concern,
+	// the WithAttributes) option to include measurement attributes.
+	Record(ctx context.Context, incr int64, options ...RecordOption)
+}
+
+// Int64HistogramConfig contains options for synchronous histogram instruments
+// that record int64 values.
+type Int64HistogramConfig struct {
+	description              string
+	unit                     string
+	explicitBucketBoundaries []float64
+}
+
+// NewInt64HistogramConfig returns a new [Int64HistogramConfig] with all opts
+// applied.
+func NewInt64HistogramConfig(opts ...Int64HistogramOption) Int64HistogramConfig {
+	var config Int64HistogramConfig
+	for _, o := range opts {
+		config = o.applyInt64Histogram(config)
+	}
+	return config
+}
+
+// Description returns the configured description.
+func (c Int64HistogramConfig) Description() string {
+	return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64HistogramConfig) Unit() string {
+	return c.unit
+}
+
+// ExplicitBucketBoundaries returns the configured explicit bucket boundaries.
+func (c Int64HistogramConfig) ExplicitBucketBoundaries() []float64 {
+	return c.explicitBucketBoundaries
+}
+
+// Int64HistogramOption applies options to an [Int64HistogramConfig]. See
+// [InstrumentOption] for other options that can be used as an
+// Int64HistogramOption.
+type Int64HistogramOption interface {
+	applyInt64Histogram(Int64HistogramConfig) Int64HistogramConfig
+}
diff --git a/vendor/go.opentelemetry.io/otel/propagation.go b/vendor/go.opentelemetry.io/otel/propagation.go
new file mode 100644
index 00000000..d29aaa32
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation.go
@@ -0,0 +1,31 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otel // import "go.opentelemetry.io/otel"
+
+import (
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/propagation"
+)
+
+// GetTextMapPropagator returns the global TextMapPropagator. If none has been
+// set, a No-Op TextMapPropagator is returned.
+func GetTextMapPropagator() propagation.TextMapPropagator {
+	return global.TextMapPropagator()
+}
+
+// SetTextMapPropagator sets propagator as the global TextMapPropagator.
+func SetTextMapPropagator(propagator propagation.TextMapPropagator) {
+	global.SetTextMapPropagator(propagator)
+}
diff --git a/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/vendor/go.opentelemetry.io/otel/propagation/baggage.go
new file mode 100644
index 00000000..303cdf1c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/baggage.go
@@ -0,0 +1,58 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package propagation // import "go.opentelemetry.io/otel/propagation"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/baggage"
+)
+
+const baggageHeader = "baggage"
+
+// Baggage is a propagator that supports the W3C Baggage format.
+//
+// This propagates user-defined baggage associated with a trace. The complete
+// specification is defined at https://www.w3.org/TR/baggage/.
+type Baggage struct{}
+
+var _ TextMapPropagator = Baggage{}
+
+// Inject sets baggage key-values from ctx into the carrier.
+func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) {
+	bStr := baggage.FromContext(ctx).String()
+	if bStr != "" {
+		carrier.Set(baggageHeader, bStr)
+	}
+}
+
+// Extract returns a copy of parent with the baggage from the carrier added.
+func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context {
+	bStr := carrier.Get(baggageHeader)
+	if bStr == "" {
+		return parent
+	}
+
+	bag, err := baggage.Parse(bStr)
+	if err != nil {
+		return parent
+	}
+	return baggage.ContextWithBaggage(parent, bag)
+}
+
+// Fields returns the keys whose values are set with Inject.
+func (b Baggage) Fields() []string {
+	return []string{baggageHeader}
+}
diff --git a/vendor/go.opentelemetry.io/otel/propagation/doc.go b/vendor/go.opentelemetry.io/otel/propagation/doc.go
new file mode 100644
index 00000000..c119eb28
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/doc.go
@@ -0,0 +1,24 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package propagation contains OpenTelemetry context propagators.
+
+OpenTelemetry propagators are used to extract and inject context data from and
+into messages exchanged by applications. The propagators supported by this
+package are the W3C Trace Context encoding
+(https://www.w3.org/TR/trace-context/) and W3C Baggage
+(https://www.w3.org/TR/baggage/).
+*/
+package propagation // import "go.opentelemetry.io/otel/propagation"
diff --git a/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/vendor/go.opentelemetry.io/otel/propagation/propagation.go
new file mode 100644
index 00000000..c94438f7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/propagation.go
@@ -0,0 +1,153 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package propagation // import "go.opentelemetry.io/otel/propagation"
+
+import (
+	"context"
+	"net/http"
+)
+
+// TextMapCarrier is the storage medium used by a TextMapPropagator.
+type TextMapCarrier interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Get returns the value associated with the passed key.
+	Get(key string) string
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Set stores the key-value pair.
+	Set(key string, value string)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Keys lists the keys stored in this carrier.
+	Keys() []string
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+// MapCarrier is a TextMapCarrier that uses a map held in memory as a storage
+// medium for propagated key-value pairs.
+type MapCarrier map[string]string
+
+// Compile-time check that MapCarrier implements the TextMapCarrier interface.
+var _ TextMapCarrier = MapCarrier{}
+
+// Get returns the value associated with the passed key.
+func (c MapCarrier) Get(key string) string {
+	return c[key]
+}
+
+// Set stores the key-value pair.
+func (c MapCarrier) Set(key, value string) {
+	c[key] = value
+}
+
+// Keys lists the keys stored in this carrier.
+func (c MapCarrier) Keys() []string {
+	keys := make([]string, 0, len(c))
+	for k := range c {
+		keys = append(keys, k)
+	}
+	return keys
+}
+
+// HeaderCarrier adapts http.Header to satisfy the TextMapCarrier interface.
+type HeaderCarrier http.Header
+
+// Get returns the value associated with the passed key.
+func (hc HeaderCarrier) Get(key string) string {
+	return http.Header(hc).Get(key)
+}
+
+// Set stores the key-value pair.
+func (hc HeaderCarrier) Set(key string, value string) {
+	http.Header(hc).Set(key, value)
+}
+
+// Keys lists the keys stored in this carrier.
+func (hc HeaderCarrier) Keys() []string {
+	keys := make([]string, 0, len(hc))
+	for k := range hc {
+		keys = append(keys, k)
+	}
+	return keys
+}
+
+// TextMapPropagator propagates cross-cutting concerns as key-value text
+// pairs within a carrier that travels in-band across process boundaries.
+type TextMapPropagator interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Inject sets cross-cutting concerns from the Context into the carrier.
+	Inject(ctx context.Context, carrier TextMapCarrier)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Extract reads cross-cutting concerns from the carrier into a Context.
+	Extract(ctx context.Context, carrier TextMapCarrier) context.Context
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Fields returns the keys whose values are set with Inject.
+	Fields() []string
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+type compositeTextMapPropagator []TextMapPropagator
+
+func (p compositeTextMapPropagator) Inject(ctx context.Context, carrier TextMapCarrier) {
+	for _, i := range p {
+		i.Inject(ctx, carrier)
+	}
+}
+
+func (p compositeTextMapPropagator) Extract(ctx context.Context, carrier TextMapCarrier) context.Context {
+	for _, i := range p {
+		ctx = i.Extract(ctx, carrier)
+	}
+	return ctx
+}
+
+func (p compositeTextMapPropagator) Fields() []string {
+	unique := make(map[string]struct{})
+	for _, i := range p {
+		for _, k := range i.Fields() {
+			unique[k] = struct{}{}
+		}
+	}
+
+	fields := make([]string, 0, len(unique))
+	for k := range unique {
+		fields = append(fields, k)
+	}
+	return fields
+}
+
+// NewCompositeTextMapPropagator returns a unified TextMapPropagator from the
+// group of passed TextMapPropagators. This allows different cross-cutting
+// concerns to be propagated in a unified manner.
+//
+// The returned TextMapPropagator will inject and extract cross-cutting
+// concerns in the order the TextMapPropagators were provided. Additionally,
+// the Fields method will return a de-duplicated slice of the keys that are
+// set with the Inject method.
+func NewCompositeTextMapPropagator(p ...TextMapPropagator) TextMapPropagator {
+	return compositeTextMapPropagator(p)
+}
diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
new file mode 100644
index 00000000..63e5d622
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
@@ -0,0 +1,167 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package propagation // import "go.opentelemetry.io/otel/propagation"
+
+import (
+	"context"
+	"encoding/hex"
+	"fmt"
+	"strings"
+
+	"go.opentelemetry.io/otel/trace"
+)
+
+const (
+	supportedVersion  = 0
+	maxVersion        = 254
+	traceparentHeader = "traceparent"
+	tracestateHeader  = "tracestate"
+	delimiter         = "-"
+)
+
+// TraceContext is a propagator that supports the W3C Trace Context format
+// (https://www.w3.org/TR/trace-context/)
+//
+// This propagator will propagate the traceparent and tracestate headers to
+// guarantee traces are not broken. It is up to the users of this propagator
+// to choose if they want to participate in a trace by modifying the
+// traceparent header and relevant parts of the tracestate header containing
+// their proprietary information.
+type TraceContext struct{}
+
+var (
+	_           TextMapPropagator = TraceContext{}
+	versionPart                   = fmt.Sprintf("%.2X", supportedVersion)
+)
+
+// Inject sets the trace context from the Context into the carrier.
+func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) {
+	sc := trace.SpanContextFromContext(ctx)
+	if !sc.IsValid() {
+		return
+	}
+
+	if ts := sc.TraceState().String(); ts != "" {
+		carrier.Set(tracestateHeader, ts)
+	}
+
+	// Clear all flags other than the trace-context supported sampling bit.
+	flags := sc.TraceFlags() & trace.FlagsSampled
+
+	var sb strings.Builder
+	sb.Grow(2 + 32 + 16 + 2 + 3)
+	_, _ = sb.WriteString(versionPart)
+	traceID := sc.TraceID()
+	spanID := sc.SpanID()
+	flagByte := [1]byte{byte(flags)}
+	var buf [32]byte
+	for _, src := range [][]byte{traceID[:], spanID[:], flagByte[:]} {
+		_ = sb.WriteByte(delimiter[0])
+		n := hex.Encode(buf[:], src)
+		_, _ = sb.Write(buf[:n])
+	}
+	carrier.Set(traceparentHeader, sb.String())
+}
+
+// Extract reads tracecontext from the carrier into a returned Context.
+//
+// The returned Context will be a copy of ctx and contain the extracted
+// tracecontext as the remote SpanContext. If the extracted tracecontext is
+// invalid, the passed ctx will be returned directly instead.
+func (tc TraceContext) Extract(ctx context.Context, carrier TextMapCarrier) context.Context {
+	sc := tc.extract(carrier)
+	if !sc.IsValid() {
+		return ctx
+	}
+	return trace.ContextWithRemoteSpanContext(ctx, sc)
+}
+
+func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
+	h := carrier.Get(traceparentHeader)
+	if h == "" {
+		return trace.SpanContext{}
+	}
+
+	var ver [1]byte
+	if !extractPart(ver[:], &h, 2) {
+		return trace.SpanContext{}
+	}
+	version := int(ver[0])
+	if version > maxVersion {
+		return trace.SpanContext{}
+	}
+
+	var scc trace.SpanContextConfig
+	if !extractPart(scc.TraceID[:], &h, 32) {
+		return trace.SpanContext{}
+	}
+	if !extractPart(scc.SpanID[:], &h, 16) {
+		return trace.SpanContext{}
+	}
+
+	var opts [1]byte
+	if !extractPart(opts[:], &h, 2) {
+		return trace.SpanContext{}
+	}
+	if version == 0 && (h != "" || opts[0] > 2) {
+		// Version 0 does not allow trailing data and does not allow any
+		// flags other than the sampled bit.
+		return trace.SpanContext{}
+	}
+
+	// Clear all flags other than the trace-context supported sampling bit.
+	scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled
+
+	// Ignore the error returned here. Failure to parse tracestate MUST NOT
+	// affect the parsing of traceparent according to the W3C tracecontext
+	// specification.
+	scc.TraceState, _ = trace.ParseTraceState(carrier.Get(tracestateHeader))
+	scc.Remote = true
+
+	sc := trace.NewSpanContext(scc)
+	if !sc.IsValid() {
+		return trace.SpanContext{}
+	}
+
+	return sc
+}
+
+// upperHex detects whether v contains upper-case hexadecimal characters.
+func upperHex(v string) bool {
+	for _, c := range v {
+		if c >= 'A' && c <= 'F' {
+			return true
+		}
+	}
+	return false
+}
+
+func extractPart(dst []byte, h *string, n int) bool {
+	part, left, _ := strings.Cut(*h, delimiter)
+	*h = left
+	// hex.Decode decodes unsupported upper-case characters, so exclude explicitly.
+	if len(part) != n || upperHex(part) {
+		return false
+	}
+	if p, err := hex.Decode(dst, []byte(part)); err != nil || p != n/2 {
+		return false
+	}
+	return true
+}
+
+// Fields returns the keys whose values are set with Inject.
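+//
+// A hedged sketch tying Fields to Inject (the ctx and HTTP request are
+// illustrative):
+//
+//	tc := TraceContext{}
+//	tc.Inject(ctx, HeaderCarrier(req.Header))
+//	// req.Header now carries "traceparent" and, when the span context has a
+//	// non-empty trace state, "tracestate": exactly the keys Fields returns.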
+func (tc TraceContext) Fields() []string {
+	return []string{traceparentHeader, tracestateHeader}
+}
diff --git a/vendor/go.opentelemetry.io/otel/requirements.txt b/vendor/go.opentelemetry.io/otel/requirements.txt
new file mode 100644
index 00000000..e0a43e13
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/requirements.txt
@@ -0,0 +1 @@
+codespell==2.2.6
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go
new file mode 100644
index 00000000..67d1d4c4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go
@@ -0,0 +1,1209 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// Describes HTTP attributes.
+const (
+	// HTTPMethodKey is the attribute Key conforming to the "http.method"
+	// semantic conventions. It represents the HTTP request method.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'GET', 'POST', 'HEAD'
+	HTTPMethodKey = attribute.Key("http.method")
+
+	// HTTPStatusCodeKey is the attribute Key conforming to the
+	// "http.status_code" semantic conventions. It represents the [HTTP
+	// response status code](https://tools.ietf.org/html/rfc7231#section-6).
+	//
+	// Type: int
+	// RequirementLevel: ConditionallyRequired (If and only if one was
+	// received/sent.)
+	// Stability: stable
+	// Examples: 200
+	HTTPStatusCodeKey = attribute.Key("http.status_code")
+)
+
+// HTTPMethod returns an attribute KeyValue conforming to the "http.method"
+// semantic conventions. It represents the HTTP request method.
+func HTTPMethod(val string) attribute.KeyValue {
+	return HTTPMethodKey.String(val)
+}
+
+// HTTPStatusCode returns an attribute KeyValue conforming to the
+// "http.status_code" semantic conventions. It represents the [HTTP response
+// status code](https://tools.ietf.org/html/rfc7231#section-6).
+func HTTPStatusCode(val int) attribute.KeyValue {
+	return HTTPStatusCodeKey.Int(val)
+}
+
+// HTTP Server spans attributes
+const (
+	// HTTPSchemeKey is the attribute Key conforming to the "http.scheme"
+	// semantic conventions. It represents the URI scheme identifying the used
+	// protocol.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'http', 'https'
+	HTTPSchemeKey = attribute.Key("http.scheme")
+
+	// HTTPRouteKey is the attribute Key conforming to the "http.route"
+	// semantic conventions. It represents the matched route (path template in
+	// the format used by the respective server framework).
 See note below
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (If and only if it's available)
+	// Stability: stable
+	// Examples: '/users/:userID?', '{controller}/{action}/{id?}'
+	// Note: MUST NOT be populated when this is not supported by the HTTP
+	// server framework as the route attribute should have low-cardinality and
+	// the URI path can NOT substitute it.
+	// SHOULD include the [application
+	// root](/specification/trace/semantic_conventions/http.md#http-server-definitions)
+	// if there is one.
+	HTTPRouteKey = attribute.Key("http.route")
+)
+
+// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme"
+// semantic conventions. It represents the URI scheme identifying the used
+// protocol.
+func HTTPScheme(val string) attribute.KeyValue {
+	return HTTPSchemeKey.String(val)
+}
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route (path template in the
+// format used by the respective server framework). See note below
+func HTTPRoute(val string) attribute.KeyValue {
+	return HTTPRouteKey.String(val)
+}
+
+// Attributes for Events represented using Log Records.
+const (
+	// EventNameKey is the attribute Key conforming to the "event.name"
+	// semantic conventions. It represents the name that identifies the event.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'click', 'exception'
+	EventNameKey = attribute.Key("event.name")
+
+	// EventDomainKey is the attribute Key conforming to the "event.domain"
+	// semantic conventions. It represents the domain that identifies the
+	// business context for the events.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	// Note: Events across different domains may have the same `event.name`,
+	// yet be unrelated events.
+	EventDomainKey = attribute.Key("event.domain")
+)
+
+var (
+	// Events from browser apps
+	EventDomainBrowser = EventDomainKey.String("browser")
+	// Events from mobile apps
+	EventDomainDevice = EventDomainKey.String("device")
+	// Events from Kubernetes
+	EventDomainK8S = EventDomainKey.String("k8s")
+)
+
+// EventName returns an attribute KeyValue conforming to the "event.name"
+// semantic conventions. It represents the name that identifies the event.
+func EventName(val string) attribute.KeyValue {
+	return EventNameKey.String(val)
+}
+
+// These attributes may be used for any network related operation.
+const (
+	// NetTransportKey is the attribute Key conforming to the "net.transport"
+	// semantic conventions. It represents the transport protocol used. See
+	// note below.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	NetTransportKey = attribute.Key("net.transport")
+
+	// NetProtocolNameKey is the attribute Key conforming to the
+	// "net.protocol.name" semantic conventions. It represents the application
+	// layer protocol used. The value SHOULD be normalized to lowercase.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'amqp', 'http', 'mqtt'
+	NetProtocolNameKey = attribute.Key("net.protocol.name")
+
+	// NetProtocolVersionKey is the attribute Key conforming to the
+	// "net.protocol.version" semantic conventions. It represents the version
+	// of the application layer protocol used. See note below.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '3.1.1' + // Note: `net.protocol.version` refers to the version of the protocol used + // and might be different from the protocol client's version. If the HTTP + // client used has a version of `0.27.2`, but sends HTTP version `1.1`, + // this attribute should be set to `1.1`. + NetProtocolVersionKey = attribute.Key("net.protocol.version") + + // NetSockPeerNameKey is the attribute Key conforming to the + // "net.sock.peer.name" semantic conventions. It represents the remote + // socket peer name. + // + // Type: string + // RequirementLevel: Recommended (If available and different from + // `net.peer.name` and if `net.sock.peer.addr` is set.) + // Stability: stable + // Examples: 'proxy.example.com' + NetSockPeerNameKey = attribute.Key("net.sock.peer.name") + + // NetSockPeerAddrKey is the attribute Key conforming to the + // "net.sock.peer.addr" semantic conventions. It represents the remote + // socket peer address: IPv4 or IPv6 for internet protocols, path for local + // communication, + // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '127.0.0.1', '/tmp/mysql.sock' + NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") + + // NetSockPeerPortKey is the attribute Key conforming to the + // "net.sock.peer.port" semantic conventions. It represents the remote + // socket peer port. + // + // Type: int + // RequirementLevel: Recommended (If defined for the address family and if + // different than `net.peer.port` and if `net.sock.peer.addr` is set.) + // Stability: stable + // Examples: 16456 + NetSockPeerPortKey = attribute.Key("net.sock.peer.port") + + // NetSockFamilyKey is the attribute Key conforming to the + // "net.sock.family" semantic conventions. It represents the protocol + // [address + // family](https://man7.org/linux/man-pages/man7/address_families.7.html) + // which is used for communication. + // + // Type: Enum + // RequirementLevel: ConditionallyRequired (If different than `inet` and if + // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers + // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in + // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support + // instrumentations that follow previous versions of this document.) + // Stability: stable + // Examples: 'inet6', 'bluetooth' + NetSockFamilyKey = attribute.Key("net.sock.family") + + // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" + // semantic conventions. It represents the logical remote hostname, see + // note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'example.com' + // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an + // extra DNS lookup. + NetPeerNameKey = attribute.Key("net.peer.name") + + // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" + // semantic conventions. It represents the logical remote port number + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 80, 8080, 443 + NetPeerPortKey = attribute.Key("net.peer.port") + + // NetHostNameKey is the attribute Key conforming to the "net.host.name" + // semantic conventions. It represents the logical local hostname or + // similar, see note below. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'localhost' + NetHostNameKey = attribute.Key("net.host.name") + + // NetHostPortKey is the attribute Key conforming to the "net.host.port" + // semantic conventions. It represents the logical local port number, + // preferably the one that the peer used to connect + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 8080 + NetHostPortKey = attribute.Key("net.host.port") + + // NetSockHostAddrKey is the attribute Key conforming to the + // "net.sock.host.addr" semantic conventions. It represents the local + // socket address. Useful in case of a multi-IP host. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '192.168.0.1' + NetSockHostAddrKey = attribute.Key("net.sock.host.addr") + + // NetSockHostPortKey is the attribute Key conforming to the + // "net.sock.host.port" semantic conventions. It represents the local + // socket port number. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If defined for the address + // family and if different than `net.host.port` and if `net.sock.host.addr` + // is set. In other cases, it is still recommended to set this.) + // Stability: stable + // Examples: 35555 + NetSockHostPortKey = attribute.Key("net.sock.host.port") +) + +var ( + // ip_tcp + NetTransportTCP = NetTransportKey.String("ip_tcp") + // ip_udp + NetTransportUDP = NetTransportKey.String("ip_udp") + // Named or anonymous pipe. See note below + NetTransportPipe = NetTransportKey.String("pipe") + // In-process communication + NetTransportInProc = NetTransportKey.String("inproc") + // Something else (non IP-based) + NetTransportOther = NetTransportKey.String("other") +) + +var ( + // IPv4 address + NetSockFamilyInet = NetSockFamilyKey.String("inet") + // IPv6 address + NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") + // Unix domain socket path + NetSockFamilyUnix = NetSockFamilyKey.String("unix") +) + +// NetProtocolName returns an attribute KeyValue conforming to the +// "net.protocol.name" semantic conventions. It represents the application +// layer protocol used. The value SHOULD be normalized to lowercase. +func NetProtocolName(val string) attribute.KeyValue { + return NetProtocolNameKey.String(val) +} + +// NetProtocolVersion returns an attribute KeyValue conforming to the +// "net.protocol.version" semantic conventions. It represents the version of +// the application layer protocol used. See note below. +func NetProtocolVersion(val string) attribute.KeyValue { + return NetProtocolVersionKey.String(val) +} + +// NetSockPeerName returns an attribute KeyValue conforming to the +// "net.sock.peer.name" semantic conventions. It represents the remote socket +// peer name. +func NetSockPeerName(val string) attribute.KeyValue { + return NetSockPeerNameKey.String(val) +} + +// NetSockPeerAddr returns an attribute KeyValue conforming to the +// "net.sock.peer.addr" semantic conventions. It represents the remote socket +// peer address: IPv4 or IPv6 for internet protocols, path for local +// communication, +// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). +func NetSockPeerAddr(val string) attribute.KeyValue { + return NetSockPeerAddrKey.String(val) +} + +// NetSockPeerPort returns an attribute KeyValue conforming to the +// "net.sock.peer.port" semantic conventions. It represents the remote socket +// peer port. 
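+//
+// For example (hedged): NetSockPeerPort(16456) yields the key-value pair
+// net.sock.peer.port=16456.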
+func NetSockPeerPort(val int) attribute.KeyValue {
+	return NetSockPeerPortKey.Int(val)
+}
+
+// NetPeerName returns an attribute KeyValue conforming to the
+// "net.peer.name" semantic conventions. It represents the logical remote
+// hostname, see note below.
+func NetPeerName(val string) attribute.KeyValue {
+	return NetPeerNameKey.String(val)
+}
+
+// NetPeerPort returns an attribute KeyValue conforming to the
+// "net.peer.port" semantic conventions. It represents the logical remote port
+// number
+func NetPeerPort(val int) attribute.KeyValue {
+	return NetPeerPortKey.Int(val)
+}
+
+// NetHostName returns an attribute KeyValue conforming to the
+// "net.host.name" semantic conventions. It represents the logical local
+// hostname or similar, see note below.
+func NetHostName(val string) attribute.KeyValue {
+	return NetHostNameKey.String(val)
+}
+
+// NetHostPort returns an attribute KeyValue conforming to the
+// "net.host.port" semantic conventions. It represents the logical local port
+// number, preferably the one that the peer used to connect
+func NetHostPort(val int) attribute.KeyValue {
+	return NetHostPortKey.Int(val)
+}
+
+// NetSockHostAddr returns an attribute KeyValue conforming to the
+// "net.sock.host.addr" semantic conventions. It represents the local socket
+// address. Useful in case of a multi-IP host.
+func NetSockHostAddr(val string) attribute.KeyValue {
+	return NetSockHostAddrKey.String(val)
+}
+
+// NetSockHostPort returns an attribute KeyValue conforming to the
+// "net.sock.host.port" semantic conventions. It represents the local socket
+// port number.
+func NetSockHostPort(val int) attribute.KeyValue {
+	return NetSockHostPortKey.Int(val)
+}
+
+// These attributes may be used for any network related operation.
+const (
+	// NetHostConnectionTypeKey is the attribute Key conforming to the
+	// "net.host.connection.type" semantic conventions. It represents the
+	// internet connection type currently being used by the host.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'wifi'
+	NetHostConnectionTypeKey = attribute.Key("net.host.connection.type")
+
+	// NetHostConnectionSubtypeKey is the attribute Key conforming to the
+	// "net.host.connection.subtype" semantic conventions. It describes more
+	// details regarding the connection type. It may be the type of cell
+	// technology connection, but it could be used for describing details
+	// about a wifi connection.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'LTE'
+	NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype")
+
+	// NetHostCarrierNameKey is the attribute Key conforming to the
+	// "net.host.carrier.name" semantic conventions. It represents the name of
+	// the mobile carrier.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'sprint'
+	NetHostCarrierNameKey = attribute.Key("net.host.carrier.name")
+
+	// NetHostCarrierMccKey is the attribute Key conforming to the
+	// "net.host.carrier.mcc" semantic conventions. It represents the mobile
+	// carrier country code.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '310'
+	NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc")
+
+	// NetHostCarrierMncKey is the attribute Key conforming to the
+	// "net.host.carrier.mnc" semantic conventions. It represents the mobile
+	// carrier network code.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '001' + NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") + + // NetHostCarrierIccKey is the attribute Key conforming to the + // "net.host.carrier.icc" semantic conventions. It represents the ISO + // 3166-1 alpha-2 2-character country code associated with the mobile + // carrier network. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'DE' + NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc") +) + +var ( + // wifi + NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") + // wired + NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") + // cell + NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") + // unavailable + NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") + // unknown + NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") +) + +var ( + // GPRS + NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") + // EDGE + NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") + // UMTS + NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") + // CDMA + NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") + // HSUPA + NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") + // HSPA + NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") + // IDEN + NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") + // EVDO Rev. B + NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") + // LTE + NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") + // EHRPD + NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") + // HSPAP + NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") + // GSM + NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") + // TD-SCDMA + NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") + // IWLAN + NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") + // LTE CA + NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") +) + +// NetHostCarrierName returns an attribute KeyValue conforming to the +// "net.host.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetHostCarrierName(val string) attribute.KeyValue { + return NetHostCarrierNameKey.String(val) +} + +// NetHostCarrierMcc returns an attribute KeyValue conforming to the +// "net.host.carrier.mcc" semantic conventions. It represents the mobile +// carrier country code. 
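+//
+// A hedged sketch combining this helper with the connection-type enum values
+// defined above:
+//
+//	attrs := []attribute.KeyValue{
+//		NetHostConnectionTypeCell,
+//		NetHostConnectionSubtypeLte,
+//		NetHostCarrierMcc("310"),
+//	}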
+func NetHostCarrierMcc(val string) attribute.KeyValue { + return NetHostCarrierMccKey.String(val) +} + +// NetHostCarrierMnc returns an attribute KeyValue conforming to the +// "net.host.carrier.mnc" semantic conventions. It represents the mobile +// carrier network code. +func NetHostCarrierMnc(val string) attribute.KeyValue { + return NetHostCarrierMncKey.String(val) +} + +// NetHostCarrierIcc returns an attribute KeyValue conforming to the +// "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetHostCarrierIcc(val string) attribute.KeyValue { + return NetHostCarrierIccKey.String(val) +} + +// Semantic conventions for HTTP client and server Spans. +const ( + // HTTPRequestContentLengthKey is the attribute Key conforming to the + // "http.request_content_length" semantic conventions. It represents the + // size of the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3495 + HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") + + // HTTPResponseContentLengthKey is the attribute Key conforming to the + // "http.response_content_length" semantic conventions. It represents the + // size of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3495 + HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") +) + +// HTTPRequestContentLength returns an attribute KeyValue conforming to the +// "http.request_content_length" semantic conventions. It represents the size +// of the request payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPRequestContentLength(val int) attribute.KeyValue { + return HTTPRequestContentLengthKey.Int(val) +} + +// HTTPResponseContentLength returns an attribute KeyValue conforming to the +// "http.response_content_length" semantic conventions. It represents the size +// of the response payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPResponseContentLength(val int) attribute.KeyValue { + return HTTPResponseContentLengthKey.Int(val) +} + +// Semantic convention describing per-message attributes populated on messaging +// spans or links. +const ( + // MessagingMessageIDKey is the attribute Key conforming to the + // "messaging.message.id" semantic conventions. 
It represents a value used + // by the messaging system as an identifier for the message, represented as + // a string. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + MessagingMessageIDKey = attribute.Key("messaging.message.id") + + // MessagingMessageConversationIDKey is the attribute Key conforming to the + // "messaging.message.conversation_id" semantic conventions. It represents + // the [conversation ID](#conversations) identifying the conversation to + // which the message belongs, represented as a string. Sometimes called + // "Correlation ID". + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyConversationID' + MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") + + // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to + // the "messaging.message.payload_size_bytes" semantic conventions. It + // represents the (uncompressed) size of the message payload in bytes. Also + // use this attribute if it is unknown whether the compressed or + // uncompressed payload size is reported. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2738 + MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes") + + // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key + // conforming to the "messaging.message.payload_compressed_size_bytes" + // semantic conventions. It represents the compressed size of the message + // payload in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2048 + MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes") +) + +// MessagingMessageID returns an attribute KeyValue conforming to the +// "messaging.message.id" semantic conventions. It represents a value used by +// the messaging system as an identifier for the message, represented as a +// string. +func MessagingMessageID(val string) attribute.KeyValue { + return MessagingMessageIDKey.String(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming +// to the "messaging.message.conversation_id" semantic conventions. It +// represents the [conversation ID](#conversations) identifying the +// conversation to which the message belongs, represented as a string. +// Sometimes called "Correlation ID". +func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming +// to the "messaging.message.payload_size_bytes" semantic conventions. It +// represents the (uncompressed) size of the message payload in bytes. Also use +// this attribute if it is unknown whether the compressed or uncompressed +// payload size is reported. +func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue { + return MessagingMessagePayloadSizeBytesKey.Int(val) +} + +// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue +// conforming to the "messaging.message.payload_compressed_size_bytes" semantic +// conventions. It represents the compressed size of the message payload in +// bytes. 
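Taken together, these per-message constructors are typically applied to the span created for a publish or process operation. A minimal sketch, again assuming the otel trace API; the message ID and payload size are placeholder values:

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
	"go.opentelemetry.io/otel/trace"
)

// tagMessage annotates a messaging span with per-message attributes; the
// ID and size are placeholder values.
func tagMessage(span trace.Span) {
	span.SetAttributes(
		semconv.MessagingMessageID("452a7c7c7c7048c2f887f61572b18fc2"),
		semconv.MessagingMessagePayloadSizeBytes(2738),
	)
}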
+func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue { + return MessagingMessagePayloadCompressedSizeBytesKey.Int(val) +} + +// Semantic convention for attributes that describe messaging destination on +// broker +const ( + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the + // message destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic + // or other entity within the broker. If + // the broker does not have such notion, the destination name SHOULD + // uniquely identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the + // low cardinality representation of the messaging destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example + // would be a destination name involving a user name or product id. + // Although the destination name in this case is of high cardinality, the + // underlying template is of low cardinality and can be effectively used + // for grouping and aggregation. + MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + + // MessagingDestinationTemporaryKey is the attribute Key conforming to the + // "messaging.destination.temporary" semantic conventions. It represents a + // boolean that is true if the message destination is temporary and might + // not exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions. It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") +) + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to +// the "messaging.destination.template" semantic conventions. It represents the +// low cardinality representation of the messaging destination name +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to +// the "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. 
+func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to +// the "messaging.destination.anonymous" semantic conventions. It represents a +// boolean that is true if the message destination is anonymous (could be +// unnamed or have auto-generated name). +func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// Semantic convention for attributes that describe messaging source on broker +const ( + // MessagingSourceNameKey is the attribute Key conforming to the + // "messaging.source.name" semantic conventions. It represents the message + // source name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + // Note: Source name SHOULD uniquely identify a specific queue, topic, or + // other entity within the broker. If + // the broker does not have such notion, the source name SHOULD uniquely + // identify the broker. + MessagingSourceNameKey = attribute.Key("messaging.source.name") + + // MessagingSourceTemplateKey is the attribute Key conforming to the + // "messaging.source.template" semantic conventions. It represents the low + // cardinality representation of the messaging source name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/customers/{customerID}' + // Note: Source names could be constructed from templates. An example would + // be a source name involving a user name or product id. Although the + // source name in this case is of high cardinality, the underlying template + // is of low cardinality and can be effectively used for grouping and + // aggregation. + MessagingSourceTemplateKey = attribute.Key("messaging.source.template") + + // MessagingSourceTemporaryKey is the attribute Key conforming to the + // "messaging.source.temporary" semantic conventions. It represents a + // boolean that is true if the message source is temporary and might not + // exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary") + + // MessagingSourceAnonymousKey is the attribute Key conforming to the + // "messaging.source.anonymous" semantic conventions. It represents a + // boolean that is true if the message source is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous") +) + +// MessagingSourceName returns an attribute KeyValue conforming to the +// "messaging.source.name" semantic conventions. It represents the message +// source name +func MessagingSourceName(val string) attribute.KeyValue { + return MessagingSourceNameKey.String(val) +} + +// MessagingSourceTemplate returns an attribute KeyValue conforming to the +// "messaging.source.template" semantic conventions. It represents the low +// cardinality representation of the messaging source name +func MessagingSourceTemplate(val string) attribute.KeyValue { + return MessagingSourceTemplateKey.String(val) +} + +// MessagingSourceTemporary returns an attribute KeyValue conforming to the +// "messaging.source.temporary" semantic conventions. 
It represents a boolean
+// that is true if the message source is temporary and might not exist anymore
+// after messages are processed.
+func MessagingSourceTemporary(val bool) attribute.KeyValue {
+	return MessagingSourceTemporaryKey.Bool(val)
+}
+
+// MessagingSourceAnonymous returns an attribute KeyValue conforming to the
+// "messaging.source.anonymous" semantic conventions. It represents a boolean
+// that is true if the message source is anonymous (could be unnamed or have
+// auto-generated name).
+func MessagingSourceAnonymous(val bool) attribute.KeyValue {
+	return MessagingSourceAnonymousKey.Bool(val)
+}
+
+// Attributes for RabbitMQ
+const (
+	// MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
+	// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+	// conventions. It represents the rabbitMQ message routing key.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (If not empty.)
+	// Stability: stable
+	// Examples: 'myKey'
+	MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+)
+
+// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the rabbitMQ message routing key.
+func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
+	return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
+}
+
+// Attributes for Apache Kafka
+const (
+	// MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+	// "messaging.kafka.message.key" semantic conventions. Message keys in
+	// Kafka are used for grouping alike messages to ensure they're processed
+	// on the same partition. They differ from `messaging.message.id` in that
+	// they're not unique. If the key is `null`, the attribute MUST NOT be
+	// set.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'myKey'
+	// Note: If the key type is not string, its string representation has to
+	// be supplied for the attribute. If the key has no unambiguous, canonical
+	// string form, don't include its value.
+	MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+	// MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
+	// "messaging.kafka.consumer.group" semantic conventions. It represents the
+	// name of the Kafka Consumer Group that is handling the message. Only
+	// applies to consumers, not producers.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'my-group'
+	MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
+
+	// MessagingKafkaClientIDKey is the attribute Key conforming to the
+	// "messaging.kafka.client_id" semantic conventions. It represents the
+	// client ID for the Consumer or Producer that is handling the message.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'client-5'
+	MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id")
+
+	// MessagingKafkaDestinationPartitionKey is the attribute Key conforming to
+	// the "messaging.kafka.destination.partition" semantic conventions. It
+	// represents the partition the message is sent to.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 2
+	MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition")
+
+	// MessagingKafkaSourcePartitionKey is the attribute Key conforming to the
+	// "messaging.kafka.source.partition" semantic conventions. It represents
+	// the partition the message is received from.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 2
+	MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition")
+
+	// MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
+	// "messaging.kafka.message.offset" semantic conventions. It represents the
+	// offset of a record in the corresponding Kafka partition.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 42
+	MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
+
+	// MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+	// "messaging.kafka.message.tombstone" semantic conventions. It represents
+	// a boolean that is true if the message is a tombstone.
+	//
+	// Type: boolean
+	// RequirementLevel: ConditionallyRequired (If value is `true`. When
+	// missing, the value is assumed to be `false`.)
+	// Stability: stable
+	MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+)
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. Message keys in Kafka
+// are used for grouping alike messages to ensure they're processed on the
+// same partition. They differ from `messaging.message.id` in that they're not
+// unique. If the key is `null`, the attribute MUST NOT be set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+	return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
+// the "messaging.kafka.consumer.group" semantic conventions. It represents the
+// name of the Kafka Consumer Group that is handling the message. Only applies
+// to consumers, not producers.
+func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
+	return MessagingKafkaConsumerGroupKey.String(val)
+}
+
+// MessagingKafkaClientID returns an attribute KeyValue conforming to the
+// "messaging.kafka.client_id" semantic conventions. It represents the client
+// ID for the Consumer or Producer that is handling the message.
+func MessagingKafkaClientID(val string) attribute.KeyValue {
+	return MessagingKafkaClientIDKey.String(val)
+}
+
+// MessagingKafkaDestinationPartition returns an attribute KeyValue
+// conforming to the "messaging.kafka.destination.partition" semantic
+// conventions. It represents the partition the message is sent to.
+func MessagingKafkaDestinationPartition(val int) attribute.KeyValue {
+	return MessagingKafkaDestinationPartitionKey.Int(val)
+}
+
+// MessagingKafkaSourcePartition returns an attribute KeyValue conforming to
+// the "messaging.kafka.source.partition" semantic conventions. It represents
+// the partition the message is received from.
+func MessagingKafkaSourcePartition(val int) attribute.KeyValue {
+	return MessagingKafkaSourcePartitionKey.Int(val)
+}
+
+// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
+// the "messaging.kafka.message.offset" semantic conventions.
It represents the
+// offset of a record in the corresponding Kafka partition.
+func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
+	return MessagingKafkaMessageOffsetKey.Int(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
+// to the "messaging.kafka.message.tombstone" semantic conventions. It
+// represents a boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+	return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
+
+// Attributes for Apache RocketMQ
+const (
+	// MessagingRocketmqNamespaceKey is the attribute Key conforming to the
+	// "messaging.rocketmq.namespace" semantic conventions. It represents the
+	// namespace of RocketMQ resources, resources in different namespaces are
+	// individual.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'myNamespace'
+	MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+
+	// MessagingRocketmqClientGroupKey is the attribute Key conforming to the
+	// "messaging.rocketmq.client_group" semantic conventions. It represents
+	// the name of the RocketMQ producer/consumer group that is handling the
+	// message. The client type is identified by the SpanKind.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'myConsumerGroup'
+	MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+
+	// MessagingRocketmqClientIDKey is the attribute Key conforming to the
+	// "messaging.rocketmq.client_id" semantic conventions. It represents the
+	// unique identifier for each client.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'myhost@8742@s8083jm'
+	MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id")
+
+	// MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
+	// conforming to the "messaging.rocketmq.message.delivery_timestamp"
+	// semantic conventions. It represents the timestamp in milliseconds that
+	// the delay message is expected to be delivered to consumer.
+	//
+	// Type: int
+	// RequirementLevel: ConditionallyRequired (If the message type is delay
+	// and delay time level is not specified.)
+	// Stability: stable
+	// Examples: 1665987217045
+	MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+	// MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
+	// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+	// conventions. It represents the delay time level for delay message, which
+	// determines the message delay time.
+	//
+	// Type: int
+	// RequirementLevel: ConditionallyRequired (If the message type is delay
+	// and delivery timestamp is not specified.)
+	// Stability: stable
+	// Examples: 3
+	MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+	// MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.group" semantic conventions. It is
+	// essential for FIFO messages. Messages that belong to the same message
+	// group are always processed one by one within the same consumer group.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (If the message type is FIFO.)
+ // Stability: stable + // Examples: 'myMessageGroup' + MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") + + // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the + // "messaging.rocketmq.message.type" semantic conventions. It represents + // the type of message. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") + + // MessagingRocketmqMessageTagKey is the attribute Key conforming to the + // "messaging.rocketmq.message.tag" semantic conventions. It represents the + // secondary classifier of message besides topic. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'tagA' + MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") + + // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the + // "messaging.rocketmq.message.keys" semantic conventions. It represents + // the key(s) of message, another way to mark message besides message id. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'keyA', 'keyB' + MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") + + // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to + // the "messaging.rocketmq.consumption_model" semantic conventions. It + // represents the model of message consumption. This only applies to + // consumer spans. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") +) + +var ( + // Normal message + MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") + // FIFO message + MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") + // Delay message + MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") + // Transaction message + MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") +) + +var ( + // Clustering consumption model + MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") + // Broadcasting consumption model + MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") +) + +// MessagingRocketmqNamespace returns an attribute KeyValue conforming to +// the "messaging.rocketmq.namespace" semantic conventions. It represents the +// namespace of RocketMQ resources, resources in different namespaces are +// individual. +func MessagingRocketmqNamespace(val string) attribute.KeyValue { + return MessagingRocketmqNamespaceKey.String(val) +} + +// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.client_group" semantic conventions. It represents +// the name of the RocketMQ producer/consumer group that is handling the +// message. The client type is identified by the SpanKind. +func MessagingRocketmqClientGroup(val string) attribute.KeyValue { + return MessagingRocketmqClientGroupKey.String(val) +} + +// MessagingRocketmqClientID returns an attribute KeyValue conforming to the +// "messaging.rocketmq.client_id" semantic conventions. It represents the +// unique identifier for each client. 
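The namespace, client group, and client ID keys above are Required-level, so instrumentation would normally supply them when the producer or consumer span is started; the helper below (shown before the remaining constructor definitions) is a minimal sketch under that assumption. It assumes the otel trace API; the span name, namespace, group, and client ID are placeholder values:

package example

import (
	"context"

	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
	"go.opentelemetry.io/otel/trace"
)

// startRocketmqSend opens a producer span carrying the Required-level
// RocketMQ attributes; the namespace, group, and client ID are placeholders.
func startRocketmqSend(ctx context.Context, tracer trace.Tracer) (context.Context, trace.Span) {
	return tracer.Start(ctx, "myTopic send", trace.WithAttributes(
		semconv.MessagingRocketmqNamespace("myNamespace"),
		semconv.MessagingRocketmqClientGroup("myProducerGroup"),
		semconv.MessagingRocketmqClientID("myhost@8742@s8083jm"),
	))
}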
+func MessagingRocketmqClientID(val string) attribute.KeyValue {
+	return MessagingRocketmqClientIDKey.String(val)
+}
+
+// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to consumer.
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+	return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for delay message, which
+// determines the message delay time.
+func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
+	return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It is
+// essential for FIFO messages. Messages that belong to the same message
+// group are always processed one by one within the same consumer group.
+func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
+	return MessagingRocketmqMessageGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of message besides topic.
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
+	return MessagingRocketmqMessageTagKey.String(val)
+}
+
+// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.keys" semantic conventions. It represents
+// the key(s) of message, another way to mark message besides message id.
+func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
+	return MessagingRocketmqMessageKeysKey.StringSlice(val)
+}
+
+// Describes user-agent attributes.
+const (
+	// UserAgentOriginalKey is the attribute Key conforming to the
+	// "user_agent.original" semantic conventions. It represents the value of
+	// the [HTTP
+	// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+	// header sent by the client.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'CERN-LineMode/2.15 libwww/2.17b3'
+	UserAgentOriginalKey = attribute.Key("user_agent.original")
+)
+
+// UserAgentOriginal returns an attribute KeyValue conforming to the
+// "user_agent.original" semantic conventions. It represents the value of the
+// [HTTP
+// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+// header sent by the client.
+func UserAgentOriginal(val string) attribute.KeyValue {
+	return UserAgentOriginalKey.String(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go
new file mode 100644
index 00000000..359c5a69
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package semconv implements OpenTelemetry semantic conventions.
+//
+// OpenTelemetry semantic conventions are agreed standardized naming
+// patterns for OpenTelemetry things. This package represents the conventions
+// as of the v1.20.0 version of the OpenTelemetry specification.
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go
new file mode 100644
index 00000000..8ac9350d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go
@@ -0,0 +1,199 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// This semantic convention defines the attributes used to represent a feature
+// flag evaluation as an event.
+const (
+	// FeatureFlagKeyKey is the attribute Key conforming to the
+	// "feature_flag.key" semantic conventions. It represents the unique
+	// identifier of the feature flag.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'logo-color'
+	FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+	// FeatureFlagProviderNameKey is the attribute Key conforming to the
+	// "feature_flag.provider_name" semantic conventions. It represents the
+	// name of the service provider that performs the flag evaluation.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: stable
+	// Examples: 'Flag Manager'
+	FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+	// FeatureFlagVariantKey is the attribute Key conforming to the
+	// "feature_flag.variant" semantic conventions. It SHOULD be a semantic
+	// identifier for a value. If one is unavailable, a stringified version
+	// of the value can be used.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: stable
+	// Examples: 'red', 'true', 'on'
+	// Note: A semantic identifier, commonly referred to as a variant, provides
+	// a means
+	// for referring to a value without including the value itself. This can
+	// provide additional context for understanding the meaning behind a value.
+	// For example, the variant `red` may be used for the value `#c05543`.
+	//
+	// A stringified version of the value can be used in situations where a
+	// semantic identifier is unavailable.
String representation of the value
+	// should be determined by the implementer.
+	FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+	return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+	return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It SHOULD be a semantic
+// identifier for a value. If one is unavailable, a stringified version of the
+// value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+	return FeatureFlagVariantKey.String(val)
+}
+
+// RPC received/sent message.
+const (
+	// MessageTypeKey is the attribute Key conforming to the "message.type"
+	// semantic conventions. It represents whether this is a received or sent
+	// message.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessageTypeKey = attribute.Key("message.type")
+
+	// MessageIDKey is the attribute Key conforming to the "message.id"
+	// semantic conventions. It MUST be calculated as two different counters
+	// starting from `1`, one for sent messages and one for received messages.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: This way we guarantee that the values will be consistent between
+	// different implementations.
+	MessageIDKey = attribute.Key("message.id")
+
+	// MessageCompressedSizeKey is the attribute Key conforming to the
+	// "message.compressed_size" semantic conventions. It represents the
+	// compressed size of the message in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessageCompressedSizeKey = attribute.Key("message.compressed_size")
+
+	// MessageUncompressedSizeKey is the attribute Key conforming to the
+	// "message.uncompressed_size" semantic conventions. It represents the
+	// uncompressed size of the message in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
+)
+
+var (
+	// sent
+	MessageTypeSent = MessageTypeKey.String("SENT")
+	// received
+	MessageTypeReceived = MessageTypeKey.String("RECEIVED")
+)
+
+// MessageID returns an attribute KeyValue conforming to the "message.id"
+// semantic conventions. It MUST be calculated as two different counters
+// starting from `1`, one for sent messages and one for received messages.
+func MessageID(val int) attribute.KeyValue {
+	return MessageIDKey.Int(val)
+}
+
+// MessageCompressedSize returns an attribute KeyValue conforming to the
+// "message.compressed_size" semantic conventions. It represents the compressed
+// size of the message in bytes.
+func MessageCompressedSize(val int) attribute.KeyValue {
+	return MessageCompressedSizeKey.Int(val)
+}
+
+// MessageUncompressedSize returns an attribute KeyValue conforming to the
+// "message.uncompressed_size" semantic conventions.
It represents the
+// uncompressed size of the message in bytes.
+func MessageUncompressedSize(val int) attribute.KeyValue {
+	return MessageUncompressedSizeKey.Int(val)
+}
+
+// The attributes used to report a single exception associated with a span.
+const (
+	// ExceptionEscapedKey is the attribute Key conforming to the
+	// "exception.escaped" semantic conventions. It SHOULD be set to true if
+	// the exception event is recorded at a point where it is known that the
+	// exception is escaping the scope of the span.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: An exception is considered to have escaped (or left) the scope of
+	// a span,
+	// if that span is ended while the exception is still logically "in
+	// flight".
+	// This may be actually "in flight" in some languages (e.g. if the
+	// exception
+	// is passed to a Context manager's `__exit__` method in Python) but will
+	// usually be caught at the point of recording the exception in most
+	// languages.
+	//
+	// It is usually not possible to determine at the point where an exception
+	// is thrown
+	// whether it will escape the scope of a span.
+	// However, it is trivial to know that an exception
+	// will escape, if one checks for an active exception just before ending
+	// the span,
+	// as done in the [example above](#recording-an-exception).
+	//
+	// It follows that an exception may still escape the scope of the span
+	// even if the `exception.escaped` attribute was not set or set to false,
+	// since the event might have been recorded at a time where it was not
+	// clear whether the exception will escape.
+	ExceptionEscapedKey = attribute.Key("exception.escaped")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It SHOULD be set to true if the
+// exception event is recorded at a point where it is known that the exception
+// is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue {
+	return ExceptionEscapedKey.Bool(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go
new file mode 100644
index 00000000..09ff4dfd
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+const (
+	// ExceptionEventName is the name of the Span event representing an exception.
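In the Go SDK, Span.RecordError fills in the exception.type and exception.message attributes itself, so exception.escaped is the piece instrumentation typically adds by hand. A minimal sketch, assuming the otel trace API, of recording an escaping exception just before ending a span:

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
	"go.opentelemetry.io/otel/trace"
)

// endWithError records err as an escaping exception event and ends the span.
func endWithError(span trace.Span, err error) {
	span.RecordError(err, trace.WithAttributes(semconv.ExceptionEscaped(true)))
	span.End()
}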
+ ExceptionEventName = "exception" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go new file mode 100644 index 00000000..342aede9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go @@ -0,0 +1,21 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" + +// HTTP scheme attributes. +var ( + HTTPSchemeHTTP = HTTPSchemeKey.String("http") + HTTPSchemeHTTPS = HTTPSchemeKey.String("https") +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go new file mode 100644 index 00000000..a2b90674 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go @@ -0,0 +1,2071 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" + +import "go.opentelemetry.io/otel/attribute" + +// The web browser in which the application represented by the resource is +// running. The `browser.*` attributes MUST be used only for resources that +// represent applications running in a web browser (regardless of whether +// running on a mobile or desktop device). +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.brands`). + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserPlatformKey is the attribute Key conforming to the + // "browser.platform" semantic conventions. It represents the platform on + // which the browser is running + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.platform`). 
If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute + // SHOULD be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client + // Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). + // Note that some (but not all) of these values can overlap with values in + // the [`os.type` and `os.name` attributes](./os.md). However, for + // consistency, the values in the `browser.platform` attribute should + // capture the exact value that the user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the + // browser is running on a mobile device + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute + // SHOULD be left unset. + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserLanguageKey is the attribute Key conforming to the + // "browser.language" semantic conventions. It represents the preferred + // language of the user using the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") +) + +// BrowserBrands returns an attribute KeyValue conforming to the +// "browser.brands" semantic conventions. It represents the array of brand name +// and version separated by a space +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the +// "browser.mobile" semantic conventions. It represents a boolean that is true +// if the browser is running on a mobile device +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred +// language of the user using the browser +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// A cloud environment (e.g. GCP, Azure, AWS) +const ( + // CloudProviderKey is the attribute Key conforming to the "cloud.provider" + // semantic conventions. It represents the name of the cloud provider. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + CloudProviderKey = attribute.Key("cloud.provider") + + // CloudAccountIDKey is the attribute Key conforming to the + // "cloud.account.id" semantic conventions. It represents the cloud account + // ID the resource is assigned to. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '111111111111', 'opentelemetry' + CloudAccountIDKey = attribute.Key("cloud.account.id") + + // CloudRegionKey is the attribute Key conforming to the "cloud.region" + // semantic conventions. It represents the geographical region the resource + // is running. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for + // example [Alibaba Cloud + // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS + // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), + // [Azure + // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/), + // [Google Cloud regions](https://cloud.google.com/about/locations), or + // [Tencent Cloud + // regions](https://www.tencentcloud.com/document/product/213/6091). + CloudRegionKey = attribute.Key("cloud.region") + + // CloudResourceIDKey is the attribute Key conforming to the + // "cloud.resource_id" semantic conventions. It represents the cloud + // provider-specific native identifier of the monitored cloud resource + // (e.g. an + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // on AWS, a [fully qualified resource + // ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id) + // on Azure, a [full resource + // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) + // on GCP) + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', + // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', + // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/' + // Note: On some cloud providers, it may not be possible to determine the + // full ID at startup, + // so it may be necessary to set `cloud.resource_id` as a span attribute + // instead. + // + // The exact value to use for `cloud.resource_id` depends on the cloud + // provider. + // The following well-known definitions MUST be used if you set this + // attribute and they apply: + // + // * **AWS Lambda:** The function + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + // Take care not to use the "invoked ARN" directly but replace any + // [alias + // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) + // with the resolved function version, as the same runtime instance may + // be invokable with + // multiple different aliases. + // * **GCP:** The [URI of the + // resource](https://cloud.google.com/iam/docs/full-resource-names) + // * **Azure:** The [Fully Qualified Resource + // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id) + // of the invoked function, + // *not* the function app, having the form + // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider. + CloudResourceIDKey = attribute.Key("cloud.resource_id") + + // CloudAvailabilityZoneKey is the attribute Key conforming to the + // "cloud.availability_zone" semantic conventions. 
Cloud regions
+	// often have multiple, isolated locations known as zones to increase
+	// availability. The availability zone represents the zone where the
+	// resource is running.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'us-east-1c'
+	// Note: Availability zones are called "zones" on Alibaba Cloud and Google
+	// Cloud.
+	CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+	// CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+	// semantic conventions. It represents the cloud platform in use.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: The prefix of the service SHOULD match the one specified in
+	// `cloud.provider`.
+	CloudPlatformKey = attribute.Key("cloud.platform")
+)
+
+var (
+	// Alibaba Cloud
+	CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+	// Amazon Web Services
+	CloudProviderAWS = CloudProviderKey.String("aws")
+	// Microsoft Azure
+	CloudProviderAzure = CloudProviderKey.String("azure")
+	// Google Cloud Platform
+	CloudProviderGCP = CloudProviderKey.String("gcp")
+	// Heroku Platform as a Service
+	CloudProviderHeroku = CloudProviderKey.String("heroku")
+	// IBM Cloud
+	CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
+	// Tencent Cloud
+	CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+var (
+	// Alibaba Cloud Elastic Compute Service
+	CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
+	// Alibaba Cloud Function Compute
+	CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
+	// Red Hat OpenShift on Alibaba Cloud
+	CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
+	// AWS Elastic Compute Cloud
+	CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
+	// AWS Elastic Container Service
+	CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
+	// AWS Elastic Kubernetes Service
+	CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
+	// AWS Lambda
+	CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
+	// AWS Elastic Beanstalk
+	CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
+	// AWS App Runner
+	CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
+	// Red Hat OpenShift on AWS (ROSA)
+	CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
+	// Azure Virtual Machines
+	CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
+	// Azure Container Instances
+	CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
+	// Azure Kubernetes Service
+	CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
+	// Azure Functions
+	CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
+	// Azure App Service
+	CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
+	// Azure Red Hat OpenShift
+	CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
+	// Google Cloud Compute Engine (GCE)
+	CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
+	// Google Cloud Run
+	CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
+	// Google Cloud Kubernetes Engine (GKE)
+	CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
+	// Google Cloud Functions (GCF)
+	CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
+	// Google Cloud App Engine (GAE)
+	CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
+	// Red Hat OpenShift on Google Cloud
+	CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift")
+	// Red Hat OpenShift on IBM Cloud
+	CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
+	// Tencent Cloud Cloud Virtual Machine (CVM)
+	CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
+	// Tencent Cloud Elastic Kubernetes Service (EKS)
+	CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+	// Tencent Cloud Serverless Cloud Function (SCF)
+	CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+	return CloudAccountIDKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// the resource is running.
+func CloudRegion(val string) attribute.KeyValue {
+	return CloudRegionKey.String(val)
+}
+
+// CloudResourceID returns an attribute KeyValue conforming to the
+// "cloud.resource_id" semantic conventions. It represents the cloud
+// provider-specific native identifier of the monitored cloud resource (e.g. an
+// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+// on AWS, a [fully qualified resource
+// ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+// on Azure, a [full resource
+// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+// on GCP)
+func CloudResourceID(val string) attribute.KeyValue {
+	return CloudResourceIDKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. Cloud regions often have
+// multiple, isolated locations known as zones to increase availability. The
+// availability zone represents the zone where the resource is running.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+	return CloudAvailabilityZoneKey.String(val)
+}
+
+// Resources used by AWS Elastic Container Service (ECS).
+const (
+	// AWSECSContainerARNKey is the attribute Key conforming to the
+	// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+	// Resource Name (ARN) of an [ECS container
+	// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples:
+	// 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
+	AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+
+	// AWSECSClusterARNKey is the attribute Key conforming to the
+	// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+	// [ECS
+	// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+	AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
+
+	// AWSECSLaunchtypeKey is the attribute Key conforming to the
+	// "aws.ecs.launchtype" semantic conventions.
It represents the [launch + // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) + // for an ECS task. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the + // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an + // [ECS task + // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the task + // definition family this task definition is a member of. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision + // for this task definition. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container +// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS +// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS +// task +// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the task +// definition family this task definition is a member of. +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// this task definition. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an + // EKS cluster. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS +// cluster. +func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + +// Resources specific to Amazon Web Services. +const ( + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of + // the AWS log group(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like + // multi-container applications, where a single application has sidecar + // containers, and each writes to its own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogGroupARNsKey is the attribute Key conforming to the + // "aws.log.group.arns" semantic conventions. It represents the Amazon + // Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. It represents the name(s) + // of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of + // the AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + // One log group can contain several log streams, so these ARNs necessarily + // identify both a log group and a log stream. + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") +) + +// AWSLogGroupNames returns an attribute KeyValue conforming to the +// "aws.log.group.names" semantic conventions. It represents the name(s) of the +// AWS log group(s) an application is writing to. +func AWSLogGroupNames(val ...string) attribute.KeyValue { + return AWSLogGroupNamesKey.StringSlice(val) +} + +// AWSLogGroupARNs returns an attribute KeyValue conforming to the +// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource +// Name(s) (ARN) of the AWS log group(s).
+func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. It represents the name(s) of +// the AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the +// AWS log stream(s). +func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + +// Heroku dyno metadata +const ( + // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the + // "heroku.release.creation_timestamp" semantic conventions. It represents + // the time and date the release was created + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2022-10-23T18:00:42Z' + HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") + + // HerokuReleaseCommitKey is the attribute Key conforming to the + // "heroku.release.commit" semantic conventions. It represents the commit + // hash for the current release + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' + HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") + + // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" + // semantic conventions. It represents the unique identifier for the + // application + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' + HerokuAppIDKey = attribute.Key("heroku.app.id") +) + +// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming +// to the "heroku.release.creation_timestamp" semantic conventions. It +// represents the time and date the release was created +func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { + return HerokuReleaseCreationTimestampKey.String(val) +} + +// HerokuReleaseCommit returns an attribute KeyValue conforming to the +// "heroku.release.commit" semantic conventions. It represents the commit hash +// for the current release +func HerokuReleaseCommit(val string) attribute.KeyValue { + return HerokuReleaseCommitKey.String(val) +} + +// HerokuAppID returns an attribute KeyValue conforming to the +// "heroku.app.id" semantic conventions. It represents the unique identifier +// for the application +func HerokuAppID(val string) attribute.KeyValue { + return HerokuAppIDKey.String(val) +} + +// A container instance. +const ( + // ContainerNameKey is the attribute Key conforming to the "container.name" + // semantic conventions. It represents the container name used by container + // runtime. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-autoconf' + ContainerNameKey = attribute.Key("container.name") + + // ContainerIDKey is the attribute Key conforming to the "container.id" + // semantic conventions. It represents the container ID. Usually a UUID, as + // for example used to [identify Docker + // containers](https://docs.docker.com/engine/reference/run/#container-identification). + // The UUID might be abbreviated. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'a3bf90e006b2' + ContainerIDKey = attribute.Key("container.id") + + // ContainerRuntimeKey is the attribute Key conforming to the + // "container.runtime" semantic conventions. It represents the container + // runtime managing this container. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'docker', 'containerd', 'rkt' + ContainerRuntimeKey = attribute.Key("container.runtime") + + // ContainerImageNameKey is the attribute Key conforming to the + // "container.image.name" semantic conventions. It represents the name of + // the image the container was built on. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'gcr.io/opentelemetry/operator' + ContainerImageNameKey = attribute.Key("container.image.name") + + // ContainerImageTagKey is the attribute Key conforming to the + // "container.image.tag" semantic conventions. It represents the container + // image tag. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0.1' + ContainerImageTagKey = attribute.Key("container.image.tag") +) + +// ContainerName returns an attribute KeyValue conforming to the +// "container.name" semantic conventions. It represents the container name used +// by container runtime. +func ContainerName(val string) attribute.KeyValue { + return ContainerNameKey.String(val) +} + +// ContainerID returns an attribute KeyValue conforming to the +// "container.id" semantic conventions. It represents the container ID. Usually +// a UUID, as for example used to [identify Docker +// containers](https://docs.docker.com/engine/reference/run/#container-identification). +// The UUID might be abbreviated. +func ContainerID(val string) attribute.KeyValue { + return ContainerIDKey.String(val) +} + +// ContainerRuntime returns an attribute KeyValue conforming to the +// "container.runtime" semantic conventions. It represents the container +// runtime managing this container. +func ContainerRuntime(val string) attribute.KeyValue { + return ContainerRuntimeKey.String(val) +} + +// ContainerImageName returns an attribute KeyValue conforming to the +// "container.image.name" semantic conventions. It represents the name of the +// image the container was built on. +func ContainerImageName(val string) attribute.KeyValue { + return ContainerImageNameKey.String(val) +} + +// ContainerImageTag returns an attribute KeyValue conforming to the +// "container.image.tag" semantic conventions. It represents the container +// image tag. +func ContainerImageTag(val string) attribute.KeyValue { + return ContainerImageTagKey.String(val) +} + +// The software deployment. +const ( + // DeploymentEnvironmentKey is the attribute Key conforming to the + // "deployment.environment" semantic conventions. It represents the name of + // the [deployment + // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka + // deployment tier). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'staging', 'production' + DeploymentEnvironmentKey = attribute.Key("deployment.environment") +) + +// DeploymentEnvironment returns an attribute KeyValue conforming to the +// "deployment.environment" semantic conventions. It represents the name of the +// [deployment +// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka +// deployment tier). 
+func DeploymentEnvironment(val string) attribute.KeyValue { + return DeploymentEnvironmentKey.String(val) +} + +// The device on which the process represented by this resource is running. +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values + // outlined below. This value is not an advertising identifier and MUST NOT + // be used as such. On iOS (Swift or Objective-C), this value MUST be equal + // to the [vendor + // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). + // On Android (Java or Kotlin), this value MUST be equal to the Firebase + // Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on + // best practices and exact implementation details. Caution should be taken + // when storing personal data or anything which can identify a user. GDPR + // and data protection laws may apply, ensure you do your own due + // diligence. + DeviceIDKey = attribute.Key("device.id") + + // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. It represents the model + // identifier for the device + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine readable version + // of the model identifier rather than the market or consumer-friendly name + // of the device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + + // DeviceModelNameKey is the attribute Key conforming to the + // "device.model.name" semantic conventions. It represents the marketing + // name for the device model + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human readable version of + // the device model rather than a machine readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") + + // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. It represents the name of + // the device manufacturer + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via + // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). + // iOS apps SHOULD hardcode the value `Apple`. + DeviceManufacturerKey = attribute.Key("device.manufacturer") +) + +// DeviceID returns an attribute KeyValue conforming to the "device.id" +// semantic conventions. It represents a unique identifier representing the +// device +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} + +// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. 
It represents the model +// identifier for the device +func DeviceModelIdentifier(val string) attribute.KeyValue { + return DeviceModelIdentifierKey.String(val) +} + +// DeviceModelName returns an attribute KeyValue conforming to the +// "device.model.name" semantic conventions. It represents the marketing name +// for the device model +func DeviceModelName(val string) attribute.KeyValue { + return DeviceModelNameKey.String(val) +} + +// DeviceManufacturer returns an attribute KeyValue conforming to the +// "device.manufacturer" semantic conventions. It represents the name of the +// device manufacturer +func DeviceManufacturer(val string) attribute.KeyValue { + return DeviceManufacturerKey.String(val) +} + +// A serverless instance. +const ( + // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic + // conventions. It represents the name of the single function that this + // runtime instance executes. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the + // FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes) + // span attributes). + // + // For some cloud providers, the above definition is ambiguous. The + // following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud + // providers/products: + // + // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name + // followed by a forward slash followed by the function name (this form + // can also be seen in the resource JSON for the function). + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider (see also the `cloud.resource_id` attribute). + FaaSNameKey = attribute.Key("faas.name") + + // FaaSVersionKey is the attribute Key conforming to the "faas.version" + // semantic conventions. It represents the immutable version of the + // function being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use: + // + // * **AWS Lambda:** The [function + // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) + // (an integer represented as a decimal string). + // * **Google Cloud Run:** The + // [revision](https://cloud.google.com/run/docs/managing/revisions) + // (i.e., the function name plus the revision suffix). + // * **Google Cloud Functions:** The value of the + // [`K_REVISION` environment + // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). + // * **Azure Functions:** Not applicable. Do not set this attribute. + FaaSVersionKey = attribute.Key("faas.version") + + // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" + // semantic conventions. It represents the execution environment ID as a + // string, that will be potentially reused for other invocations to the + // same function/function version.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note: * **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + + // FaaSMaxMemoryKey is the attribute Key conforming to the + // "faas.max_memory" semantic conventions. It represents the amount of + // memory available to the serverless function converted to Bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 134217728 + // Note: It's recommended to set this attribute since e.g. too little + // memory can easily stop a Java AWS Lambda function from working + // correctly. On AWS Lambda, the environment variable + // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must + // be multiplied by 1,048,576). + FaaSMaxMemoryKey = attribute.Key("faas.max_memory") +) + +// FaaSName returns an attribute KeyValue conforming to the "faas.name" +// semantic conventions. It represents the name of the single function that +// this runtime instance executes. +func FaaSName(val string) attribute.KeyValue { + return FaaSNameKey.String(val) +} + +// FaaSVersion returns an attribute KeyValue conforming to the +// "faas.version" semantic conventions. It represents the immutable version of +// the function being executed. +func FaaSVersion(val string) attribute.KeyValue { + return FaaSVersionKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the +// "faas.instance" semantic conventions. It represents the execution +// environment ID as a string, that will be potentially reused for other +// invocations to the same function/function version. +func FaaSInstance(val string) attribute.KeyValue { + return FaaSInstanceKey.String(val) +} + +// FaaSMaxMemory returns an attribute KeyValue conforming to the +// "faas.max_memory" semantic conventions. It represents the amount of memory +// available to the serverless function converted to Bytes. +func FaaSMaxMemory(val int) attribute.KeyValue { + return FaaSMaxMemoryKey.Int(val) +} + +// A host is defined as a general computing instance. +const ( + // HostIDKey is the attribute Key conforming to the "host.id" semantic + // conventions. It represents the unique host ID. For Cloud, this must be + // the instance_id assigned by the cloud provider. For non-containerized + // systems, this should be the `machine-id`. See the table below for the + // sources to use to determine the `machine-id` based on operating system. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'fdbf79e8af94cb7f9e8df36789187052' + HostIDKey = attribute.Key("host.id") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified + // hostname, or another name specified by the user. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'n1-standard-1' + HostTypeKey = attribute.Key("host.type") + + // HostArchKey is the attribute Key conforming to the "host.arch" semantic + // conventions. It represents the CPU architecture the host system is + // running on. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + HostArchKey = attribute.Key("host.arch") + + // HostImageNameKey is the attribute Key conforming to the + // "host.image.name" semantic conventions. It represents the name of the VM + // image or OS install the host was instantiated from. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + HostImageNameKey = attribute.Key("host.image.name") + + // HostImageIDKey is the attribute Key conforming to the "host.image.id" + // semantic conventions. It represents the VM image ID. For Cloud, this + // value is from the provider. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ami-07b06b442921831e5' + HostImageIDKey = attribute.Key("host.image.id") + + // HostImageVersionKey is the attribute Key conforming to the + // "host.image.version" semantic conventions. It represents the version + // string of the VM image as defined in [Version + // Attributes](README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0.1' + HostImageVersionKey = attribute.Key("host.image.version") +) + +var ( + // AMD64 + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + HostArchX86 = HostArchKey.String("x86") +) + +// HostID returns an attribute KeyValue conforming to the "host.id" semantic +// conventions. It represents the unique host ID. For Cloud, this must be the +// instance_id assigned by the cloud provider. For non-containerized systems, +// this should be the `machine-id`. See the table below for the sources to use +// to determine the `machine-id` based on operating system. +func HostID(val string) attribute.KeyValue { + return HostIDKey.String(val) +} + +// HostName returns an attribute KeyValue conforming to the "host.name" +// semantic conventions. It represents the name of the host. On Unix systems, +// it may contain what the hostname command returns, or the fully qualified +// hostname, or another name specified by the user. +func HostName(val string) attribute.KeyValue { + return HostNameKey.String(val) +} + +// HostType returns an attribute KeyValue conforming to the "host.type" +// semantic conventions. It represents the type of host. For Cloud, this must +// be the machine type. +func HostType(val string) attribute.KeyValue { + return HostTypeKey.String(val) +} + +// HostImageName returns an attribute KeyValue conforming to the +// "host.image.name" semantic conventions. It represents the name of the VM +// image or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue { + return HostImageNameKey.String(val) +} + +// HostImageID returns an attribute KeyValue conforming to the +// "host.image.id" semantic conventions. It represents the VM image ID. For +// Cloud, this value is from the provider. +func HostImageID(val string) attribute.KeyValue { + return HostImageIDKey.String(val) +} + +// HostImageVersion returns an attribute KeyValue conforming to the +// "host.image.version" semantic conventions. It represents the version string +// of the VM image as defined in [Version +// Attributes](README.md#version-attributes). +func HostImageVersion(val string) attribute.KeyValue { + return HostImageVersionKey.String(val) +} + +// A Kubernetes Cluster. +const ( + // K8SClusterNameKey is the attribute Key conforming to the + // "k8s.cluster.name" semantic conventions. It represents the name of the + // cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-cluster' + K8SClusterNameKey = attribute.Key("k8s.cluster.name") +) + +// K8SClusterName returns an attribute KeyValue conforming to the +// "k8s.cluster.name" semantic conventions. It represents the name of the +// cluster. +func K8SClusterName(val string) attribute.KeyValue { + return K8SClusterNameKey.String(val) +} + +// A Kubernetes Node object. +const ( + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" + // semantic conventions. It represents the UID of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + K8SNodeUIDKey = attribute.Key("k8s.node.uid") +) + +// K8SNodeName returns an attribute KeyValue conforming to the +// "k8s.node.name" semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// A Kubernetes Namespace. +const ( + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") +) + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// A Kubernetes Pod object. +const ( + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" + // semantic conventions. It represents the UID of the Pod.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" + // semantic conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-pod-autoconf' + K8SPodNameKey = attribute.Key("k8s.pod.name") +) + +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. +func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. +func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// A container in a +// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). +const ( + // K8SContainerNameKey is the attribute Key conforming to the + // "k8s.container.name" semantic conventions. It represents the name of the + // Container from the Pod specification; it must be unique within a Pod. + // The container runtime usually uses a different, globally unique name + // (`container.name`). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'redis' + K8SContainerNameKey = attribute.Key("k8s.container.name") + + // K8SContainerRestartCountKey is the attribute Key conforming to the + // "k8s.container.restart_count" semantic conventions. It represents the + // number of times the container was restarted. This attribute can be used + // to identify a particular container (running or stopped) within a + // container spec. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 0, 2 + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") +) + +// K8SContainerName returns an attribute KeyValue conforming to the +// "k8s.container.name" semantic conventions. It represents the name of the +// Container from the Pod specification; it must be unique within a Pod. The +// container runtime usually uses a different, globally unique name +// (`container.name`). +func K8SContainerName(val string) attribute.KeyValue { + return K8SContainerNameKey.String(val) +} + +// K8SContainerRestartCount returns an attribute KeyValue conforming to the +// "k8s.container.restart_count" semantic conventions. It represents the number +// of times the container was restarted. This attribute can be used to identify +// a particular container (running or stopped) within a container spec. +func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + +// A Kubernetes ReplicaSet object. +const ( + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of + // the ReplicaSet.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") +) + +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet. +func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// A Kubernetes Deployment object. +const ( + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of + // the Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") +) + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. +func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// A Kubernetes StatefulSet object. +const ( + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of + // the StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") +) + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// A Kubernetes DaemonSet object. +const ( + // K8SDaemonSetUIDKey is the attribute Key conforming to the + // "k8s.daemonset.uid" semantic conventions. It represents the UID of the + // DaemonSet. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") +) + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. +func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// A Kubernetes Job object. +const ( + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" + // semantic conventions. It represents the UID of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" + // semantic conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") +) + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// A Kubernetes CronJob object. +const ( + // K8SCronJobUIDKey is the attribute Key conforming to the + // "k8s.cronjob.uid" semantic conventions. It represents the UID of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SCronJobNameKey is the attribute Key conforming to the + // "k8s.cronjob.name" semantic conventions. It represents the name of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") +) + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the +// CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. +func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// The operating system (OS) on which the process represented by this resource +// is running. +const ( + // OSTypeKey is the attribute Key conforming to the "os.type" semantic + // conventions. 
It represents the operating system type. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + OSTypeKey = attribute.Key("os.type") + + // OSDescriptionKey is the attribute Key conforming to the "os.description" + // semantic conventions. It represents the human readable (not intended to + // be parsed) OS version information, like e.g. reported by `ver` or + // `lsb_release -a` commands. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 + // LTS' + OSDescriptionKey = attribute.Key("os.description") + + // OSNameKey is the attribute Key conforming to the "os.name" semantic + // conventions. It represents the human readable operating system name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iOS', 'Android', 'Ubuntu' + OSNameKey = attribute.Key("os.name") + + // OSVersionKey is the attribute Key conforming to the "os.version" + // semantic conventions. It represents the version string of the operating + // system as defined in [Version + // Attributes](../../resource/semantic_conventions/README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '14.2.1', '18.04.1' + OSVersionKey = attribute.Key("os.version") +) + +var ( + // Microsoft Windows + OSTypeWindows = OSTypeKey.String("windows") + // Linux + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + OSTypeZOS = OSTypeKey.String("z_os") +) + +// OSDescription returns an attribute KeyValue conforming to the +// "os.description" semantic conventions. It represents the human readable (not +// intended to be parsed) OS version information, like e.g. reported by `ver` +// or `lsb_release -a` commands. +func OSDescription(val string) attribute.KeyValue { + return OSDescriptionKey.String(val) +} + +// OSName returns an attribute KeyValue conforming to the "os.name" semantic +// conventions. It represents the human readable operating system name. +func OSName(val string) attribute.KeyValue { + return OSNameKey.String(val) +} + +// OSVersion returns an attribute KeyValue conforming to the "os.version" +// semantic conventions. It represents the version string of the operating +// system as defined in [Version +// Attributes](../../resource/semantic_conventions/README.md#version-attributes). +func OSVersion(val string) attribute.KeyValue { + return OSVersionKey.String(val) +} + +// An operating system process. +const ( + // ProcessPIDKey is the attribute Key conforming to the "process.pid" + // semantic conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent + // Process identifier (PID). 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessExecutableNameKey is the attribute Key conforming to the + // "process.executable.name" semantic conventions. It represents the name + // of the process executable. On Linux based systems, can be set to the + // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name + // of `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'otelcol' + ProcessExecutableNameKey = attribute.Key("process.executable.name") + + // ProcessExecutablePathKey is the attribute Key conforming to the + // "process.executable.path" semantic conventions. It represents the full + // path to the process executable. On Linux based systems, can be set to + // the target of `proc/[pid]/exe`. On Windows, can be set to the result of + // `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: '/usr/bin/cmd/otelcol' + ProcessExecutablePathKey = attribute.Key("process.executable.path") + + // ProcessCommandKey is the attribute Key conforming to the + // "process.command" semantic conventions. It represents the command used + // to launch the process (i.e. the command name). On Linux based systems, + // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can + // be set to the first parameter extracted from `GetCommandLineW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'cmd/otelcol' + ProcessCommandKey = attribute.Key("process.command") + + // ProcessCommandLineKey is the attribute Key conforming to the + // "process.command_line" semantic conventions. It represents the full + // command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of `GetCommandLineW`. + // Do not set this if you have to assemble it just for monitoring; use + // `process.command_args` instead. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' + ProcessCommandLineKey = attribute.Key("process.command_line") + + // ProcessCommandArgsKey is the attribute Key conforming to the + // "process.command_args" semantic conventions. It represents all the + // command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited + // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, + // this would be the full argv vector passed to `main`. + // + // Type: string[] + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'cmd/otecol', '--config=config.yaml' + ProcessCommandArgsKey = attribute.Key("process.command_args") + + // ProcessOwnerKey is the attribute Key conforming to the "process.owner" + // semantic conventions. It represents the username of the user that owns + // the process.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'root' + ProcessOwnerKey = attribute.Key("process.owner") +) + +// ProcessPID returns an attribute KeyValue conforming to the "process.pid" +// semantic conventions. It represents the process identifier (PID). +func ProcessPID(val int) attribute.KeyValue { + return ProcessPIDKey.Int(val) +} + +// ProcessParentPID returns an attribute KeyValue conforming to the +// "process.parent_pid" semantic conventions. It represents the parent Process +// identifier (PID). +func ProcessParentPID(val int) attribute.KeyValue { + return ProcessParentPIDKey.Int(val) +} + +// ProcessExecutableName returns an attribute KeyValue conforming to the +// "process.executable.name" semantic conventions. It represents the name of +// the process executable. On Linux based systems, can be set to the `Name` in +// `proc/[pid]/status`. On Windows, can be set to the base name of +// `GetProcessImageFileNameW`. +func ProcessExecutableName(val string) attribute.KeyValue { + return ProcessExecutableNameKey.String(val) +} + +// ProcessExecutablePath returns an attribute KeyValue conforming to the +// "process.executable.path" semantic conventions. It represents the full path +// to the process executable. On Linux based systems, can be set to the target +// of `proc/[pid]/exe`. On Windows, can be set to the result of +// `GetProcessImageFileNameW`. +func ProcessExecutablePath(val string) attribute.KeyValue { + return ProcessExecutablePathKey.String(val) +} + +// ProcessCommand returns an attribute KeyValue conforming to the +// "process.command" semantic conventions. It represents the command used to +// launch the process (i.e. the command name). On Linux based systems, can be +// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to +// the first parameter extracted from `GetCommandLineW`. +func ProcessCommand(val string) attribute.KeyValue { + return ProcessCommandKey.String(val) +} + +// ProcessCommandLine returns an attribute KeyValue conforming to the +// "process.command_line" semantic conventions. It represents the full command +// used to launch the process as a single string representing the full command. +// On Windows, can be set to the result of `GetCommandLineW`. Do not set this +// if you have to assemble it just for monitoring; use `process.command_args` +// instead. +func ProcessCommandLine(val string) attribute.KeyValue { + return ProcessCommandLineKey.String(val) +} + +// ProcessCommandArgs returns an attribute KeyValue conforming to the +// "process.command_args" semantic conventions. It represents all the +// command arguments (including the command/executable itself) as received by +// the process. On Linux-based systems (and some other Unixoid systems +// supporting procfs), can be set according to the list of null-delimited +// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, +// this would be the full argv vector passed to `main`. +func ProcessCommandArgs(val ...string) attribute.KeyValue { + return ProcessCommandArgsKey.StringSlice(val) +} + +// ProcessOwner returns an attribute KeyValue conforming to the +// "process.owner" semantic conventions. It represents the username of the user +// that owns the process. +func ProcessOwner(val string) attribute.KeyValue { + return ProcessOwnerKey.String(val) +} + +// The single (language) runtime instance which is monitored.
+const ( + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of + // the runtime of this process. For compiled native binaries, this SHOULD + // be the name of the compiler. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'OpenJDK Runtime Environment' + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the + // version of the runtime of this process, as returned by the runtime + // without modification. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '14.0.2' + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") +) + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. For compiled native binaries, this SHOULD be the +// name of the compiler. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without +// modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// A service instance. +const ( + // ServiceNameKey is the attribute Key conforming to the "service.name" + // semantic conventions. It represents the logical name of the service. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled + // services. If the value was not specified, SDKs MUST fallback to + // `unknown_service:` concatenated with + // [`process.executable.name`](process.md#process), e.g. + // `unknown_service:bash`. If `process.executable.name` is not available, + // the value MUST be set to `unknown_service`. + ServiceNameKey = attribute.Key("service.name") +) + +// ServiceName returns an attribute KeyValue conforming to the +// "service.name" semantic conventions. It represents the logical name of the +// service. +func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// A service instance. 
+const ( + // ServiceNamespaceKey is the attribute Key conforming to the + // "service.namespace" semantic conventions. It represents a namespace for + // `service.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group + // of services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` + // is expected to be unique for all services that have no explicit + // namespace defined (so the empty/unspecified namespace is simply one more + // valid namespace). Zero-length namespace string is assumed equal to + // unspecified namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID + // of the service instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'my-k8s-pod-deployment-1', + // '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be + // globally unique). The ID helps to distinguish instances of the same + // service that exist at the same time (e.g. instances of a horizontally + // scaled service). It is preferable for the ID to be persistent and stay + // the same for the lifetime of the service instance, however it is + // acceptable that the ID is ephemeral and changes during important + // lifetime events for the service (e.g. service restarts). If the service + // has no inherent unique ID that can be used as the value of this + // attribute it is recommended to generate a random Version 1 or Version 4 + // RFC 4122 UUID (services aiming for reproducible UUIDs may also use + // Version 5, see RFC 4122 for more recommendations). + ServiceInstanceIDKey = attribute.Key("service.instance.id") + + // ServiceVersionKey is the attribute Key conforming to the + // "service.version" semantic conventions. It represents the version string + // of the service API or implementation. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2.0.0' + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of +// the service instance. +func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. 
+const ( + // TelemetrySDKNameKey is the attribute Key conforming to the + // "telemetry.sdk.name" semantic conventions. It represents the name of the + // telemetry SDK as defined above. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'opentelemetry' + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the + // language of the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") +) + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version +// string of the telemetry SDK. +func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. +const ( + // TelemetryAutoVersionKey is the attribute Key conforming to the + // "telemetry.auto.version" semantic conventions. It represents the version + // string of the auto instrumentation agent, if used. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.2.3' + TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") +) + +// TelemetryAutoVersion returns an attribute KeyValue conforming to the +// "telemetry.auto.version" semantic conventions. It represents the version +// string of the auto instrumentation agent, if used. +func TelemetryAutoVersion(val string) attribute.KeyValue { + return TelemetryAutoVersionKey.String(val) +} + +// Resource describing the packaged software running the application code. Web +// engines are typically executed using process.runtime. +const ( + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. 
It represents the name of the web engine. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of + // the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") + + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the + // additional description of the web engine (e.g. detailed version and + // edition information). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") +) + +// WebEngineName returns an attribute KeyValue conforming to the +// "webengine.name" semantic conventions. It represents the name of the web +// engine. +func WebEngineName(val string) attribute.KeyValue { + return WebEngineNameKey.String(val) +} + +// WebEngineVersion returns an attribute KeyValue conforming to the +// "webengine.version" semantic conventions. It represents the version of the +// web engine. +func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition +// information). +func WebEngineDescription(val string) attribute.KeyValue { + return WebEngineDescriptionKey.String(val) +} + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's +// concepts. +const ( + // OTelScopeNameKey is the attribute Key conforming to the + // "otel.scope.name" semantic conventions. It represents the name of the + // instrumentation scope - (`InstrumentationScope.Name` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'io.opentelemetry.contrib.mongodb' + OTelScopeNameKey = attribute.Key("otel.scope.name") + + // OTelScopeVersionKey is the attribute Key conforming to the + // "otel.scope.version" semantic conventions. It represents the version of + // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.0.0' + OTelScopeVersionKey = attribute.Key("otel.scope.version") +) + +// OTelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). +func OTelScopeName(val string) attribute.KeyValue { + return OTelScopeNameKey.String(val) +} + +// OTelScopeVersion returns an attribute KeyValue conforming to the +// "otel.scope.version" semantic conventions. It represents the version of the +// instrumentation scope - (`InstrumentationScope.Version` in OTLP). +func OTelScopeVersion(val string) attribute.KeyValue { + return OTelScopeVersionKey.String(val) +} + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry +// Scope's concepts. 
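+//
+// The attributes in the following block are deprecated aliases for the scope
+// attributes defined above; a minimal migration sketch (illustrative only):
+//
+//	// deprecated: OTelLibraryName("io.opentelemetry.contrib.mongodb")
+//	// preferred:  OTelScopeName("io.opentelemetry.contrib.mongodb")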
+const (
+	// OTelLibraryNameKey is the attribute Key conforming to the
+	// "otel.library.name" semantic conventions. It is deprecated; use the
+	// `otel.scope.name` attribute instead.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 'io.opentelemetry.contrib.mongodb'
+	OTelLibraryNameKey = attribute.Key("otel.library.name")
+
+	// OTelLibraryVersionKey is the attribute Key conforming to the
+	// "otel.library.version" semantic conventions. It is deprecated; use the
+	// `otel.scope.version` attribute instead.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: '1.0.0'
+	OTelLibraryVersionKey = attribute.Key("otel.library.version")
+)
+
+// OTelLibraryName returns an attribute KeyValue conforming to the
+// "otel.library.name" semantic conventions. It is deprecated; use the
+// `otel.scope.name` attribute instead.
+func OTelLibraryName(val string) attribute.KeyValue {
+	return OTelLibraryNameKey.String(val)
+}
+
+// OTelLibraryVersion returns an attribute KeyValue conforming to the
+// "otel.library.version" semantic conventions. It is deprecated; use the
+// `otel.scope.version` attribute instead.
+func OTelLibraryVersion(val string) attribute.KeyValue {
+	return OTelLibraryVersionKey.String(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go
new file mode 100644
index 00000000..e449e5c3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+// SchemaURL is the schema URL that matches the version of the semantic conventions
+// that this package defines. Semconv packages starting from v1.4.0 must declare
+// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
+const SchemaURL = "https://opentelemetry.io/schemas/1.20.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go
new file mode 100644
index 00000000..85177414
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go
@@ -0,0 +1,2610 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The shared attributes used to report a single exception associated with a
+// span or log.
+const (
+	// ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+	// semantic conventions. It represents the type of the exception (its
+	// fully-qualified class name, if applicable). The dynamic type of the
+	// exception should be preferred over the static type in languages that
+	// support it.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'java.net.ConnectException', 'OSError'
+	ExceptionTypeKey = attribute.Key("exception.type")
+
+	// ExceptionMessageKey is the attribute Key conforming to the
+	// "exception.message" semantic conventions. It represents the exception
+	// message.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Division by zero', "Can't convert 'int' object to str
+	// implicitly"
+	ExceptionMessageKey = attribute.Key("exception.message")
+
+	// ExceptionStacktraceKey is the attribute Key conforming to the
+	// "exception.stacktrace" semantic conventions. It represents a stacktrace
+	// as a string in the natural representation for the language runtime. The
+	// representation is to be determined and documented by each language SIG.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
+	// exception\\n at '
+	// 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+	// 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+	// 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+	ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+)
+
+// ExceptionType returns an attribute KeyValue conforming to the
+// "exception.type" semantic conventions. It represents the type of the
+// exception (its fully-qualified class name, if applicable). The dynamic type
+// of the exception should be preferred over the static type in languages that
+// support it.
+func ExceptionType(val string) attribute.KeyValue {
+	return ExceptionTypeKey.String(val)
+}
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception
+// message.
+func ExceptionMessage(val string) attribute.KeyValue {
+	return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func ExceptionStacktrace(val string) attribute.KeyValue {
+	return ExceptionStacktraceKey.String(val)
+}
+
+// The attributes described in this section are rather generic. They may be
+// used in any Log Record they apply to.
+const (
+	// LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
+	// semantic conventions. It represents a unique identifier for the Log
+	// Record.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
+	// Note: If an id is provided, other log records with the same id will be
+	// considered duplicates and can be removed safely. This means that two
+	// distinguishable log records MUST have different values.
+	// The id MAY be a [Universally Unique Lexicographically Sortable
+	// Identifier (ULID)](https://github.com/ulid/spec), but other identifiers
+	// (e.g. UUID) may be used as needed.
+	LogRecordUIDKey = attribute.Key("log.record.uid")
+)
+
+// LogRecordUID returns an attribute KeyValue conforming to the
+// "log.record.uid" semantic conventions. It represents a unique identifier for
+// the Log Record.
+func LogRecordUID(val string) attribute.KeyValue {
+	return LogRecordUIDKey.String(val)
+}
+
+// Span attributes used by AWS Lambda (in addition to general `faas`
+// attributes).
+const (
+	// AWSLambdaInvokedARNKey is the attribute Key conforming to the
+	// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+	// invoked ARN as provided on the `Context` passed to the function
+	// (`Lambda-Runtime-Invoked-Function-ARN` header on the
+	// `/runtime/invocation/next` request, where applicable).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
+	// Note: This may be different from `cloud.resource_id` if an alias is
+	// involved.
+	AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+)
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+// invoked ARN as provided on the `Context` passed to the function
+// (`Lambda-Runtime-Invoked-Function-ARN` header on the
+// `/runtime/invocation/next` request, where applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+	return AWSLambdaInvokedARNKey.String(val)
+}
+
+// Attributes for CloudEvents. CloudEvents is a specification on how to define
+// event data in a standard way. These attributes can be attached to spans when
+// performing operations with CloudEvents, regardless of the protocol being
+// used.
+const (
+	// CloudeventsEventIDKey is the attribute Key conforming to the
+	// "cloudevents.event_id" semantic conventions. It represents the
+	// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+	// which uniquely identifies the event.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+	CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+	// CloudeventsEventSourceKey is the attribute Key conforming to the
+	// "cloudevents.event_source" semantic conventions. It represents the
+	// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+	// which identifies the context in which an event happened.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'https://github.com/cloudevents',
+	// '/cloudevents/spec/pull/123', 'my-service'
+	CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+	// CloudeventsEventSpecVersionKey is the attribute Key conforming to the
+	// "cloudevents.event_spec_version" semantic conventions. It represents the
+	// [version of the CloudEvents
+	// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+	// which the event uses.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '1.0'
+	CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+	// CloudeventsEventTypeKey is the attribute Key conforming to the
+	// "cloudevents.event_type" semantic conventions.
It represents the
+	// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+	// which contains a value describing the type of event related to the
+	// originating occurrence.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'com.github.pull_request.opened',
+	// 'com.example.object.deleted.v2'
+	CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+
+	// CloudeventsEventSubjectKey is the attribute Key conforming to the
+	// "cloudevents.event_subject" semantic conventions. It represents the
+	// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+	// of the event in the context of the event producer (identified by
+	// source).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'mynewfile.jpg'
+	CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+)
+
+// CloudeventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the
+// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+// which uniquely identifies the event.
+func CloudeventsEventID(val string) attribute.KeyValue {
+	return CloudeventsEventIDKey.String(val)
+}
+
+// CloudeventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the
+// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+// which identifies the context in which an event happened.
+func CloudeventsEventSource(val string) attribute.KeyValue {
+	return CloudeventsEventSourceKey.String(val)
+}
+
+// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
+// the "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents
+// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+// which the event uses.
+func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
+	return CloudeventsEventSpecVersionKey.String(val)
+}
+
+// CloudeventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+// which contains a value describing the type of event related to the
+// originating occurrence.
+func CloudeventsEventType(val string) attribute.KeyValue {
+	return CloudeventsEventTypeKey.String(val)
+}
+
+// CloudeventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the
+// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+// of the event in the context of the event producer (identified by source).
+func CloudeventsEventSubject(val string) attribute.KeyValue {
+	return CloudeventsEventSubjectKey.String(val)
+}
+
+// Semantic conventions for the OpenTracing Shim
+const (
+	// OpentracingRefTypeKey is the attribute Key conforming to the
+	// "opentracing.ref_type" semantic conventions. It represents the
+	// parent-child Reference type.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: The causal relationship between a child Span and a parent Span.
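+	//
+	// For example (illustrative only; the span setup is assumed, not part of
+	// the generated code):
+	//
+	//	span.SetAttributes(OpentracingRefTypeChildOf)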
+	OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+var (
+	// The parent Span depends on the child Span in some capacity
+	OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
+	// The parent Span does not depend in any way on the result of the child Span
+	OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
+)
+
+// The attributes used to perform database client calls.
+const (
+	// DBSystemKey is the attribute Key conforming to the "db.system" semantic
+	// conventions. It represents an identifier for the database management
+	// system (DBMS) product being used. See below for a list of well-known
+	// identifiers.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	DBSystemKey = attribute.Key("db.system")
+
+	// DBConnectionStringKey is the attribute Key conforming to the
+	// "db.connection_string" semantic conventions. It represents the
+	// connection string used to connect to the database. It is recommended to
+	// remove embedded credentials.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
+	DBConnectionStringKey = attribute.Key("db.connection_string")
+
+	// DBUserKey is the attribute Key conforming to the "db.user" semantic
+	// conventions. It represents the username for accessing the database.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'readonly_user', 'reporting_user'
+	DBUserKey = attribute.Key("db.user")
+
+	// DBJDBCDriverClassnameKey is the attribute Key conforming to the
+	// "db.jdbc.driver_classname" semantic conventions. It represents the
+	// fully-qualified class name of the [Java Database Connectivity
+	// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/)
+	// driver used to connect.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'org.postgresql.Driver',
+	// 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
+	DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
+
+	// DBNameKey is the attribute Key conforming to the "db.name" semantic
+	// conventions. It represents the name of the database being accessed. For
+	// commands that switch the database, this should be set to the target
+	// database (even if the command fails).
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (If applicable.)
+	// Stability: stable
+	// Examples: 'customers', 'main'
+	// Note: In some SQL databases, the database name to be used is called
+	// "schema name". In case there are multiple layers that could be
+	// considered for database name (e.g. Oracle instance name and schema
+	// name), the database name to be used is the more specific layer (e.g.
+	// Oracle schema name).
+	DBNameKey = attribute.Key("db.name")
+
+	// DBStatementKey is the attribute Key conforming to the "db.statement"
+	// semantic conventions. It represents the database statement being
+	// executed.
+	//
+	// Type: string
+	// RequirementLevel: Recommended (Should be collected by default only if
+	// there is sanitization that excludes sensitive information.)
+	// Stability: stable
+	// Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
+	DBStatementKey = attribute.Key("db.statement")
+
+	// DBOperationKey is the attribute Key conforming to the "db.operation"
+	// semantic conventions.
It represents the name of the operation being + // executed, e.g. the [MongoDB command + // name](https://docs.mongodb.com/manual/reference/command/#database-operations) + // such as `findAndModify`, or the SQL keyword. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If `db.statement` is not + // applicable.) + // Stability: stable + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: When setting this to an SQL keyword, it is not recommended to + // attempt any client-side parsing of `db.statement` just to get this + // property, but it should be set if the operation name is provided by the + // library being instrumented. If the SQL statement has an ambiguous + // operation, or performs more than one operation, this value may be + // omitted. + DBOperationKey = attribute.Key("db.operation") +) + +var ( + // Some other SQL database. Fallback only. See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // Microsoft SQL Server Compact + DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = 
DBSystemKey.String("couchbase")
+	// CouchDB
+	DBSystemCouchDB = DBSystemKey.String("couchdb")
+	// Microsoft Azure Cosmos DB
+	DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
+	// Amazon DynamoDB
+	DBSystemDynamoDB = DBSystemKey.String("dynamodb")
+	// Neo4j
+	DBSystemNeo4j = DBSystemKey.String("neo4j")
+	// Apache Geode
+	DBSystemGeode = DBSystemKey.String("geode")
+	// Elasticsearch
+	DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
+	// Memcached
+	DBSystemMemcached = DBSystemKey.String("memcached")
+	// CockroachDB
+	DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
+	// OpenSearch
+	DBSystemOpensearch = DBSystemKey.String("opensearch")
+	// ClickHouse
+	DBSystemClickhouse = DBSystemKey.String("clickhouse")
+	// Cloud Spanner
+	DBSystemSpanner = DBSystemKey.String("spanner")
+	// Trino
+	DBSystemTrino = DBSystemKey.String("trino")
+)
+
+// DBConnectionString returns an attribute KeyValue conforming to the
+// "db.connection_string" semantic conventions. It represents the connection
+// string used to connect to the database. It is recommended to remove embedded
+// credentials.
+func DBConnectionString(val string) attribute.KeyValue {
+	return DBConnectionStringKey.String(val)
+}
+
+// DBUser returns an attribute KeyValue conforming to the "db.user" semantic
+// conventions. It represents the username for accessing the database.
+func DBUser(val string) attribute.KeyValue {
+	return DBUserKey.String(val)
+}
+
+// DBJDBCDriverClassname returns an attribute KeyValue conforming to the
+// "db.jdbc.driver_classname" semantic conventions. It represents the
+// fully-qualified class name of the [Java Database Connectivity
+// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
+// used to connect.
+func DBJDBCDriverClassname(val string) attribute.KeyValue {
+	return DBJDBCDriverClassnameKey.String(val)
+}
+
+// DBName returns an attribute KeyValue conforming to the "db.name" semantic
+// conventions. It represents the name of the database being accessed. For
+// commands that switch the database, this should be set to the target
+// database (even if the command fails).
+func DBName(val string) attribute.KeyValue {
+	return DBNameKey.String(val)
+}
+
+// DBStatement returns an attribute KeyValue conforming to the
+// "db.statement" semantic conventions. It represents the database statement
+// being executed.
+func DBStatement(val string) attribute.KeyValue {
+	return DBStatementKey.String(val)
+}
+
+// DBOperation returns an attribute KeyValue conforming to the
+// "db.operation" semantic conventions. It represents the name of the operation
+// being executed, e.g. the [MongoDB command
+// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+// such as `findAndModify`, or the SQL keyword.
+func DBOperation(val string) attribute.KeyValue {
+	return DBOperationKey.String(val)
+}
+
+// Connection-level attributes for Microsoft SQL Server
+const (
+	// DBMSSQLInstanceNameKey is the attribute Key conforming to the
+	// "db.mssql.instance_name" semantic conventions. It represents the
+	// Microsoft SQL Server [instance
+	// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+	// being connected to. This name is used to determine the port of a named
+	// instance.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'MSSQLSERVER'
+	// Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no
+	// longer required (but still recommended if non-standard).
+	DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
+)
+
+// DBMSSQLInstanceName returns an attribute KeyValue conforming to the
+// "db.mssql.instance_name" semantic conventions. It represents the Microsoft
+// SQL Server [instance
+// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+// being connected to. This name is used to determine the port of a named instance.
+func DBMSSQLInstanceName(val string) attribute.KeyValue {
+	return DBMSSQLInstanceNameKey.String(val)
+}
+
+// Call-level attributes for Cassandra
+const (
+	// DBCassandraPageSizeKey is the attribute Key conforming to the
+	// "db.cassandra.page_size" semantic conventions. It represents the fetch
+	// size used for paging, i.e. how many rows will be returned at once.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 5000
+	DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+
+	// DBCassandraConsistencyLevelKey is the attribute Key conforming to the
+	// "db.cassandra.consistency_level" semantic conventions. It represents the
+	// consistency level of the query. Based on consistency values from
+	// [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+
+	// DBCassandraTableKey is the attribute Key conforming to the
+	// "db.cassandra.table" semantic conventions. It represents the name of the
+	// primary table that the operation is acting upon, including the keyspace
+	// name (if applicable).
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: stable
+	// Examples: 'mytable'
+	// Note: This mirrors the db.sql.table attribute but references cassandra
+	// rather than sql. It is not recommended to attempt any client-side
+	// parsing of `db.statement` just to get this property, but it should be
+	// set if it is provided by the library being instrumented. If the
+	// operation is acting upon an anonymous table, or more than one table,
+	// this value MUST NOT be set.
+	DBCassandraTableKey = attribute.Key("db.cassandra.table")
+
+	// DBCassandraIdempotenceKey is the attribute Key conforming to the
+	// "db.cassandra.idempotence" semantic conventions. It represents whether
+	// or not the query is idempotent.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+
+	// DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
+	// to the "db.cassandra.speculative_execution_count" semantic conventions.
+	// It represents the number of times a query was speculatively executed.
+	// Not set or `0` if the query was not executed speculatively.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 0, 2
+	DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+
+	// DBCassandraCoordinatorIDKey is the attribute Key conforming to the
+	// "db.cassandra.coordinator.id" semantic conventions. It represents the ID
+	// of the coordinating node for a query.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+	DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+
+	// DBCassandraCoordinatorDCKey is the attribute Key conforming to the
+	// "db.cassandra.coordinator.dc" semantic conventions. It represents the
+	// data center of the coordinating node for a query.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'us-west-2'
+	DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+)
+
+var (
+	// all
+	DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+	// each_quorum
+	DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+	// quorum
+	DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+	// local_quorum
+	DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+	// one
+	DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+	// two
+	DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+	// three
+	DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+	// local_one
+	DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+	// any
+	DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+	// serial
+	DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+	// local_serial
+	DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+// DBCassandraPageSize returns an attribute KeyValue conforming to the
+// "db.cassandra.page_size" semantic conventions. It represents the fetch size
+// used for paging, i.e. how many rows will be returned at once.
+func DBCassandraPageSize(val int) attribute.KeyValue {
+	return DBCassandraPageSizeKey.Int(val)
+}
+
+// DBCassandraTable returns an attribute KeyValue conforming to the
+// "db.cassandra.table" semantic conventions. It represents the name of the
+// primary table that the operation is acting upon, including the keyspace name
+// (if applicable).
+func DBCassandraTable(val string) attribute.KeyValue {
+	return DBCassandraTableKey.String(val)
+}
+
+// DBCassandraIdempotence returns an attribute KeyValue conforming to the
+// "db.cassandra.idempotence" semantic conventions. It represents whether
+// or not the query is idempotent.
+func DBCassandraIdempotence(val bool) attribute.KeyValue {
+	return DBCassandraIdempotenceKey.Bool(val)
+}
+
+// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
+// conforming to the "db.cassandra.speculative_execution_count" semantic
+// conventions. It represents the number of times a query was speculatively
+// executed. Not set or `0` if the query was not executed speculatively.
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+	return DBCassandraSpeculativeExecutionCountKey.Int(val)
+}
+
+// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
+// the coordinating node for a query.
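+//
+// For example (illustrative only; the span is assumed, and the value is taken
+// from the examples above):
+//
+//	span.SetAttributes(DBCassandraCoordinatorID("be13faa2-8574-4d71-926d-27f16cf8a7af"))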
+func DBCassandraCoordinatorID(val string) attribute.KeyValue { + return DBCassandraCoordinatorIDKey.String(val) +} + +// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.dc" semantic conventions. It represents the data +// center of the coordinating node for a query. +func DBCassandraCoordinatorDC(val string) attribute.KeyValue { + return DBCassandraCoordinatorDCKey.String(val) +} + +// Call-level attributes for Redis +const ( + // DBRedisDBIndexKey is the attribute Key conforming to the + // "db.redis.database_index" semantic conventions. It represents the index + // of the database being accessed as used in the [`SELECT` + // command](https://redis.io/commands/select), provided as an integer. To + // be used instead of the generic `db.name` attribute. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If other than the default + // database (`0`).) + // Stability: stable + // Examples: 0, 1, 15 + DBRedisDBIndexKey = attribute.Key("db.redis.database_index") +) + +// DBRedisDBIndex returns an attribute KeyValue conforming to the +// "db.redis.database_index" semantic conventions. It represents the index of +// the database being accessed as used in the [`SELECT` +// command](https://redis.io/commands/select), provided as an integer. To be +// used instead of the generic `db.name` attribute. +func DBRedisDBIndex(val int) attribute.KeyValue { + return DBRedisDBIndexKey.Int(val) +} + +// Call-level attributes for MongoDB +const ( + // DBMongoDBCollectionKey is the attribute Key conforming to the + // "db.mongodb.collection" semantic conventions. It represents the + // collection being accessed within the database stated in `db.name`. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'customers', 'products' + DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") +) + +// DBMongoDBCollection returns an attribute KeyValue conforming to the +// "db.mongodb.collection" semantic conventions. It represents the collection +// being accessed within the database stated in `db.name`. +func DBMongoDBCollection(val string) attribute.KeyValue { + return DBMongoDBCollectionKey.String(val) +} + +// Call-level attributes for SQL databases +const ( + // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" + // semantic conventions. It represents the name of the primary table that + // the operation is acting upon, including the database name (if + // applicable). + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting + // upon an anonymous table, or more than one table, this value MUST NOT be + // set. + DBSQLTableKey = attribute.Key("db.sql.table") +) + +// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" +// semantic conventions. It represents the name of the primary table that the +// operation is acting upon, including the database name (if applicable). +func DBSQLTable(val string) attribute.KeyValue { + return DBSQLTableKey.String(val) +} + +// Call-level attributes for Cosmos DB. +const ( + // DBCosmosDBClientIDKey is the attribute Key conforming to the + // "db.cosmosdb.client_id" semantic conventions. 
It represents the unique + // Cosmos client instance id. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' + DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") + + // DBCosmosDBOperationTypeKey is the attribute Key conforming to the + // "db.cosmosdb.operation_type" semantic conventions. It represents the + // cosmosDB Operation Type. + // + // Type: Enum + // RequirementLevel: ConditionallyRequired (when performing one of the + // operations in this list) + // Stability: stable + DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") + + // DBCosmosDBConnectionModeKey is the attribute Key conforming to the + // "db.cosmosdb.connection_mode" semantic conventions. It represents the + // cosmos client connection mode. + // + // Type: Enum + // RequirementLevel: ConditionallyRequired (if not `direct` (or pick gw as + // default)) + // Stability: stable + DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") + + // DBCosmosDBContainerKey is the attribute Key conforming to the + // "db.cosmosdb.container" semantic conventions. It represents the cosmos + // DB container name. + // + // Type: string + // RequirementLevel: ConditionallyRequired (if available) + // Stability: stable + // Examples: 'anystring' + DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container") + + // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the + // "db.cosmosdb.request_content_length" semantic conventions. It represents + // the request payload size in bytes + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") + + // DBCosmosDBStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos + // DB status code. + // + // Type: int + // RequirementLevel: ConditionallyRequired (if response was received) + // Stability: stable + // Examples: 200, 201 + DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") + + // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.sub_status_code" semantic conventions. It represents the + // cosmos DB sub status code. + // + // Type: int + // RequirementLevel: ConditionallyRequired (when response was received and + // contained sub-code.) + // Stability: stable + // Examples: 1000, 1002 + DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") + + // DBCosmosDBRequestChargeKey is the attribute Key conforming to the + // "db.cosmosdb.request_charge" semantic conventions. 
It represents the RU
+	// consumed for that operation.
+	//
+	// Type: double
+	// RequirementLevel: ConditionallyRequired (when available)
+	// Stability: stable
+	// Examples: 46.18, 1.0
+	DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge")
+)
+
+var (
+	// invalid
+	DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid")
+	// create
+	DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create")
+	// patch
+	DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch")
+	// read
+	DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read")
+	// read_feed
+	DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed")
+	// delete
+	DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete")
+	// replace
+	DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace")
+	// execute
+	DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute")
+	// query
+	DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query")
+	// head
+	DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head")
+	// head_feed
+	DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed")
+	// upsert
+	DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert")
+	// batch
+	DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch")
+	// query_plan
+	DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan")
+	// execute_javascript
+	DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript")
+)
+
+var (
+	// Gateway (HTTP) connections mode
+	DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway")
+	// Direct connection
+	DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct")
+)
+
+// DBCosmosDBClientID returns an attribute KeyValue conforming to the
+// "db.cosmosdb.client_id" semantic conventions. It represents the unique
+// Cosmos client instance id.
+func DBCosmosDBClientID(val string) attribute.KeyValue {
+	return DBCosmosDBClientIDKey.String(val)
+}
+
+// DBCosmosDBContainer returns an attribute KeyValue conforming to the
+// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB
+// container name.
+func DBCosmosDBContainer(val string) attribute.KeyValue {
+	return DBCosmosDBContainerKey.String(val)
+}
+
+// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming
+// to the "db.cosmosdb.request_content_length" semantic conventions. It
+// represents the request payload size in bytes.
+func DBCosmosDBRequestContentLength(val int) attribute.KeyValue {
+	return DBCosmosDBRequestContentLengthKey.Int(val)
+}
+
+// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB
+// status code.
+func DBCosmosDBStatusCode(val int) attribute.KeyValue {
+	return DBCosmosDBStatusCodeKey.Int(val)
+}
+
+// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos
+// DB sub status code.
+func DBCosmosDBSubStatusCode(val int) attribute.KeyValue {
+	return DBCosmosDBSubStatusCodeKey.Int(val)
+}
+
+// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the
+// "db.cosmosdb.request_charge" semantic conventions.
It represents the RU
+// consumed for that operation.
+func DBCosmosDBRequestCharge(val float64) attribute.KeyValue {
+	return DBCosmosDBRequestChargeKey.Float64(val)
+}
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's
+// concepts.
+const (
+	// OTelStatusCodeKey is the attribute Key conforming to the
+	// "otel.status_code" semantic conventions. It represents the name of the
+	// code, either "OK" or "ERROR". MUST NOT be set if the status code is
+	// UNSET.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	OTelStatusCodeKey = attribute.Key("otel.status_code")
+
+	// OTelStatusDescriptionKey is the attribute Key conforming to the
+	// "otel.status_description" semantic conventions. It represents the
+	// description of the Status if it has a value, otherwise not set.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'resource not found'
+	OTelStatusDescriptionKey = attribute.Key("otel.status_description")
+)
+
+var (
+	// The operation has been validated by an Application developer or Operator to have completed successfully
+	OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
+	// The operation contains an error
+	OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
+)
+
+// OTelStatusDescription returns an attribute KeyValue conforming to the
+// "otel.status_description" semantic conventions. It represents the
+// description of the Status if it has a value, otherwise not set.
+func OTelStatusDescription(val string) attribute.KeyValue {
+	return OTelStatusDescriptionKey.String(val)
+}
+
+// This semantic convention describes an instance of a function that runs
+// without provisioning or managing of servers (also known as serverless
+// functions or Function as a Service (FaaS)) with spans.
+const (
+	// FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
+	// semantic conventions. It represents the type of the trigger which caused
+	// this function invocation.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Note: For the server/consumer span on the incoming side,
+	// `faas.trigger` MUST be set.
+	//
+	// Clients invoking FaaS instances usually cannot set `faas.trigger`,
+	// since they would typically need to look in the payload to determine
+	// the event type. If clients set it, it should be the same as the
+	// trigger that the corresponding incoming span would have (i.e., this
+	// has nothing to do with the underlying transport used to make the API
+	// call to invoke the lambda, which is often HTTP).
+	FaaSTriggerKey = attribute.Key("faas.trigger")
+
+	// FaaSInvocationIDKey is the attribute Key conforming to the
+	// "faas.invocation_id" semantic conventions. It represents the invocation
+	// ID of the current function invocation.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+	FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
+)
+
+var (
+	// A response to some data source operation such as a database or filesystem read/write
+	FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+	// To provide an answer to an inbound HTTP request
+	FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+	// A function is set to be executed when messages are sent to a messaging system
+	FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
+	// A function is scheduled to be executed regularly
+	FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+	// If none of the others apply
+	FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
+// FaaSInvocationID returns an attribute KeyValue conforming to the
+// "faas.invocation_id" semantic conventions. It represents the invocation ID
+// of the current function invocation.
+func FaaSInvocationID(val string) attribute.KeyValue {
+	return FaaSInvocationIDKey.String(val)
+}
+
+// Semantic Convention for FaaS triggered as a response to some data source
+// operation such as a database or filesystem read/write.
+const (
+	// FaaSDocumentCollectionKey is the attribute Key conforming to the
+	// "faas.document.collection" semantic conventions. It represents the name
+	// of the source on which the triggering operation was performed. For
+	// example, in Cloud Storage or S3 corresponds to the bucket name, and in
+	// Cosmos DB to the database name.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'myBucketName', 'myDBName'
+	FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+	// FaaSDocumentOperationKey is the attribute Key conforming to the
+	// "faas.document.operation" semantic conventions. It represents the type
+	// of the operation that was performed on the data.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+	// FaaSDocumentTimeKey is the attribute Key conforming to the
+	// "faas.document.time" semantic conventions. It represents a string
+	// containing the time when the data was accessed in the [ISO
+	// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+	// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '2020-01-23T13:47:06Z'
+	FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+
+	// FaaSDocumentNameKey is the attribute Key conforming to the
+	// "faas.document.name" semantic conventions. It represents the document
+	// name/table subjected to the operation. For example, in Cloud Storage or
+	// S3 is the name of the file, and in Cosmos DB the table name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'myFile.txt', 'myTableName'
+	FaaSDocumentNameKey = attribute.Key("faas.document.name")
+)
+
+var (
+	// When a new object is created
+	FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+	// When an object is modified
+	FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+	// When an object is deleted
+	FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+// FaaSDocumentCollection returns an attribute KeyValue conforming to the
+// "faas.document.collection" semantic conventions.
It represents the name of +// the source on which the triggering operation was performed. For example, in +// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the +// database name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 +// is the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// Semantic Convention for FaaS scheduled to be executed regularly. +const ( + // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic + // conventions. It represents a string containing the function invocation + // time in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSTimeKey = attribute.Key("faas.time") + + // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron + // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0/5 * * * ? *' + FaaSCronKey = attribute.Key("faas.cron") +) + +// FaaSTime returns an attribute KeyValue conforming to the "faas.time" +// semantic conventions. It represents a string containing the function +// invocation time in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSTime(val string) attribute.KeyValue { + return FaaSTimeKey.String(val) +} + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" +// semantic conventions. It represents a string containing the schedule period +// as [Cron +// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// Contains additional attributes for incoming FaaS spans. +const ( + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the + // serverless function is executed for the first time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + FaaSColdstartKey = attribute.Key("faas.coldstart") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the +// "faas.coldstart" semantic conventions. It represents a boolean that is true +// if the serverless function is executed for the first time (aka cold-start). 
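+//
+// For example (illustrative only; the span is assumed, not part of the
+// generated code):
+//
+//	span.SetAttributes(FaaSColdstart(true))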
+func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// Contains additional attributes for outgoing FaaS spans. +const ( + // FaaSInvokedNameKey is the attribute Key conforming to the + // "faas.invoked_name" semantic conventions. It represents the name of the + // invoked function. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'my-function' + // Note: SHOULD be equal to the `faas.name` resource attribute of the + // invoked function. + FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + + // FaaSInvokedProviderKey is the attribute Key conforming to the + // "faas.invoked_provider" semantic conventions. It represents the cloud + // provider of the invoked function. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the + // invoked function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + + // FaaSInvokedRegionKey is the attribute Key conforming to the + // "faas.invoked_region" semantic conventions. It represents the cloud + // region of the invoked function. + // + // Type: string + // RequirementLevel: ConditionallyRequired (For some cloud providers, like + // AWS or GCP, the region in which a function is hosted is essential to + // uniquely identify the function and also part of its endpoint. Since it's + // part of the endpoint being called, the region is always known to + // clients. In these cases, `faas.invoked_region` MUST be set accordingly. + // If the region is unknown to the client or not required for identifying + // the invoked function, setting `faas.invoked_region` is optional.) + // Stability: stable + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the `cloud.region` resource attribute of the + // invoked function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +// FaaSInvokedName returns an attribute KeyValue conforming to the +// "faas.invoked_name" semantic conventions. It represents the name of the +// invoked function. +func FaaSInvokedName(val string) attribute.KeyValue { + return FaaSInvokedNameKey.String(val) +} + +// FaaSInvokedRegion returns an attribute KeyValue conforming to the +// "faas.invoked_region" semantic conventions. It represents the cloud region +// of the invoked function. +func FaaSInvokedRegion(val string) attribute.KeyValue { + return FaaSInvokedRegionKey.String(val) +} + +// Operations that access some remote service. +const ( + // PeerServiceKey is the attribute Key conforming to the "peer.service" + // semantic conventions. It represents the + // [`service.name`](../../resource/semantic_conventions/README.md#service) + // of the remote service. SHOULD be equal to the actual `service.name` + // resource attribute of the remote service if any. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'AuthTokenCache' + PeerServiceKey = attribute.Key("peer.service") +) + +// PeerService returns an attribute KeyValue conforming to the +// "peer.service" semantic conventions. It represents the +// [`service.name`](../../resource/semantic_conventions/README.md#service) of +// the remote service. SHOULD be equal to the actual `service.name` resource +// attribute of the remote service if any. +func PeerService(val string) attribute.KeyValue { + return PeerServiceKey.String(val) +} + +// These attributes may be used for any operation with an authenticated and/or +// authorized enduser. +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" + // semantic conventions. It represents the username or client_id extracted + // from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header + // in the inbound request from outside the system. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + + // EnduserRoleKey is the attribute Key conforming to the "enduser.role" + // semantic conventions. It represents the actual/assumed role the client + // is making the request under extracted from token or application security + // context. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + + // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" + // semantic conventions. It represents the scopes or granted authorities + // the client currently possesses extracted from token or application + // security context. The value would come from the scope associated with an + // [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute + // value in a [SAML 2.0 + // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the username or client_id extracted from +// the access token or +// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in +// the inbound request from outside the system. +func EnduserID(val string) attribute.KeyValue { + return EnduserIDKey.String(val) +} + +// EnduserRole returns an attribute KeyValue conforming to the +// "enduser.role" semantic conventions. It represents the actual/assumed role +// the client is making the request under extracted from token or application +// security context. +func EnduserRole(val string) attribute.KeyValue { + return EnduserRoleKey.String(val) +} + +// EnduserScope returns an attribute KeyValue conforming to the +// "enduser.scope" semantic conventions. It represents the scopes or granted +// authorities the client currently possesses extracted from token or +// application security context. The value would come from the scope associated +// with an [OAuth 2.0 Access +// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute +// value in a [SAML 2.0 +// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). 
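Under the same assumptions as the earlier sketch, the peer and enduser constructors might be combined like this on a server span:

    func recordCaller(span trace.Span) {
        span.SetAttributes(
            semconv.PeerService("AuthTokenCache"),             // service.name of the remote service
            semconv.EnduserID("username"),                     // from the access token
            semconv.EnduserRole("admin"),                      // actual/assumed role
            semconv.EnduserScope("read:message, write:files"), // granted authorities
        )
    }
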
+func EnduserScope(val string) attribute.KeyValue {
+ return EnduserScopeKey.String(val)
+}
+
+// These attributes may be used for any operation to store information about a
+// thread that started a span.
+const (
+ // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+ // conventions. It represents the current "managed" thread ID (as opposed
+ // to OS thread ID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ ThreadIDKey = attribute.Key("thread.id")
+
+ // ThreadNameKey is the attribute Key conforming to the "thread.name"
+ // semantic conventions. It represents the current thread name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'main'
+ ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id"
+// semantic conventions. It represents the current "managed" thread ID (as
+// opposed to OS thread ID).
+func ThreadID(val int) attribute.KeyValue {
+ return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+ return ThreadNameKey.String(val)
+}
+
+// These attributes allow reporting this unit of code and therefore provide
+// more context about the span.
+const (
+ // CodeFunctionKey is the attribute Key conforming to the "code.function"
+ // semantic conventions. It represents the method or function name, or
+ // equivalent (usually rightmost part of the code unit's name).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'serveRequest'
+ CodeFunctionKey = attribute.Key("code.function")
+
+ // CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
+ // semantic conventions. It represents the "namespace" within which
+ // `code.function` is defined. Usually the qualified class or module name,
+ // such that `code.namespace` + some separator + `code.function` form a
+ // unique identifier for the code unit.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'com.example.MyHTTPService'
+ CodeNamespaceKey = attribute.Key("code.namespace")
+
+ // CodeFilepathKey is the attribute Key conforming to the "code.filepath"
+ // semantic conventions. It represents the source code file name that
+ // identifies the code unit as uniquely as possible (preferably an absolute
+ // file path).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/usr/local/MyApplication/content_root/app/index.php'
+ CodeFilepathKey = attribute.Key("code.filepath")
+
+ // CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
+ // semantic conventions. It represents the line number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ CodeLineNumberKey = attribute.Key("code.lineno")
+
+ // CodeColumnKey is the attribute Key conforming to the "code.column"
+ // semantic conventions. It represents the column number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 16 + CodeColumnKey = attribute.Key("code.column") +) + +// CodeFunction returns an attribute KeyValue conforming to the +// "code.function" semantic conventions. It represents the method or function +// name, or equivalent (usually rightmost part of the code unit's name). +func CodeFunction(val string) attribute.KeyValue { + return CodeFunctionKey.String(val) +} + +// CodeNamespace returns an attribute KeyValue conforming to the +// "code.namespace" semantic conventions. It represents the "namespace" within +// which `code.function` is defined. Usually the qualified class or module +// name, such that `code.namespace` + some separator + `code.function` form a +// unique identifier for the code unit. +func CodeNamespace(val string) attribute.KeyValue { + return CodeNamespaceKey.String(val) +} + +// CodeFilepath returns an attribute KeyValue conforming to the +// "code.filepath" semantic conventions. It represents the source code file +// name that identifies the code unit as uniquely as possible (preferably an +// absolute file path). +func CodeFilepath(val string) attribute.KeyValue { + return CodeFilepathKey.String(val) +} + +// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" +// semantic conventions. It represents the line number in `code.filepath` best +// representing the operation. It SHOULD point within the code unit named in +// `code.function`. +func CodeLineNumber(val int) attribute.KeyValue { + return CodeLineNumberKey.Int(val) +} + +// CodeColumn returns an attribute KeyValue conforming to the "code.column" +// semantic conventions. It represents the column number in `code.filepath` +// best representing the operation. It SHOULD point within the code unit named +// in `code.function`. +func CodeColumn(val int) attribute.KeyValue { + return CodeColumnKey.Int(val) +} + +// Semantic Convention for HTTP Client +const ( + // HTTPURLKey is the attribute Key conforming to the "http.url" semantic + // conventions. It represents the full HTTP request URL in the form + // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is + // not transmitted over HTTP, but if it is known, it should be included + // nevertheless. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' + // Note: `http.url` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case the + // attribute's value should be `https://www.example.com/`. + HTTPURLKey = attribute.Key("http.url") + + // HTTPResendCountKey is the attribute Key conforming to the + // "http.resend_count" semantic conventions. It represents the ordinal + // number of request resending attempt (for any reason, including + // redirects). + // + // Type: int + // RequirementLevel: Recommended (if and only if request was retried.) + // Stability: stable + // Examples: 3 + // Note: The resend count SHOULD be updated each time an HTTP request gets + // resent by the client, regardless of what was the cause of the resending + // (e.g. redirection, authorization failure, 503 Server Unavailable, + // network issues, or any other). + HTTPResendCountKey = attribute.Key("http.resend_count") +) + +// HTTPURL returns an attribute KeyValue conforming to the "http.url" +// semantic conventions. 
It represents the full HTTP request URL in the form
+// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not
+// transmitted over HTTP, but if it is known, it should be included
+// nevertheless.
+func HTTPURL(val string) attribute.KeyValue {
+ return HTTPURLKey.String(val)
+}
+
+// HTTPResendCount returns an attribute KeyValue conforming to the
+// "http.resend_count" semantic conventions. It represents the ordinal number
+// of request resending attempt (for any reason, including redirects).
+func HTTPResendCount(val int) attribute.KeyValue {
+ return HTTPResendCountKey.Int(val)
+}
+
+// Semantic Convention for HTTP Server
+const (
+ // HTTPTargetKey is the attribute Key conforming to the "http.target"
+ // semantic conventions. It represents the full request target as passed in
+ // an HTTP request line or equivalent.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: '/users/12314/?q=ddds'
+ HTTPTargetKey = attribute.Key("http.target")
+
+ // HTTPClientIPKey is the attribute Key conforming to the "http.client_ip"
+ // semantic conventions. It represents the IP address of the original
+ // client behind all proxies, if known (e.g. from
+ // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '83.164.160.102'
+ // Note: This is not necessarily the same as `net.sock.peer.addr`, which
+ // would identify the network-level peer, which may be a proxy.
+ //
+ // This attribute should be set when a source of information different
+ // from the one used for `net.sock.peer.addr` is available, even if that
+ // other source just confirms the same value as `net.sock.peer.addr`.
+ // Rationale: For `net.sock.peer.addr`, one typically does not know if it
+ // comes from a proxy, reverse proxy, or the actual client. Setting
+ // `http.client_ip` when it's the same as `net.sock.peer.addr` means that
+ // one is at least somewhat confident that the address is not that of
+ // the closest proxy.
+ HTTPClientIPKey = attribute.Key("http.client_ip")
+)
+
+// HTTPTarget returns an attribute KeyValue conforming to the "http.target"
+// semantic conventions. It represents the full request target as passed in an
+// HTTP request line or equivalent.
+func HTTPTarget(val string) attribute.KeyValue {
+ return HTTPTargetKey.String(val)
+}
+
+// HTTPClientIP returns an attribute KeyValue conforming to the
+// "http.client_ip" semantic conventions. It represents the IP address of the
+// original client behind all proxies, if known (e.g. from
+// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
+func HTTPClientIP(val string) attribute.KeyValue {
+ return HTTPClientIPKey.String(val)
+}
+
+// The `aws` conventions apply to operations using the AWS SDK. They map
+// request or response parameters in AWS SDK API calls to attributes on a Span.
+// The conventions have been collected over time based on feedback from AWS
+// users of tracing and will continue to evolve as new interesting conventions
+// are found.
+// Some descriptions are also provided for populating general OpenTelemetry
+// semantic conventions based on these APIs.
+const (
+ // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
+ // semantic conventions. It represents the AWS request ID as returned in
+ // the response headers `x-amz-request-id` or `x-amz-requestid`. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' + AWSRequestIDKey = attribute.Key("aws.request_id") +) + +// AWSRequestID returns an attribute KeyValue conforming to the +// "aws.request_id" semantic conventions. It represents the AWS request ID as +// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. +func AWSRequestID(val string) attribute.KeyValue { + return AWSRequestIDKey.String(val) +} + +// Attributes that exist for multiple DynamoDB request types. +const ( + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys + // in the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response + // field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { + // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number }, "TableName": "string", + // "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to + // the "aws.dynamodb.item_collection_metrics" semantic conventions. It + // represents the JSON-serialized value of the `ItemCollectionMetrics` + // response field. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": + // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { + // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], + // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, + // "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to + // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It + // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` + // request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming + // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. + // It represents the value of the + // `ProvisionedThroughput.WriteCapacityUnits` request parameter. 
+ // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the + // value of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value + // of the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, + // RelatedItems, ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of + // the `Limit` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value + // of the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of + // the `Select` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") +) + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in +// the `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming +// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It +// represents the JSON-serialized value of the `ItemCollectionMetrics` response +// field. 
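A hedged sketch, same assumptions as above, setting several of these request-level parameters on a DynamoDB client span (the values are the documented examples, not live data):

    func recordDynamoDBQuery(span trace.Span) {
        span.SetAttributes(
            semconv.AWSDynamoDBTableNames("Users"),        // keys of the RequestItems field
            semconv.AWSDynamoDBIndexName("name_to_group"), // IndexName request parameter
            semconv.AWSDynamoDBSelect("ALL_ATTRIBUTES"),   // Select request parameter
            semconv.AWSDynamoDBLimit(10),                  // Limit request parameter
            semconv.AWSDynamoDBConsistentRead(true),       // ConsistentRead request parameter
        )
    }
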
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.ReadCapacityUnits` request parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.WriteCapacityUnits` request parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of +// the `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to +// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the +// value of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of +// the `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// DynamoDB.CreateTable +const ( + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_indexes" semantic conventions. 
It
+ // represents the JSON-serialized value of each item of the
+ // `GlobalSecondaryIndexes` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
+ // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
+ // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+
+ // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+ // `LocalSecondaryIndexes` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "IndexARN": "string", "IndexName": "string",
+ // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
+ AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+)
+
+// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
+// conventions. It represents the JSON-serialized value of each item of the
+// `GlobalSecondaryIndexes` request field.
+func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
+// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+// represents the JSON-serialized value of each item of the
+// `LocalSecondaryIndexes` request field.
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
+}
+
+// DynamoDB.ListTables
+const (
+ // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
+ // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
+ // the value of the `ExclusiveStartTableName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Users', 'CatsTable'
+ AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+
+ // AWSDynamoDBTableCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_count" semantic conventions. It represents the
+ // number of items in the `TableNames` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 20
+ AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+)
+
+// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
+// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
+// represents the value of the `ExclusiveStartTableName` request parameter.
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
+ return AWSDynamoDBExclusiveStartTableKey.String(val)
+}
+
+// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_count" semantic conventions. It represents the
+// number of items in the `TableNames` response parameter. 
+func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// DynamoDB.Query +const ( + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the + // value of the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") +) + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// DynamoDB.Scan +const ( + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of + // the `Segment` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the + // value of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of + // the `Count` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the + // value of the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") +) + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value +// of the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value +// of the `ScannedCount` response parameter. 
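A sketch under the same assumptions, pairing the Scan request parameters with the response counters defined in this block:

    func recordDynamoDBScan(span trace.Span, segment, total, count, scanned int) {
        span.SetAttributes(
            semconv.AWSDynamoDBSegment(segment),      // Segment request parameter
            semconv.AWSDynamoDBTotalSegments(total),  // TotalSegments request parameter
            semconv.AWSDynamoDBCount(count),          // Count response parameter
            semconv.AWSDynamoDBScannedCount(scanned), // ScannedCount response parameter
        )
    }
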
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+ return AWSDynamoDBScannedCountKey.Int(val)
+}
+
+// DynamoDB.UpdateTable
+const (
+ // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
+ // the "aws.dynamodb.attribute_definitions" semantic conventions. It
+ // represents the JSON-serialized value of each item in the
+ // `AttributeDefinitions` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+ AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+ // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
+ // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+ // conventions. It represents the JSON-serialized value of each item in
+ // the `GlobalSecondaryIndexUpdates` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+ // "ProvisionedThroughput": { "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+)
+
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
+// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
+// represents the JSON-serialized value of each item in the
+// `AttributeDefinitions` request field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
+}
+
+// Attributes that exist for S3 request types.
+const (
+ // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
+ // semantic conventions. It represents the S3 bucket name the request
+ // refers to. Corresponds to the `--bucket` parameter of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // operations.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'some-bucket-name'
+ // Note: The `bucket` attribute is applicable to all S3 operations that
+ // reference a bucket, i.e. that require the bucket name as a mandatory
+ // parameter.
+ // This applies to almost all S3 operations except `list-buckets`.
+ AWSS3BucketKey = attribute.Key("aws.s3.bucket")
+
+ // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
+ // conventions. It represents the S3 object key the request refers to.
+ // Corresponds to the `--key` parameter of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // operations.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'someFile.yml'
+ // Note: The `key` attribute is applicable to all object-related S3
+ // operations, i.e. 
that require the object key as a mandatory parameter. + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // - + // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) + // - + // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) + // - + // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) + // - + // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) + // - + // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3KeyKey = attribute.Key("aws.s3.key") + + // AWSS3CopySourceKey is the attribute Key conforming to the + // "aws.s3.copy_source" semantic conventions. It represents the source + // object (in the form `bucket`/`key`) for the copy operation. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'someFile.yml' + // Note: The `copy_source` attribute applies to S3 copy operations and + // corresponds to the `--copy-source` parameter + // of the [copy-object operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") + + // AWSS3UploadIDKey is the attribute Key conforming to the + // "aws.s3.upload_id" semantic conventions. It represents the upload ID + // that identifies the multipart upload. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' + // Note: The `upload_id` attribute applies to S3 multipart-upload + // operations and corresponds to the `--upload-id` parameter + // of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // multipart operations. 
+ // This applies in particular to the following operations: + // + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") + + // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" + // semantic conventions. It represents the delete request container that + // specifies the objects to be deleted. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' + // Note: The `delete` attribute is only applicable to the + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // operation. + // The `delete` attribute corresponds to the `--delete` parameter of the + // [delete-objects operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). + AWSS3DeleteKey = attribute.Key("aws.s3.delete") + + // AWSS3PartNumberKey is the attribute Key conforming to the + // "aws.s3.part_number" semantic conventions. It represents the part number + // of the part being uploaded in a multipart-upload operation. This is a + // positive integer between 1 and 10,000. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3456 + // Note: The `part_number` attribute is only applicable to the + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // and + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + // operations. + // The `part_number` attribute corresponds to the `--part-number` parameter + // of the + // [upload-part operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). + AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") +) + +// AWSS3Bucket returns an attribute KeyValue conforming to the +// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the +// request refers to. Corresponds to the `--bucket` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Bucket(val string) attribute.KeyValue { + return AWSS3BucketKey.String(val) +} + +// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" +// semantic conventions. It represents the S3 object key the request refers to. +// Corresponds to the `--key` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Key(val string) attribute.KeyValue { + return AWSS3KeyKey.String(val) +} + +// AWSS3CopySource returns an attribute KeyValue conforming to the +// "aws.s3.copy_source" semantic conventions. It represents the source object +// (in the form `bucket`/`key`) for the copy operation. 
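Same assumptions again; a sketch for an upload-part span in a multipart upload, where uploadID and part come from the caller:

    func recordS3UploadPart(span trace.Span, uploadID string, part int) {
        span.SetAttributes(
            semconv.AWSS3Bucket("some-bucket-name"), // --bucket parameter
            semconv.AWSS3Key("someFile.yml"),        // --key parameter
            semconv.AWSS3UploadID(uploadID),         // multipart upload ID
            semconv.AWSS3PartNumber(part),           // positive integer, 1 to 10,000
        )
    }
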
+func AWSS3CopySource(val string) attribute.KeyValue { + return AWSS3CopySourceKey.String(val) +} + +// AWSS3UploadID returns an attribute KeyValue conforming to the +// "aws.s3.upload_id" semantic conventions. It represents the upload ID that +// identifies the multipart upload. +func AWSS3UploadID(val string) attribute.KeyValue { + return AWSS3UploadIDKey.String(val) +} + +// AWSS3Delete returns an attribute KeyValue conforming to the +// "aws.s3.delete" semantic conventions. It represents the delete request +// container that specifies the objects to be deleted. +func AWSS3Delete(val string) attribute.KeyValue { + return AWSS3DeleteKey.String(val) +} + +// AWSS3PartNumber returns an attribute KeyValue conforming to the +// "aws.s3.part_number" semantic conventions. It represents the part number of +// the part being uploaded in a multipart-upload operation. This is a positive +// integer between 1 and 10,000. +func AWSS3PartNumber(val int) attribute.KeyValue { + return AWSS3PartNumberKey.Int(val) +} + +// Semantic conventions to apply when instrumenting the GraphQL implementation. +// They map GraphQL operations to attributes on a Span. +const ( + // GraphqlOperationNameKey is the attribute Key conforming to the + // "graphql.operation.name" semantic conventions. It represents the name of + // the operation being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'findBookByID' + GraphqlOperationNameKey = attribute.Key("graphql.operation.name") + + // GraphqlOperationTypeKey is the attribute Key conforming to the + // "graphql.operation.type" semantic conventions. It represents the type of + // the operation being executed. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'query', 'mutation', 'subscription' + GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") + + // GraphqlDocumentKey is the attribute Key conforming to the + // "graphql.document" semantic conventions. It represents the GraphQL + // document being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'query findBookByID { bookByID(id: ?) { name } }' + // Note: The value may be sanitized to exclude sensitive information. + GraphqlDocumentKey = attribute.Key("graphql.document") +) + +var ( + // GraphQL query + GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") + // GraphQL mutation + GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") + // GraphQL subscription + GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") +) + +// GraphqlOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. +func GraphqlOperationName(val string) attribute.KeyValue { + return GraphqlOperationNameKey.String(val) +} + +// GraphqlDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. It represents the GraphQL document +// being executed. +func GraphqlDocument(val string) attribute.KeyValue { + return GraphqlDocumentKey.String(val) +} + +// General attributes used in messaging systems. +const ( + // MessagingSystemKey is the attribute Key conforming to the + // "messaging.system" semantic conventions. It represents a string + // identifying the messaging system. 
+ // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' + MessagingSystemKey = attribute.Key("messaging.system") + + // MessagingOperationKey is the attribute Key conforming to the + // "messaging.operation" semantic conventions. It represents a string + // identifying the kind of messaging operation as defined in the [Operation + // names](#operation-names) section above. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: If a custom value is used, it MUST be of low cardinality. + MessagingOperationKey = attribute.Key("messaging.operation") + + // MessagingBatchMessageCountKey is the attribute Key conforming to the + // "messaging.batch.message_count" semantic conventions. It represents the + // number of messages sent, received, or processed in the scope of the + // batching operation. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the span describes an + // operation on a batch of messages.) + // Stability: stable + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on + // spans that operate with a single message. When a messaging client + // library supports both batch and single-message API for the same + // operation, instrumentations SHOULD use `messaging.batch.message_count` + // for batching APIs and SHOULD NOT use it for single-message APIs. + MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") +) + +var ( + // publish + MessagingOperationPublish = MessagingOperationKey.String("publish") + // receive + MessagingOperationReceive = MessagingOperationKey.String("receive") + // process + MessagingOperationProcess = MessagingOperationKey.String("process") +) + +// MessagingSystem returns an attribute KeyValue conforming to the +// "messaging.system" semantic conventions. It represents a string identifying +// the messaging system. +func MessagingSystem(val string) attribute.KeyValue { + return MessagingSystemKey.String(val) +} + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to +// the "messaging.batch.message_count" semantic conventions. It represents the +// number of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// Semantic convention for a consumer of messages received from a messaging +// system +const ( + // MessagingConsumerIDKey is the attribute Key conforming to the + // "messaging.consumer.id" semantic conventions. It represents the + // identifier for the consumer receiving a message. For Kafka, set it to + // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if + // both are present, or only `messaging.kafka.consumer.group`. For brokers, + // such as RabbitMQ and Artemis, set it to the `client_id` of the client + // consuming the message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'mygroup - client-6' + MessagingConsumerIDKey = attribute.Key("messaging.consumer.id") +) + +// MessagingConsumerID returns an attribute KeyValue conforming to the +// "messaging.consumer.id" semantic conventions. It represents the identifier +// for the consumer receiving a message. 
For Kafka, set it to
+// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both
+// are present, or only `messaging.kafka.consumer.group`. For brokers, such as
+// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the
+// message.
+func MessagingConsumerID(val string) attribute.KeyValue {
+ return MessagingConsumerIDKey.String(val)
+}
+
+// Semantic conventions for remote procedure calls.
+const (
+ // RPCSystemKey is the attribute Key conforming to the "rpc.system"
+ // semantic conventions. It represents a string identifying the remoting
+ // system. See below for a list of well-known identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ RPCSystemKey = attribute.Key("rpc.system")
+
+ // RPCServiceKey is the attribute Key conforming to the "rpc.service"
+ // semantic conventions. It represents the full (logical) name of the
+ // service being called, including its package name, if applicable.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'myservice.EchoService'
+ // Note: This is the logical name of the service from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // class. The `code.namespace` attribute may be used to store the latter
+ // (despite the attribute name, it may include a class name; e.g., class
+ // with method actually executing the call on the server side, RPC client
+ // stub class on the client side).
+ RPCServiceKey = attribute.Key("rpc.service")
+
+ // RPCMethodKey is the attribute Key conforming to the "rpc.method"
+ // semantic conventions. It represents the name of the (logical) method
+ // being called, which must be equal to the $method part in the span name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'exampleMethod'
+ // Note: This is the logical name of the method from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // method/function. The `code.function` attribute may be used to store the
+ // latter (e.g., method actually executing the call on the server side, RPC
+ // client stub method on the client side).
+ RPCMethodKey = attribute.Key("rpc.method")
+)
+
+var (
+ // gRPC
+ RPCSystemGRPC = RPCSystemKey.String("grpc")
+ // Java RMI
+ RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
+ // .NET WCF
+ RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
+ // Apache Dubbo
+ RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
+ // Connect RPC
+ RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
+)
+
+// RPCService returns an attribute KeyValue conforming to the "rpc.service"
+// semantic conventions. It represents the full (logical) name of the service
+// being called, including its package name, if applicable.
+func RPCService(val string) attribute.KeyValue {
+ return RPCServiceKey.String(val)
+}
+
+// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
+// semantic conventions. It represents the name of the (logical) method being
+// called, which must be equal to the $method part in the span name.
+func RPCMethod(val string) attribute.KeyValue {
+ return RPCMethodKey.String(val)
+}
+
+// Tech-specific attributes for gRPC.
+const (
+ // RPCGRPCStatusCodeKey is the attribute Key conforming to the
+ // "rpc.grpc.status_code" semantic conventions. 
It represents the [numeric
+ // status code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md)
+ // of the gRPC request.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+)
+
+var (
+ // OK
+ RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
+ // CANCELLED
+ RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
+ // UNKNOWN
+ RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
+ // INVALID_ARGUMENT
+ RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
+ // DEADLINE_EXCEEDED
+ RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
+ // NOT_FOUND
+ RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
+ // ALREADY_EXISTS
+ RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
+ // PERMISSION_DENIED
+ RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
+ // RESOURCE_EXHAUSTED
+ RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
+ // FAILED_PRECONDITION
+ RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
+ // ABORTED
+ RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
+ // OUT_OF_RANGE
+ RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
+ // UNIMPLEMENTED
+ RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
+ // INTERNAL
+ RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
+ // UNAVAILABLE
+ RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
+ // DATA_LOSS
+ RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
+ // UNAUTHENTICATED
+ RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
+)
+
+// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/).
+const (
+ // RPCJsonrpcVersionKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+ // version as in the `jsonrpc` property of request/response. Since JSON-RPC
+ // 1.0 does not specify this, the value can be omitted.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If other than the default
+ // version (`1.0`))
+ // Stability: stable
+ // Examples: '2.0', '1.0'
+ RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
+
+ // RPCJsonrpcRequestIDKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+ // property of the request or response. Since the protocol allows the id to
+ // be an int, a string, `null`, or missing (for notifications), the value
+ // is expected to be cast to a string for simplicity. Use an empty string
+ // in case of a `null` value. Omit entirely if this is a notification.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '10', 'request-7', ''
+ RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+
+ // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_code" semantic conventions. It represents the
+ // `error.code` property of the response if it is an error response.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If response is not successful.)
+ // Stability: stable
+ // Examples: -32700, 100
+ RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
+
+ // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_message" semantic conventions. It represents the
+ // `error.message` property of the response if it is an error response. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Parse error', 'User already exists' + RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") +) + +// RPCJsonrpcVersion returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.version" semantic conventions. It represents the protocol +// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 +// does not specify this, the value can be omitted. +func RPCJsonrpcVersion(val string) attribute.KeyValue { + return RPCJsonrpcVersionKey.String(val) +} + +// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` +// property of request or response. Since protocol allows id to be int, string, +// `null` or missing (for notifications), value is expected to be cast to +// string for simplicity. Use empty string in case of `null` value. Omit +// entirely if this is a notification. +func RPCJsonrpcRequestID(val string) attribute.KeyValue { + return RPCJsonrpcRequestIDKey.String(val) +} + +// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_code" semantic conventions. It represents the +// `error.code` property of response if it is an error response. +func RPCJsonrpcErrorCode(val int) attribute.KeyValue { + return RPCJsonrpcErrorCodeKey.Int(val) +} + +// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_message" semantic conventions. It represents the +// `error.message` property of response if it is an error response. +func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { + return RPCJsonrpcErrorMessageKey.String(val) +} + +// Tech-specific attributes for Connect RPC. +const ( + // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the + // "rpc.connect_rpc.error_code" semantic conventions. It represents the + // [error codes](https://connect.build/docs/protocol/#error-codes) of the + // Connect request. Error codes are always string values. + // + // Type: Enum + // RequirementLevel: ConditionallyRequired (If response is not successful + // and if error code available.) 
+ // Stability: stable + RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") +) + +var ( + // cancelled + RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") + // unknown + RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") + // invalid_argument + RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") + // deadline_exceeded + RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") + // not_found + RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") + // already_exists + RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") + // permission_denied + RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") + // resource_exhausted + RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") + // failed_precondition + RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") + // aborted + RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") + // out_of_range + RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") + // unimplemented + RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") + // internal + RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") + // unavailable + RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") + // data_loss + RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") + // unauthenticated + RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") +) diff --git a/vendor/go.opentelemetry.io/otel/trace.go b/vendor/go.opentelemetry.io/otel/trace.go new file mode 100644 index 00000000..caf7249d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace.go @@ -0,0 +1,47 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otel // import "go.opentelemetry.io/otel" + +import ( + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/trace" +) + +// Tracer creates a named tracer that implements Tracer interface. +// If the name is an empty string then provider uses default name. +// +// This is short for GetTracerProvider().Tracer(name, opts...) +func Tracer(name string, opts ...trace.TracerOption) trace.Tracer { + return GetTracerProvider().Tracer(name, opts...) +} + +// GetTracerProvider returns the registered global trace provider. +// If none is registered then an instance of NoopTracerProvider is returned. +// +// Use the trace provider to create a named tracer. E.g. 
+// +// tracer := otel.GetTracerProvider().Tracer("example.com/foo") +// +// or +// +// tracer := otel.Tracer("example.com/foo") +func GetTracerProvider() trace.TracerProvider { + return global.TracerProvider() +} + +// SetTracerProvider registers `tp` as the global trace provider. +func SetTracerProvider(tp trace.TracerProvider) { + global.SetTracerProvider(tp) +} diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go index f058cc78..3aadc66c 100644 --- a/vendor/go.opentelemetry.io/otel/trace/config.go +++ b/vendor/go.opentelemetry.io/otel/trace/config.go @@ -25,6 +25,7 @@ type TracerConfig struct { instrumentationVersion string // Schema URL of the telemetry emitted by the Tracer. schemaURL string + attrs attribute.Set } // InstrumentationVersion returns the version of the library providing instrumentation. @@ -32,6 +33,12 @@ func (t *TracerConfig) InstrumentationVersion() string { return t.instrumentationVersion } +// InstrumentationAttributes returns the attributes associated with the library +// providing instrumentation. +func (t *TracerConfig) InstrumentationAttributes() attribute.Set { + return t.attrs +} + // SchemaURL returns the Schema URL of the telemetry emitted by the Tracer. func (t *TracerConfig) SchemaURL() string { return t.schemaURL @@ -261,6 +268,7 @@ func (o stackTraceOption) applyEvent(c EventConfig) EventConfig { c.stackTrace = bool(o) return c } + func (o stackTraceOption) applySpan(c SpanConfig) SpanConfig { c.stackTrace = bool(o) return c @@ -307,6 +315,16 @@ func WithInstrumentationVersion(version string) TracerOption { }) } +// WithInstrumentationAttributes sets the instrumentation attributes. +// +// The passed attributes will be de-duplicated. +func WithInstrumentationAttributes(attr ...attribute.KeyValue) TracerOption { + return tracerOptionFunc(func(config TracerConfig) TracerConfig { + config.attrs = attribute.NewSet(attr...) + return config + }) +} + // WithSchemaURL sets the schema URL for the Tracer. func WithSchemaURL(schemaURL string) TracerOption { return tracerOptionFunc(func(cfg TracerConfig) TracerConfig { diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go index ab0346f9..440f3d75 100644 --- a/vendor/go.opentelemetry.io/otel/trace/doc.go +++ b/vendor/go.opentelemetry.io/otel/trace/doc.go @@ -62,5 +62,69 @@ a default. defer span.End() // ... } + +# API Implementations + +This package does not conform to the standard Go versioning policy; all of its +interfaces may have methods added to them without a package major version bump. +This non-standard API evolution could surprise an uninformed implementation +author. They could unknowingly build their implementation in a way that would +result in a runtime panic for their users that update to the new API. + +The API is designed to help inform an instrumentation author about this +non-standard API evolution. It requires them to choose a default behavior for +unimplemented interface methods. There are three behavior choices they can +make: + + - Compilation failure + - Panic + - Default to another implementation + +All interfaces in this API embed a corresponding interface from +[go.opentelemetry.io/otel/trace/embedded]. 
If an author wants the default +behavior of their implementations to be a compilation failure, signaling to +their users they need to update to the latest version of that implementation, +they need to embed the corresponding interface from +[go.opentelemetry.io/otel/trace/embedded] in their implementation. For +example, + + import "go.opentelemetry.io/otel/trace/embedded" + + type TracerProvider struct { + embedded.TracerProvider + // ... + } + +If an author wants the default behavior of their implementations to panic, they +can embed the API interface directly. + + import "go.opentelemetry.io/otel/trace" + + type TracerProvider struct { + trace.TracerProvider + // ... + } + +This option is not recommended. It will lead to publishing packages that +contain runtime panics when users update to newer versions of +[go.opentelemetry.io/otel/trace], which may be done with a transitive +dependency. + +Finally, an author can embed another implementation in theirs. The embedded +implementation will be used for methods not defined by the author. For example, +an author who wants to default to silently dropping the call can use +[go.opentelemetry.io/otel/trace/noop]: + + import "go.opentelemetry.io/otel/trace/noop" + + type TracerProvider struct { + noop.TracerProvider + // ... + } + +It is strongly recommended that authors only embed +[go.opentelemetry.io/otel/trace/noop] if they choose this default behavior. +That implementation is the only one OpenTelemetry authors can guarantee will +fully implement all the API interfaces when a user updates their API. */ package trace // import "go.opentelemetry.io/otel/trace" diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go new file mode 100644 index 00000000..898db5a7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go @@ -0,0 +1,56 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package embedded provides interfaces embedded within the [OpenTelemetry +// trace API]. +// +// Implementers of the [OpenTelemetry trace API] can embed the relevant type +// from this package into their implementation directly. Doing so will result +// in a compilation error for users when the [OpenTelemetry trace API] is +// extended (which is something that can happen without a major version bump of +// the API package). +// +// [OpenTelemetry trace API]: https://pkg.go.dev/go.opentelemetry.io/otel/trace +package embedded // import "go.opentelemetry.io/otel/trace/embedded" + +// TracerProvider is embedded in +// [go.opentelemetry.io/otel/trace.TracerProvider].
+// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/trace.TracerProvider] if you want users to +// experience a compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/trace.TracerProvider] +// interface is extended (which is something that can happen without a major +// version bump of the API package). +type TracerProvider interface{ tracerProvider() } + +// Tracer is embedded in [go.opentelemetry.io/otel/trace.Tracer]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/trace.Tracer] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/trace.Tracer] interface +// is extended (which is something that can happen without a major version bump +// of the API package). +type Tracer interface{ tracer() } + +// Span is embedded in [go.opentelemetry.io/otel/trace.Span]. +// +// Embed this interface in your implementation of the +// [go.opentelemetry.io/otel/trace.Span] if you want users to experience a +// compilation error, signaling they need to update to your latest +// implementation, when the [go.opentelemetry.io/otel/trace.Span] interface is +// extended (which is something that can happen without a major version bump of +// the API package). +type Span interface{ span() } diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go index 73950f20..c125491c 100644 --- a/vendor/go.opentelemetry.io/otel/trace/noop.go +++ b/vendor/go.opentelemetry.io/otel/trace/noop.go @@ -19,16 +19,20 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace/embedded" ) // NewNoopTracerProvider returns an implementation of TracerProvider that // performs no operations. The Tracer and Spans created from the returned // TracerProvider also perform no operations. +// +// Deprecated: Use [go.opentelemetry.io/otel/trace/noop.NewTracerProvider] +// instead. func NewNoopTracerProvider() TracerProvider { return noopTracerProvider{} } -type noopTracerProvider struct{} +type noopTracerProvider struct{ embedded.TracerProvider } var _ TracerProvider = noopTracerProvider{} @@ -37,8 +41,8 @@ func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer { return noopTracer{} } -// noopTracer is an implementation of Tracer that preforms no operations. -type noopTracer struct{} +// noopTracer is an implementation of Tracer that performs no operations. +type noopTracer struct{ embedded.Tracer } var _ Tracer = noopTracer{} @@ -53,8 +57,8 @@ func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption return ContextWithSpan(ctx, span), span } -// noopSpan is an implementation of Span that preforms no operations. -type noopSpan struct{} +// noopSpan is an implementation of Span that performs no operations. 
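The deprecation notice above points at the standalone noop package; a short sketch of the replacement call, under the assumption that go.opentelemetry.io/otel/trace/noop is vendored alongside this API version:

	package main

	import (
		"context"

		"go.opentelemetry.io/otel/trace/noop"
	)

	func main() {
		tp := noop.NewTracerProvider() // replaces trace.NewNoopTracerProvider()
		tracer := tp.Tracer("example")
		_, span := tracer.Start(context.Background(), "op")
		span.End() // every operation is a no-op, but the API contract is satisfied
	}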
+type noopSpan struct{ embedded.Span } var _ Span = noopSpan{} diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go index 4aa94f79..26a4b226 100644 --- a/vendor/go.opentelemetry.io/otel/trace/trace.go +++ b/vendor/go.opentelemetry.io/otel/trace/trace.go @@ -22,6 +22,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace/embedded" ) const ( @@ -48,8 +49,10 @@ func (e errorConst) Error() string { // nolint:revive // revive complains about stutter of `trace.TraceID`. type TraceID [16]byte -var nilTraceID TraceID -var _ json.Marshaler = nilTraceID +var ( + nilTraceID TraceID + _ json.Marshaler = nilTraceID +) // IsValid checks whether the trace TraceID is valid. A valid trace ID does // not consist of zeros only. @@ -71,8 +74,10 @@ func (t TraceID) String() string { // SpanID is a unique identity of a span in a trace. type SpanID [8]byte -var nilSpanID SpanID -var _ json.Marshaler = nilSpanID +var ( + nilSpanID SpanID + _ json.Marshaler = nilSpanID +) // IsValid checks whether the SpanID is valid. A valid SpanID does not consist // of zeros only. @@ -338,8 +343,15 @@ func (sc SpanContext) MarshalJSON() ([]byte, error) { // create a Span and it is then up to the operation the Span represents to // properly end the Span when the operation itself ends. // -// Warning: methods may be added to this interface in minor releases. +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. type Span interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Span + // End completes the Span. The Span is considered complete and ready to be // delivered through the rest of the telemetry pipeline after this method // is called. Therefore, updates to the Span are not allowed after this @@ -486,8 +498,15 @@ func (sk SpanKind) String() string { // Tracer is the creator of Spans. // -// Warning: methods may be added to this interface in minor releases. +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. type Tracer interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Tracer + // Start creates a span and a context.Context containing the newly-created span. // // If the context.Context provided in `ctx` contains a Span then the newly-created @@ -518,8 +537,15 @@ type Tracer interface { // at runtime from its users or it can simply use the globally registered one // (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). // -// Warning: methods may be added to this interface in minor releases. +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. type TracerProvider interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. 
See the "API Implementations" + // section of the package documentation for more information. + embedded.TracerProvider + // Tracer returns a unique Tracer scoped to be used by instrumentation code // to trace computational workflows. The scope and identity of that // instrumentation code is uniquely defined by the name and options passed. diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go index ca68a82e..db936ba5 100644 --- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go +++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go @@ -17,20 +17,14 @@ package trace // import "go.opentelemetry.io/otel/trace" import ( "encoding/json" "fmt" - "regexp" "strings" ) const ( maxListMembers = 32 - listDelimiter = "," - - // based on the W3C Trace Context specification, see - // https://www.w3.org/TR/trace-context-1/#tracestate-header - noTenantKeyFormat = `[a-z][_0-9a-z\-\*\/]{0,255}` - withTenantKeyFormat = `[a-z0-9][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}` - valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]` + listDelimiters = "," + memberDelimiter = "=" errInvalidKey errorConst = "invalid tracestate key" errInvalidValue errorConst = "invalid tracestate value" @@ -39,43 +33,138 @@ const ( errDuplicate errorConst = "duplicate list-member in tracestate" ) -var ( - keyRe = regexp.MustCompile(`^((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))$`) - valueRe = regexp.MustCompile(`^(` + valueFormat + `)$`) - memberRe = regexp.MustCompile(`^\s*((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))=(` + valueFormat + `)\s*$`) -) - type member struct { Key string Value string } +// according to (chr = %x20 / (nblk-char = %x21-2B / %x2D-3C / %x3E-7E) ) +// means (chr = %x20-2B / %x2D-3C / %x3E-7E) . +func checkValueChar(v byte) bool { + return v >= '\x20' && v <= '\x7e' && v != '\x2c' && v != '\x3d' +} + +// according to (nblk-chr = %x21-2B / %x2D-3C / %x3E-7E) . +func checkValueLast(v byte) bool { + return v >= '\x21' && v <= '\x7e' && v != '\x2c' && v != '\x3d' +} + +// based on the W3C Trace Context specification +// +// value = (0*255(chr)) nblk-chr +// nblk-chr = %x21-2B / %x2D-3C / %x3E-7E +// chr = %x20 / nblk-chr +// +// see https://www.w3.org/TR/trace-context-1/#value +func checkValue(val string) bool { + n := len(val) + if n == 0 || n > 256 { + return false + } + for i := 0; i < n-1; i++ { + if !checkValueChar(val[i]) { + return false + } + } + return checkValueLast(val[n-1]) +} + +func checkKeyRemain(key string) bool { + // ( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ) + for _, v := range key { + if isAlphaNum(byte(v)) { + continue + } + switch v { + case '_', '-', '*', '/': + continue + } + return false + } + return true +} + +// according to +// +// simple-key = lcalpha (0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )) +// system-id = lcalpha (0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )) +// +// param n is remain part length, should be 255 in simple-key or 13 in system-id. 
+func checkKeyPart(key string, n int) bool { + if len(key) == 0 { + return false + } + first := key[0] // key's first char + ret := len(key[1:]) <= n + ret = ret && first >= 'a' && first <= 'z' + return ret && checkKeyRemain(key[1:]) +} + +func isAlphaNum(c byte) bool { + if c >= 'a' && c <= 'z' { + return true + } + return c >= '0' && c <= '9' +} + +// according to +// +// tenant-id = ( lcalpha / DIGIT ) 0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ) +// +// param n is remain part length, should be 240 exactly. +func checkKeyTenant(key string, n int) bool { + if len(key) == 0 { + return false + } + return isAlphaNum(key[0]) && len(key[1:]) <= n && checkKeyRemain(key[1:]) +} + +// based on the W3C Trace Context specification +// +// key = simple-key / multi-tenant-key +// simple-key = lcalpha (0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )) +// multi-tenant-key = tenant-id "@" system-id +// tenant-id = ( lcalpha / DIGIT ) (0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )) +// system-id = lcalpha (0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )) +// lcalpha = %x61-7A ; a-z +// +// see https://www.w3.org/TR/trace-context-1/#tracestate-header. +func checkKey(key string) bool { + tenant, system, ok := strings.Cut(key, "@") + if !ok { + return checkKeyPart(key, 255) + } + return checkKeyTenant(tenant, 240) && checkKeyPart(system, 13) +} + func newMember(key, value string) (member, error) { - if !keyRe.MatchString(key) { - return member{}, fmt.Errorf("%w: %s", errInvalidKey, key) + if !checkKey(key) { + return member{}, errInvalidKey } - if !valueRe.MatchString(value) { - return member{}, fmt.Errorf("%w: %s", errInvalidValue, value) + if !checkValue(value) { + return member{}, errInvalidValue } return member{Key: key, Value: value}, nil } func parseMember(m string) (member, error) { - matches := memberRe.FindStringSubmatch(m) - if len(matches) != 5 { + key, val, ok := strings.Cut(m, memberDelimiter) + if !ok { return member{}, fmt.Errorf("%w: %s", errInvalidMember, m) } - - return member{ - Key: matches[1], - Value: matches[4], - }, nil + key = strings.TrimLeft(key, " \t") + val = strings.TrimRight(val, " \t") + result, e := newMember(key, val) + if e != nil { + return member{}, fmt.Errorf("%w: %s", errInvalidMember, m) + } + return result, nil } // String encodes member into a string compliant with the W3C Trace Context // specification. func (m member) String() string { - return fmt.Sprintf("%s=%s", m.Key, m.Value) + return m.Key + "=" + m.Value } // TraceState provides additional vendor-specific trace identification @@ -99,8 +188,8 @@ var _ json.Marshaler = TraceState{} // ParseTraceState attempts to decode a TraceState from the passed // string. It returns an error if the input is invalid according to the W3C // Trace Context specification. -func ParseTraceState(tracestate string) (TraceState, error) { - if tracestate == "" { +func ParseTraceState(ts string) (TraceState, error) { + if ts == "" { return TraceState{}, nil } @@ -110,7 +199,9 @@ func ParseTraceState(tracestate string) (TraceState, error) { var members []member found := make(map[string]struct{}) - for _, memberStr := range strings.Split(tracestate, listDelimiter) { + for ts != "" { + var memberStr string + memberStr, ts, _ = strings.Cut(ts, listDelimiters) if len(memberStr) == 0 { continue } @@ -143,11 +234,29 @@ func (ts TraceState) MarshalJSON() ([]byte, error) { // Trace Context specification. The returned string will be invalid if the // TraceState contains any invalid members. 
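The grammar helpers above replace the old regular expressions. End to end, a tracestate round-trip under these rules might look like the following sketch (API names are from this package; the keys and values are chosen to satisfy checkKey and checkValue):

	package main

	import (
		"fmt"

		"go.opentelemetry.io/otel/trace"
	)

	func main() {
		// Both a multi-tenant key (tenant@system) and a simple key are accepted.
		ts, err := trace.ParseTraceState("tenant@system=v1,simple=v2")
		if err != nil {
			panic(err)
		}

		// Insert prepends, matching the W3C rule that updated members move left.
		ts, err = ts.Insert("vendorkey", "opaque-value")
		if err != nil {
			panic(err)
		}

		fmt.Println(ts.String())      // vendorkey=opaque-value,tenant@system=v1,simple=v2
		fmt.Println(ts.Get("simple")) // v2
	}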
func (ts TraceState) String() string { - members := make([]string, len(ts.list)) - for i, m := range ts.list { - members[i] = m.String() + if len(ts.list) == 0 { + return "" + } + var n int + n += len(ts.list) // member delimiters: '=' + n += len(ts.list) - 1 // list delimiters: ',' + for _, mem := range ts.list { + n += len(mem.Key) + n += len(mem.Value) } - return strings.Join(members, listDelimiter) + + var sb strings.Builder + sb.Grow(n) + _, _ = sb.WriteString(ts.list[0].Key) + _ = sb.WriteByte('=') + _, _ = sb.WriteString(ts.list[0].Value) + for i := 1; i < len(ts.list); i++ { + _ = sb.WriteByte(listDelimiters[0]) + _, _ = sb.WriteString(ts.list[i].Key) + _ = sb.WriteByte('=') + _, _ = sb.WriteString(ts.list[i].Value) + } + return sb.String() } // Get returns the value paired with key from the corresponding TraceState @@ -179,15 +288,25 @@ func (ts TraceState) Insert(key, value string) (TraceState, error) { if err != nil { return ts, err } - - cTS := ts.Delete(key) - if cTS.Len()+1 <= maxListMembers { - cTS.list = append(cTS.list, member{}) + n := len(ts.list) + found := n + for i := range ts.list { + if ts.list[i].Key == key { + found = i + } + } + cTS := TraceState{} + if found == n && n < maxListMembers { + cTS.list = make([]member, n+1) + } else { + cTS.list = make([]member, n) } - // When the number of members exceeds capacity, drop the "right-most". - copy(cTS.list[1:], cTS.list) cTS.list[0] = m - + // When the number of members exceeds capacity, drop the "right-most". + copy(cTS.list[1:], ts.list[0:found]) + if found < n { + copy(cTS.list[1+found:], ts.list[found+1:]) + } return cTS, nil } diff --git a/vendor/go.opentelemetry.io/otel/verify_examples.sh b/vendor/go.opentelemetry.io/otel/verify_examples.sh new file mode 100644 index 00000000..dbb61a42 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/verify_examples.sh @@ -0,0 +1,85 @@ +#!/bin/bash + +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -euo pipefail + +cd $(dirname $0) +TOOLS_DIR=$(pwd)/.tools + +if [ -z "${GOPATH}" ] ; then + printf "GOPATH is not defined.\n" + exit -1 +fi + +if [ ! -d "${GOPATH}" ] ; then + printf "GOPATH ${GOPATH} is invalid \n" + exit -1 +fi + +# Pre-requisites +if ! git diff --quiet; then \ + git status + printf "\n\nError: working tree is not clean\n" + exit -1 +fi + +if [ "$(git tag --contains $(git log -1 --pretty=format:"%H"))" = "" ] ; then + printf "$(git log -1)" + printf "\n\nError: HEAD is not pointing to a tagged version" +fi + +make ${TOOLS_DIR}/gojq + +DIR_TMP="${GOPATH}/src/oteltmp/" +rm -rf $DIR_TMP +mkdir -p $DIR_TMP + +printf "Copy examples to ${DIR_TMP}\n" +cp -a ./example ${DIR_TMP} + +# Update go.mod files +printf "Update go.mod: rename module and remove replace\n" + +PACKAGE_DIRS=$(find . 
-mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | egrep 'example' | sed 's/^\.\///' | sort) + +for dir in $PACKAGE_DIRS; do + printf " Update go.mod for $dir\n" + (cd "${DIR_TMP}/${dir}" && \ + # replaces is ("mod1" "mod2" …) + replaces=($(go mod edit -json | ${TOOLS_DIR}/gojq '.Replace[].Old.Path')) && \ + # strip double quotes + replaces=("${replaces[@]%\"}") && \ + replaces=("${replaces[@]#\"}") && \ + # make an array (-dropreplace=mod1 -dropreplace=mod2 …) + dropreplaces=("${replaces[@]/#/-dropreplace=}") && \ + go mod edit -module "oteltmp/${dir}" "${dropreplaces[@]}" && \ + go mod tidy) +done +printf "Update done:\n\n" + +# Build directories that contain main package. These directories are different than +# directories that contain go.mod files. +printf "Build examples:\n" +EXAMPLES=$(./get_main_pkgs.sh ./example) +for ex in $EXAMPLES; do + printf " Build $ex in ${DIR_TMP}/${ex}\n" + (cd "${DIR_TMP}/${ex}" && \ + go build .) +done + +# Cleanup +printf "Remove copied files.\n" +rm -rf $DIR_TMP diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/vendor/go.opentelemetry.io/otel/version.go similarity index 73% rename from vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go rename to vendor/go.opentelemetry.io/otel/version.go index c318385c..c7aba1c3 100644 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -1,4 +1,4 @@ -// Copyright 2013 Matt T. Proud +// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,5 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package pbutil provides record length-delimited Protocol Buffer streaming. -package pbutil +package otel // import "go.opentelemetry.io/otel" + +// Version is the current release version of OpenTelemetry in use. +func Version() string { + return "1.22.0" +} diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml new file mode 100644 index 00000000..a9cfb80a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -0,0 +1,52 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +module-sets: + stable-v1: + version: v1.22.0 + modules: + - go.opentelemetry.io/otel + - go.opentelemetry.io/otel/bridge/opentracing + - go.opentelemetry.io/otel/bridge/opentracing/test + - go.opentelemetry.io/otel/example/dice + - go.opentelemetry.io/otel/example/namedtracer + - go.opentelemetry.io/otel/example/otel-collector + - go.opentelemetry.io/otel/example/passthrough + - go.opentelemetry.io/otel/example/zipkin + - go.opentelemetry.io/otel/exporters/otlp/otlptrace + - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc + - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp + - go.opentelemetry.io/otel/exporters/stdout/stdouttrace + - go.opentelemetry.io/otel/exporters/zipkin + - go.opentelemetry.io/otel/metric + - go.opentelemetry.io/otel/sdk + - go.opentelemetry.io/otel/sdk/metric + - go.opentelemetry.io/otel/trace + experimental-metrics: + version: v0.45.0 + modules: + - go.opentelemetry.io/otel/bridge/opencensus + - go.opentelemetry.io/otel/bridge/opencensus/test + - go.opentelemetry.io/otel/example/opencensus + - go.opentelemetry.io/otel/example/prometheus + - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc + - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp + - go.opentelemetry.io/otel/exporters/prometheus + - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric + experimental-schema: + version: v0.0.7 + modules: + - go.opentelemetry.io/otel/schema +excluded-modules: + - go.opentelemetry.io/otel/internal/tools diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go index 5dfacbb9..661ea132 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go +++ b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc && !purego -// +build gc,!purego package chacha20 diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s index f1f66230..7dd2638e 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s +++ b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc && !purego -// +build gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go b/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go index 02ff3d05..db42e667 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go +++ b/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build (!arm64 && !s390x && !ppc64le) || !gc || purego -// +build !arm64,!s390x,!ppc64le !gc purego package chacha20 diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go index da420b2e..3a4287f9 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go +++ b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc && !purego -// +build gc,!purego package chacha20 diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s index 5c0fed26..66aebae2 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s +++ b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s @@ -20,7 +20,6 @@ // due to the calling conventions and initialization of constants. 
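The crypto hunks that follow all make the same mechanical change: the legacy // +build lines are dropped, leaving only the //go:build form introduced in Go 1.17 (gofmt keeps the two forms in sync, and the legacy form presumably only matters for pre-1.17 toolchains these modules no longer target). A file header after the change reduces to:

	//go:build gc && !purego

	package chacha20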
//go:build gc && !purego -// +build gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go index 4652247b..683ccfd1 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go +++ b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc && !purego -// +build gc,!purego package chacha20 diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s index f3ef5a01..1eda91a3 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s +++ b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc && !purego -// +build gc,!purego #include "go_asm.h" #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go index 0c408c57..50695a14 100644 --- a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go +++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc && !purego -// +build gc,!purego package chacha20poly1305 diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s index 867c181a..731d2ac6 100644 --- a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s +++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s @@ -5,7 +5,6 @@ // This file was originally from https://golang.org/cl/24717 by Vlad Krasnov of CloudFlare. //go:build gc && !purego -// +build gc,!purego #include "textflag.h" // General register allocation @@ -184,11 +183,31 @@ GLOBL ·andMask<>(SB), (NOPTR+RODATA), $240 #define shiftD1Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xd2; BYTE $0x04 // PALIGNR $4, X10, X10 #define shiftD2Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x04 // PALIGNR $4, X11, X11 #define shiftD3Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x04 // PALIGNR $4, X15, X15 + // Some macros + +// ROL rotates the uint32s in register R left by N bits, using temporary T. +#define ROL(N, R, T) \ + MOVO R, T; PSLLL $(N), T; PSRLL $(32-(N)), R; PXOR T, R + +// ROL16 rotates the uint32s in register R left by 16, using temporary T if needed. +#ifdef GOAMD64_v2 +#define ROL16(R, T) PSHUFB ·rol16<>(SB), R +#else +#define ROL16(R, T) ROL(16, R, T) +#endif + +// ROL8 rotates the uint32s in register R left by 8, using temporary T if needed. 
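Per 32-bit lane, ROL16 and ROL8 are plain left rotations; a scalar Go sketch (not part of the patch) of the same operation:

	package main

	import (
		"fmt"
		"math/bits"
	)

	func main() {
		x := uint32(0x01234567)
		fmt.Printf("%#08x\n", bits.RotateLeft32(x, 16)) // 0x45670123, ROL16 per lane
		fmt.Printf("%#08x\n", bits.RotateLeft32(x, 8))  // 0x23456701, ROL8 per lane
	}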
+#ifdef GOAMD64_v2 +#define ROL8(R, T) PSHUFB ·rol8<>(SB), R +#else +#define ROL8(R, T) ROL(8, R, T) +#endif + #define chachaQR(A, B, C, D, T) \ - PADDD B, A; PXOR A, D; PSHUFB ·rol16<>(SB), D \ + PADDD B, A; PXOR A, D; ROL16(D, T) \ PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $12, T; PSRLL $20, B; PXOR T, B \ - PADDD B, A; PXOR A, D; PSHUFB ·rol8<>(SB), D \ + PADDD B, A; PXOR A, D; ROL8(D, T) \ PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $7, T; PSRLL $25, B; PXOR T, B #define chachaQR_AVX2(A, B, C, D, T) \ diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go index f832b33d..34e6ab1d 100644 --- a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go +++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !amd64 || !gc || purego -// +build !amd64 !gc purego package chacha20poly1305 diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1.go index 6fc2838a..2492f796 100644 --- a/vendor/golang.org/x/crypto/cryptobyte/asn1.go +++ b/vendor/golang.org/x/crypto/cryptobyte/asn1.go @@ -733,13 +733,14 @@ func (s *String) ReadOptionalASN1OctetString(out *[]byte, outPresent *bool, tag return true } -// ReadOptionalASN1Boolean sets *out to the value of the next ASN.1 BOOLEAN or, -// if the next bytes are not an ASN.1 BOOLEAN, to the value of defaultValue. -// It reports whether the operation was successful. -func (s *String) ReadOptionalASN1Boolean(out *bool, defaultValue bool) bool { +// ReadOptionalASN1Boolean attempts to read an optional ASN.1 BOOLEAN +// explicitly tagged with tag into out and advances. If no element with a +// matching tag is present, it sets "out" to defaultValue instead. It reports +// whether the read was successful. +func (s *String) ReadOptionalASN1Boolean(out *bool, tag asn1.Tag, defaultValue bool) bool { var present bool var child String - if !s.ReadOptionalASN1(&child, &present, asn1.BOOLEAN) { + if !s.ReadOptionalASN1(&child, &present, tag) { return false } @@ -748,7 +749,7 @@ func (s *String) ReadOptionalASN1Boolean(out *bool, defaultValue bool) bool { return true } - return s.ReadASN1Boolean(out) + return child.ReadASN1Boolean(out) } func (s *String) readASN1(out *String, outTag *asn1.Tag, skipHeader bool) bool { diff --git a/vendor/golang.org/x/crypto/hkdf/hkdf.go b/vendor/golang.org/x/crypto/hkdf/hkdf.go index dda3f143..f4ded5fe 100644 --- a/vendor/golang.org/x/crypto/hkdf/hkdf.go +++ b/vendor/golang.org/x/crypto/hkdf/hkdf.go @@ -56,7 +56,9 @@ func (f *hkdf) Read(p []byte) (int, error) { // Fill the rest of the buffer for len(p) > 0 { - f.expander.Reset() + if f.counter > 1 { + f.expander.Reset() + } f.expander.Write(f.prev) f.expander.Write(f.info) f.expander.Write([]byte{f.counter}) diff --git a/vendor/golang.org/x/crypto/internal/alias/alias.go b/vendor/golang.org/x/crypto/internal/alias/alias.go index 69c17f82..551ff0c3 100644 --- a/vendor/golang.org/x/crypto/internal/alias/alias.go +++ b/vendor/golang.org/x/crypto/internal/alias/alias.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !purego -// +build !purego // Package alias implements memory aliasing tests. 
package alias diff --git a/vendor/golang.org/x/crypto/internal/alias/alias_purego.go b/vendor/golang.org/x/crypto/internal/alias/alias_purego.go index 4775b0a4..6fe61b5c 100644 --- a/vendor/golang.org/x/crypto/internal/alias/alias_purego.go +++ b/vendor/golang.org/x/crypto/internal/alias/alias_purego.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build purego -// +build purego // Package alias implements memory aliasing tests. package alias diff --git a/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go b/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go deleted file mode 100644 index 45b5c966..00000000 --- a/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.13 -// +build !go1.13 - -package poly1305 - -// Generic fallbacks for the math/bits intrinsics, copied from -// src/math/bits/bits.go. They were added in Go 1.12, but Add64 and Sum64 had -// variable time fallbacks until Go 1.13. - -func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) { - sum = x + y + carry - carryOut = ((x & y) | ((x | y) &^ sum)) >> 63 - return -} - -func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) { - diff = x - y - borrow - borrowOut = ((^x & y) | (^(x ^ y) & diff)) >> 63 - return -} - -func bitsMul64(x, y uint64) (hi, lo uint64) { - const mask32 = 1<<32 - 1 - x0 := x & mask32 - x1 := x >> 32 - y0 := y & mask32 - y1 := y >> 32 - w0 := x0 * y0 - t := x1*y0 + w0>>32 - w1 := t & mask32 - w2 := t >> 32 - w1 += x0 * y1 - hi = x1*y1 + w2 + w1>>32 - lo = x * y - return -} diff --git a/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go b/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go deleted file mode 100644 index ed52b341..00000000 --- a/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.13 -// +build go1.13 - -package poly1305 - -import "math/bits" - -func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) { - return bits.Add64(x, y, carry) -} - -func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) { - return bits.Sub64(x, y, borrow) -} - -func bitsMul64(x, y uint64) (hi, lo uint64) { - return bits.Mul64(x, y) -} diff --git a/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go b/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go index f184b67d..333da285 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go +++ b/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build (!amd64 && !ppc64le && !s390x) || !gc || purego -// +build !amd64,!ppc64le,!s390x !gc purego package poly1305 diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go index 6d522333..164cd47d 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build gc && !purego -// +build gc,!purego package poly1305 diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s index 1d74f0f8..e0d3c647 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc && !purego -// +build gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go index e041da5e..ec2202bd 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go @@ -7,7 +7,10 @@ package poly1305 -import "encoding/binary" +import ( + "encoding/binary" + "math/bits" +) // Poly1305 [RFC 7539] is a relatively simple algorithm: the authentication tag // for a 64 bytes message is approximately @@ -114,13 +117,13 @@ type uint128 struct { } func mul64(a, b uint64) uint128 { - hi, lo := bitsMul64(a, b) + hi, lo := bits.Mul64(a, b) return uint128{lo, hi} } func add128(a, b uint128) uint128 { - lo, c := bitsAdd64(a.lo, b.lo, 0) - hi, c := bitsAdd64(a.hi, b.hi, c) + lo, c := bits.Add64(a.lo, b.lo, 0) + hi, c := bits.Add64(a.hi, b.hi, c) if c != 0 { panic("poly1305: unexpected overflow") } @@ -155,8 +158,8 @@ func updateGeneric(state *macState, msg []byte) { // hide leading zeroes. For full chunks, that's 1 << 128, so we can just // add 1 to the most significant (2¹²⁸) limb, h2. if len(msg) >= TagSize { - h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0) - h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(msg[8:16]), c) + h0, c = bits.Add64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0) + h1, c = bits.Add64(h1, binary.LittleEndian.Uint64(msg[8:16]), c) + h2 += c + 1 msg = msg[TagSize:] @@ -165,8 +168,8 @@ copy(buf[:], msg) buf[len(msg)] = 1 - h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0) - h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(buf[8:16]), c) + h0, c = bits.Add64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0) + h1, c = bits.Add64(h1, binary.LittleEndian.Uint64(buf[8:16]), c) + h2 += c msg = nil @@ -219,9 +222,9 @@ m3 := h2r1 t0 := m0.lo - t1, c := bitsAdd64(m1.lo, m0.hi, 0) - t2, c := bitsAdd64(m2.lo, m1.hi, c) - t3, _ := bitsAdd64(m3.lo, m2.hi, c) + t1, c := bits.Add64(m1.lo, m0.hi, 0) + t2, c := bits.Add64(m2.lo, m1.hi, c) + t3, _ := bits.Add64(m3.lo, m2.hi, c) // Now we have the result as 4 64-bit limbs, and we need to reduce it // modulo 2¹³⁰ - 5. The special shape of this Crandall prime lets us do @@ -243,14 +246,14 @@ // To add c * 5 to h, we first add cc = c * 4, and then add (cc >> 2) = c. - h0, c = bitsAdd64(h0, cc.lo, 0) - h1, c = bitsAdd64(h1, cc.hi, c) + h0, c = bits.Add64(h0, cc.lo, 0) + h1, c = bits.Add64(h1, cc.hi, c) h2 += c cc = shiftRightBy2(cc) - h0, c = bitsAdd64(h0, cc.lo, 0) - h1, c = bitsAdd64(h1, cc.hi, c) + h0, c = bits.Add64(h0, cc.lo, 0) + h1, c = bits.Add64(h1, cc.hi, c) + h2 += c // h2 is at most 3 + 1 + 1 = 5, making the whole of h at most @@ -287,9 +290,9 @@ func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) { // in constant time, we compute t = h - (2¹³⁰ - 5), and select h as the // result if the subtraction underflows, and t otherwise.
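The migration from the bitsAdd64/bitsSub64 shims to math/bits hinges on the carry-chain idiom used throughout this hunk. A self-contained sketch of how carries and borrows thread through multi-limb arithmetic:

	package main

	import (
		"fmt"
		"math/bits"
	)

	func main() {
		// Two-limb addition: the carry out of the low limb feeds the high limb.
		lo, carry := bits.Add64(0xffffffffffffffff, 1, 0) // lo = 0, carry = 1
		hi, _ := bits.Add64(0, 0, carry)                  // hi = 1
		fmt.Println(hi, lo) // 1 0

		// Subtraction threads a borrow the same way; borrow == 1 signals
		// underflow, which is exactly the constant-time h < p test that
		// finalize relies on.
		_, borrow := bits.Sub64(0, 1, 0)
		fmt.Println(borrow) // 1
	}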
- hMinusP0, b := bitsSub64(h0, p0, 0) - hMinusP1, b := bitsSub64(h1, p1, b) - _, b = bitsSub64(h2, p2, b) + hMinusP0, b := bits.Sub64(h0, p0, 0) + hMinusP1, b := bits.Sub64(h1, p1, b) + _, b = bits.Sub64(h2, p2, b) // h = h if h < p else h - p h0 = select64(b, h0, hMinusP0) @@ -301,8 +304,8 @@ func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) { // // by just doing a wide addition with the 128 low bits of h and discarding // the overflow. - h0, c := bitsAdd64(h0, s[0], 0) - h1, _ = bitsAdd64(h1, s[1], c) + h0, c := bits.Add64(h0, s[0], 0) + h1, _ = bits.Add64(h1, s[1], c) binary.LittleEndian.PutUint64(out[0:8], h0) binary.LittleEndian.PutUint64(out[8:16], h1) diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go index 4a069941..4aec4874 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc && !purego -// +build gc,!purego package poly1305 diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s index 58422aad..d2ca5dee 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc && !purego -// +build gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go index ec959668..e1d033a4 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc && !purego -// +build gc,!purego package poly1305 diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s index aa9e0494..0fe3a7c2 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc && !purego -// +build gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/exp/slices/cmp.go b/vendor/golang.org/x/exp/slices/cmp.go new file mode 100644 index 00000000..fbf1934a --- /dev/null +++ b/vendor/golang.org/x/exp/slices/cmp.go @@ -0,0 +1,44 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +import "golang.org/x/exp/constraints" + +// min is a version of the predeclared function from the Go 1.21 release. +func min[T constraints.Ordered](a, b T) T { + if a < b || isNaN(a) { + return a + } + return b +} + +// max is a version of the predeclared function from the Go 1.21 release. +func max[T constraints.Ordered](a, b T) T { + if a > b || isNaN(a) { + return a + } + return b +} + +// cmpLess is a copy of cmp.Less from the Go 1.21 release. +func cmpLess[T constraints.Ordered](x, y T) bool { + return (isNaN(x) && !isNaN(y)) || x < y +} + +// cmpCompare is a copy of cmp.Compare from the Go 1.21 release. 
+func cmpCompare[T constraints.Ordered](x, y T) int { + xNaN := isNaN(x) + yNaN := isNaN(y) + if xNaN && yNaN { + return 0 + } + if xNaN || x < y { + return -1 + } + if yNaN || x > y { + return +1 + } + return 0 +} diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go index cff0cd49..46ceac34 100644 --- a/vendor/golang.org/x/exp/slices/slices.go +++ b/vendor/golang.org/x/exp/slices/slices.go @@ -3,23 +3,20 @@ // license that can be found in the LICENSE file. // Package slices defines various functions useful with slices of any type. -// Unless otherwise specified, these functions all apply to the elements -// of a slice at index 0 <= i < len(s). -// -// Note that the less function in IsSortedFunc, SortFunc, SortStableFunc requires a -// strict weak ordering (https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings), -// or the sorting may fail to sort correctly. A common case is when sorting slices of -// floating-point numbers containing NaN values. package slices -import "golang.org/x/exp/constraints" +import ( + "unsafe" + + "golang.org/x/exp/constraints" +) // Equal reports whether two slices are equal: the same length and all // elements equal. If the lengths are different, Equal returns false. // Otherwise, the elements are compared in increasing index order, and the // comparison stops at the first unequal pair. // Floating point NaNs are not considered equal. -func Equal[E comparable](s1, s2 []E) bool { +func Equal[S ~[]E, E comparable](s1, s2 S) bool { if len(s1) != len(s2) { return false } @@ -31,12 +28,12 @@ func Equal[E comparable](s1, s2 []E) bool { return true } -// EqualFunc reports whether two slices are equal using a comparison +// EqualFunc reports whether two slices are equal using an equality // function on each pair of elements. If the lengths are different, // EqualFunc returns false. Otherwise, the elements are compared in // increasing index order, and the comparison stops at the first index // for which eq returns false. -func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool { +func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool { if len(s1) != len(s2) { return false } @@ -49,45 +46,37 @@ func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool { return true } -// Compare compares the elements of s1 and s2. -// The elements are compared sequentially, starting at index 0, +// Compare compares the elements of s1 and s2, using [cmp.Compare] on each pair +// of elements. The elements are compared sequentially, starting at index 0, // until one element is not equal to the other. // The result of comparing the first non-matching elements is returned. // If both slices are equal until one of them ends, the shorter slice is // considered less than the longer one. // The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2. -// Comparisons involving floating point NaNs are ignored. -func Compare[E constraints.Ordered](s1, s2 []E) int { - s2len := len(s2) +func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int { for i, v1 := range s1 { - if i >= s2len { + if i >= len(s2) { return +1 } v2 := s2[i] - switch { - case v1 < v2: - return -1 - case v1 > v2: - return +1 + if c := cmpCompare(v1, v2); c != 0 { + return c } } - if len(s1) < s2len { + if len(s1) < len(s2) { return -1 } return 0 } -// CompareFunc is like Compare but uses a comparison function -// on each pair of elements. 
The elements are compared in increasing -// index order, and the comparisons stop after the first time cmp -// returns non-zero. +// CompareFunc is like [Compare] but uses a custom comparison function on each +// pair of elements. // The result is the first non-zero result of cmp; if cmp always // returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2), // and +1 if len(s1) > len(s2). -func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int { - s2len := len(s2) +func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int { for i, v1 := range s1 { - if i >= s2len { + if i >= len(s2) { return +1 } v2 := s2[i] @@ -95,7 +84,7 @@ func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int { return c } } - if len(s1) < s2len { + if len(s1) < len(s2) { return -1 } return 0 @@ -103,9 +92,9 @@ func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int { // Index returns the index of the first occurrence of v in s, // or -1 if not present. -func Index[E comparable](s []E, v E) int { - for i, vs := range s { - if v == vs { +func Index[S ~[]E, E comparable](s S, v E) int { + for i := range s { + if v == s[i] { return i } } @@ -114,9 +103,9 @@ func Index[E comparable](s []E, v E) int { // IndexFunc returns the first index i satisfying f(s[i]), // or -1 if none do. -func IndexFunc[E any](s []E, f func(E) bool) int { - for i, v := range s { - if f(v) { +func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int { + for i := range s { + if f(s[i]) { return i } } @@ -124,66 +113,237 @@ func IndexFunc[E any](s []E, f func(E) bool) int { } // Contains reports whether v is present in s. -func Contains[E comparable](s []E, v E) bool { +func Contains[S ~[]E, E comparable](s S, v E) bool { return Index(s, v) >= 0 } // ContainsFunc reports whether at least one // element e of s satisfies f(e). -func ContainsFunc[E any](s []E, f func(E) bool) bool { +func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool { return IndexFunc(s, f) >= 0 } // Insert inserts the values v... into s at index i, // returning the modified slice. -// In the returned slice r, r[i] == v[0]. +// The elements at s[i:] are shifted up to make room. +// In the returned slice r, r[i] == v[0], +// and r[i+len(v)] == value originally at r[i]. // Insert panics if i is out of range. // This function is O(len(s) + len(v)). func Insert[S ~[]E, E any](s S, i int, v ...E) S { - tot := len(s) + len(v) - if tot <= cap(s) { - s2 := s[:tot] - copy(s2[i+len(v):], s[i:]) + m := len(v) + if m == 0 { + return s + } + n := len(s) + if i == n { + return append(s, v...) + } + if n+m > cap(s) { + // Use append rather than make so that we bump the size of + // the slice up to the next storage class. + // This is what Grow does but we don't call Grow because + // that might copy the values twice. + s2 := append(s[:i], make(S, n+m-i)...) copy(s2[i:], v) + copy(s2[i+m:], s[i:]) return s2 } - s2 := make(S, tot) - copy(s2, s[:i]) - copy(s2[i:], v) - copy(s2[i+len(v):], s[i:]) - return s2 + s = s[:n+m] + + // before: + // s: aaaaaaaabbbbccccccccdddd + // ^ ^ ^ ^ + // i i+m n n+m + // after: + // s: aaaaaaaavvvvbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // + // a are the values that don't move in s. + // v are the values copied in from v. + // b and c are the values from s that are shifted up in index. + // d are the values that get overwritten, never to be seen again. + + if !overlaps(v, s[i+m:]) { + // Easy case - v does not overlap either the c or d regions. 
+ // (It might be in some of a or b, or elsewhere entirely.) + // The data we copy up doesn't write to v at all, so just do it. + + copy(s[i+m:], s[i:]) + + // Now we have + // s: aaaaaaaabbbbbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // Note the b values are duplicated. + + copy(s[i:], v) + + // Now we have + // s: aaaaaaaavvvvbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // That's the result we want. + return s + } + + // The hard case - v overlaps c or d. We can't just shift up + // the data because we'd move or clobber the values we're trying + // to insert. + // So instead, write v on top of d, then rotate. + copy(s[n:], v) + + // Now we have + // s: aaaaaaaabbbbccccccccvvvv + // ^ ^ ^ ^ + // i i+m n n+m + + rotateRight(s[i:], m) + + // Now we have + // s: aaaaaaaavvvvbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // That's the result we want. + return s +} + +// clearSlice sets all elements up to the length of s to the zero value of E. +// We may use the builtin clear func instead, and remove clearSlice, when upgrading +// to Go 1.21+. +func clearSlice[S ~[]E, E any](s S) { + var zero E + for i := range s { + s[i] = zero + } } // Delete removes the elements s[i:j] from s, returning the modified slice. -// Delete panics if s[i:j] is not a valid slice of s. -// Delete modifies the contents of the slice s; it does not create a new slice. -// Delete is O(len(s)-j), so if many items must be deleted, it is better to +// Delete panics if j > len(s) or s[i:j] is not a valid slice of s. +// Delete is O(len(s)-i), so if many items must be deleted, it is better to // make a single call deleting them all together than to delete one at a time. -// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those -// elements contain pointers you might consider zeroing those elements so that -// objects they reference can be garbage collected. +// Delete zeroes the elements s[len(s)-(j-i):len(s)]. func Delete[S ~[]E, E any](s S, i, j int) S { - _ = s[i:j] // bounds check + _ = s[i:j:len(s)] // bounds check + + if i == j { + return s + } - return append(s[:i], s[j:]...) + oldlen := len(s) + s = append(s[:i], s[j:]...) + clearSlice(s[len(s):oldlen]) // zero/nil out the obsolete elements, for GC + return s +} + +// DeleteFunc removes any elements from s for which del returns true, +// returning the modified slice. +// DeleteFunc zeroes the elements between the new length and the original length. +func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { + i := IndexFunc(s, del) + if i == -1 { + return s + } + // Don't start copying elements until we find one to delete. + for j := i + 1; j < len(s); j++ { + if v := s[j]; !del(v) { + s[i] = v + i++ + } + } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC + return s[:i] } // Replace replaces the elements s[i:j] by the given v, and returns the // modified slice. Replace panics if s[i:j] is not a valid slice of s. +// When len(v) < (j-i), Replace zeroes the elements between the new length and the original length. func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { _ = s[i:j] // verify that i:j is a valid subslice + + if i == j { + return Insert(s, i, v...) + } + if j == len(s) { + return append(s[:i], v...) + } + tot := len(s[:i]) + len(v) + len(s[j:]) - if tot <= cap(s) { - s2 := s[:tot] - copy(s2[i+len(v):], s[j:]) + if tot > cap(s) { + // Too big to fit, allocate and copy over. + s2 := append(s[:i], make(S, tot-i)...) 
// See Insert copy(s2[i:], v) + copy(s2[i+len(v):], s[j:]) return s2 } - s2 := make(S, tot) - copy(s2, s[:i]) - copy(s2[i:], v) - copy(s2[i+len(v):], s[j:]) - return s2 + + r := s[:tot] + + if i+len(v) <= j { + // Easy, as v fits in the deleted portion. + copy(r[i:], v) + if i+len(v) != j { + copy(r[i+len(v):], s[j:]) + } + clearSlice(s[tot:]) // zero/nil out the obsolete elements, for GC + return r + } + + // We are expanding (v is bigger than j-i). + // The situation is something like this: + // (example has i=4,j=8,len(s)=16,len(v)=6) + // s: aaaaxxxxbbbbbbbbyy + // ^ ^ ^ ^ + // i j len(s) tot + // a: prefix of s + // x: deleted range + // b: more of s + // y: area to expand into + + if !overlaps(r[i+len(v):], v) { + // Easy, as v is not clobbered by the first copy. + copy(r[i+len(v):], s[j:]) + copy(r[i:], v) + return r + } + + // This is a situation where we don't have a single place to which + // we can copy v. Parts of it need to go to two different places. + // We want to copy the prefix of v into y and the suffix into x, then + // rotate |y| spots to the right. + // + // v[2:] v[:2] + // | | + // s: aaaavvvvbbbbbbbbvv + // ^ ^ ^ ^ + // i j len(s) tot + // + // If either of those two destinations don't alias v, then we're good. + y := len(v) - (j - i) // length of y portion + + if !overlaps(r[i:j], v) { + copy(r[i:j], v[y:]) + copy(r[len(s):], v[:y]) + rotateRight(r[i:], y) + return r + } + if !overlaps(r[len(s):], v) { + copy(r[len(s):], v[:y]) + copy(r[i:j], v[y:]) + rotateRight(r[i:], y) + return r + } + + // Now we know that v overlaps both x and y. + // That means that the entirety of b is *inside* v. + // So we don't need to preserve b at all; instead we + // can copy v first, then copy the b part of v out of + // v to the right destination. + k := startIdx(v, s[j:]) + copy(r[i:], v) + copy(r[i+len(v):], r[i+k:]) + return r } // Clone returns a copy of the slice. @@ -198,40 +358,43 @@ func Clone[S ~[]E, E any](s S) S { // Compact replaces consecutive runs of equal elements with a single copy. // This is like the uniq command found on Unix. -// Compact modifies the contents of the slice s; it does not create a new slice. -// When Compact discards m elements in total, it might not modify the elements -// s[len(s)-m:len(s)]. If those elements contain pointers you might consider -// zeroing those elements so that objects they reference can be garbage collected. +// Compact modifies the contents of the slice s and returns the modified slice, +// which may have a smaller length. +// Compact zeroes the elements between the new length and the original length. func Compact[S ~[]E, E comparable](s S) S { if len(s) < 2 { return s } i := 1 - last := s[0] - for _, v := range s[1:] { - if v != last { - s[i] = v + for k := 1; k < len(s); k++ { + if s[k] != s[k-1] { + if i != k { + s[i] = s[k] + } i++ - last = v } } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC return s[:i] } -// CompactFunc is like Compact but uses a comparison function. +// CompactFunc is like [Compact] but uses an equality function to compare elements. +// For runs of elements that compare equal, CompactFunc keeps the first one. +// CompactFunc zeroes the elements between the new length and the original length. 
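Taken together, the rewritten Insert, Delete, DeleteFunc, and Replace above change two observable behaviors: a v argument that aliases s is now handled correctly, and obsolete tail elements are zeroed so the garbage collector can reclaim what they referenced. A short usage sketch, assuming this vendored module is imported as golang.org/x/exp/slices (the ptr helper is ours):

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func ptr(v int) *int { return &v }

func main() {
	s := []*int{ptr(1), ptr(2), ptr(3), ptr(4)}

	// DeleteFunc compacts in place and zeroes the obsolete tail,
	// so the dropped pointers no longer pin their targets for GC.
	s = slices.DeleteFunc(s, func(p *int) bool { return *p%2 == 0 })
	fmt.Println(*s[0], *s[1])                 // 1 3
	fmt.Println(s[:cap(s)][2], s[:cap(s)][3]) // <nil> <nil>

	// Insert now tolerates v aliasing s: with spare capacity, the
	// copy of v is first written past len(s), then rotated into place.
	t := make([]string, 0, 8)
	t = append(t, "a", "b", "c", "d")
	t = slices.Insert(t, 0, t[2:]...)
	fmt.Println(t) // [c d a b c d]
}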
func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { if len(s) < 2 { return s } i := 1 - last := s[0] - for _, v := range s[1:] { - if !eq(v, last) { - s[i] = v + for k := 1; k < len(s); k++ { + if !eq(s[k], s[k-1]) { + if i != k { + s[i] = s[k] + } i++ - last = v } } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC return s[:i] } @@ -256,3 +419,97 @@ func Grow[S ~[]E, E any](s S, n int) S { func Clip[S ~[]E, E any](s S) S { return s[:len(s):len(s)] } + +// Rotation algorithm explanation: +// +// rotate left by 2 +// start with +// 0123456789 +// split up like this +// 01 234567 89 +// swap first 2 and last 2 +// 89 234567 01 +// join first parts +// 89234567 01 +// recursively rotate first left part by 2 +// 23456789 01 +// join at the end +// 2345678901 +// +// rotate left by 8 +// start with +// 0123456789 +// split up like this +// 01 234567 89 +// swap first 2 and last 2 +// 89 234567 01 +// join last parts +// 89 23456701 +// recursively rotate second part left by 6 +// 89 01234567 +// join at the end +// 8901234567 + +// TODO: There are other rotate algorithms. +// This algorithm has the desirable property that it moves each element exactly twice. +// The triple-reverse algorithm is simpler and more cache friendly, but takes more writes. +// The follow-cycles algorithm can be 1-write but it is not very cache friendly. + +// rotateLeft rotates b left by n spaces. +// s_final[i] = s_orig[i+r], wrapping around. +func rotateLeft[E any](s []E, r int) { + for r != 0 && r != len(s) { + if r*2 <= len(s) { + swap(s[:r], s[len(s)-r:]) + s = s[:len(s)-r] + } else { + swap(s[:len(s)-r], s[r:]) + s, r = s[len(s)-r:], r*2-len(s) + } + } +} +func rotateRight[E any](s []E, r int) { + rotateLeft(s, len(s)-r) +} + +// swap swaps the contents of x and y. x and y must be equal length and disjoint. +func swap[E any](x, y []E) { + for i := 0; i < len(x); i++ { + x[i], y[i] = y[i], x[i] + } +} + +// overlaps reports whether the memory ranges a[0:len(a)] and b[0:len(b)] overlap. +func overlaps[E any](a, b []E) bool { + if len(a) == 0 || len(b) == 0 { + return false + } + elemSize := unsafe.Sizeof(a[0]) + if elemSize == 0 { + return false + } + // TODO: use a runtime/unsafe facility once one becomes available. See issue 12445. + // Also see crypto/internal/alias/alias.go:AnyOverlap + return uintptr(unsafe.Pointer(&a[0])) <= uintptr(unsafe.Pointer(&b[len(b)-1]))+(elemSize-1) && + uintptr(unsafe.Pointer(&b[0])) <= uintptr(unsafe.Pointer(&a[len(a)-1]))+(elemSize-1) +} + +// startIdx returns the index in haystack where the needle starts. +// prerequisite: the needle must be aliased entirely inside the haystack. +func startIdx[E any](haystack, needle []E) int { + p := &needle[0] + for i := range haystack { + if p == &haystack[i] { + return i + } + } + // TODO: what if the overlap is by a non-integral number of Es? + panic("needle not found") +} + +// Reverse reverses the elements of the slice in place. +func Reverse[S ~[]E, E any](s S) { + for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { + s[i], s[j] = s[j], s[i] + } +} diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go index f14f40da..b67897f7 100644 --- a/vendor/golang.org/x/exp/slices/sort.go +++ b/vendor/golang.org/x/exp/slices/sort.go @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
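The swap-based rotation explained in the comment block of the slices.go hunk above moves each element at most twice, which is why Insert and Replace prefer it over the simpler triple-reverse approach. A minimal standalone sketch of the same algorithm (rotateLeft and swap mirror the vendored helpers) with a worked rotation:

package main

import "fmt"

// rotateLeft mirrors the vendored helper above: repeatedly swap the
// shorter end block with the block at the far end, shrinking the
// unrotated region, so each element is moved at most twice.
func rotateLeft[E any](s []E, r int) {
	for r != 0 && r != len(s) {
		if r*2 <= len(s) {
			swap(s[:r], s[len(s)-r:])
			s = s[:len(s)-r]
		} else {
			swap(s[:len(s)-r], s[r:])
			s, r = s[len(s)-r:], r*2-len(s)
		}
	}
}

// swap exchanges the contents of two equal-length, disjoint slices.
func swap[E any](x, y []E) {
	for i := range x {
		x[i], y[i] = y[i], x[i]
	}
}

func main() {
	s := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
	rotateLeft(s, 2) // s_final[i] = s_orig[i+2], wrapping around
	fmt.Println(s)   // [2 3 4 5 6 7 8 9 0 1]
}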
+//go:generate go run $GOROOT/src/sort/gen_sort_variants.go -exp + package slices import ( @@ -11,57 +13,116 @@ import ( ) // Sort sorts a slice of any ordered type in ascending order. -// Sort may fail to sort correctly when sorting slices of floating-point -// numbers containing Not-a-number (NaN) values. -// Use slices.SortFunc(x, func(a, b float64) bool {return a < b || (math.IsNaN(a) && !math.IsNaN(b))}) -// instead if the input may contain NaNs. -func Sort[E constraints.Ordered](x []E) { +// When sorting floating-point numbers, NaNs are ordered before other values. +func Sort[S ~[]E, E constraints.Ordered](x S) { n := len(x) pdqsortOrdered(x, 0, n, bits.Len(uint(n))) } -// SortFunc sorts the slice x in ascending order as determined by the less function. -// This sort is not guaranteed to be stable. +// SortFunc sorts the slice x in ascending order as determined by the cmp +// function. This sort is not guaranteed to be stable. +// cmp(a, b) should return a negative number when a < b, a positive number when +// a > b and zero when a == b. // -// SortFunc requires that less is a strict weak ordering. +// SortFunc requires that cmp is a strict weak ordering. // See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings. -func SortFunc[E any](x []E, less func(a, b E) bool) { +func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { n := len(x) - pdqsortLessFunc(x, 0, n, bits.Len(uint(n)), less) + pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp) } // SortStableFunc sorts the slice x while keeping the original order of equal -// elements, using less to compare elements. -func SortStableFunc[E any](x []E, less func(a, b E) bool) { - stableLessFunc(x, len(x), less) +// elements, using cmp to compare elements in the same way as [SortFunc]. +func SortStableFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { + stableCmpFunc(x, len(x), cmp) } // IsSorted reports whether x is sorted in ascending order. -func IsSorted[E constraints.Ordered](x []E) bool { +func IsSorted[S ~[]E, E constraints.Ordered](x S) bool { for i := len(x) - 1; i > 0; i-- { - if x[i] < x[i-1] { + if cmpLess(x[i], x[i-1]) { return false } } return true } -// IsSortedFunc reports whether x is sorted in ascending order, with less as the -// comparison function. -func IsSortedFunc[E any](x []E, less func(a, b E) bool) bool { +// IsSortedFunc reports whether x is sorted in ascending order, with cmp as the +// comparison function as defined by [SortFunc]. +func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool { for i := len(x) - 1; i > 0; i-- { - if less(x[i], x[i-1]) { + if cmp(x[i], x[i-1]) < 0 { return false } } return true } +// Min returns the minimal value in x. It panics if x is empty. +// For floating-point numbers, Min propagates NaNs (any NaN value in x +// forces the output to be NaN). +func Min[S ~[]E, E constraints.Ordered](x S) E { + if len(x) < 1 { + panic("slices.Min: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + m = min(m, x[i]) + } + return m +} + +// MinFunc returns the minimal value in x, using cmp to compare elements. +// It panics if x is empty. If there is more than one minimal element +// according to the cmp function, MinFunc returns the first one. +func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { + if len(x) < 1 { + panic("slices.MinFunc: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + if cmp(x[i], m) < 0 { + m = x[i] + } + } + return m +} + +// Max returns the maximal value in x. It panics if x is empty. 
+// For floating-point E, Max propagates NaNs (any NaN value in x +// forces the output to be NaN). +func Max[S ~[]E, E constraints.Ordered](x S) E { + if len(x) < 1 { + panic("slices.Max: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + m = max(m, x[i]) + } + return m +} + +// MaxFunc returns the maximal value in x, using cmp to compare elements. +// It panics if x is empty. If there is more than one maximal element +// according to the cmp function, MaxFunc returns the first one. +func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { + if len(x) < 1 { + panic("slices.MaxFunc: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + if cmp(x[i], m) > 0 { + m = x[i] + } + } + return m +} + // BinarySearch searches for target in a sorted slice and returns the position // where target is found, or the position where target would appear in the // sort order; it also returns a bool saying whether the target is really found // in the slice. The slice must be sorted in increasing order. -func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) { +func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) { // Inlining is faster than calling BinarySearchFunc with a lambda. n := len(x) // Define x[-1] < target and x[n] >= target. @@ -70,22 +131,24 @@ func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) { for i < j { h := int(uint(i+j) >> 1) // avoid overflow when computing h // i ≤ h < j - if x[h] < target { + if cmpLess(x[h], target) { i = h + 1 // preserves x[i-1] < target } else { j = h // preserves x[j] >= target } } // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i. - return i, i < n && x[i] == target + return i, i < n && (x[i] == target || (isNaN(x[i]) && isNaN(target))) } -// BinarySearchFunc works like BinarySearch, but uses a custom comparison -// function. The slice must be sorted in increasing order, where "increasing" is -// defined by cmp. cmp(a, b) is expected to return an integer comparing the two -// parameters: 0 if a == b, a negative number if a < b and a positive number if -// a > b. -func BinarySearchFunc[E, T any](x []E, target T, cmp func(E, T) int) (int, bool) { +// BinarySearchFunc works like [BinarySearch], but uses a custom comparison +// function. The slice must be sorted in increasing order, where "increasing" +// is defined by cmp. cmp should return 0 if the slice element matches +// the target, a negative number if the slice element precedes the target, +// or a positive number if the slice element follows the target. +// cmp must implement the same ordering as the slice, such that if +// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice. +func BinarySearchFunc[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool) { n := len(x) // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 . // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0. @@ -124,3 +187,9 @@ func (r *xorshift) Next() uint64 { func nextPowerOfTwo(length int) uint { return 1 << bits.Len(uint(length)) } + +// isNaN reports whether x is a NaN without requiring the math package. +// This will always return false if T is not floating-point. 
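The sort.go hunks above move the whole API from less(a, b) bool comparators to three-way cmp(a, b) int comparators and give NaNs a defined position. A usage sketch of the new signatures; the user type and comparators here are invented for illustration:

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

type user struct {
	name string
	age  int
}

// byAge is a three-way comparator: negative when a < b, positive when
// a > b, zero when equal. Plain subtraction is safe here because ages
// are small non-negative ints.
func byAge(a, b user) int { return a.age - b.age }

func main() {
	users := []user{{"carol", 52}, {"alice", 30}, {"bob", 30}}

	slices.SortStableFunc(users, byAge)
	fmt.Println(users) // [{alice 30} {bob 30} {carol 52}] (ties keep input order)

	// BinarySearchFunc lets the target be a different type than the
	// element, as long as cmp implements the same ordering as the slice.
	i, found := slices.BinarySearchFunc(users, 52, func(u user, target int) int {
		return u.age - target
	})
	fmt.Println(i, found) // 2 true
}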
+func isNaN[T constraints.Ordered](x T) bool { + return x != x +} diff --git a/vendor/golang.org/x/exp/slices/zsortfunc.go b/vendor/golang.org/x/exp/slices/zsortanyfunc.go similarity index 64% rename from vendor/golang.org/x/exp/slices/zsortfunc.go rename to vendor/golang.org/x/exp/slices/zsortanyfunc.go index 2a632476..06f2c7a2 100644 --- a/vendor/golang.org/x/exp/slices/zsortfunc.go +++ b/vendor/golang.org/x/exp/slices/zsortanyfunc.go @@ -6,28 +6,28 @@ package slices -// insertionSortLessFunc sorts data[a:b] using insertion sort. -func insertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { +// insertionSortCmpFunc sorts data[a:b] using insertion sort. +func insertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { for i := a + 1; i < b; i++ { - for j := i; j > a && less(data[j], data[j-1]); j-- { + for j := i; j > a && (cmp(data[j], data[j-1]) < 0); j-- { data[j], data[j-1] = data[j-1], data[j] } } } -// siftDownLessFunc implements the heap property on data[lo:hi]. +// siftDownCmpFunc implements the heap property on data[lo:hi]. // first is an offset into the array where the root of the heap lies. -func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool) { +func siftDownCmpFunc[E any](data []E, lo, hi, first int, cmp func(a, b E) int) { root := lo for { child := 2*root + 1 if child >= hi { break } - if child+1 < hi && less(data[first+child], data[first+child+1]) { + if child+1 < hi && (cmp(data[first+child], data[first+child+1]) < 0) { child++ } - if !less(data[first+root], data[first+child]) { + if !(cmp(data[first+root], data[first+child]) < 0) { return } data[first+root], data[first+child] = data[first+child], data[first+root] @@ -35,30 +35,30 @@ func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool } } -func heapSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { +func heapSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { first := a lo := 0 hi := b - a // Build heap with greatest element at top. for i := (hi - 1) / 2; i >= 0; i-- { - siftDownLessFunc(data, i, hi, first, less) + siftDownCmpFunc(data, i, hi, first, cmp) } // Pop elements, largest first, into end of data. for i := hi - 1; i >= 0; i-- { data[first], data[first+i] = data[first+i], data[first] - siftDownLessFunc(data, lo, i, first, less) + siftDownCmpFunc(data, lo, i, first, cmp) } } -// pdqsortLessFunc sorts data[a:b]. +// pdqsortCmpFunc sorts data[a:b]. // The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. // pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf // C++ implementation: https://github.com/orlp/pdqsort // Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ // limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. -func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) { +func pdqsortCmpFunc[E any](data []E, a, b, limit int, cmp func(a, b E) int) { const maxInsertion = 12 var ( @@ -70,25 +70,25 @@ func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) { length := b - a if length <= maxInsertion { - insertionSortLessFunc(data, a, b, less) + insertionSortCmpFunc(data, a, b, cmp) return } // Fall back to heapsort if too many bad choices were made. if limit == 0 { - heapSortLessFunc(data, a, b, less) + heapSortCmpFunc(data, a, b, cmp) return } // If the last partitioning was imbalanced, we need to breaking patterns. 
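pdqsortCmpFunc above is a pattern-defeating quicksort: plain insertion sort below 12 elements, heapsort once the limit of unbalanced pivots is exhausted, and quicksort with pattern breaking otherwise. As the Sort functions earlier in this file show, that limit is seeded with bits.Len(uint(n)), roughly log2(n), which is what bounds the worst case at O(n log n). A quick illustration of the budget:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// Sort passes bits.Len(uint(n)) as the pdqsort limit: the number
	// of badly unbalanced pivots tolerated before heapsort takes over.
	for _, n := range []uint{10, 1_000, 1_000_000} {
		fmt.Printf("n=%d allows %d bad pivots\n", n, bits.Len(n))
	}
	// n=10 allows 4 bad pivots
	// n=1000 allows 10 bad pivots
	// n=1000000 allows 20 bad pivots
}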
if !wasBalanced { - breakPatternsLessFunc(data, a, b, less) + breakPatternsCmpFunc(data, a, b, cmp) limit-- } - pivot, hint := choosePivotLessFunc(data, a, b, less) + pivot, hint := choosePivotCmpFunc(data, a, b, cmp) if hint == decreasingHint { - reverseRangeLessFunc(data, a, b, less) + reverseRangeCmpFunc(data, a, b, cmp) // The chosen pivot was pivot-a elements after the start of the array. // After reversing it is pivot-a elements before the end of the array. // The idea came from Rust's implementation. @@ -98,48 +98,48 @@ func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) { // The slice is likely already sorted. if wasBalanced && wasPartitioned && hint == increasingHint { - if partialInsertionSortLessFunc(data, a, b, less) { + if partialInsertionSortCmpFunc(data, a, b, cmp) { return } } // Probably the slice contains many duplicate elements, partition the slice into // elements equal to and elements greater than the pivot. - if a > 0 && !less(data[a-1], data[pivot]) { - mid := partitionEqualLessFunc(data, a, b, pivot, less) + if a > 0 && !(cmp(data[a-1], data[pivot]) < 0) { + mid := partitionEqualCmpFunc(data, a, b, pivot, cmp) a = mid continue } - mid, alreadyPartitioned := partitionLessFunc(data, a, b, pivot, less) + mid, alreadyPartitioned := partitionCmpFunc(data, a, b, pivot, cmp) wasPartitioned = alreadyPartitioned leftLen, rightLen := mid-a, b-mid balanceThreshold := length / 8 if leftLen < rightLen { wasBalanced = leftLen >= balanceThreshold - pdqsortLessFunc(data, a, mid, limit, less) + pdqsortCmpFunc(data, a, mid, limit, cmp) a = mid + 1 } else { wasBalanced = rightLen >= balanceThreshold - pdqsortLessFunc(data, mid+1, b, limit, less) + pdqsortCmpFunc(data, mid+1, b, limit, cmp) b = mid } } } -// partitionLessFunc does one quicksort partition. +// partitionCmpFunc does one quicksort partition. // Let p = data[pivot] // Moves elements in data[a:b] around, so that data[i]

<p and data[j]>=p for i<newpivot and j>newpivot.
 // On return, data[newpivot] = p
-func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int, alreadyPartitioned bool) {
+func partitionCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int, alreadyPartitioned bool) {
 	data[a], data[pivot] = data[pivot], data[a]
 	i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
 
-	for i <= j && less(data[i], data[a]) {
+	for i <= j && (cmp(data[i], data[a]) < 0) {
 		i++
 	}
-	for i <= j && !less(data[j], data[a]) {
+	for i <= j && !(cmp(data[j], data[a]) < 0) {
 		j--
 	}
 	if i > j {
@@ -151,10 +151,10 @@ func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool)
 	j--
 
 	for {
-		for i <= j && less(data[i], data[a]) {
+		for i <= j && (cmp(data[i], data[a]) < 0) {
 			i++
 		}
-		for i <= j && !less(data[j], data[a]) {
+		for i <= j && !(cmp(data[j], data[a]) < 0) {
 			j--
 		}
 		if i > j {
@@ -168,17 +168,17 @@ func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool)
 	return j, false
 }
 
-// partitionEqualLessFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
+// partitionEqualCmpFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
 // It assumed that data[a:b] does not contain elements smaller than the data[pivot].
-func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int) {
+func partitionEqualCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int) {
 	data[a], data[pivot] = data[pivot], data[a]
 	i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
 
 	for {
-		for i <= j && !less(data[a], data[i]) {
+		for i <= j && !(cmp(data[a], data[i]) < 0) {
 			i++
 		}
-		for i <= j && less(data[a], data[j]) {
+		for i <= j && (cmp(data[a], data[j]) < 0) {
 			j--
 		}
 		if i > j {
@@ -191,15 +191,15 @@ func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E)
 	return i
 }
 
-// partialInsertionSortLessFunc partially sorts a slice, returns true if the slice is sorted at the end.
-func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) bool {
+// partialInsertionSortCmpFunc partially sorts a slice, returns true if the slice is sorted at the end.
+func partialInsertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) bool {
 	const (
 		maxSteps         = 5  // maximum number of adjacent out-of-order pairs that will get shifted
 		shortestShifting = 50 // don't shift any elements on short arrays
 	)
 	i := a + 1
 	for j := 0; j < maxSteps; j++ {
-		for i < b && !less(data[i], data[i-1]) {
+		for i < b && !(cmp(data[i], data[i-1]) < 0) {
 			i++
 		}
 
@@ -216,7 +216,7 @@ func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) b
 		// Shift the smaller one to the left.
 		if i-a >= 2 {
 			for j := i - 1; j >= 1; j-- {
-				if !less(data[j], data[j-1]) {
+				if !(cmp(data[j], data[j-1]) < 0) {
 					break
 				}
 				data[j], data[j-1] = data[j-1], data[j]
@@ -225,7 +225,7 @@ func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) b
 		// Shift the greater one to the right.
if b-i >= 2 { for j := i + 1; j < b; j++ { - if !less(data[j], data[j-1]) { + if !(cmp(data[j], data[j-1]) < 0) { break } data[j], data[j-1] = data[j-1], data[j] @@ -235,9 +235,9 @@ func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) b return false } -// breakPatternsLessFunc scatters some elements around in an attempt to break some patterns +// breakPatternsCmpFunc scatters some elements around in an attempt to break some patterns // that might cause imbalanced partitions in quicksort. -func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { +func breakPatternsCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { length := b - a if length >= 8 { random := xorshift(length) @@ -253,12 +253,12 @@ func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { } } -// choosePivotLessFunc chooses a pivot in data[a:b]. +// choosePivotCmpFunc chooses a pivot in data[a:b]. // // [0,8): chooses a static pivot. // [8,shortestNinther): uses the simple median-of-three method. // [shortestNinther,∞): uses the Tukey ninther method. -func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (pivot int, hint sortedHint) { +func choosePivotCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) (pivot int, hint sortedHint) { const ( shortestNinther = 50 maxSwaps = 4 * 3 @@ -276,12 +276,12 @@ func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (piv if l >= 8 { if l >= shortestNinther { // Tukey ninther method, the idea came from Rust's implementation. - i = medianAdjacentLessFunc(data, i, &swaps, less) - j = medianAdjacentLessFunc(data, j, &swaps, less) - k = medianAdjacentLessFunc(data, k, &swaps, less) + i = medianAdjacentCmpFunc(data, i, &swaps, cmp) + j = medianAdjacentCmpFunc(data, j, &swaps, cmp) + k = medianAdjacentCmpFunc(data, k, &swaps, cmp) } // Find the median among i, j, k and stores it into j. - j = medianLessFunc(data, i, j, k, &swaps, less) + j = medianCmpFunc(data, i, j, k, &swaps, cmp) } switch swaps { @@ -294,29 +294,29 @@ func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (piv } } -// order2LessFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. -func order2LessFunc[E any](data []E, a, b int, swaps *int, less func(a, b E) bool) (int, int) { - if less(data[b], data[a]) { +// order2CmpFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. +func order2CmpFunc[E any](data []E, a, b int, swaps *int, cmp func(a, b E) int) (int, int) { + if cmp(data[b], data[a]) < 0 { *swaps++ return b, a } return a, b } -// medianLessFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. -func medianLessFunc[E any](data []E, a, b, c int, swaps *int, less func(a, b E) bool) int { - a, b = order2LessFunc(data, a, b, swaps, less) - b, c = order2LessFunc(data, b, c, swaps, less) - a, b = order2LessFunc(data, a, b, swaps, less) +// medianCmpFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. +func medianCmpFunc[E any](data []E, a, b, c int, swaps *int, cmp func(a, b E) int) int { + a, b = order2CmpFunc(data, a, b, swaps, cmp) + b, c = order2CmpFunc(data, b, c, swaps, cmp) + a, b = order2CmpFunc(data, a, b, swaps, cmp) return b } -// medianAdjacentLessFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. 
-func medianAdjacentLessFunc[E any](data []E, a int, swaps *int, less func(a, b E) bool) int { - return medianLessFunc(data, a-1, a, a+1, swaps, less) +// medianAdjacentCmpFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. +func medianAdjacentCmpFunc[E any](data []E, a int, swaps *int, cmp func(a, b E) int) int { + return medianCmpFunc(data, a-1, a, a+1, swaps, cmp) } -func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { +func reverseRangeCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { i := a j := b - 1 for i < j { @@ -326,37 +326,37 @@ func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { } } -func swapRangeLessFunc[E any](data []E, a, b, n int, less func(a, b E) bool) { +func swapRangeCmpFunc[E any](data []E, a, b, n int, cmp func(a, b E) int) { for i := 0; i < n; i++ { data[a+i], data[b+i] = data[b+i], data[a+i] } } -func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) { +func stableCmpFunc[E any](data []E, n int, cmp func(a, b E) int) { blockSize := 20 // must be > 0 a, b := 0, blockSize for b <= n { - insertionSortLessFunc(data, a, b, less) + insertionSortCmpFunc(data, a, b, cmp) a = b b += blockSize } - insertionSortLessFunc(data, a, n, less) + insertionSortCmpFunc(data, a, n, cmp) for blockSize < n { a, b = 0, 2*blockSize for b <= n { - symMergeLessFunc(data, a, a+blockSize, b, less) + symMergeCmpFunc(data, a, a+blockSize, b, cmp) a = b b += 2 * blockSize } if m := a + blockSize; m < n { - symMergeLessFunc(data, a, m, n, less) + symMergeCmpFunc(data, a, m, n, cmp) } blockSize *= 2 } } -// symMergeLessFunc merges the two sorted subsequences data[a:m] and data[m:b] using +// symMergeCmpFunc merges the two sorted subsequences data[a:m] and data[m:b] using // the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum // Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz // Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in @@ -375,7 +375,7 @@ func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) { // symMerge assumes non-degenerate arguments: a < m && m < b. // Having the caller check this condition eliminates many leaf recursion calls, // which improves performance. -func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { +func symMergeCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) { // Avoid unnecessary recursions of symMerge // by direct insertion of data[a] into data[m:b] // if data[a:m] only contains one element. 
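choosePivotCmpFunc above selects pivots with a median-of-three network (and, for longer ranges, Tukey's ninther: the median of three such medians). A minimal sketch of the three-comparison network, with names of our own, mirroring order2CmpFunc and medianCmpFunc:

package main

import "fmt"

// median3 returns the index of the median of data[a], data[b], and
// data[c] using three compare-and-order steps.
func median3[E any](data []E, a, b, c int, cmp func(E, E) int) int {
	order2 := func(x, y int) (int, int) {
		if cmp(data[y], data[x]) < 0 {
			return y, x // ensure data[first] <= data[second]
		}
		return x, y
	}
	a, b = order2(a, b) // data[a] <= data[b]
	b, c = order2(b, c) // data[b] <= data[c]
	a, b = order2(a, b) // restore data[a] <= data[b]
	return b            // b now indexes the median of the three
}

func main() {
	data := []int{9, 1, 5}
	cmp := func(x, y int) int { return x - y }
	fmt.Println(median3(data, 0, 1, 2, cmp)) // 2: data[2] == 5 is the median
}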
@@ -387,7 +387,7 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { j := b for i < j { h := int(uint(i+j) >> 1) - if less(data[h], data[a]) { + if cmp(data[h], data[a]) < 0 { i = h + 1 } else { j = h @@ -411,7 +411,7 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { j := m for i < j { h := int(uint(i+j) >> 1) - if !less(data[m], data[h]) { + if !(cmp(data[m], data[h]) < 0) { i = h + 1 } else { j = h @@ -438,7 +438,7 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { for start < r { c := int(uint(start+r) >> 1) - if !less(data[p-c], data[c]) { + if !(cmp(data[p-c], data[c]) < 0) { start = c + 1 } else { r = c @@ -447,33 +447,33 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { end := n - start if start < m && m < end { - rotateLessFunc(data, start, m, end, less) + rotateCmpFunc(data, start, m, end, cmp) } if a < start && start < mid { - symMergeLessFunc(data, a, start, mid, less) + symMergeCmpFunc(data, a, start, mid, cmp) } if mid < end && end < b { - symMergeLessFunc(data, mid, end, b, less) + symMergeCmpFunc(data, mid, end, b, cmp) } } -// rotateLessFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: +// rotateCmpFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: // Data of the form 'x u v y' is changed to 'x v u y'. // rotate performs at most b-a many calls to data.Swap, // and it assumes non-degenerate arguments: a < m && m < b. -func rotateLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { +func rotateCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) { i := m - a j := b - m for i != j { if i > j { - swapRangeLessFunc(data, m-i, m, j, less) + swapRangeCmpFunc(data, m-i, m, j, cmp) i -= j } else { - swapRangeLessFunc(data, m-i, m+j-i, i, less) + swapRangeCmpFunc(data, m-i, m+j-i, i, cmp) j -= i } } // i == j - swapRangeLessFunc(data, m-i, m, i, less) + swapRangeCmpFunc(data, m-i, m, i, cmp) } diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go index efaa1c8b..99b47c39 100644 --- a/vendor/golang.org/x/exp/slices/zsortordered.go +++ b/vendor/golang.org/x/exp/slices/zsortordered.go @@ -11,7 +11,7 @@ import "golang.org/x/exp/constraints" // insertionSortOrdered sorts data[a:b] using insertion sort. func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) { for i := a + 1; i < b; i++ { - for j := i; j > a && (data[j] < data[j-1]); j-- { + for j := i; j > a && cmpLess(data[j], data[j-1]); j-- { data[j], data[j-1] = data[j-1], data[j] } } @@ -26,10 +26,10 @@ func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) { if child >= hi { break } - if child+1 < hi && (data[first+child] < data[first+child+1]) { + if child+1 < hi && cmpLess(data[first+child], data[first+child+1]) { child++ } - if !(data[first+root] < data[first+child]) { + if !cmpLess(data[first+root], data[first+child]) { return } data[first+root], data[first+child] = data[first+child], data[first+root] @@ -107,7 +107,7 @@ func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) { // Probably the slice contains many duplicate elements, partition the slice into // elements equal to and elements greater than the pivot. 
- if a > 0 && !(data[a-1] < data[pivot]) { + if a > 0 && !cmpLess(data[a-1], data[pivot]) { mid := partitionEqualOrdered(data, a, b, pivot) a = mid continue @@ -138,10 +138,10 @@ func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivo data[a], data[pivot] = data[pivot], data[a] i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned - for i <= j && (data[i] < data[a]) { + for i <= j && cmpLess(data[i], data[a]) { i++ } - for i <= j && !(data[j] < data[a]) { + for i <= j && !cmpLess(data[j], data[a]) { j-- } if i > j { @@ -153,10 +153,10 @@ func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivo j-- for { - for i <= j && (data[i] < data[a]) { + for i <= j && cmpLess(data[i], data[a]) { i++ } - for i <= j && !(data[j] < data[a]) { + for i <= j && !cmpLess(data[j], data[a]) { j-- } if i > j { @@ -177,10 +177,10 @@ func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (ne i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned for { - for i <= j && !(data[a] < data[i]) { + for i <= j && !cmpLess(data[a], data[i]) { i++ } - for i <= j && (data[a] < data[j]) { + for i <= j && cmpLess(data[a], data[j]) { j-- } if i > j { @@ -201,7 +201,7 @@ func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool ) i := a + 1 for j := 0; j < maxSteps; j++ { - for i < b && !(data[i] < data[i-1]) { + for i < b && !cmpLess(data[i], data[i-1]) { i++ } @@ -218,7 +218,7 @@ func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool // Shift the smaller one to the left. if i-a >= 2 { for j := i - 1; j >= 1; j-- { - if !(data[j] < data[j-1]) { + if !cmpLess(data[j], data[j-1]) { break } data[j], data[j-1] = data[j-1], data[j] @@ -227,7 +227,7 @@ func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool // Shift the greater one to the right. if b-i >= 2 { for j := i + 1; j < b; j++ { - if !(data[j] < data[j-1]) { + if !cmpLess(data[j], data[j-1]) { break } data[j], data[j-1] = data[j-1], data[j] @@ -298,7 +298,7 @@ func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, h // order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. 
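Every raw < comparison in the ordered variants above becomes a cmpLess call. cmpLess itself is defined outside this hunk; based on the cmpCompare helper earlier in the diff and the standard library's cmp.Less, it presumably reads like the following sketch (not the vendored source):

package slicescheck

import "golang.org/x/exp/constraints"

// isNaN matches the helper added in sort.go above: only a NaN
// compares unequal to itself, and non-float types always return false.
func isNaN[T constraints.Ordered](x T) bool { return x != x }

// cmpLess orders NaNs before all other values, then falls back to <,
// so the ordered sorts agree with Sort's documented NaN placement.
func cmpLess[T constraints.Ordered](x, y T) bool {
	return (isNaN(x) && !isNaN(y)) || x < y
}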
func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) { - if data[b] < data[a] { + if cmpLess(data[b], data[a]) { *swaps++ return b, a } @@ -389,7 +389,7 @@ func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { j := b for i < j { h := int(uint(i+j) >> 1) - if data[h] < data[a] { + if cmpLess(data[h], data[a]) { i = h + 1 } else { j = h @@ -413,7 +413,7 @@ func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { j := m for i < j { h := int(uint(i+j) >> 1) - if !(data[m] < data[h]) { + if !cmpLess(data[m], data[h]) { i = h + 1 } else { j = h @@ -440,7 +440,7 @@ func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { for start < r { c := int(uint(start+r) >> 1) - if !(data[p-c] < data[c]) { + if !cmpLess(data[p-c], data[c]) { start = c + 1 } else { r = c diff --git a/vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go b/vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go index 2681af35..150f887e 100644 --- a/vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go +++ b/vendor/golang.org/x/mod/internal/lazyregexp/lazyre.go @@ -13,7 +13,7 @@ import ( "sync" ) -// Regexp is a wrapper around regexp.Regexp, where the underlying regexp will be +// Regexp is a wrapper around [regexp.Regexp], where the underlying regexp will be // compiled the first time it is needed. type Regexp struct { str string diff --git a/vendor/golang.org/x/mod/module/module.go b/vendor/golang.org/x/mod/module/module.go index e9dec6e6..2a364b22 100644 --- a/vendor/golang.org/x/mod/module/module.go +++ b/vendor/golang.org/x/mod/module/module.go @@ -4,7 +4,7 @@ // Package module defines the module.Version type along with support code. // -// The module.Version type is a simple Path, Version pair: +// The [module.Version] type is a simple Path, Version pair: // // type Version struct { // Path string @@ -12,7 +12,7 @@ // } // // There are no restrictions imposed directly by use of this structure, -// but additional checking functions, most notably Check, verify that +// but additional checking functions, most notably [Check], verify that // a particular path, version pair is valid. // // # Escaped Paths @@ -140,7 +140,7 @@ type ModuleError struct { Err error } -// VersionError returns a ModuleError derived from a Version and error, +// VersionError returns a [ModuleError] derived from a [Version] and error, // or err itself if it is already such an error. func VersionError(v Version, err error) error { var mErr *ModuleError @@ -169,7 +169,7 @@ func (e *ModuleError) Unwrap() error { return e.Err } // An InvalidVersionError indicates an error specific to a version, with the // module path unknown or specified externally. // -// A ModuleError may wrap an InvalidVersionError, but an InvalidVersionError +// A [ModuleError] may wrap an InvalidVersionError, but an InvalidVersionError // must not wrap a ModuleError. type InvalidVersionError struct { Version string @@ -193,8 +193,8 @@ func (e *InvalidVersionError) Error() string { func (e *InvalidVersionError) Unwrap() error { return e.Err } // An InvalidPathError indicates a module, import, or file path doesn't -// satisfy all naming constraints. See CheckPath, CheckImportPath, -// and CheckFilePath for specific restrictions. +// satisfy all naming constraints. See [CheckPath], [CheckImportPath], +// and [CheckFilePath] for specific restrictions. 
type InvalidPathError struct { Kind string // "module", "import", or "file" Path string @@ -294,7 +294,7 @@ func fileNameOK(r rune) bool { } // CheckPath checks that a module path is valid. -// A valid module path is a valid import path, as checked by CheckImportPath, +// A valid module path is a valid import path, as checked by [CheckImportPath], // with three additional constraints. // First, the leading path element (up to the first slash, if any), // by convention a domain name, must contain only lower-case ASCII letters, @@ -380,7 +380,7 @@ const ( // checkPath returns an error describing why the path is not valid. // Because these checks apply to module, import, and file paths, // and because other checks may be applied, the caller is expected to wrap -// this error with InvalidPathError. +// this error with [InvalidPathError]. func checkPath(path string, kind pathKind) error { if !utf8.ValidString(path) { return fmt.Errorf("invalid UTF-8") @@ -532,7 +532,7 @@ var badWindowsNames = []string{ // they require ".vN" instead of "/vN", and for all N, not just N >= 2. // SplitPathVersion returns with ok = false when presented with // a path whose last path element does not satisfy the constraints -// applied by CheckPath, such as "example.com/pkg/v1" or "example.com/pkg/v1.2". +// applied by [CheckPath], such as "example.com/pkg/v1" or "example.com/pkg/v1.2". func SplitPathVersion(path string) (prefix, pathMajor string, ok bool) { if strings.HasPrefix(path, "gopkg.in/") { return splitGopkgIn(path) @@ -582,7 +582,7 @@ func splitGopkgIn(path string) (prefix, pathMajor string, ok bool) { // MatchPathMajor reports whether the semantic version v // matches the path major version pathMajor. // -// MatchPathMajor returns true if and only if CheckPathMajor returns nil. +// MatchPathMajor returns true if and only if [CheckPathMajor] returns nil. func MatchPathMajor(v, pathMajor string) bool { return CheckPathMajor(v, pathMajor) == nil } @@ -622,7 +622,7 @@ func CheckPathMajor(v, pathMajor string) error { // PathMajorPrefix returns the major-version tag prefix implied by pathMajor. // An empty PathMajorPrefix allows either v0 or v1. // -// Note that MatchPathMajor may accept some versions that do not actually begin +// Note that [MatchPathMajor] may accept some versions that do not actually begin // with this prefix: namely, it accepts a 'v0.0.0-' prefix for a '.v1' // pathMajor, even though that pathMajor implies 'v1' tagging. func PathMajorPrefix(pathMajor string) string { @@ -643,7 +643,7 @@ func PathMajorPrefix(pathMajor string) string { } // CanonicalVersion returns the canonical form of the version string v. -// It is the same as semver.Canonical(v) except that it preserves the special build suffix "+incompatible". +// It is the same as [semver.Canonical] except that it preserves the special build suffix "+incompatible". func CanonicalVersion(v string) string { cv := semver.Canonical(v) if semver.Build(v) == "+incompatible" { @@ -652,8 +652,8 @@ func CanonicalVersion(v string) string { return cv } -// Sort sorts the list by Path, breaking ties by comparing Version fields. -// The Version fields are interpreted as semantic versions (using semver.Compare) +// Sort sorts the list by Path, breaking ties by comparing [Version] fields. +// The Version fields are interpreted as semantic versions (using [semver.Compare]) // optionally followed by a tie-breaking suffix introduced by a slash character, // like in "v0.0.1/go.mod". 
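Most of the x/mod churn above and below is mechanical: plain identifier mentions in doc comments become bracketed doc links, which godoc and pkg.go.dev render as hyperlinks since Go 1.19. The convention in brief (an invented example, not from this diff):

// Package demo illustrates the doc-link syntax: [Name] links to an
// identifier in this package, [pkg.Name] to one in an imported
// package. A plain mention like Triple is left as ordinary text.
package demo

// Double is like [Triple] but multiplies by two.
func Double(x int) int { return 2 * x }

// Triple multiplies by three.
func Triple(x int) int { return 3 * x }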
func Sort(list []Version) { @@ -793,7 +793,7 @@ func unescapeString(escaped string) (string, bool) { } // MatchPrefixPatterns reports whether any path prefix of target matches one of -// the glob patterns (as defined by path.Match) in the comma-separated globs +// the glob patterns (as defined by [path.Match]) in the comma-separated globs // list. This implements the algorithm used when matching a module path to the // GOPRIVATE environment variable, as described by 'go help module-private'. // diff --git a/vendor/golang.org/x/mod/module/pseudo.go b/vendor/golang.org/x/mod/module/pseudo.go index f04ad378..9cf19d32 100644 --- a/vendor/golang.org/x/mod/module/pseudo.go +++ b/vendor/golang.org/x/mod/module/pseudo.go @@ -125,7 +125,7 @@ func IsPseudoVersion(v string) bool { } // IsZeroPseudoVersion returns whether v is a pseudo-version with a zero base, -// timestamp, and revision, as returned by ZeroPseudoVersion. +// timestamp, and revision, as returned by [ZeroPseudoVersion]. func IsZeroPseudoVersion(v string) bool { return v == ZeroPseudoVersion(semver.Major(v)) } diff --git a/vendor/golang.org/x/mod/semver/semver.go b/vendor/golang.org/x/mod/semver/semver.go index a30a22bf..9a2dfd33 100644 --- a/vendor/golang.org/x/mod/semver/semver.go +++ b/vendor/golang.org/x/mod/semver/semver.go @@ -140,7 +140,7 @@ func Compare(v, w string) int { // Max canonicalizes its arguments and then returns the version string // that compares greater. // -// Deprecated: use Compare instead. In most cases, returning a canonicalized +// Deprecated: use [Compare] instead. In most cases, returning a canonicalized // version is not expected or desired. func Max(v, w string) string { v = Canonical(v) @@ -151,7 +151,7 @@ func Max(v, w string) string { return w } -// ByVersion implements sort.Interface for sorting semantic version strings. +// ByVersion implements [sort.Interface] for sorting semantic version strings. type ByVersion []string func (vs ByVersion) Len() int { return len(vs) } @@ -164,7 +164,7 @@ func (vs ByVersion) Less(i, j int) bool { return vs[i] < vs[j] } -// Sort sorts a list of semantic version strings using ByVersion. +// Sort sorts a list of semantic version strings using [ByVersion]. func Sort(list []string) { sort.Sort(ByVersion(list)) } diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go deleted file mode 100644 index cf66309c..00000000 --- a/vendor/golang.org/x/net/context/context.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package context defines the Context type, which carries deadlines, -// cancelation signals, and other request-scoped values across API boundaries -// and between processes. -// As of Go 1.7 this package is available in the standard library under the -// name context. https://golang.org/pkg/context. -// -// Incoming requests to a server should create a Context, and outgoing calls to -// servers should accept a Context. The chain of function calls between must -// propagate the Context, optionally replacing it with a modified copy created -// using WithDeadline, WithTimeout, WithCancel, or WithValue. 
-// -// Programs that use Contexts should follow these rules to keep interfaces -// consistent across packages and enable static analysis tools to check context -// propagation: -// -// Do not store Contexts inside a struct type; instead, pass a Context -// explicitly to each function that needs it. The Context should be the first -// parameter, typically named ctx: -// -// func DoSomething(ctx context.Context, arg Arg) error { -// // ... use ctx ... -// } -// -// Do not pass a nil Context, even if a function permits it. Pass context.TODO -// if you are unsure about which Context to use. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -// -// The same Context may be passed to functions running in different goroutines; -// Contexts are safe for simultaneous use by multiple goroutines. -// -// See http://blog.golang.org/context for example code for a server that uses -// Contexts. -package context // import "golang.org/x/net/context" - -// Background returns a non-nil, empty Context. It is never canceled, has no -// values, and has no deadline. It is typically used by the main function, -// initialization, and tests, and as the top-level Context for incoming -// requests. -func Background() Context { - return background -} - -// TODO returns a non-nil, empty Context. Code should use context.TODO when -// it's unclear which Context to use or it is not yet available (because the -// surrounding function has not yet been extended to accept a Context -// parameter). TODO is recognized by static analysis tools that determine -// whether Contexts are propagated correctly in a program. -func TODO() Context { - return todo -} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go deleted file mode 100644 index 2cb9c408..00000000 --- a/vendor/golang.org/x/net/context/go17.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.7 -// +build go1.7 - -package context - -import ( - "context" // standard library's context, as of Go 1.7 - "time" -) - -var ( - todo = context.TODO() - background = context.Background() -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = context.Canceled - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = context.DeadlineExceeded - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - ctx, f := context.WithCancel(parent) - return ctx, f -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. 
The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - ctx, f := context.WithDeadline(parent, deadline) - return ctx, f -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return context.WithValue(parent, key, val) -} diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go deleted file mode 100644 index 64d31ecc..00000000 --- a/vendor/golang.org/x/net/context/go19.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.9 -// +build go1.9 - -package context - -import "context" // standard library's context, as of Go 1.7 - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context = context.Context - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc = context.CancelFunc diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go deleted file mode 100644 index 7b6b6851..00000000 --- a/vendor/golang.org/x/net/context/pre_go17.go +++ /dev/null @@ -1,301 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.7 -// +build !go1.7 - -package context - -import ( - "errors" - "fmt" - "sync" - "time" -) - -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. 
-type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case background: - return "context.Background" - case todo: - return "context.TODO" - } - return "unknown empty Context" -} - -var ( - background = new(emptyCtx) - todo = new(emptyCtx) -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = errors.New("context canceled") - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = errors.New("context deadline exceeded") - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - c := newCancelCtx(parent) - propagateCancel(parent, c) - return c, func() { c.cancel(true, Canceled) } -} - -// newCancelCtx returns an initialized cancelCtx. -func newCancelCtx(parent Context) *cancelCtx { - return &cancelCtx{ - Context: parent, - done: make(chan struct{}), - } -} - -// propagateCancel arranges for child to be canceled when parent is. -func propagateCancel(parent Context, child canceler) { - if parent.Done() == nil { - return // parent is never canceled - } - if p, ok := parentCancelCtx(parent); ok { - p.mu.Lock() - if p.err != nil { - // parent has already been canceled - child.cancel(false, p.err) - } else { - if p.children == nil { - p.children = make(map[canceler]bool) - } - p.children[child] = true - } - p.mu.Unlock() - } else { - go func() { - select { - case <-parent.Done(): - child.cancel(false, parent.Err()) - case <-child.Done(): - } - }() - } -} - -// parentCancelCtx follows a chain of parent references until it finds a -// *cancelCtx. This function understands how each of the concrete types in this -// package represents its parent. -func parentCancelCtx(parent Context) (*cancelCtx, bool) { - for { - switch c := parent.(type) { - case *cancelCtx: - return c, true - case *timerCtx: - return c.cancelCtx, true - case *valueCtx: - parent = c.Context - default: - return nil, false - } - } -} - -// removeChild removes a context from its parent. -func removeChild(parent Context, child canceler) { - p, ok := parentCancelCtx(parent) - if !ok { - return - } - p.mu.Lock() - if p.children != nil { - delete(p.children, child) - } - p.mu.Unlock() -} - -// A canceler is a context type that can be canceled directly. The -// implementations are *cancelCtx and *timerCtx. -type canceler interface { - cancel(removeFromParent bool, err error) - Done() <-chan struct{} -} - -// A cancelCtx can be canceled. When canceled, it also cancels any children -// that implement canceler. -type cancelCtx struct { - Context - - done chan struct{} // closed by the first cancel call. 
- - mu sync.Mutex - children map[canceler]bool // set to nil by the first cancel call - err error // set to non-nil by the first cancel call -} - -func (c *cancelCtx) Done() <-chan struct{} { - return c.done -} - -func (c *cancelCtx) Err() error { - c.mu.Lock() - defer c.mu.Unlock() - return c.err -} - -func (c *cancelCtx) String() string { - return fmt.Sprintf("%v.WithCancel", c.Context) -} - -// cancel closes c.done, cancels each of c's children, and, if -// removeFromParent is true, removes c from its parent's children. -func (c *cancelCtx) cancel(removeFromParent bool, err error) { - if err == nil { - panic("context: internal error: missing cancel error") - } - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return // already canceled - } - c.err = err - close(c.done) - for child := range c.children { - // NOTE: acquiring the child's lock while holding parent's lock. - child.cancel(false, err) - } - c.children = nil - c.mu.Unlock() - - if removeFromParent { - removeChild(c.Context, c) - } -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { - // The current deadline is already sooner than the new one. - return WithCancel(parent) - } - c := &timerCtx{ - cancelCtx: newCancelCtx(parent), - deadline: deadline, - } - propagateCancel(parent, c) - d := deadline.Sub(time.Now()) - if d <= 0 { - c.cancel(true, DeadlineExceeded) // deadline has already passed - return c, func() { c.cancel(true, Canceled) } - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err == nil { - c.timer = time.AfterFunc(d, func() { - c.cancel(true, DeadlineExceeded) - }) - } - return c, func() { c.cancel(true, Canceled) } -} - -// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to -// implement Done and Err. It implements cancel by stopping its timer then -// delegating to cancelCtx.cancel. -type timerCtx struct { - *cancelCtx - timer *time.Timer // Under cancelCtx.mu. - - deadline time.Time -} - -func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { - return c.deadline, true -} - -func (c *timerCtx) String() string { - return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) -} - -func (c *timerCtx) cancel(removeFromParent bool, err error) { - c.cancelCtx.cancel(false, err) - if removeFromParent { - // Remove this timerCtx from its parent cancelCtx's children. - removeChild(c.cancelCtx.Context, c) - } - c.mu.Lock() - if c.timer != nil { - c.timer.Stop() - c.timer = nil - } - c.mu.Unlock() -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
-// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return &valueCtx{parent, key, val} -} - -// A valueCtx carries a key-value pair. It implements Value for that key and -// delegates all other calls to the embedded Context. -type valueCtx struct { - Context - key, val interface{} -} - -func (c *valueCtx) String() string { - return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) -} - -func (c *valueCtx) Value(key interface{}) interface{} { - if c.key == key { - return c.val - } - return c.Context.Value(key) -} diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go deleted file mode 100644 index 1f971534..00000000 --- a/vendor/golang.org/x/net/context/pre_go19.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.9 -// +build !go1.9 - -package context - -import "time" - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - // - // WithCancel arranges for Done to be closed when cancel is called; - // WithDeadline arranges for Done to be closed when the deadline - // expires; WithTimeout arranges for Done to be closed when the timeout - // elapses. - // - // Done is provided for use in select statements: - // - // // Stream generates values with DoSomething and sends them to out - // // until DoSomething returns an error or ctx.Done is closed. - // func Stream(ctx context.Context, out chan<- Value) error { - // for { - // v, err := DoSomething(ctx) - // if err != nil { - // return err - // } - // select { - // case <-ctx.Done(): - // return ctx.Err() - // case out <- v: - // } - // } - // } - // - // See http://blog.golang.org/pipelines for more examples of how to use - // a Done channel for cancelation. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. 
- // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - // - // A key identifies a specific value in a Context. Functions that wish - // to store values in Context typically allocate a key in a global - // variable then use that key as the argument to context.WithValue and - // Context.Value. A key can be any type that supports equality; - // packages should define keys as an unexported type to avoid - // collisions. - // - // Packages that define a Context key should provide type-safe accessors - // for the values stores using that key: - // - // // Package user defines a User type that's stored in Contexts. - // package user - // - // import "golang.org/x/net/context" - // - // // User is the type of value stored in the Contexts. - // type User struct {...} - // - // // key is an unexported type for keys defined in this package. - // // This prevents collisions with keys defined in other packages. - // type key int - // - // // userKey is the key for user.User values in Contexts. It is - // // unexported; clients use user.NewContext and user.FromContext - // // instead of using this key directly. - // var userKey key = 0 - // - // // NewContext returns a new Context that carries value u. - // func NewContext(ctx context.Context, u *User) context.Context { - // return context.WithValue(ctx, userKey, u) - // } - // - // // FromContext returns the User value stored in ctx, if any. - // func FromContext(ctx context.Context) (*User, bool) { - // u, ok := ctx.Value(userKey).(*User) - // return u, ok - // } - Value(key interface{}) interface{} -} - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc func() diff --git a/vendor/golang.org/x/net/http2/databuffer.go b/vendor/golang.org/x/net/http2/databuffer.go index a3067f8d..e6f55cbd 100644 --- a/vendor/golang.org/x/net/http2/databuffer.go +++ b/vendor/golang.org/x/net/http2/databuffer.go @@ -20,41 +20,44 @@ import ( // TODO: Benchmark to determine if the pools are necessary. 
The GC may have // improved enough that we can instead allocate chunks like this: // make([]byte, max(16<<10, expectedBytesRemaining)) -var ( - dataChunkSizeClasses = []int{ - 1 << 10, - 2 << 10, - 4 << 10, - 8 << 10, - 16 << 10, - } - dataChunkPools = [...]sync.Pool{ - {New: func() interface{} { return make([]byte, 1<<10) }}, - {New: func() interface{} { return make([]byte, 2<<10) }}, - {New: func() interface{} { return make([]byte, 4<<10) }}, - {New: func() interface{} { return make([]byte, 8<<10) }}, - {New: func() interface{} { return make([]byte, 16<<10) }}, - } -) +var dataChunkPools = [...]sync.Pool{ + {New: func() interface{} { return new([1 << 10]byte) }}, + {New: func() interface{} { return new([2 << 10]byte) }}, + {New: func() interface{} { return new([4 << 10]byte) }}, + {New: func() interface{} { return new([8 << 10]byte) }}, + {New: func() interface{} { return new([16 << 10]byte) }}, +} func getDataBufferChunk(size int64) []byte { - i := 0 - for ; i < len(dataChunkSizeClasses)-1; i++ { - if size <= int64(dataChunkSizeClasses[i]) { - break - } + switch { + case size <= 1<<10: + return dataChunkPools[0].Get().(*[1 << 10]byte)[:] + case size <= 2<<10: + return dataChunkPools[1].Get().(*[2 << 10]byte)[:] + case size <= 4<<10: + return dataChunkPools[2].Get().(*[4 << 10]byte)[:] + case size <= 8<<10: + return dataChunkPools[3].Get().(*[8 << 10]byte)[:] + default: + return dataChunkPools[4].Get().(*[16 << 10]byte)[:] } - return dataChunkPools[i].Get().([]byte) } func putDataBufferChunk(p []byte) { - for i, n := range dataChunkSizeClasses { - if len(p) == n { - dataChunkPools[i].Put(p) - return - } + switch len(p) { + case 1 << 10: + dataChunkPools[0].Put((*[1 << 10]byte)(p)) + case 2 << 10: + dataChunkPools[1].Put((*[2 << 10]byte)(p)) + case 4 << 10: + dataChunkPools[2].Put((*[4 << 10]byte)(p)) + case 8 << 10: + dataChunkPools[3].Put((*[8 << 10]byte)(p)) + case 16 << 10: + dataChunkPools[4].Put((*[16 << 10]byte)(p)) + default: + panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) } - panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) } // dataBuffer is an io.ReadWriter backed by a list of data chunks. diff --git a/vendor/golang.org/x/net/http2/go111.go b/vendor/golang.org/x/net/http2/go111.go deleted file mode 100644 index 5bf62b03..00000000 --- a/vendor/golang.org/x/net/http2/go111.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.11 -// +build go1.11 - -package http2 - -import ( - "net/http/httptrace" - "net/textproto" -) - -func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { - return trace != nil && trace.WroteHeaderField != nil -} - -func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { - if trace != nil && trace.WroteHeaderField != nil { - trace.WroteHeaderField(k, []string{v}) - } -} - -func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { - if trace != nil { - return trace.Got1xxResponse - } - return nil -} diff --git a/vendor/golang.org/x/net/http2/go115.go b/vendor/golang.org/x/net/http2/go115.go deleted file mode 100644 index 908af1ab..00000000 --- a/vendor/golang.org/x/net/http2/go115.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build go1.15 -// +build go1.15 - -package http2 - -import ( - "context" - "crypto/tls" -) - -// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS -// connection. -func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { - dialer := &tls.Dialer{ - Config: cfg, - } - cn, err := dialer.DialContext(ctx, network, addr) - if err != nil { - return nil, err - } - tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed - return tlsCn, nil -} diff --git a/vendor/golang.org/x/net/http2/go118.go b/vendor/golang.org/x/net/http2/go118.go deleted file mode 100644 index aca4b2b3..00000000 --- a/vendor/golang.org/x/net/http2/go118.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.18 -// +build go1.18 - -package http2 - -import ( - "crypto/tls" - "net" -) - -func tlsUnderlyingConn(tc *tls.Conn) net.Conn { - return tc.NetConn() -} diff --git a/vendor/golang.org/x/net/http2/not_go111.go b/vendor/golang.org/x/net/http2/not_go111.go deleted file mode 100644 index cc0baa81..00000000 --- a/vendor/golang.org/x/net/http2/not_go111.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.11 -// +build !go1.11 - -package http2 - -import ( - "net/http/httptrace" - "net/textproto" -) - -func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { return false } - -func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {} - -func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { - return nil -} diff --git a/vendor/golang.org/x/net/http2/not_go115.go b/vendor/golang.org/x/net/http2/not_go115.go deleted file mode 100644 index e6c04cf7..00000000 --- a/vendor/golang.org/x/net/http2/not_go115.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.15 -// +build !go1.15 - -package http2 - -import ( - "context" - "crypto/tls" -) - -// dialTLSWithContext opens a TLS connection. -func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { - cn, err := tls.Dial(network, addr, cfg) - if err != nil { - return nil, err - } - if err := cn.Handshake(); err != nil { - return nil, err - } - if cfg.InsecureSkipVerify { - return cn, nil - } - if err := cn.VerifyHostname(cfg.ServerName); err != nil { - return nil, err - } - return cn, nil -} diff --git a/vendor/golang.org/x/net/http2/not_go118.go b/vendor/golang.org/x/net/http2/not_go118.go deleted file mode 100644 index eab532c9..00000000 --- a/vendor/golang.org/x/net/http2/not_go118.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !go1.18 -// +build !go1.18 - -package http2 - -import ( - "crypto/tls" - "net" -) - -func tlsUnderlyingConn(tc *tls.Conn) net.Conn { - return nil -} diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 02c88b6b..ae94c640 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -2549,7 +2549,6 @@ type responseWriterState struct { wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. sentHeader bool // have we sent the header frame? handlerDone bool // handler has finished - dirty bool // a Write failed; don't reuse this responseWriterState sentContentLen int64 // non-zero if handler set a Content-Length header wroteBytes int64 @@ -2669,7 +2668,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { date: date, }) if err != nil { - rws.dirty = true return 0, err } if endStream { @@ -2690,7 +2688,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { if len(p) > 0 || endStream { // only send a 0 byte DATA frame if we're ending the stream. if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { - rws.dirty = true return 0, err } } @@ -2702,9 +2699,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { trailers: rws.trailers, endStream: true, }) - if err != nil { - rws.dirty = true - } return len(p), err } return len(p), nil @@ -2920,14 +2914,12 @@ func (rws *responseWriterState) writeHeader(code int) { h.Del("Transfer-Encoding") } - if rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + rws.conn.writeHeaders(rws.stream, &writeResHeaders{ streamID: rws.stream.id, httpResCode: code, h: h, endStream: rws.handlerDone && !rws.hasTrailers(), - }) != nil { - rws.dirty = true - } + }) return } @@ -2992,19 +2984,10 @@ func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, func (w *responseWriter) handlerDone() { rws := w.rws - dirty := rws.dirty rws.handlerDone = true w.Flush() w.rws = nil - if !dirty { - // Only recycle the pool if all prior Write calls to - // the serverConn goroutine completed successfully. If - // they returned earlier due to resets from the peer - // there might still be write goroutines outstanding - // from the serverConn referencing the rws memory. See - // issue 20704. - responseWriterStatePool.Put(rws) - } + responseWriterStatePool.Put(rws) } // Push errors. 
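The databuffer.go change a few files above replaces the size-class lookup loop with a switch over typed pools: each sync.Pool now stores a *[N]byte instead of a []byte, so Put no longer allocates to box a slice header in an interface (the pattern staticcheck reports as SA6002), and Get returns a buffer whose size is statically known. A minimal, runnable sketch of the same pattern, using a single hypothetical 4 KiB size class; the slice-to-array-pointer conversion on Put needs Go 1.17 or newer:

	package main

	import (
		"fmt"
		"sync"
	)

	// chunkPool stores *[4096]byte values. A pointer fits in the
	// interface word, so a warm pool allocates on neither Get nor Put.
	var chunkPool = sync.Pool{
		New: func() interface{} { return new([4096]byte) },
	}

	func getChunk() []byte {
		return chunkPool.Get().(*[4096]byte)[:]
	}

	func putChunk(p []byte) {
		if len(p) != 4096 {
			panic(fmt.Sprintf("unexpected buffer len=%v", len(p)))
		}
		chunkPool.Put((*[4096]byte)(p)) // slice-to-array-pointer, Go 1.17+
	}

	func main() {
		buf := getChunk()
		n := copy(buf, "hello")
		fmt.Println(n, len(buf)) // 5 4096
		putChunk(buf)
	}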
@@ -3187,6 +3170,7 @@ func (sc *serverConn) startPush(msg *startPushRequest) { panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err)) } + sc.curHandlers++ go sc.runHandler(rw, req, sc.handler.ServeHTTP) return promisedID, nil } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 4515b22c..df578b86 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -1018,7 +1018,7 @@ func (cc *ClientConn) forceCloseConn() { if !ok { return } - if nc := tlsUnderlyingConn(tc); nc != nil { + if nc := tc.NetConn(); nc != nil { nc.Close() } } @@ -3201,3 +3201,34 @@ func traceFirstResponseByte(trace *httptrace.ClientTrace) { trace.GotFirstResponseByte() } } + +func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { + return trace != nil && trace.WroteHeaderField != nil +} + +func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { + if trace != nil && trace.WroteHeaderField != nil { + trace.WroteHeaderField(k, []string{v}) + } +} + +func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { + if trace != nil { + return trace.Got1xxResponse + } + return nil +} + +// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS +// connection. +func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { + dialer := &tls.Dialer{ + Config: cfg, + } + cn, err := dialer.DialContext(ctx, network, addr) + if err != nil { + return nil, err + } + tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed + return tlsCn, nil +} diff --git a/vendor/golang.org/x/net/idna/go118.go b/vendor/golang.org/x/net/idna/go118.go index c5c4338d..712f1ad8 100644 --- a/vendor/golang.org/x/net/idna/go118.go +++ b/vendor/golang.org/x/net/idna/go118.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build go1.18 -// +build go1.18 package idna diff --git a/vendor/golang.org/x/net/idna/idna10.0.0.go b/vendor/golang.org/x/net/idna/idna10.0.0.go index 64ccf85f..7b371788 100644 --- a/vendor/golang.org/x/net/idna/idna10.0.0.go +++ b/vendor/golang.org/x/net/idna/idna10.0.0.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build go1.10 -// +build go1.10 // Package idna implements IDNA2008 using the compatibility processing // defined by UTS (Unicode Technical Standard) #46, which defines a standard to diff --git a/vendor/golang.org/x/net/idna/idna9.0.0.go b/vendor/golang.org/x/net/idna/idna9.0.0.go index ee1698ce..cc6a892a 100644 --- a/vendor/golang.org/x/net/idna/idna9.0.0.go +++ b/vendor/golang.org/x/net/idna/idna9.0.0.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build !go1.10 -// +build !go1.10 // Package idna implements IDNA2008 using the compatibility processing // defined by UTS (Unicode Technical Standard) #46, which defines a standard to diff --git a/vendor/golang.org/x/net/idna/pre_go118.go b/vendor/golang.org/x/net/idna/pre_go118.go index 3aaccab1..40e74bb3 100644 --- a/vendor/golang.org/x/net/idna/pre_go118.go +++ b/vendor/golang.org/x/net/idna/pre_go118.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. 
//go:build !go1.18 -// +build !go1.18 package idna diff --git a/vendor/golang.org/x/net/idna/tables10.0.0.go b/vendor/golang.org/x/net/idna/tables10.0.0.go index d1d62ef4..c6c2bf10 100644 --- a/vendor/golang.org/x/net/idna/tables10.0.0.go +++ b/vendor/golang.org/x/net/idna/tables10.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.10 && !go1.13 -// +build go1.10,!go1.13 package idna diff --git a/vendor/golang.org/x/net/idna/tables11.0.0.go b/vendor/golang.org/x/net/idna/tables11.0.0.go index 167efba7..76789393 100644 --- a/vendor/golang.org/x/net/idna/tables11.0.0.go +++ b/vendor/golang.org/x/net/idna/tables11.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.13 && !go1.14 -// +build go1.13,!go1.14 package idna diff --git a/vendor/golang.org/x/net/idna/tables12.0.0.go b/vendor/golang.org/x/net/idna/tables12.0.0.go index ab40f7bc..0600cd2a 100644 --- a/vendor/golang.org/x/net/idna/tables12.0.0.go +++ b/vendor/golang.org/x/net/idna/tables12.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.14 && !go1.16 -// +build go1.14,!go1.16 package idna diff --git a/vendor/golang.org/x/net/idna/tables13.0.0.go b/vendor/golang.org/x/net/idna/tables13.0.0.go index 66701ead..2fb768ef 100644 --- a/vendor/golang.org/x/net/idna/tables13.0.0.go +++ b/vendor/golang.org/x/net/idna/tables13.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.16 && !go1.21 -// +build go1.16,!go1.21 package idna diff --git a/vendor/golang.org/x/net/idna/tables15.0.0.go b/vendor/golang.org/x/net/idna/tables15.0.0.go index 40033778..5ff05fe1 100644 --- a/vendor/golang.org/x/net/idna/tables15.0.0.go +++ b/vendor/golang.org/x/net/idna/tables15.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.21 -// +build go1.21 package idna diff --git a/vendor/golang.org/x/net/idna/tables9.0.0.go b/vendor/golang.org/x/net/idna/tables9.0.0.go index 4074b533..0f25e84c 100644 --- a/vendor/golang.org/x/net/idna/tables9.0.0.go +++ b/vendor/golang.org/x/net/idna/tables9.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build !go1.10 -// +build !go1.10 package idna diff --git a/vendor/golang.org/x/net/idna/trie12.0.0.go b/vendor/golang.org/x/net/idna/trie12.0.0.go index bb63f904..8a75b966 100644 --- a/vendor/golang.org/x/net/idna/trie12.0.0.go +++ b/vendor/golang.org/x/net/idna/trie12.0.0.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build !go1.16 -// +build !go1.16 package idna diff --git a/vendor/golang.org/x/net/idna/trie13.0.0.go b/vendor/golang.org/x/net/idna/trie13.0.0.go index 7d68a8dc..fa45bb90 100644 --- a/vendor/golang.org/x/net/idna/trie13.0.0.go +++ b/vendor/golang.org/x/net/idna/trie13.0.0.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. 
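// The idna hunks above and below each delete a single legacy "// +build"
// line. Go 1.17 introduced the "//go:build" syntax with ordinary boolean
// operators, and gofmt on a 1.17+ toolchain keeps only the new form once
// the old one is dropped. Both lines express the same constraint; in the
// legacy syntax a comma means AND and a space means OR. A hypothetical
// pre-cleanup file header for comparison:
//
//	//go:build go1.16 && !go1.21
//	// +build go1.16,!go1.21
//
//	package idna
//
// After this patch only the //go:build line remains in each file.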
//go:build go1.16 -// +build go1.16 package idna diff --git a/vendor/golang.org/x/oauth2/deviceauth.go b/vendor/golang.org/x/oauth2/deviceauth.go new file mode 100644 index 00000000..e99c92f3 --- /dev/null +++ b/vendor/golang.org/x/oauth2/deviceauth.go @@ -0,0 +1,198 @@ +package oauth2 + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "time" + + "golang.org/x/oauth2/internal" +) + +// https://datatracker.ietf.org/doc/html/rfc8628#section-3.5 +const ( + errAuthorizationPending = "authorization_pending" + errSlowDown = "slow_down" + errAccessDenied = "access_denied" + errExpiredToken = "expired_token" +) + +// DeviceAuthResponse describes a successful RFC 8628 Device Authorization Response +// https://datatracker.ietf.org/doc/html/rfc8628#section-3.2 +type DeviceAuthResponse struct { + // DeviceCode + DeviceCode string `json:"device_code"` + // UserCode is the code the user should enter at the verification uri + UserCode string `json:"user_code"` + // VerificationURI is where user should enter the user code + VerificationURI string `json:"verification_uri"` + // VerificationURIComplete (if populated) includes the user code in the verification URI. This is typically shown to the user in non-textual form, such as a QR code. + VerificationURIComplete string `json:"verification_uri_complete,omitempty"` + // Expiry is when the device code and user code expire + Expiry time.Time `json:"expires_in,omitempty"` + // Interval is the duration in seconds that Poll should wait between requests + Interval int64 `json:"interval,omitempty"` +} + +func (d DeviceAuthResponse) MarshalJSON() ([]byte, error) { + type Alias DeviceAuthResponse + var expiresIn int64 + if !d.Expiry.IsZero() { + expiresIn = int64(time.Until(d.Expiry).Seconds()) + } + return json.Marshal(&struct { + ExpiresIn int64 `json:"expires_in,omitempty"` + *Alias + }{ + ExpiresIn: expiresIn, + Alias: (*Alias)(&d), + }) + +} + +func (c *DeviceAuthResponse) UnmarshalJSON(data []byte) error { + type Alias DeviceAuthResponse + aux := &struct { + ExpiresIn int64 `json:"expires_in"` + // workaround misspelling of verification_uri + VerificationURL string `json:"verification_url"` + *Alias + }{ + Alias: (*Alias)(c), + } + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + if aux.ExpiresIn != 0 { + c.Expiry = time.Now().UTC().Add(time.Second * time.Duration(aux.ExpiresIn)) + } + if c.VerificationURI == "" { + c.VerificationURI = aux.VerificationURL + } + return nil +} + +// DeviceAuth returns a device auth struct which contains a device code +// and authorization information provided for users to enter on another device. 
+func (c *Config) DeviceAuth(ctx context.Context, opts ...AuthCodeOption) (*DeviceAuthResponse, error) { + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.1 + v := url.Values{ + "client_id": {c.ClientID}, + } + if len(c.Scopes) > 0 { + v.Set("scope", strings.Join(c.Scopes, " ")) + } + for _, opt := range opts { + opt.setValue(v) + } + return retrieveDeviceAuth(ctx, c, v) +} + +func retrieveDeviceAuth(ctx context.Context, c *Config, v url.Values) (*DeviceAuthResponse, error) { + if c.Endpoint.DeviceAuthURL == "" { + return nil, errors.New("endpoint missing DeviceAuthURL") + } + + req, err := http.NewRequest("POST", c.Endpoint.DeviceAuthURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + req.Header.Set("Accept", "application/json") + + t := time.Now() + r, err := internal.ContextClient(ctx).Do(req) + if err != nil { + return nil, err + } + + body, err := io.ReadAll(io.LimitReader(r.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot auth device: %v", err) + } + if code := r.StatusCode; code < 200 || code > 299 { + return nil, &RetrieveError{ + Response: r, + Body: body, + } + } + + da := &DeviceAuthResponse{} + err = json.Unmarshal(body, &da) + if err != nil { + return nil, fmt.Errorf("unmarshal %s", err) + } + + if !da.Expiry.IsZero() { + // Make a small adjustment to account for time taken by the request + da.Expiry = da.Expiry.Add(-time.Since(t)) + } + + return da, nil +} + +// DeviceAccessToken polls the server to exchange a device code for a token. +func (c *Config) DeviceAccessToken(ctx context.Context, da *DeviceAuthResponse, opts ...AuthCodeOption) (*Token, error) { + if !da.Expiry.IsZero() { + var cancel context.CancelFunc + ctx, cancel = context.WithDeadline(ctx, da.Expiry) + defer cancel() + } + + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.4 + v := url.Values{ + "client_id": {c.ClientID}, + "grant_type": {"urn:ietf:params:oauth:grant-type:device_code"}, + "device_code": {da.DeviceCode}, + } + if len(c.Scopes) > 0 { + v.Set("scope", strings.Join(c.Scopes, " ")) + } + for _, opt := range opts { + opt.setValue(v) + } + + // "If no value is provided, clients MUST use 5 as the default." + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.2 + interval := da.Interval + if interval == 0 { + interval = 5 + } + + ticker := time.NewTicker(time.Duration(interval) * time.Second) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-ticker.C: + tok, err := retrieveToken(ctx, c, v) + if err == nil { + return tok, nil + } + + e, ok := err.(*RetrieveError) + if !ok { + return nil, err + } + switch e.ErrorCode { + case errSlowDown: + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.5 + // "the interval MUST be increased by 5 seconds for this and all subsequent requests" + interval += 5 + ticker.Reset(time.Duration(interval) * time.Second) + case errAuthorizationPending: + // Do nothing. + case errAccessDenied, errExpiredToken: + fallthrough + default: + return tok, err + } + } + } +} diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen1.go b/vendor/golang.org/x/oauth2/google/appengine_gen1.go index 16c6c6b9..e6158794 100644 --- a/vendor/golang.org/x/oauth2/google/appengine_gen1.go +++ b/vendor/golang.org/x/oauth2/google/appengine_gen1.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
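// The deviceauth.go file added above implements the RFC 8628 device
// authorization grant end to end: DeviceAuth requests the device and user
// codes, and DeviceAccessToken polls the token endpoint, bounding the
// polling context by da.Expiry and honoring the server's interval and
// slow_down backoff. A minimal sketch of the flow; the client ID and
// scope are placeholders, and google.Endpoint only gains the required
// DeviceAuthURL in a later hunk of this patch.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
)

func main() {
	ctx := context.Background()
	cfg := &oauth2.Config{
		ClientID: "YOUR_CLIENT_ID", // placeholder
		Endpoint: google.Endpoint,
		Scopes:   []string{"openid"},
	}

	// RFC 8628 sections 3.1-3.2: obtain device and user codes.
	da, err := cfg.DeviceAuth(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("Visit %s and enter code %s\n", da.VerificationURI, da.UserCode)

	// Sections 3.4-3.5: poll until the user approves, the codes expire,
	// or ctx is canceled.
	tok, err := cfg.DeviceAccessToken(ctx, da)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("access token acquired; expires:", tok.Expiry)
}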
//go:build appengine -// +build appengine // This file applies to App Engine first generation runtimes (<= Go 1.9). diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go index a7e27b3d..9c79aa0a 100644 --- a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go +++ b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !appengine -// +build !appengine // This file applies to App Engine second generation runtimes (>= Go 1.11) and App Engine flexible. diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go index 2cf71f0f..02ccd08a 100644 --- a/vendor/golang.org/x/oauth2/google/default.go +++ b/vendor/golang.org/x/oauth2/google/default.go @@ -12,6 +12,7 @@ import ( "os" "path/filepath" "runtime" + "sync" "time" "cloud.google.com/go/compute/metadata" @@ -19,7 +20,10 @@ import ( "golang.org/x/oauth2/authhandler" ) -const adcSetupURL = "https://cloud.google.com/docs/authentication/external/set-up-adc" +const ( + adcSetupURL = "https://cloud.google.com/docs/authentication/external/set-up-adc" + universeDomainDefault = "googleapis.com" +) // Credentials holds Google credentials, including "Application Default Credentials". // For more details, see: @@ -37,6 +41,75 @@ type Credentials struct { // environment and not with a credentials file, e.g. when code is // running on Google Cloud Platform. JSON []byte + + udMu sync.Mutex // guards universeDomain + // universeDomain is the default service domain for a given Cloud universe. + universeDomain string +} + +// UniverseDomain returns the default service domain for a given Cloud universe. +// +// The default value is "googleapis.com". +// +// Deprecated: Use instead (*Credentials).GetUniverseDomain(), which supports +// obtaining the universe domain when authenticating via the GCE metadata server. +// Unlike GetUniverseDomain, this method, UniverseDomain, will always return the +// default value when authenticating via the GCE metadata server. +// See also [The attached service account](https://cloud.google.com/docs/authentication/application-default-credentials#attached-sa). +func (c *Credentials) UniverseDomain() string { + if c.universeDomain == "" { + return universeDomainDefault + } + return c.universeDomain +} + +// GetUniverseDomain returns the default service domain for a given Cloud +// universe. +// +// The default value is "googleapis.com". +// +// It obtains the universe domain from the attached service account on GCE when +// authenticating via the GCE metadata server. See also [The attached service +// account](https://cloud.google.com/docs/authentication/application-default-credentials#attached-sa). +// If the GCE metadata server returns a 404 error, the default value is +// returned. If the GCE metadata server returns an error other than 404, the +// error is returned. +func (c *Credentials) GetUniverseDomain() (string, error) { + c.udMu.Lock() + defer c.udMu.Unlock() + if c.universeDomain == "" && metadata.OnGCE() { + // If we're on Google Compute Engine, an App Engine standard second + // generation runtime, or App Engine flexible, use the metadata server. + err := c.computeUniverseDomain() + if err != nil { + return "", err + } + } + // If not on Google Compute Engine, or in case of any non-error path in + // computeUniverseDomain that did not set universeDomain, set the default + // universe domain. 
+ if c.universeDomain == "" { + c.universeDomain = universeDomainDefault + } + return c.universeDomain, nil +} + +// computeUniverseDomain fetches the default service domain for a given Cloud +// universe from Google Compute Engine (GCE)'s metadata server. It's only valid +// to use this method if your program is running on a GCE instance. +func (c *Credentials) computeUniverseDomain() error { + var err error + c.universeDomain, err = metadata.Get("universe/universe_domain") + if err != nil { + if _, ok := err.(metadata.NotDefinedError); ok { + // http.StatusNotFound (404) + c.universeDomain = universeDomainDefault + return nil + } else { + return err + } + } + return nil } // DefaultCredentials is the old name of Credentials. @@ -76,6 +149,12 @@ type CredentialsParams struct { // Note: This option is currently only respected when using credentials // fetched from the GCE metadata server. EarlyTokenRefresh time.Duration + + // UniverseDomain is the default service domain for a given Cloud universe. + // Only supported in authentication flows that support universe domains. + // This value takes precedence over a universe domain explicitly specified + // in a credentials config file or by the GCE metadata server. Optional. + UniverseDomain string } func (params CredentialsParams) deepCopy() CredentialsParams { @@ -160,8 +239,9 @@ func FindDefaultCredentialsWithParams(ctx context.Context, params CredentialsPar if metadata.OnGCE() { id, _ := metadata.ProjectID() return &Credentials{ - ProjectID: id, - TokenSource: computeTokenSource("", params.EarlyTokenRefresh, params.Scopes...), + ProjectID: id, + TokenSource: computeTokenSource("", params.EarlyTokenRefresh, params.Scopes...), + universeDomain: params.UniverseDomain, }, nil } @@ -200,15 +280,26 @@ func CredentialsFromJSONWithParams(ctx context.Context, jsonData []byte, params if err := json.Unmarshal(jsonData, &f); err != nil { return nil, err } + + universeDomain := f.UniverseDomain + if params.UniverseDomain != "" { + universeDomain = params.UniverseDomain + } + // Authorized user credentials are only supported in the googleapis.com universe. + if f.Type == userCredentialsKey { + universeDomain = universeDomainDefault + } + ts, err := f.tokenSource(ctx, params) if err != nil { return nil, err } ts = newErrWrappingTokenSource(ts) return &Credentials{ - ProjectID: f.ProjectID, - TokenSource: ts, - JSON: jsonData, + ProjectID: f.ProjectID, + TokenSource: ts, + JSON: jsonData, + universeDomain: universeDomain, }, nil } diff --git a/vendor/golang.org/x/oauth2/google/doc.go b/vendor/golang.org/x/oauth2/google/doc.go index ca717634..03c42c6f 100644 --- a/vendor/golang.org/x/oauth2/google/doc.go +++ b/vendor/golang.org/x/oauth2/google/doc.go @@ -101,6 +101,8 @@ // executable-sourced credentials), please check out: // https://cloud.google.com/iam/docs/workforce-obtaining-short-lived-credentials#generate_a_configuration_file_for_non-interactive_sign-in // +// # Security considerations +// // Note that this library does not perform any validation on the token_url, token_info_url, // or service_account_impersonation_url fields of the credential configuration. 
// It is not recommended to use a credential configuration that you did not generate with diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go index cc122388..c66c5352 100644 --- a/vendor/golang.org/x/oauth2/google/google.go +++ b/vendor/golang.org/x/oauth2/google/google.go @@ -16,14 +16,16 @@ import ( "cloud.google.com/go/compute/metadata" "golang.org/x/oauth2" "golang.org/x/oauth2/google/internal/externalaccount" + "golang.org/x/oauth2/google/internal/externalaccountauthorizeduser" "golang.org/x/oauth2/jwt" ) // Endpoint is Google's OAuth 2.0 default endpoint. var Endpoint = oauth2.Endpoint{ - AuthURL: "https://accounts.google.com/o/oauth2/auth", - TokenURL: "https://oauth2.googleapis.com/token", - AuthStyle: oauth2.AuthStyleInParams, + AuthURL: "https://accounts.google.com/o/oauth2/auth", + TokenURL: "https://oauth2.googleapis.com/token", + DeviceAuthURL: "https://oauth2.googleapis.com/device/code", + AuthStyle: oauth2.AuthStyleInParams, } // MTLSTokenURL is Google's OAuth 2.0 default mTLS endpoint. @@ -95,10 +97,11 @@ func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) { // JSON key file types. const ( - serviceAccountKey = "service_account" - userCredentialsKey = "authorized_user" - externalAccountKey = "external_account" - impersonatedServiceAccount = "impersonated_service_account" + serviceAccountKey = "service_account" + userCredentialsKey = "authorized_user" + externalAccountKey = "external_account" + externalAccountAuthorizedUserKey = "external_account_authorized_user" + impersonatedServiceAccount = "impersonated_service_account" ) // credentialsFile is the unmarshalled representation of a credentials file. @@ -106,12 +109,13 @@ type credentialsFile struct { Type string `json:"type"` // Service Account fields - ClientEmail string `json:"client_email"` - PrivateKeyID string `json:"private_key_id"` - PrivateKey string `json:"private_key"` - AuthURL string `json:"auth_uri"` - TokenURL string `json:"token_uri"` - ProjectID string `json:"project_id"` + ClientEmail string `json:"client_email"` + PrivateKeyID string `json:"private_key_id"` + PrivateKey string `json:"private_key"` + AuthURL string `json:"auth_uri"` + TokenURL string `json:"token_uri"` + ProjectID string `json:"project_id"` + UniverseDomain string `json:"universe_domain"` // User Credential fields // (These typically come from gcloud auth.) 
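The universe_domain field added to credentialsFile above is the file-based counterpart of the plumbing introduced in default.go earlier in this patch: CredentialsParams.UniverseDomain takes precedence, then the value from the credentials file, then (on GCE) the metadata server, with "googleapis.com" as the fallback. A small sketch of reading the resolved domain from Application Default Credentials; the scope is a placeholder:

	package main

	import (
		"context"
		"fmt"
		"log"

		"golang.org/x/oauth2/google"
	)

	func main() {
		ctx := context.Background()
		creds, err := google.FindDefaultCredentials(ctx,
			"https://www.googleapis.com/auth/cloud-platform") // placeholder scope
		if err != nil {
			log.Fatal(err)
		}

		// GetUniverseDomain may query the GCE metadata server once;
		// the result is cached on the Credentials value under udMu.
		ud, err := creds.GetUniverseDomain()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("universe domain:", ud)
	}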
@@ -131,6 +135,9 @@ type credentialsFile struct { QuotaProjectID string `json:"quota_project_id"` WorkforcePoolUserProject string `json:"workforce_pool_user_project"` + // External Account Authorized User fields + RevokeURL string `json:"revoke_url"` + // Service account impersonation SourceCredentials *credentialsFile `json:"source_credentials"` } @@ -199,6 +206,19 @@ func (f *credentialsFile) tokenSource(ctx context.Context, params CredentialsPar WorkforcePoolUserProject: f.WorkforcePoolUserProject, } return cfg.TokenSource(ctx) + case externalAccountAuthorizedUserKey: + cfg := &externalaccountauthorizeduser.Config{ + Audience: f.Audience, + RefreshToken: f.RefreshToken, + TokenURL: f.TokenURLExternal, + TokenInfoURL: f.TokenInfoURL, + ClientID: f.ClientID, + ClientSecret: f.ClientSecret, + RevokeURL: f.RevokeURL, + QuotaProjectID: f.QuotaProjectID, + Scopes: params.Scopes, + } + return cfg.TokenSource(ctx) case impersonatedServiceAccount: if f.ServiceAccountImpersonationURL == "" || f.SourceCredentials == nil { return nil, errors.New("missing 'source_credentials' field or 'service_account_impersonation_url' in credentials") diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go index 2bf3202b..bd4efd19 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go @@ -274,49 +274,6 @@ type awsRequest struct { Headers []awsRequestHeader `json:"headers"` } -func (cs awsCredentialSource) validateMetadataServers() error { - if err := cs.validateMetadataServer(cs.RegionURL, "region_url"); err != nil { - return err - } - if err := cs.validateMetadataServer(cs.CredVerificationURL, "url"); err != nil { - return err - } - return cs.validateMetadataServer(cs.IMDSv2SessionTokenURL, "imdsv2_session_token_url") -} - -var validHostnames []string = []string{"169.254.169.254", "fd00:ec2::254"} - -func (cs awsCredentialSource) isValidMetadataServer(metadataUrl string) bool { - if metadataUrl == "" { - // Zero value means use default, which is valid. 
- return true - } - - u, err := url.Parse(metadataUrl) - if err != nil { - // Unparseable URL means invalid - return false - } - - for _, validHostname := range validHostnames { - if u.Hostname() == validHostname { - // If it's one of the valid hostnames, everything is good - return true - } - } - - // hostname not found in our allowlist, so not valid - return false -} - -func (cs awsCredentialSource) validateMetadataServer(metadataUrl, urlName string) error { - if !cs.isValidMetadataServer(metadataUrl) { - return fmt.Errorf("oauth2/google: invalid hostname %s for %s", metadataUrl, urlName) - } - - return nil -} - func (cs awsCredentialSource) doRequest(req *http.Request) (*http.Response, error) { if cs.client == nil { cs.client = oauth2.NewClient(cs.ctx, nil) @@ -339,6 +296,10 @@ func shouldUseMetadataServer() bool { return !canRetrieveRegionFromEnvironment() || !canRetrieveSecurityCredentialFromEnvironment() } +func (cs awsCredentialSource) credentialSourceType() string { + return "aws" +} + func (cs awsCredentialSource) subjectToken() (string, error) { if cs.requestSigner == nil { headers := make(map[string]string) diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go index dcd252a6..33288d36 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go @@ -8,13 +8,12 @@ import ( "context" "fmt" "net/http" - "net/url" "regexp" "strconv" - "strings" "time" "golang.org/x/oauth2" + "golang.org/x/oauth2/google/internal/stsexchange" ) // now aliases time.Now for testing @@ -63,31 +62,10 @@ type Config struct { WorkforcePoolUserProject string } -// Each element consists of a list of patterns. validateURLs checks for matches -// that include all elements in a given list, in that order. 
- var ( validWorkforceAudiencePattern *regexp.Regexp = regexp.MustCompile(`//iam\.googleapis\.com/locations/[^/]+/workforcePools/`) ) -func validateURL(input string, patterns []*regexp.Regexp, scheme string) bool { - parsed, err := url.Parse(input) - if err != nil { - return false - } - if !strings.EqualFold(parsed.Scheme, scheme) { - return false - } - toTest := parsed.Host - - for _, pattern := range patterns { - if pattern.MatchString(toTest) { - return true - } - } - return false -} - func validateWorkforceAudience(input string) bool { return validWorkforceAudiencePattern.MatchString(input) } @@ -185,10 +163,6 @@ func (c *Config) parse(ctx context.Context) (baseCredentialSource, error) { awsCredSource.IMDSv2SessionTokenURL = c.CredentialSource.IMDSv2SessionTokenURL } - if err := awsCredSource.validateMetadataServers(); err != nil { - return nil, err - } - return awsCredSource, nil } } else if c.CredentialSource.File != "" { @@ -202,6 +176,7 @@ func (c *Config) parse(ctx context.Context) (baseCredentialSource, error) { } type baseCredentialSource interface { + credentialSourceType() string subjectToken() (string, error) } @@ -211,6 +186,15 @@ type tokenSource struct { conf *Config } +func getMetricsHeaderValue(conf *Config, credSource baseCredentialSource) string { + return fmt.Sprintf("gl-go/%s auth/%s google-byoid-sdk source/%s sa-impersonation/%t config-lifetime/%t", + goVersion(), + "unknown", + credSource.credentialSourceType(), + conf.ServiceAccountImpersonationURL != "", + conf.ServiceAccountImpersonationLifetimeSeconds != 0) +} + // Token allows tokenSource to conform to the oauth2.TokenSource interface. func (ts tokenSource) Token() (*oauth2.Token, error) { conf := ts.conf @@ -224,7 +208,7 @@ func (ts tokenSource) Token() (*oauth2.Token, error) { if err != nil { return nil, err } - stsRequest := stsTokenExchangeRequest{ + stsRequest := stsexchange.TokenExchangeRequest{ GrantType: "urn:ietf:params:oauth:grant-type:token-exchange", Audience: conf.Audience, Scope: conf.Scopes, @@ -234,7 +218,8 @@ func (ts tokenSource) Token() (*oauth2.Token, error) { } header := make(http.Header) header.Add("Content-Type", "application/x-www-form-urlencoded") - clientAuth := clientAuthentication{ + header.Add("x-goog-api-client", getMetricsHeaderValue(conf, credSource)) + clientAuth := stsexchange.ClientAuthentication{ AuthStyle: oauth2.AuthStyleInHeader, ClientID: conf.ClientID, ClientSecret: conf.ClientSecret, @@ -247,7 +232,7 @@ func (ts tokenSource) Token() (*oauth2.Token, error) { "userProject": conf.WorkforcePoolUserProject, } } - stsResp, err := exchangeToken(ts.ctx, conf.TokenURL, &stsRequest, clientAuth, header, options) + stsResp, err := stsexchange.ExchangeToken(ts.ctx, conf.TokenURL, &stsRequest, clientAuth, header, options) if err != nil { return nil, err } diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go index 579bcce5..6497dc02 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go @@ -233,6 +233,10 @@ func (cs executableCredentialSource) parseSubjectTokenFromSource(response []byte return "", tokenTypeError(source) } +func (cs executableCredentialSource) credentialSourceType() string { + return "executable" +} + func (cs executableCredentialSource) subjectToken() (string, error) { if token, err := cs.getTokenFromOutputFile(); 
token != "" || err != nil { return token, err diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/filecredsource.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/filecredsource.go index e953ddb4..f35f73c5 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/filecredsource.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/filecredsource.go @@ -19,6 +19,10 @@ type fileCredentialSource struct { Format format } +func (cs fileCredentialSource) credentialSourceType() string { + return "file" +} + func (cs fileCredentialSource) subjectToken() (string, error) { tokenFile, err := os.Open(cs.File) if err != nil { diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/header.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/header.go new file mode 100644 index 00000000..1d5aad2e --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/header.go @@ -0,0 +1,64 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package externalaccount + +import ( + "runtime" + "strings" + "unicode" +) + +var ( + // version is a package internal global variable for testing purposes. + version = runtime.Version +) + +// versionUnknown is only used when the runtime version cannot be determined. +const versionUnknown = "UNKNOWN" + +// goVersion returns a Go runtime version derived from the runtime environment +// that is modified to be suitable for reporting in a header, meaning it has no +// whitespace. If it is unable to determine the Go runtime version, it returns +// versionUnknown. +func goVersion() string { + const develPrefix = "devel +" + + s := version() + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } else if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + + notSemverRune := func(r rune) bool { + return !strings.ContainsRune("0123456789.", r) + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + // Some release candidates already have a dash in them. 
+ if !strings.HasPrefix(prerelease, "-") { + prerelease = "-" + prerelease + } + s += prerelease + } + return s + } + return "UNKNOWN" +} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go index 16dca654..606bb4e8 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go @@ -23,6 +23,10 @@ type urlCredentialSource struct { ctx context.Context } +func (cs urlCredentialSource) credentialSourceType() string { + return "url" +} + func (cs urlCredentialSource) subjectToken() (string, error) { client := oauth2.NewClient(cs.ctx, nil) req, err := http.NewRequest("GET", cs.URL, nil) diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccountauthorizeduser/externalaccountauthorizeduser.go b/vendor/golang.org/x/oauth2/google/internal/externalaccountauthorizeduser/externalaccountauthorizeduser.go new file mode 100644 index 00000000..cb582070 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccountauthorizeduser/externalaccountauthorizeduser.go @@ -0,0 +1,114 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package externalaccountauthorizeduser + +import ( + "context" + "errors" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google/internal/stsexchange" +) + +// now aliases time.Now for testing. +var now = func() time.Time { + return time.Now().UTC() +} + +var tokenValid = func(token oauth2.Token) bool { + return token.Valid() +} + +type Config struct { + // Audience is the Secure Token Service (STS) audience which contains the resource name for the workforce pool and + // the provider identifier in that pool. + Audience string + // RefreshToken is the optional OAuth 2.0 refresh token. If specified, credentials can be refreshed. + RefreshToken string + // TokenURL is the optional STS token exchange endpoint for refresh. Must be specified for refresh, can be left as + // None if the token can not be refreshed. + TokenURL string + // TokenInfoURL is the optional STS endpoint URL for token introspection. + TokenInfoURL string + // ClientID is only required in conjunction with ClientSecret, as described above. + ClientID string + // ClientSecret is currently only required if token_info endpoint also needs to be called with the generated GCP + // access token. When provided, STS will be called with additional basic authentication using client_id as username + // and client_secret as password. + ClientSecret string + // Token is the OAuth2.0 access token. Can be nil if refresh information is provided. + Token string + // Expiry is the optional expiration datetime of the OAuth 2.0 access token. + Expiry time.Time + // RevokeURL is the optional STS endpoint URL for revoking tokens. + RevokeURL string + // QuotaProjectID is the optional project ID used for quota and billing. This project may be different from the + // project used to create the credentials. 
+ QuotaProjectID string + Scopes []string +} + +func (c *Config) canRefresh() bool { + return c.ClientID != "" && c.ClientSecret != "" && c.RefreshToken != "" && c.TokenURL != "" +} + +func (c *Config) TokenSource(ctx context.Context) (oauth2.TokenSource, error) { + var token oauth2.Token + if c.Token != "" && !c.Expiry.IsZero() { + token = oauth2.Token{ + AccessToken: c.Token, + Expiry: c.Expiry, + TokenType: "Bearer", + } + } + if !tokenValid(token) && !c.canRefresh() { + return nil, errors.New("oauth2/google: Token should be created with fields to make it valid (`token` and `expiry`), or fields to allow it to refresh (`refresh_token`, `token_url`, `client_id`, `client_secret`).") + } + + ts := tokenSource{ + ctx: ctx, + conf: c, + } + + return oauth2.ReuseTokenSource(&token, ts), nil +} + +type tokenSource struct { + ctx context.Context + conf *Config +} + +func (ts tokenSource) Token() (*oauth2.Token, error) { + conf := ts.conf + if !conf.canRefresh() { + return nil, errors.New("oauth2/google: The credentials do not contain the necessary fields need to refresh the access token. You must specify refresh_token, token_url, client_id, and client_secret.") + } + + clientAuth := stsexchange.ClientAuthentication{ + AuthStyle: oauth2.AuthStyleInHeader, + ClientID: conf.ClientID, + ClientSecret: conf.ClientSecret, + } + + stsResponse, err := stsexchange.RefreshAccessToken(ts.ctx, conf.TokenURL, conf.RefreshToken, clientAuth, nil) + if err != nil { + return nil, err + } + if stsResponse.ExpiresIn < 0 { + return nil, errors.New("oauth2/google: got invalid expiry from security token service") + } + + if stsResponse.RefreshToken != "" { + conf.RefreshToken = stsResponse.RefreshToken + } + + token := &oauth2.Token{ + AccessToken: stsResponse.AccessToken, + Expiry: now().Add(time.Duration(stsResponse.ExpiresIn) * time.Second), + TokenType: "Bearer", + } + return token, nil +} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/clientauth.go b/vendor/golang.org/x/oauth2/google/internal/stsexchange/clientauth.go similarity index 88% rename from vendor/golang.org/x/oauth2/google/internal/externalaccount/clientauth.go rename to vendor/golang.org/x/oauth2/google/internal/stsexchange/clientauth.go index 99987ce2..ebd520ea 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/clientauth.go +++ b/vendor/golang.org/x/oauth2/google/internal/stsexchange/clientauth.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package externalaccount +package stsexchange import ( "encoding/base64" @@ -12,8 +12,8 @@ import ( "golang.org/x/oauth2" ) -// clientAuthentication represents an OAuth client ID and secret and the mechanism for passing these credentials as stated in rfc6749#2.3.1. -type clientAuthentication struct { +// ClientAuthentication represents an OAuth client ID and secret and the mechanism for passing these credentials as stated in rfc6749#2.3.1. +type ClientAuthentication struct { // AuthStyle can be either basic or request-body AuthStyle oauth2.AuthStyle ClientID string @@ -23,7 +23,7 @@ type clientAuthentication struct { // InjectAuthentication is used to add authentication to a Secure Token Service exchange // request. It modifies either the passed url.Values or http.Header depending on the desired // authentication format. 
-func (c *clientAuthentication) InjectAuthentication(values url.Values, headers http.Header) { +func (c *ClientAuthentication) InjectAuthentication(values url.Values, headers http.Header) { if c.ClientID == "" || c.ClientSecret == "" || values == nil || headers == nil { return } diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/sts_exchange.go b/vendor/golang.org/x/oauth2/google/internal/stsexchange/sts_exchange.go similarity index 68% rename from vendor/golang.org/x/oauth2/google/internal/externalaccount/sts_exchange.go rename to vendor/golang.org/x/oauth2/google/internal/stsexchange/sts_exchange.go index e6fcae5f..1a0bebd1 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/sts_exchange.go +++ b/vendor/golang.org/x/oauth2/google/internal/stsexchange/sts_exchange.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package externalaccount +package stsexchange import ( "context" @@ -18,14 +18,17 @@ import ( "golang.org/x/oauth2" ) -// exchangeToken performs an oauth2 token exchange with the provided endpoint. +func defaultHeader() http.Header { + header := make(http.Header) + header.Add("Content-Type", "application/x-www-form-urlencoded") + return header +} + +// ExchangeToken performs an oauth2 token exchange with the provided endpoint. // The first 4 fields are all mandatory. headers can be used to pass additional // headers beyond the bare minimum required by the token exchange. options can // be used to pass additional JSON-structured options to the remote server. -func exchangeToken(ctx context.Context, endpoint string, request *stsTokenExchangeRequest, authentication clientAuthentication, headers http.Header, options map[string]interface{}) (*stsTokenExchangeResponse, error) { - - client := oauth2.NewClient(ctx, nil) - +func ExchangeToken(ctx context.Context, endpoint string, request *TokenExchangeRequest, authentication ClientAuthentication, headers http.Header, options map[string]interface{}) (*Response, error) { data := url.Values{} data.Set("audience", request.Audience) data.Set("grant_type", "urn:ietf:params:oauth:grant-type:token-exchange") @@ -41,13 +44,28 @@ func exchangeToken(ctx context.Context, endpoint string, request *stsTokenExchan data.Set("options", string(opts)) } + return makeRequest(ctx, endpoint, data, authentication, headers) +} + +func RefreshAccessToken(ctx context.Context, endpoint string, refreshToken string, authentication ClientAuthentication, headers http.Header) (*Response, error) { + data := url.Values{} + data.Set("grant_type", "refresh_token") + data.Set("refresh_token", refreshToken) + + return makeRequest(ctx, endpoint, data, authentication, headers) +} + +func makeRequest(ctx context.Context, endpoint string, data url.Values, authentication ClientAuthentication, headers http.Header) (*Response, error) { + if headers == nil { + headers = defaultHeader() + } + client := oauth2.NewClient(ctx, nil) authentication.InjectAuthentication(data, headers) encodedData := data.Encode() req, err := http.NewRequest("POST", endpoint, strings.NewReader(encodedData)) if err != nil { return nil, fmt.Errorf("oauth2/google: failed to properly build http request: %v", err) - } req = req.WithContext(ctx) for key, list := range headers { @@ -71,7 +89,7 @@ func exchangeToken(ctx context.Context, endpoint string, request *stsTokenExchan if c := resp.StatusCode; c < 200 || c > 299 { return nil, fmt.Errorf("oauth2/google: status code %d: %s", c, body) } - var 
stsResp stsTokenExchangeResponse + var stsResp Response err = json.Unmarshal(body, &stsResp) if err != nil { return nil, fmt.Errorf("oauth2/google: failed to unmarshal response body from Secure Token Server: %v", err) @@ -81,8 +99,8 @@ func exchangeToken(ctx context.Context, endpoint string, request *stsTokenExchan return &stsResp, nil } -// stsTokenExchangeRequest contains fields necessary to make an oauth2 token exchange. -type stsTokenExchangeRequest struct { +// TokenExchangeRequest contains fields necessary to make an oauth2 token exchange. +type TokenExchangeRequest struct { ActingParty struct { ActorToken string ActorTokenType string @@ -96,8 +114,8 @@ type stsTokenExchangeRequest struct { SubjectTokenType string } -// stsTokenExchangeResponse is used to decode the remote server response during an oauth2 token exchange. -type stsTokenExchangeResponse struct { +// Response is used to decode the remote server response during an oauth2 token exchange. +type Response struct { AccessToken string `json:"access_token"` IssuedTokenType string `json:"issued_token_type"` TokenType string `json:"token_type"` diff --git a/vendor/golang.org/x/oauth2/internal/client_appengine.go b/vendor/golang.org/x/oauth2/internal/client_appengine.go index e1755d1d..d28140f7 100644 --- a/vendor/golang.org/x/oauth2/internal/client_appengine.go +++ b/vendor/golang.org/x/oauth2/internal/client_appengine.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build appengine -// +build appengine package internal diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go index 58901bda..e83ddeef 100644 --- a/vendor/golang.org/x/oauth2/internal/token.go +++ b/vendor/golang.org/x/oauth2/internal/token.go @@ -18,6 +18,7 @@ import ( "strconv" "strings" "sync" + "sync/atomic" "time" ) @@ -115,41 +116,60 @@ const ( AuthStyleInHeader AuthStyle = 2 ) -// authStyleCache is the set of tokenURLs we've successfully used via +// LazyAuthStyleCache is a backwards compatibility compromise to let Configs +// have a lazily-initialized AuthStyleCache. +// +// The two users of this, oauth2.Config and oauth2/clientcredentials.Config, +// both would ideally just embed an unexported AuthStyleCache but because both +// were historically allowed to be copied by value we can't retroactively add an +// uncopyable Mutex to them. +// +// We could use an atomic.Pointer, but that was added recently enough (in Go +// 1.18) that we'd break Go 1.17 users where the tests as of 2023-08-03 +// still pass. By using an atomic.Value, it supports both Go 1.17 and +// copying by value, even if that's not ideal. +type LazyAuthStyleCache struct { + v atomic.Value // of *AuthStyleCache +} + +func (lc *LazyAuthStyleCache) Get() *AuthStyleCache { + if c, ok := lc.v.Load().(*AuthStyleCache); ok { + return c + } + c := new(AuthStyleCache) + if !lc.v.CompareAndSwap(nil, c) { + c = lc.v.Load().(*AuthStyleCache) + } + return c +} + +// AuthStyleCache is the set of tokenURLs we've successfully used via // RetrieveToken and which style auth we ended up using. // It's called a cache, but it doesn't (yet?) shrink. It's expected that // the set of OAuth2 servers a program contacts over time is fixed and // small. -var authStyleCache struct { - sync.Mutex - m map[string]AuthStyle // keyed by tokenURL -} - -// ResetAuthCache resets the global authentication style cache used -// for AuthStyleUnknown token requests. 
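An aside on the lazy-initialization pattern introduced by LazyAuthStyleCache above: the zero value is immediately usable, allocation happens on first Get, and atomic.Value.CompareAndSwap resolves racing initializers so every caller ends up with the same pointer. A minimal standalone sketch of the same pattern; lazyCache and payload are illustrative names, not part of this patch:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type payload struct{ m map[string]int } // stand-in for AuthStyleCache

// lazyCache mirrors the LazyAuthStyleCache pattern: the zero value is
// ready to use, and the payload is allocated lazily on first Get.
type lazyCache struct {
	v atomic.Value // of *payload
}

func (lc *lazyCache) Get() *payload {
	if p, ok := lc.v.Load().(*payload); ok {
		return p
	}
	p := new(payload)
	if !lc.v.CompareAndSwap(nil, p) {
		// Lost the race: another goroutine stored first; use its value.
		p = lc.v.Load().(*payload)
	}
	return p
}

func main() {
	var lc lazyCache
	got := make([]*payload, 8)
	var wg sync.WaitGroup
	for i := range got {
		wg.Add(1)
		go func(i int) { defer wg.Done(); got[i] = lc.Get() }(i)
	}
	wg.Wait()
	fmt.Println(got[0] == got[7]) // true: all callers share one payload
}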
-func ResetAuthCache() { - authStyleCache.Lock() - defer authStyleCache.Unlock() - authStyleCache.m = nil +type AuthStyleCache struct { + mu sync.Mutex + m map[string]AuthStyle // keyed by tokenURL } // lookupAuthStyle reports which auth style we last used with tokenURL // when calling RetrieveToken and whether we have ever done so. -func lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) { - authStyleCache.Lock() - defer authStyleCache.Unlock() - style, ok = authStyleCache.m[tokenURL] +func (c *AuthStyleCache) lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) { + c.mu.Lock() + defer c.mu.Unlock() + style, ok = c.m[tokenURL] return } // setAuthStyle adds an entry to authStyleCache, documented above. -func setAuthStyle(tokenURL string, v AuthStyle) { - authStyleCache.Lock() - defer authStyleCache.Unlock() - if authStyleCache.m == nil { - authStyleCache.m = make(map[string]AuthStyle) +func (c *AuthStyleCache) setAuthStyle(tokenURL string, v AuthStyle) { + c.mu.Lock() + defer c.mu.Unlock() + if c.m == nil { + c.m = make(map[string]AuthStyle) } - authStyleCache.m[tokenURL] = v + c.m[tokenURL] = v } // newTokenRequest returns a new *http.Request to retrieve a new token @@ -189,10 +209,10 @@ func cloneURLValues(v url.Values) url.Values { return v2 } -func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle) (*Token, error) { +func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle, styleCache *AuthStyleCache) (*Token, error) { needsAuthStyleProbe := authStyle == 0 if needsAuthStyleProbe { - if style, ok := lookupAuthStyle(tokenURL); ok { + if style, ok := styleCache.lookupAuthStyle(tokenURL); ok { authStyle = style needsAuthStyleProbe = false } else { @@ -222,7 +242,7 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, token, err = doTokenRoundTrip(ctx, req) } if needsAuthStyleProbe && err == nil { - setAuthStyle(tokenURL, authStyle) + styleCache.setAuthStyle(tokenURL, authStyle) } // Don't overwrite `RefreshToken` with an empty value // if this was a token refreshing request. diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index 9085fabe..90a2c3d6 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -58,6 +58,10 @@ type Config struct { // Scope specifies optional requested permissions. Scopes []string + + // authStyleCache caches which auth style to use when Endpoint.AuthStyle is + // the zero value (AuthStyleAutoDetect). + authStyleCache internal.LazyAuthStyleCache } // A TokenSource is anything that can return a token. @@ -71,8 +75,9 @@ type TokenSource interface { // Endpoint represents an OAuth 2.0 provider's authorization and token // endpoint URLs. type Endpoint struct { - AuthURL string - TokenURL string + AuthURL string + DeviceAuthURL string + TokenURL string // AuthStyle optionally specifies how the endpoint wants the // client ID & client secret sent. The zero value means to @@ -139,15 +144,19 @@ func SetAuthURLParam(key, value string) AuthCodeOption { // AuthCodeURL returns a URL to OAuth 2.0 provider's consent page // that asks for permissions for the required scopes explicitly. // -// State is a token to protect the user from CSRF attacks. You must -// always provide a non-empty string and validate that it matches the -// state query parameter on your redirect callback. 
-// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info.
+// State is an opaque value used by the client to maintain state between the
+// request and callback. The authorization server includes this value when
+// redirecting the user agent back to the client.
 //
 // Opts may include AccessTypeOnline or AccessTypeOffline, as well
 // as ApprovalForce.
-// It can also be used to pass the PKCE challenge.
-// See https://www.oauth.com/oauth2-servers/pkce/ for more info.
+//
+// To protect against CSRF attacks, opts should include a PKCE challenge
+// (S256ChallengeOption). Not all servers support PKCE. An alternative is to
+// generate a random state parameter and verify it after exchange.
+// See https://datatracker.ietf.org/doc/html/rfc6749#section-10.12 (predating
+// PKCE), https://www.oauth.com/oauth2-servers/pkce/ and
+// https://www.ietf.org/archive/id/draft-ietf-oauth-v2-1-09.html#name-cross-site-request-forgery (describing both approaches).
 func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
 var buf bytes.Buffer
 buf.WriteString(c.Endpoint.AuthURL)
@@ -162,7 +171,6 @@ func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
 v.Set("scope", strings.Join(c.Scopes, " "))
 }
 if state != "" {
- // TODO(light): Docs say never to omit state; don't allow empty.
 v.Set("state", state)
 }
 for _, opt := range opts {
@@ -207,10 +215,11 @@ func (c *Config) PasswordCredentialsToken(ctx context.Context, username, passwor
 // The provided context optionally controls which HTTP client is used. See the HTTPClient variable.
 //
 // The code will be in the *http.Request.FormValue("code"). Before
-// calling Exchange, be sure to validate FormValue("state").
+// calling Exchange, be sure to validate FormValue("state") if you are
+// using it to protect against CSRF attacks.
 //
-// Opts may include the PKCE verifier code if previously used in AuthCodeURL.
-// See https://www.oauth.com/oauth2-servers/pkce/ for more info.
+// If using PKCE to protect against CSRF attacks, opts should include a
+// VerifierOption.
 func (c *Config) Exchange(ctx context.Context, code string, opts ...AuthCodeOption) (*Token, error) {
 v := url.Values{
 "grant_type": {"authorization_code"},
diff --git a/vendor/golang.org/x/oauth2/pkce.go b/vendor/golang.org/x/oauth2/pkce.go
new file mode 100644
index 00000000..50593b6d
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/pkce.go
@@ -0,0 +1,68 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package oauth2
+
+import (
+ "crypto/rand"
+ "crypto/sha256"
+ "encoding/base64"
+ "net/url"
+)
+
+const (
+ codeChallengeKey = "code_challenge"
+ codeChallengeMethodKey = "code_challenge_method"
+ codeVerifierKey = "code_verifier"
+)
+
+// GenerateVerifier generates a PKCE code verifier with 32 octets of randomness.
+// This follows recommendations in RFC 7636.
+//
+// A fresh verifier should be generated for each authorization.
+// S256ChallengeOption(verifier) should then be passed to Config.AuthCodeURL
+// (or Config.DeviceAuth) and VerifierOption(verifier) to Config.Exchange
+// (or Config.DeviceAccessToken).
+func GenerateVerifier() string {
+ // "RECOMMENDED that the output of a suitable random number generator be
+ // used to create a 32-octet sequence. The octet sequence is then
+ // base64url-encoded to produce a 43-octet URL-safe string to use as the
+ // code verifier."
+ // https://datatracker.ietf.org/doc/html/rfc7636#section-4.1
+ data := make([]byte, 32)
+ if _, err := rand.Read(data); err != nil {
+ panic(err)
+ }
+ return base64.RawURLEncoding.EncodeToString(data)
+}
+
+// VerifierOption returns a PKCE code verifier AuthCodeOption. It should be
+// passed to Config.Exchange or Config.DeviceAccessToken only.
+func VerifierOption(verifier string) AuthCodeOption {
+ return setParam{k: codeVerifierKey, v: verifier}
+}
+
+// S256ChallengeFromVerifier returns a PKCE code challenge derived from verifier with method S256.
+//
+// Prefer to use S256ChallengeOption where possible.
+func S256ChallengeFromVerifier(verifier string) string {
+ sha := sha256.Sum256([]byte(verifier))
+ return base64.RawURLEncoding.EncodeToString(sha[:])
+}
+
+// S256ChallengeOption derives a PKCE code challenge from verifier with
+// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAuth
+// only.
+func S256ChallengeOption(verifier string) AuthCodeOption {
+ return challengeOption{
+ challenge_method: "S256",
+ challenge: S256ChallengeFromVerifier(verifier),
+ }
+}
+
+type challengeOption struct{ challenge_method, challenge string }
+
+func (p challengeOption) setValue(m url.Values) {
+ m.Set(codeChallengeMethodKey, p.challenge_method)
+ m.Set(codeChallengeKey, p.challenge)
+}
diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go
index 5ffce976..5bbb3321 100644
--- a/vendor/golang.org/x/oauth2/token.go
+++ b/vendor/golang.org/x/oauth2/token.go
@@ -164,7 +164,7 @@ func tokenFromInternal(t *internal.Token) *Token {
 // This token is then mapped from *internal.Token into an *oauth2.Token which is returned along
 // with an error.
 func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) {
- tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle))
+ tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle), c.authStyleCache.Get())
 if err != nil {
 if rErr, ok := err.(*internal.RetrieveError); ok {
 return nil, (*RetrieveError)(rErr)
diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go
index b18efb74..948a3ee6 100644
--- a/vendor/golang.org/x/sync/errgroup/errgroup.go
+++ b/vendor/golang.org/x/sync/errgroup/errgroup.go
@@ -4,6 +4,9 @@
 // Package errgroup provides synchronization, error propagation, and Context
 // cancelation for groups of goroutines working on subtasks of a common task.
+//
+// [errgroup.Group] is related to [sync.WaitGroup] but adds handling of tasks
+// returning errors.
 package errgroup
 import (
diff --git a/vendor/golang.org/x/sync/errgroup/go120.go b/vendor/golang.org/x/sync/errgroup/go120.go
index 7d419d37..f93c740b 100644
--- a/vendor/golang.org/x/sync/errgroup/go120.go
+++ b/vendor/golang.org/x/sync/errgroup/go120.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 //go:build go1.20
-// +build go1.20
 package errgroup
diff --git a/vendor/golang.org/x/sync/errgroup/pre_go120.go b/vendor/golang.org/x/sync/errgroup/pre_go120.go
index 1795c18a..88ce3343 100644
--- a/vendor/golang.org/x/sync/errgroup/pre_go120.go
+++ b/vendor/golang.org/x/sync/errgroup/pre_go120.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
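Taken together, the pkce.go helpers above split PKCE across the two legs of the authorization-code flow: the challenge travels with the authorization request, the verifier with the code exchange. A hedged usage sketch against the API this patch adds; the client ID, endpoints, state value, and authorization code below are placeholders, and Exchange would only succeed against a real token endpoint:

package main

import (
	"context"
	"fmt"

	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		ClientID:    "client-id", // placeholder
		RedirectURL: "https://app.example.com/callback",
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://auth.example.com/authorize", // placeholder
			TokenURL: "https://auth.example.com/token",     // placeholder
		},
	}

	// One fresh verifier per authorization attempt.
	verifier := oauth2.GenerateVerifier()

	// The S256 challenge goes into the authorization URL.
	fmt.Println(conf.AuthCodeURL("state", oauth2.S256ChallengeOption(verifier)))

	// After the user authorizes and the callback delivers a code,
	// redeem it together with the original verifier.
	tok, err := conf.Exchange(context.Background(), "auth-code", oauth2.VerifierOption(verifier))
	fmt.Println(tok, err)
}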
//go:build !go1.20 -// +build !go1.20 package errgroup diff --git a/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s b/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s index db9171c2..269e173c 100644 --- a/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s +++ b/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/cpu/cpu_aix.go b/vendor/golang.org/x/sys/cpu/cpu_aix.go index 8aaeef54..9bf0c32e 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_aix.go +++ b/vendor/golang.org/x/sys/cpu/cpu_aix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix -// +build aix package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_arm64.s index c61f95a0..fcb9a388 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_arm64.s +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go index ccf542a7..a8acd3e3 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go index 0af2f248..c8ae6ddc 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go index fa7cdb9b..910728fb 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (386 || amd64 || amd64p32) && gc -// +build 386 amd64 amd64p32 -// +build gc package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go index 2aff3189..7f194678 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gccgo -// +build gccgo package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go index 4bfbda61..9526d2ce 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gccgo -// +build gccgo package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c index 6cc73109..3f73a05d 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build (386 || amd64 || amd64p32) && gccgo -// +build 386 amd64 amd64p32 -// +build gccgo #include #include diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go index 863d415a..99c60fe9 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (386 || amd64 || amd64p32) && gccgo -// +build 386 amd64 amd64p32 -// +build gccgo package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux.go b/vendor/golang.org/x/sys/cpu/cpu_linux.go index 159a686f..743eb543 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !386 && !amd64 && !amd64p32 && !arm64 -// +build !386,!amd64,!amd64p32,!arm64 package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go index 6000db4c..4686c1d5 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (mips64 || mips64le) -// +build linux -// +build mips64 mips64le package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go index f4992b1a..cd63e733 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x -// +build linux,!arm,!arm64,!mips64,!mips64le,!ppc64,!ppc64le,!s390x package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go index 021356d6..197188e6 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (ppc64 || ppc64le) -// +build linux -// +build ppc64 ppc64le package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_loong64.go b/vendor/golang.org/x/sys/cpu/cpu_loong64.go index 0f57b05b..55863585 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_loong64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_loong64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build loong64 -// +build loong64 package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go index f4063c66..fedb00cc 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build mips64 || mips64le -// +build mips64 mips64le package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go index 07c4e36d..ffb4ec7e 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go +++ b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build mips || mipsle -// +build mips mipsle package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm.go index d7b4fb4c..e9ecf2a4 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_other_arm.go +++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !linux && arm -// +build !linux,arm package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go index f3cde129..5341e7f8 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !linux && !netbsd && !openbsd && arm64 -// +build !linux,!netbsd,!openbsd,arm64 package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go index 0dafe964..5f8f2419 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build !linux && (mips64 || mips64le) -// +build !linux -// +build mips64 mips64le package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go index 060d46b6..89608fba 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_other_ppc64x.go @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build !aix && !linux && (ppc64 || ppc64le) -// +build !aix -// +build !linux -// +build ppc64 ppc64le package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go index dd10eb79..5ab87808 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_other_riscv64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !linux && riscv64 -// +build !linux,riscv64 package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go index 4e8acd16..c14f12b1 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_ppc64x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build ppc64 || ppc64le -// +build ppc64 ppc64le package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go index ff7da60e..7f0c79c0 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build riscv64 -// +build riscv64 package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.s b/vendor/golang.org/x/sys/cpu/cpu_s390x.s index 96f81e20..1fb4b701 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_s390x.s +++ b/vendor/golang.org/x/sys/cpu/cpu_s390x.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/cpu/cpu_wasm.go b/vendor/golang.org/x/sys/cpu/cpu_wasm.go index 7747d888..384787ea 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_wasm.go +++ b/vendor/golang.org/x/sys/cpu/cpu_wasm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build wasm
-// +build wasm
package cpu
diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go
index 2dcde828..c29f5e4c 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_x86.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 //go:build 386 || amd64 || amd64p32
-// +build 386 amd64 amd64p32
 package cpu
diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.s b/vendor/golang.org/x/sys/cpu/cpu_x86.s
index 39acab2f..7d7ba33e 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_x86.s
+++ b/vendor/golang.org/x/sys/cpu/cpu_x86.s
@@ -3,8 +3,6 @@
 // license that can be found in the LICENSE file.
 //go:build (386 || amd64 || amd64p32) && gc
-// +build 386 amd64 amd64p32
-// +build gc
 #include "textflag.h"
diff --git a/vendor/golang.org/x/sys/cpu/endian_big.go b/vendor/golang.org/x/sys/cpu/endian_big.go
index 93ce03a3..7fe04b0a 100644
--- a/vendor/golang.org/x/sys/cpu/endian_big.go
+++ b/vendor/golang.org/x/sys/cpu/endian_big.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 //go:build armbe || arm64be || m68k || mips || mips64 || mips64p32 || ppc || ppc64 || s390 || s390x || shbe || sparc || sparc64
-// +build armbe arm64be m68k mips mips64 mips64p32 ppc ppc64 s390 s390x shbe sparc sparc64
 package cpu
diff --git a/vendor/golang.org/x/sys/cpu/endian_little.go b/vendor/golang.org/x/sys/cpu/endian_little.go
index 55db853e..48eccc4c 100644
--- a/vendor/golang.org/x/sys/cpu/endian_little.go
+++ b/vendor/golang.org/x/sys/cpu/endian_little.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 //go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh || wasm
-// +build 386 amd64 amd64p32 alpha arm arm64 loong64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh wasm
 package cpu
diff --git a/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go b/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go
index d87bd6b3..4cd64c70 100644
--- a/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go
+++ b/vendor/golang.org/x/sys/cpu/proc_cpuinfo_linux.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 //go:build linux && arm64
-// +build linux,arm64
 package cpu
diff --git a/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go b/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go
index b975ea2a..4c9788ea 100644
--- a/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go
+++ b/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 //go:build go1.21
-// +build go1.21
 package cpu
diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go
index 96134157..1b9ccb09 100644
--- a/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go
+++ b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go
@@ -9,7 +9,6 @@
 // gccgo's libgo and thus must not use a CGo method.
//go:build aix && gccgo -// +build aix,gccgo package cpu diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go index 904be42f..e8b6cdbe 100644 --- a/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go +++ b/vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go @@ -7,7 +7,6 @@ // (See golang.org/issue/32102) //go:build aix && ppc64 && gc -// +build aix,ppc64,gc package cpu diff --git a/vendor/golang.org/x/sys/execabs/execabs.go b/vendor/golang.org/x/sys/execabs/execabs.go deleted file mode 100644 index 3bf40fdf..00000000 --- a/vendor/golang.org/x/sys/execabs/execabs.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package execabs is a drop-in replacement for os/exec -// that requires PATH lookups to find absolute paths. -// That is, execabs.Command("cmd") runs the same PATH lookup -// as exec.Command("cmd"), but if the result is a path -// which is relative, the Run and Start methods will report -// an error instead of running the executable. -// -// See https://blog.golang.org/path-security for more information -// about when it may be necessary or appropriate to use this package. -package execabs - -import ( - "context" - "fmt" - "os/exec" - "path/filepath" - "reflect" - "unsafe" -) - -// ErrNotFound is the error resulting if a path search failed to find an executable file. -// It is an alias for exec.ErrNotFound. -var ErrNotFound = exec.ErrNotFound - -// Cmd represents an external command being prepared or run. -// It is an alias for exec.Cmd. -type Cmd = exec.Cmd - -// Error is returned by LookPath when it fails to classify a file as an executable. -// It is an alias for exec.Error. -type Error = exec.Error - -// An ExitError reports an unsuccessful exit by a command. -// It is an alias for exec.ExitError. -type ExitError = exec.ExitError - -func relError(file, path string) error { - return fmt.Errorf("%s resolves to executable in current directory (.%c%s)", file, filepath.Separator, path) -} - -// LookPath searches for an executable named file in the directories -// named by the PATH environment variable. If file contains a slash, -// it is tried directly and the PATH is not consulted. The result will be -// an absolute path. -// -// LookPath differs from exec.LookPath in its handling of PATH lookups, -// which are used for file names without slashes. If exec.LookPath's -// PATH lookup would have returned an executable from the current directory, -// LookPath instead returns an error. -func LookPath(file string) (string, error) { - path, err := exec.LookPath(file) - if err != nil && !isGo119ErrDot(err) { - return "", err - } - if filepath.Base(file) == file && !filepath.IsAbs(path) { - return "", relError(file, path) - } - return path, nil -} - -func fixCmd(name string, cmd *exec.Cmd) { - if filepath.Base(name) == name && !filepath.IsAbs(cmd.Path) && !isGo119ErrFieldSet(cmd) { - // exec.Command was called with a bare binary name and - // exec.LookPath returned a path which is not absolute. - // Set cmd.lookPathErr and clear cmd.Path so that it - // cannot be run. - lookPathErr := (*error)(unsafe.Pointer(reflect.ValueOf(cmd).Elem().FieldByName("lookPathErr").Addr().Pointer())) - if *lookPathErr == nil { - *lookPathErr = relError(name, cmd.Path) - } - cmd.Path = "" - } -} - -// CommandContext is like Command but includes a context. 
-// -// The provided context is used to kill the process (by calling os.Process.Kill) -// if the context becomes done before the command completes on its own. -func CommandContext(ctx context.Context, name string, arg ...string) *exec.Cmd { - cmd := exec.CommandContext(ctx, name, arg...) - fixCmd(name, cmd) - return cmd - -} - -// Command returns the Cmd struct to execute the named program with the given arguments. -// See exec.Command for most details. -// -// Command differs from exec.Command in its handling of PATH lookups, -// which are used when the program name contains no slashes. -// If exec.Command would have returned an exec.Cmd configured to run an -// executable from the current directory, Command instead -// returns an exec.Cmd that will return an error from Start or Run. -func Command(name string, arg ...string) *exec.Cmd { - cmd := exec.Command(name, arg...) - fixCmd(name, cmd) - return cmd -} diff --git a/vendor/golang.org/x/sys/execabs/execabs_go118.go b/vendor/golang.org/x/sys/execabs/execabs_go118.go deleted file mode 100644 index 2000064a..00000000 --- a/vendor/golang.org/x/sys/execabs/execabs_go118.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.19 -// +build !go1.19 - -package execabs - -import "os/exec" - -func isGo119ErrDot(err error) bool { - return false -} - -func isGo119ErrFieldSet(cmd *exec.Cmd) bool { - return false -} diff --git a/vendor/golang.org/x/sys/execabs/execabs_go119.go b/vendor/golang.org/x/sys/execabs/execabs_go119.go deleted file mode 100644 index f364b341..00000000 --- a/vendor/golang.org/x/sys/execabs/execabs_go119.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.19 -// +build go1.19 - -package execabs - -import ( - "errors" - "os/exec" -) - -func isGo119ErrDot(err error) bool { - return errors.Is(err, exec.ErrDot) -} - -func isGo119ErrFieldSet(cmd *exec.Cmd) bool { - return cmd.Err != nil -} diff --git a/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go b/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go index c9b69937..73687de7 100644 --- a/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go +++ b/vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build go1.5 -// +build go1.5 package plan9 diff --git a/vendor/golang.org/x/sys/plan9/pwd_plan9.go b/vendor/golang.org/x/sys/plan9/pwd_plan9.go index 98bf56b7..fb945821 100644 --- a/vendor/golang.org/x/sys/plan9/pwd_plan9.go +++ b/vendor/golang.org/x/sys/plan9/pwd_plan9.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !go1.5 -// +build !go1.5 package plan9 diff --git a/vendor/golang.org/x/sys/plan9/race.go b/vendor/golang.org/x/sys/plan9/race.go index 62377d2f..c02d9ed3 100644 --- a/vendor/golang.org/x/sys/plan9/race.go +++ b/vendor/golang.org/x/sys/plan9/race.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build plan9 && race -// +build plan9,race package plan9 diff --git a/vendor/golang.org/x/sys/plan9/race0.go b/vendor/golang.org/x/sys/plan9/race0.go index f8da3087..7b15e15f 100644 --- a/vendor/golang.org/x/sys/plan9/race0.go +++ b/vendor/golang.org/x/sys/plan9/race0.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build plan9 && !race -// +build plan9,!race package plan9 diff --git a/vendor/golang.org/x/sys/plan9/str.go b/vendor/golang.org/x/sys/plan9/str.go index 55fa8d02..ba3e8ff8 100644 --- a/vendor/golang.org/x/sys/plan9/str.go +++ b/vendor/golang.org/x/sys/plan9/str.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build plan9 -// +build plan9 package plan9 diff --git a/vendor/golang.org/x/sys/plan9/syscall.go b/vendor/golang.org/x/sys/plan9/syscall.go index 67e5b011..d631fd66 100644 --- a/vendor/golang.org/x/sys/plan9/syscall.go +++ b/vendor/golang.org/x/sys/plan9/syscall.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build plan9 -// +build plan9 // Package plan9 contains an interface to the low-level operating system // primitives. OS details vary depending on the underlying system, and diff --git a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go index 3f40b9bd..f780d5c8 100644 --- a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go +++ b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build plan9 && 386 -// +build plan9,386 package plan9 diff --git a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go index 0e6a96aa..7de61065 100644 --- a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go +++ b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build plan9 && amd64 -// +build plan9,amd64 package plan9 diff --git a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go index 244c501b..ea85780f 100644 --- a/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go +++ b/vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build plan9 && arm -// +build plan9,arm package plan9 diff --git a/vendor/golang.org/x/sys/unix/aliases.go b/vendor/golang.org/x/sys/unix/aliases.go index abc89c10..e7d3df4b 100644 --- a/vendor/golang.org/x/sys/unix/aliases.go +++ b/vendor/golang.org/x/sys/unix/aliases.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) && go1.9 -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos -// +build go1.9 package unix diff --git a/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s b/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s index db9171c2..269e173c 100644 --- a/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s +++ b/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_386.s b/vendor/golang.org/x/sys/unix/asm_bsd_386.s index e0fcd9b3..a4fcef0e 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_386.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_386.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build (freebsd || netbsd || openbsd) && gc -// +build freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s b/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s index 2b99c349..1e63615c 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_amd64.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin || dragonfly || freebsd || netbsd || openbsd) && gc -// +build darwin dragonfly freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_arm.s b/vendor/golang.org/x/sys/unix/asm_bsd_arm.s index d702d4ad..6496c310 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_arm.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_arm.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (freebsd || netbsd || openbsd) && gc -// +build freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s b/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s index fe36a739..4fd1f54d 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_arm64.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin || freebsd || netbsd || openbsd) && gc -// +build darwin freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s b/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s index e5b9a848..42f7eb9e 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_ppc64.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin || freebsd || netbsd || openbsd) && gc -// +build darwin freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s b/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s index d560019e..f8902667 100644 --- a/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s +++ b/vendor/golang.org/x/sys/unix/asm_bsd_riscv64.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin || freebsd || netbsd || openbsd) && gc -// +build darwin freebsd netbsd openbsd -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_386.s b/vendor/golang.org/x/sys/unix/asm_linux_386.s index 8fd101d0..3b473487 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_386.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_386.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_amd64.s b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s index 7ed38e43..67e29f31 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_amd64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm.s b/vendor/golang.org/x/sys/unix/asm_linux_arm.s index 8ef1d514..d6ae269c 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_arm.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_arm.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build gc -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm64.s b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s index 98ae0276..01e5e253 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_arm64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && arm64 && gc -// +build linux -// +build arm64 -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_loong64.s b/vendor/golang.org/x/sys/unix/asm_linux_loong64.s index 56535728..2abf12f6 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_loong64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_loong64.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && loong64 && gc -// +build linux -// +build loong64 -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s index 21231d2c..f84bae71 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (mips64 || mips64le) && gc -// +build linux -// +build mips64 mips64le -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s index 6783b26c..f08f6280 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (mips || mipsle) && gc -// +build linux -// +build mips mipsle -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s index 19d49893..bdfc024d 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (ppc64 || ppc64le) && gc -// +build linux -// +build ppc64 ppc64le -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s index e42eb81d..2e8c9961 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build riscv64 && gc -// +build riscv64 -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_linux_s390x.s b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s index c46aab33..2c394b11 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_s390x.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_s390x.s @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && s390x && gc -// +build linux -// +build s390x -// +build gc #include "textflag.h" diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s index 5e7a1169..fab586a2 100644 --- a/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s +++ b/vendor/golang.org/x/sys/unix/asm_openbsd_mips64.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build gc
-// +build gc
#include "textflag.h"
diff --git a/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s
index f8c5394c..f949ec54 100644
--- a/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s
+++ b/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 //go:build gc
-// +build gc
 #include "textflag.h"
diff --git a/vendor/golang.org/x/sys/unix/asm_zos_s390x.s b/vendor/golang.org/x/sys/unix/asm_zos_s390x.s
index 3b54e185..2f67ba86 100644
--- a/vendor/golang.org/x/sys/unix/asm_zos_s390x.s
+++ b/vendor/golang.org/x/sys/unix/asm_zos_s390x.s
@@ -3,9 +3,6 @@
 // license that can be found in the LICENSE file.
 //go:build zos && s390x && gc
-// +build zos
-// +build s390x
-// +build gc
 #include "textflag.h"
diff --git a/vendor/golang.org/x/sys/unix/cap_freebsd.go b/vendor/golang.org/x/sys/unix/cap_freebsd.go
index 0b7c6adb..a0865789 100644
--- a/vendor/golang.org/x/sys/unix/cap_freebsd.go
+++ b/vendor/golang.org/x/sys/unix/cap_freebsd.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 //go:build freebsd
-// +build freebsd
 package unix
diff --git a/vendor/golang.org/x/sys/unix/constants.go b/vendor/golang.org/x/sys/unix/constants.go
index 394a3965..6fb7cb77 100644
--- a/vendor/golang.org/x/sys/unix/constants.go
+++ b/vendor/golang.org/x/sys/unix/constants.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
 package unix
diff --git a/vendor/golang.org/x/sys/unix/dev_aix_ppc.go b/vendor/golang.org/x/sys/unix/dev_aix_ppc.go
index 65a99850..d7851346 100644
--- a/vendor/golang.org/x/sys/unix/dev_aix_ppc.go
+++ b/vendor/golang.org/x/sys/unix/dev_aix_ppc.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 //go:build aix && ppc
-// +build aix,ppc
 // Functions to access/create device major and minor numbers matching the
 // encoding used by AIX.
diff --git a/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go b/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go
index 8fc08ad0..623a5e69 100644
--- a/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 //go:build aix && ppc64
-// +build aix,ppc64
 // Functions to access/create device major and minor numbers matching the
 // encoding used by AIX.
diff --git a/vendor/golang.org/x/sys/unix/dev_zos.go b/vendor/golang.org/x/sys/unix/dev_zos.go
index a388e59a..bb6a64fe 100644
--- a/vendor/golang.org/x/sys/unix/dev_zos.go
+++ b/vendor/golang.org/x/sys/unix/dev_zos.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
 //go:build zos && s390x
-// +build zos,s390x
 // Functions to access/create device major and minor numbers matching the
 // encoding used by z/OS.
diff --git a/vendor/golang.org/x/sys/unix/dirent.go b/vendor/golang.org/x/sys/unix/dirent.go
index 2499f977..1ebf1178 100644
--- a/vendor/golang.org/x/sys/unix/dirent.go
+++ b/vendor/golang.org/x/sys/unix/dirent.go
@@ -3,7 +3,6 @@
 // license that can be found in the LICENSE file.
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/endian_big.go b/vendor/golang.org/x/sys/unix/endian_big.go index a5202655..1095fd31 100644 --- a/vendor/golang.org/x/sys/unix/endian_big.go +++ b/vendor/golang.org/x/sys/unix/endian_big.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. // //go:build armbe || arm64be || m68k || mips || mips64 || mips64p32 || ppc || ppc64 || s390 || s390x || shbe || sparc || sparc64 -// +build armbe arm64be m68k mips mips64 mips64p32 ppc ppc64 s390 s390x shbe sparc sparc64 package unix diff --git a/vendor/golang.org/x/sys/unix/endian_little.go b/vendor/golang.org/x/sys/unix/endian_little.go index b0f2bc4a..b9f0e277 100644 --- a/vendor/golang.org/x/sys/unix/endian_little.go +++ b/vendor/golang.org/x/sys/unix/endian_little.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. // //go:build 386 || amd64 || amd64p32 || alpha || arm || arm64 || loong64 || mipsle || mips64le || mips64p32le || nios2 || ppc64le || riscv || riscv64 || sh -// +build 386 amd64 amd64p32 alpha arm arm64 loong64 mipsle mips64le mips64p32le nios2 ppc64le riscv riscv64 sh package unix diff --git a/vendor/golang.org/x/sys/unix/env_unix.go b/vendor/golang.org/x/sys/unix/env_unix.go index 29ccc4d1..a96da71f 100644 --- a/vendor/golang.org/x/sys/unix/env_unix.go +++ b/vendor/golang.org/x/sys/unix/env_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos // Unix environment variables. diff --git a/vendor/golang.org/x/sys/unix/epoll_zos.go b/vendor/golang.org/x/sys/unix/epoll_zos.go index cedaf7e0..7753fdde 100644 --- a/vendor/golang.org/x/sys/unix/epoll_zos.go +++ b/vendor/golang.org/x/sys/unix/epoll_zos.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x package unix diff --git a/vendor/golang.org/x/sys/unix/fcntl.go b/vendor/golang.org/x/sys/unix/fcntl.go index e9b99125..6200876f 100644 --- a/vendor/golang.org/x/sys/unix/fcntl.go +++ b/vendor/golang.org/x/sys/unix/fcntl.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build dragonfly || freebsd || linux || netbsd || openbsd -// +build dragonfly freebsd linux netbsd openbsd +//go:build dragonfly || freebsd || linux || netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go index 29d44808..13b4acd5 100644 --- a/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go +++ b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build (linux && 386) || (linux && arm) || (linux && mips) || (linux && mipsle) || (linux && ppc) -// +build linux,386 linux,arm linux,mips linux,mipsle linux,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/fdset.go b/vendor/golang.org/x/sys/unix/fdset.go index a8068f94..9e83d18c 100644 --- a/vendor/golang.org/x/sys/unix/fdset.go +++ b/vendor/golang.org/x/sys/unix/fdset.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/fstatfs_zos.go b/vendor/golang.org/x/sys/unix/fstatfs_zos.go index e377cc9f..c8bde601 100644 --- a/vendor/golang.org/x/sys/unix/fstatfs_zos.go +++ b/vendor/golang.org/x/sys/unix/fstatfs_zos.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x package unix diff --git a/vendor/golang.org/x/sys/unix/gccgo.go b/vendor/golang.org/x/sys/unix/gccgo.go index b06f52d7..aca5721d 100644 --- a/vendor/golang.org/x/sys/unix/gccgo.go +++ b/vendor/golang.org/x/sys/unix/gccgo.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gccgo && !aix && !hurd -// +build gccgo,!aix,!hurd package unix diff --git a/vendor/golang.org/x/sys/unix/gccgo_c.c b/vendor/golang.org/x/sys/unix/gccgo_c.c index f98a1c54..d468b7b4 100644 --- a/vendor/golang.org/x/sys/unix/gccgo_c.c +++ b/vendor/golang.org/x/sys/unix/gccgo_c.c @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gccgo && !aix && !hurd -// +build gccgo,!aix,!hurd #include #include diff --git a/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go index e60e49a3..972d61bd 100644 --- a/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build gccgo && linux && amd64 -// +build gccgo,linux,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/ifreq_linux.go b/vendor/golang.org/x/sys/unix/ifreq_linux.go index 15721a51..848840ae 100644 --- a/vendor/golang.org/x/sys/unix/ifreq_linux.go +++ b/vendor/golang.org/x/sys/unix/ifreq_linux.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux -// +build linux package unix diff --git a/vendor/golang.org/x/sys/unix/ioctl_linux.go b/vendor/golang.org/x/sys/unix/ioctl_linux.go index 0d12c085..dbe680ea 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_linux.go +++ b/vendor/golang.org/x/sys/unix/ioctl_linux.go @@ -231,3 +231,8 @@ func IoctlLoopGetStatus64(fd int) (*LoopInfo64, error) { func IoctlLoopSetStatus64(fd int, value *LoopInfo64) error { return ioctlPtr(fd, LOOP_SET_STATUS64, unsafe.Pointer(value)) } + +// IoctlLoopConfigure configures all loop device parameters in a single step +func IoctlLoopConfigure(fd int, value *LoopConfig) error { + return ioctlPtr(fd, LOOP_CONFIGURE, unsafe.Pointer(value)) +} diff --git a/vendor/golang.org/x/sys/unix/ioctl_signed.go b/vendor/golang.org/x/sys/unix/ioctl_signed.go index 7def9580..5b0759bd 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_signed.go +++ b/vendor/golang.org/x/sys/unix/ioctl_signed.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || solaris -// +build aix solaris package unix diff --git a/vendor/golang.org/x/sys/unix/ioctl_unsigned.go b/vendor/golang.org/x/sys/unix/ioctl_unsigned.go index 649913d1..20f470b9 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_unsigned.go +++ b/vendor/golang.org/x/sys/unix/ioctl_unsigned.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build darwin || dragonfly || freebsd || hurd || linux || netbsd || openbsd -// +build darwin dragonfly freebsd hurd linux netbsd openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ioctl_zos.go b/vendor/golang.org/x/sys/unix/ioctl_zos.go index cdc21bf7..c8b2a750 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_zos.go +++ b/vendor/golang.org/x/sys/unix/ioctl_zos.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x package unix diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 47fa6a7e..c6492020 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -248,6 +248,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -283,10 +284,6 @@ struct ltchars { #include #endif -#ifndef MSG_FASTOPEN -#define MSG_FASTOPEN 0x20000000 -#endif - #ifndef PTRACE_GETREGS #define PTRACE_GETREGS 0xc #endif @@ -295,14 +292,6 @@ struct ltchars { #define PTRACE_SETREGS 0xd #endif -#ifndef SOL_NETLINK -#define SOL_NETLINK 270 -#endif - -#ifndef SOL_SMC -#define SOL_SMC 286 -#endif - #ifdef SOL_BLUETOOTH // SPARC includes this in /usr/include/sparc64-linux-gnu/bits/socket.h // but it is already in bluetooth_linux.go @@ -319,10 +308,23 @@ struct ltchars { #undef TIPC_WAIT_FOREVER #define TIPC_WAIT_FOREVER 0xffffffff -// Copied from linux/l2tp.h -// Including linux/l2tp.h here causes conflicts between linux/in.h -// and netinet/in.h included via net/route.h above. -#define IPPROTO_L2TP 115 +// Copied from linux/netfilter/nf_nat.h +// Including linux/netfilter/nf_nat.h here causes conflicts between linux/in.h +// and netinet/in.h. +#define NF_NAT_RANGE_MAP_IPS (1 << 0) +#define NF_NAT_RANGE_PROTO_SPECIFIED (1 << 1) +#define NF_NAT_RANGE_PROTO_RANDOM (1 << 2) +#define NF_NAT_RANGE_PERSISTENT (1 << 3) +#define NF_NAT_RANGE_PROTO_RANDOM_FULLY (1 << 4) +#define NF_NAT_RANGE_PROTO_OFFSET (1 << 5) +#define NF_NAT_RANGE_NETMAP (1 << 6) +#define NF_NAT_RANGE_PROTO_RANDOM_ALL \ + (NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PROTO_RANDOM_FULLY) +#define NF_NAT_RANGE_MASK \ + (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED | \ + NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PERSISTENT | \ + NF_NAT_RANGE_PROTO_RANDOM_FULLY | NF_NAT_RANGE_PROTO_OFFSET | \ + NF_NAT_RANGE_NETMAP) // Copied from linux/hid.h. // Keep in sync with the size of the referenced fields. 
@@ -519,6 +521,7 @@ ccflags="$@" $2 ~ /^LOCK_(SH|EX|NB|UN)$/ || $2 ~ /^LO_(KEY|NAME)_SIZE$/ || $2 ~ /^LOOP_(CLR|CTL|GET|SET)_/ || + $2 == "LOOP_CONFIGURE" || $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MREMAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT|UDP)_/ || $2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ || $2 ~ /^NFC_.*_(MAX)?SIZE$/ || @@ -560,7 +563,7 @@ ccflags="$@" $2 ~ /^RLIMIT_(AS|CORE|CPU|DATA|FSIZE|LOCKS|MEMLOCK|MSGQUEUE|NICE|NOFILE|NPROC|RSS|RTPRIO|RTTIME|SIGPENDING|STACK)|RLIM_INFINITY/ || $2 ~ /^PRIO_(PROCESS|PGRP|USER)/ || $2 ~ /^CLONE_[A-Z_]+/ || - $2 !~ /^(BPF_TIMEVAL|BPF_FIB_LOOKUP_[A-Z]+)$/ && + $2 !~ /^(BPF_TIMEVAL|BPF_FIB_LOOKUP_[A-Z]+|BPF_F_LINK)$/ && $2 ~ /^(BPF|DLT)_/ || $2 ~ /^AUDIT_/ || $2 ~ /^(CLOCK|TIMER)_/ || @@ -602,6 +605,9 @@ ccflags="$@" $2 ~ /^FSOPT_/ || $2 ~ /^WDIO[CFS]_/ || $2 ~ /^NFN/ || + $2 !~ /^NFT_META_IIFTYPE/ && + $2 ~ /^NFT_/ || + $2 ~ /^NF_NAT_/ || $2 ~ /^XDP_/ || $2 ~ /^RWF_/ || $2 ~ /^(HDIO|WIN|SMART)_/ || @@ -663,7 +669,6 @@ echo '// mkerrors.sh' "$@" echo '// Code generated by the command above; see README.md. DO NOT EDIT.' echo echo "//go:build ${GOARCH} && ${GOOS}" -echo "// +build ${GOARCH},${GOOS}" echo go tool cgo -godefs -- "$@" _const.go >_error.out cat _error.out | grep -vf _error.grep | grep -vf _signal.grep diff --git a/vendor/golang.org/x/sys/unix/mmap_nomremap.go b/vendor/golang.org/x/sys/unix/mmap_nomremap.go index ca051363..4b68e597 100644 --- a/vendor/golang.org/x/sys/unix/mmap_nomremap.go +++ b/vendor/golang.org/x/sys/unix/mmap_nomremap.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || openbsd || solaris -// +build aix darwin dragonfly freebsd openbsd solaris package unix diff --git a/vendor/golang.org/x/sys/unix/mremap.go b/vendor/golang.org/x/sys/unix/mremap.go index fa93d0aa..fd45fe52 100644 --- a/vendor/golang.org/x/sys/unix/mremap.go +++ b/vendor/golang.org/x/sys/unix/mremap.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux || netbsd -// +build linux netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/pagesize_unix.go b/vendor/golang.org/x/sys/unix/pagesize_unix.go index 53f1b4c5..4d0a3430 100644 --- a/vendor/golang.org/x/sys/unix/pagesize_unix.go +++ b/vendor/golang.org/x/sys/unix/pagesize_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris // For Unix, get the pagesize from the runtime. diff --git a/vendor/golang.org/x/sys/unix/pledge_openbsd.go b/vendor/golang.org/x/sys/unix/pledge_openbsd.go index eb48294b..6a09af53 100644 --- a/vendor/golang.org/x/sys/unix/pledge_openbsd.go +++ b/vendor/golang.org/x/sys/unix/pledge_openbsd.go @@ -8,54 +8,31 @@ import ( "errors" "fmt" "strconv" - "syscall" - "unsafe" ) // Pledge implements the pledge syscall. // -// The pledge syscall does not accept execpromises on OpenBSD releases -// before 6.3. -// -// execpromises must be empty when Pledge is called on OpenBSD -// releases predating 6.3, otherwise an error will be returned. +// This changes both the promises and execpromises; use PledgePromises or +// PledgeExecpromises to only change the promises or execpromises +// respectively. // // For more information see pledge(2). 
func Pledge(promises, execpromises string) error { - maj, min, err := majmin() - if err != nil { + if err := pledgeAvailable(); err != nil { return err } - err = pledgeAvailable(maj, min, execpromises) + pptr, err := BytePtrFromString(promises) if err != nil { return err } - pptr, err := syscall.BytePtrFromString(promises) + exptr, err := BytePtrFromString(execpromises) if err != nil { return err } - // This variable will hold either a nil unsafe.Pointer or - // an unsafe.Pointer to a string (execpromises). - var expr unsafe.Pointer - - // If we're running on OpenBSD > 6.2, pass execpromises to the syscall. - if maj > 6 || (maj == 6 && min > 2) { - exptr, err := syscall.BytePtrFromString(execpromises) - if err != nil { - return err - } - expr = unsafe.Pointer(exptr) - } - - _, _, e := syscall.Syscall(SYS_PLEDGE, uintptr(unsafe.Pointer(pptr)), uintptr(expr), 0) - if e != 0 { - return e - } - - return nil + return pledge(pptr, exptr) } // PledgePromises implements the pledge syscall. @@ -64,30 +41,16 @@ func Pledge(promises, execpromises string) error { // // For more information see pledge(2). func PledgePromises(promises string) error { - maj, min, err := majmin() - if err != nil { - return err - } - - err = pledgeAvailable(maj, min, "") - if err != nil { + if err := pledgeAvailable(); err != nil { return err } - // This variable holds the execpromises and is always nil. - var expr unsafe.Pointer - - pptr, err := syscall.BytePtrFromString(promises) + pptr, err := BytePtrFromString(promises) if err != nil { return err } - _, _, e := syscall.Syscall(SYS_PLEDGE, uintptr(unsafe.Pointer(pptr)), uintptr(expr), 0) - if e != 0 { - return e - } - - return nil + return pledge(pptr, nil) } // PledgeExecpromises implements the pledge syscall. @@ -96,30 +59,16 @@ func PledgePromises(promises string) error { // // For more information see pledge(2). func PledgeExecpromises(execpromises string) error { - maj, min, err := majmin() - if err != nil { + if err := pledgeAvailable(); err != nil { return err } - err = pledgeAvailable(maj, min, execpromises) + exptr, err := BytePtrFromString(execpromises) if err != nil { return err } - // This variable holds the promises and is always nil. - var pptr unsafe.Pointer - - exptr, err := syscall.BytePtrFromString(execpromises) - if err != nil { - return err - } - - _, _, e := syscall.Syscall(SYS_PLEDGE, uintptr(pptr), uintptr(unsafe.Pointer(exptr)), 0) - if e != 0 { - return e - } - - return nil + return pledge(nil, exptr) } // majmin returns major and minor version number for an OpenBSD system. @@ -147,16 +96,15 @@ func majmin() (major int, minor int, err error) { // pledgeAvailable checks for availability of the pledge(2) syscall // based on the running OpenBSD version. -func pledgeAvailable(maj, min int, execpromises string) error { - // If OpenBSD <= 5.9, pledge is not available. - if (maj == 5 && min != 9) || maj < 5 { - return fmt.Errorf("pledge syscall is not available on OpenBSD %d.%d", maj, min) +func pledgeAvailable() error { + maj, min, err := majmin() + if err != nil { + return err } - // If OpenBSD <= 6.2 and execpromises is not empty, - // return an error - execpromises is not available before 6.3 - if (maj < 6 || (maj == 6 && min <= 2)) && execpromises != "" { - return fmt.Errorf("cannot use execpromises on OpenBSD %d.%d", maj, min) + // Require OpenBSD 6.4 as a minimum. 
+ if maj < 6 || (maj == 6 && min <= 3) { + return fmt.Errorf("cannot call Pledge on OpenBSD %d.%d", maj, min) } return nil diff --git a/vendor/golang.org/x/sys/unix/ptrace_darwin.go b/vendor/golang.org/x/sys/unix/ptrace_darwin.go index 463c3eff..3f0975f3 100644 --- a/vendor/golang.org/x/sys/unix/ptrace_darwin.go +++ b/vendor/golang.org/x/sys/unix/ptrace_darwin.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin && !ios -// +build darwin,!ios package unix diff --git a/vendor/golang.org/x/sys/unix/ptrace_ios.go b/vendor/golang.org/x/sys/unix/ptrace_ios.go index ed0509a0..a4d35db5 100644 --- a/vendor/golang.org/x/sys/unix/ptrace_ios.go +++ b/vendor/golang.org/x/sys/unix/ptrace_ios.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build ios -// +build ios package unix diff --git a/vendor/golang.org/x/sys/unix/race.go b/vendor/golang.org/x/sys/unix/race.go index 6f6c5fec..714d2aae 100644 --- a/vendor/golang.org/x/sys/unix/race.go +++ b/vendor/golang.org/x/sys/unix/race.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin && race) || (linux && race) || (freebsd && race) -// +build darwin,race linux,race freebsd,race package unix diff --git a/vendor/golang.org/x/sys/unix/race0.go b/vendor/golang.org/x/sys/unix/race0.go index 706e1322..4a9f6634 100644 --- a/vendor/golang.org/x/sys/unix/race0.go +++ b/vendor/golang.org/x/sys/unix/race0.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || (darwin && !race) || (linux && !race) || (freebsd && !race) || netbsd || openbsd || solaris || dragonfly || zos -// +build aix darwin,!race linux,!race freebsd,!race netbsd openbsd solaris dragonfly zos package unix diff --git a/vendor/golang.org/x/sys/unix/readdirent_getdents.go b/vendor/golang.org/x/sys/unix/readdirent_getdents.go index 4d625756..dbd2b6cc 100644 --- a/vendor/golang.org/x/sys/unix/readdirent_getdents.go +++ b/vendor/golang.org/x/sys/unix/readdirent_getdents.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || dragonfly || freebsd || linux || netbsd || openbsd -// +build aix dragonfly freebsd linux netbsd openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go b/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go index 2a4ba47c..130398b6 100644 --- a/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go +++ b/vendor/golang.org/x/sys/unix/readdirent_getdirentries.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin -// +build darwin package unix diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go index 3865943f..c3a62dbb 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos // Socket control messages diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go index 0840fe4a..4a1eab37 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
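[Editor's note] Stepping back from the mechanical build-tag hunks: the pledge_openbsd.go rewrite above now funnels Pledge, PledgePromises and PledgeExecpromises through a single generated libc-backed pledge wrapper and refuses to run on OpenBSD older than 6.4, instead of special-casing execpromises per release. A minimal usage sketch, assuming an OpenBSD 6.4+ host:

    package main

    import (
    	"log"

    	"golang.org/x/sys/unix"
    )

    func main() {
    	// Drop privileges to stdio plus read-only filesystem access.
    	// PledgePromises leaves execpromises untouched, matching the
    	// split API documented in the rewritten doc comments.
    	if err := unix.PledgePromises("stdio rpath"); err != nil {
    		log.Fatalf("pledge: %v", err) // e.g. on OpenBSD < 6.4
    	}
    }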
//go:build aix || darwin || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/syscall.go b/vendor/golang.org/x/sys/unix/syscall.go index 63e8c838..5ea74da9 100644 --- a/vendor/golang.org/x/sys/unix/syscall.go +++ b/vendor/golang.org/x/sys/unix/syscall.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos // Package unix contains an interface to the low-level operating system // primitives. OS details vary depending on the underlying system, and diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index e94e6cda..67ce6cef 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix -// +build aix // Aix system calls. // This file is compiled as ordinary Go code, @@ -107,7 +106,8 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { if n > 0 { sl += _Socklen(n) + 1 } - if sa.raw.Path[0] == '@' { + if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) { + // Check sl > 3 so we don't change unnamed socket behavior. sa.raw.Path[0] = 0 // Don't count trailing NUL for abstract address. sl-- diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go index f2871fa9..1fdaa476 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix && ppc -// +build aix,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go index 75718ec0..c87f9a9f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix && ppc64 -// +build aix,ppc64 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index 4217de51..a00c3e54 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin || dragonfly || freebsd || netbsd || openbsd -// +build darwin dragonfly freebsd netbsd openbsd // BSD system call wrappers shared by *BSD based systems // including OS X (Darwin) and FreeBSD. Like the other @@ -317,7 +316,7 @@ func GetsockoptString(fd, level, opt int) (string, error) { if err != nil { return "", err } - return string(buf[:vallen-1]), nil + return ByteSliceToString(buf[:vallen]), nil } //sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go index b37310ce..0eaecf5f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
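[Editor's note] The GetsockoptString change in the syscall_bsd.go hunk above (repeated for Linux, Solaris and z/OS later in this patch) swaps the hard-coded `buf[:vallen-1]` truncation for ByteSliceToString, which stops at the first NUL and so copes with kernels that report the option length with or without the terminator. A toy model of the semantics, where byteSliceToString is a stand-in for the real unix.ByteSliceToString:

    package main

    import (
    	"bytes"
    	"fmt"
    )

    // byteSliceToString mimics unix.ByteSliceToString: the result ends at
    // the first NUL byte if present, otherwise covers the whole slice.
    func byteSliceToString(b []byte) string {
    	if i := bytes.IndexByte(b, 0); i != -1 {
    		b = b[:i]
    	}
    	return string(b)
    }

    func main() {
    	fmt.Println(byteSliceToString([]byte("tcp\x00"))) // "tcp" (NUL counted in length)
    	fmt.Println(byteSliceToString([]byte("tcp")))     // "tcp" (no NUL; old code returned "tc")
    }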
//go:build amd64 && darwin -// +build amd64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go index d51ec996..f36c6707 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm64 && darwin -// +build arm64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go index 53c96641..16dc6993 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin && go1.12 -// +build darwin,go1.12 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go index 4e2d3212..14bab6b2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && dragonfly -// +build amd64,dragonfly package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go index b8da5100..3967bca7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build 386 && freebsd -// +build 386,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go index 47155c48..eff19ada 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && freebsd -// +build amd64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go index 08932093..4f24b517 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm && freebsd -// +build arm,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go index d151a0d0..ac30759e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm64 && freebsd -// +build arm64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go index d5cd64b3..aab725ca 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build riscv64 && freebsd -// +build riscv64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd.go b/vendor/golang.org/x/sys/unix/syscall_hurd.go index 381fd467..ba46651f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_hurd.go +++ b/vendor/golang.org/x/sys/unix/syscall_hurd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build hurd -// +build hurd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd_386.go b/vendor/golang.org/x/sys/unix/syscall_hurd_386.go index 7cf54a3e..df89f9e6 100644 --- a/vendor/golang.org/x/sys/unix/syscall_hurd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_hurd_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build 386 && hurd -// +build 386,hurd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_illumos.go b/vendor/golang.org/x/sys/unix/syscall_illumos.go index 87db5a6a..a863f705 100644 --- a/vendor/golang.org/x/sys/unix/syscall_illumos.go +++ b/vendor/golang.org/x/sys/unix/syscall_illumos.go @@ -5,7 +5,6 @@ // illumos system calls not present on Solaris. //go:build amd64 && illumos -// +build amd64,illumos package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index fb4e5022..0f85e29e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -61,15 +61,23 @@ func FanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname string) ( } //sys fchmodat(dirfd int, path string, mode uint32) (err error) - -func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { - // Linux fchmodat doesn't support the flags parameter. Mimick glibc's behavior - // and check the flags. Otherwise the mode would be applied to the symlink - // destination which is not what the user expects. - if flags&^AT_SYMLINK_NOFOLLOW != 0 { - return EINVAL - } else if flags&AT_SYMLINK_NOFOLLOW != 0 { - return EOPNOTSUPP +//sys fchmodat2(dirfd int, path string, mode uint32, flags int) (err error) + +func Fchmodat(dirfd int, path string, mode uint32, flags int) error { + // Linux fchmodat doesn't support the flags parameter, but fchmodat2 does. + // Try fchmodat2 if flags are specified. + if flags != 0 { + err := fchmodat2(dirfd, path, mode, flags) + if err == ENOSYS { + // fchmodat2 isn't available. If the flags are known to be valid, + // return EOPNOTSUPP to indicate that fchmodat doesn't support them. + if flags&^(AT_SYMLINK_NOFOLLOW|AT_EMPTY_PATH) != 0 { + return EINVAL + } else if flags&(AT_SYMLINK_NOFOLLOW|AT_EMPTY_PATH) != 0 { + return EOPNOTSUPP + } + } + return err } return fchmodat(dirfd, path, mode) } @@ -417,7 +425,8 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { if n > 0 { sl += _Socklen(n) + 1 } - if sa.raw.Path[0] == '@' { + if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) { + // Check sl > 3 so we don't change unnamed socket behavior. sa.raw.Path[0] = 0 // Don't count trailing NUL for abstract address. 
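[Editor's note] Two behavioral changes land in the syscall_linux.go hunks above: abstract AF_UNIX addresses may now begin with a literal NUL byte rather than only '@', and Fchmodat gains real flag support by first attempting the newer fchmodat2(2) syscall (Linux 6.6+) before falling back to the old EINVAL/EOPNOTSUPP emulation. A sketch of the Fchmodat path; the path is illustrative, and note that even kernels with fchmodat2 may refuse to change the mode of a symlink itself, so the error must be checked:

    package main

    import (
    	"fmt"

    	"golang.org/x/sys/unix"
    )

    func main() {
    	// Ask to operate on the link itself rather than its target. On
    	// kernels without fchmodat2 the wrapper above reports EOPNOTSUPP
    	// for this flag, exactly as the fallback comments describe.
    	err := unix.Fchmodat(unix.AT_FDCWD, "/tmp/link", 0o600, unix.AT_SYMLINK_NOFOLLOW)
    	fmt.Println(err)
    }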
sl-- @@ -1301,7 +1310,7 @@ func GetsockoptString(fd, level, opt int) (string, error) { return "", err } } - return string(buf[:vallen-1]), nil + return ByteSliceToString(buf[:vallen]), nil } func GetsockoptTpacketStats(fd, level, opt int) (*TpacketStats, error) { @@ -2482,3 +2491,5 @@ func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) { } return attr, nil } + +//sys Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go index c7d9945e..506dafa7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build 386 && linux -// +build 386,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go b/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go index 08086ac6..38d55641 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_alarm.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (386 || amd64 || mips || mipsle || mips64 || mipsle || ppc64 || ppc64le || ppc || s390x || sparc64) -// +build linux -// +build 386 amd64 mips mipsle mips64 mipsle ppc64 ppc64le ppc s390x sparc64 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index 70601ce3..d557cf8d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && linux -// +build amd64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go index 8b0f0f3a..facdb83b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64_gc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && linux && gc -// +build amd64,linux,gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index da298641..cd2dd797 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm && linux -// +build arm,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index f5266689..cf2ee6c7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm64 && linux -// +build arm64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc.go index 2b1168d7..ffc4c2b6 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
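[Editor's note] The new `//sys Cachestat` line above exposes cachestat(2) (Linux 6.5+), which reports page-cache residency for a byte range of an open file. A sketch under the assumption that the generated CachestatRange/Cachestat_t types mirror the kernel struct; the field names follow the x/sys ztypes files, which are not shown in this excerpt:

    package main

    import (
    	"fmt"
    	"log"
    	"os"

    	"golang.org/x/sys/unix"
    )

    func main() {
    	f, err := os.Open("/etc/os-release")
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer f.Close()

    	// A zero Off/Len range asks about the whole file.
    	var cs unix.Cachestat_t
    	if err := unix.Cachestat(uint(f.Fd()), &unix.CachestatRange{}, &cs, 0); err != nil {
    		log.Fatal(err) // ENOSYS before Linux 6.5
    	}
    	fmt.Printf("cached=%d dirty=%d writeback=%d\n", cs.Cache, cs.Dirty, cs.Writeback)
    }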
//go:build linux && gc -// +build linux,gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go index 9843fb48..9ebfdcf4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && gc && 386 -// +build linux,gc,386 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go index a6008fcc..5f2b57c4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gc_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm && gc && linux -// +build arm,gc,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go index 7740af24..d1a3ad82 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && gccgo && 386 -// +build linux,gccgo,386 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go index e16a1229..f2f67423 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && gccgo && arm -// +build linux,gccgo,arm package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go index f6ab02ec..3d0e9845 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build loong64 && linux -// +build loong64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go index 93fe59d2..70963a95 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (mips64 || mips64le) -// +build linux -// +build mips64 mips64le package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go index aae7f0ff..c218ebd2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (mips || mipsle) -// +build linux -// +build mips mipsle package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go index 66eff19a..e6c48500 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build linux && ppc -// +build linux,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go index 806aa257..7286a9aa 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (ppc64 || ppc64le) -// +build linux -// +build ppc64 ppc64le package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index 5e6ceee1..6f5a2889 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build riscv64 && linux -// +build riscv64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go index 2f89e8f5..66f31210 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build s390x && linux -// +build s390x,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go index 7ca064ae..11d1f169 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build sparc64 && linux -// +build sparc64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go index 5199d282..7a5eb574 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build 386 && netbsd -// +build 386,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go index 70a9c52e..62d8957a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && netbsd -// +build amd64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go index 3eb5942f..ce6a0688 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm && netbsd -// +build arm,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go index fc6ccfd8..d46d689d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build arm64 && netbsd -// +build arm64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 6f34479b..b25343c7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -137,18 +137,13 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e } func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { - var _p0 unsafe.Pointer + var bufptr *Statfs_t var bufsize uintptr if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) + bufptr = &buf[0] bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf)) } - r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = e1 - } - return + return getfsstat(bufptr, bufsize, flags) } //sysnb getresuid(ruid *_C_int, euid *_C_int, suid *_C_int) @@ -171,6 +166,20 @@ func Getresgid() (rgid, egid, sgid int) { //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL +//sys fcntl(fd int, cmd int, arg int) (n int, err error) +//sys fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) = SYS_FCNTL + +// FcntlInt performs a fcntl syscall on fd with the provided command and argument. +func FcntlInt(fd uintptr, cmd, arg int) (int, error) { + return fcntl(int(fd), cmd, arg) +} + +// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command. +func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error { + _, err := fcntlPtr(int(fd), cmd, unsafe.Pointer(lk)) + return err +} + //sys ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) func Ppoll(fds []PollFd, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { @@ -326,4 +335,7 @@ func Uname(uname *Utsname) error { //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error) +//sys getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) //sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) +//sys pledge(promises *byte, execpromises *byte) (err error) +//sys unveil(path *byte, flags *byte) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go index 6baabcdc..9ddc89f4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build 386 && openbsd -// +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go index bab25360..70a3c96e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && openbsd -// +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go index 8eed3c4d..265caa87 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
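[Editor's note] syscall_openbsd.go above moves further onto generated libc stubs: Getfsstat now delegates to a getfsstat wrapper instead of a raw Syscall, pledge/unveil gain `//sys` declarations, and FcntlInt/FcntlFlock expose fcntl(2) for the F_GETLK/F_SETLK/F_SETLKW family. A minimal locking sketch using the new wrappers (the lock-file path is illustrative):

    package main

    import (
    	"log"
    	"os"

    	"golang.org/x/sys/unix"
    )

    func main() {
    	f, err := os.OpenFile("/tmp/demo.lock", os.O_RDWR|os.O_CREATE, 0o644)
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer f.Close()

    	// Request an exclusive lock over the whole file (Len == 0 means
    	// "to EOF"); F_SETLK fails immediately rather than blocking if
    	// another process already holds the lock.
    	lk := unix.Flock_t{Type: unix.F_WRLCK, Whence: 0, Start: 0, Len: 0}
    	if err := unix.FcntlFlock(f.Fd(), unix.F_SETLK, &lk); err != nil {
    		log.Fatalf("lock: %v", err)
    	}
    }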
//go:build arm && openbsd -// +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go index 483dde99..ac4fda17 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build arm64 && openbsd -// +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go index 04aa43f4..0a451e6d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build openbsd -// +build openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go index c2796139..30a308cb 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_ppc64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build ppc64 && openbsd -// +build ppc64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go index 23199a7f..ea954330 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_riscv64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build riscv64 && openbsd -// +build riscv64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index b99cfa13..21974af0 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -128,7 +128,8 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { if n > 0 { sl += _Socklen(n) + 1 } - if sa.raw.Path[0] == '@' { + if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) { + // Check sl > 3 so we don't change unnamed socket behavior. sa.raw.Path[0] = 0 // Don't count trailing NUL for abstract address. sl-- @@ -157,7 +158,7 @@ func GetsockoptString(fd, level, opt int) (string, error) { if err != nil { return "", err } - return string(buf[:vallen-1]), nil + return ByteSliceToString(buf[:vallen]), nil } const ImplementsGetwd = true diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go index 0bd25ef8..e02d8cea 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build amd64 && solaris -// +build amd64,solaris package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index f6eda270..77081de8 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_gc.go b/vendor/golang.org/x/sys/unix/syscall_unix_gc.go index b6919ca5..05c95bcc 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix_gc.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix_gc.go @@ -3,8 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin || dragonfly || freebsd || (linux && !ppc64 && !ppc64le) || netbsd || openbsd || solaris) && gc -// +build darwin dragonfly freebsd linux,!ppc64,!ppc64le netbsd openbsd solaris -// +build gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go index f6f707ac..23f39b7a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix_gc_ppc64x.go @@ -3,9 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux && (ppc64le || ppc64) && gc -// +build linux -// +build ppc64le ppc64 -// +build gc package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index 4596d041..b473038c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x package unix @@ -1105,7 +1104,7 @@ func GetsockoptString(fd, level, opt int) (string, error) { return "", err } - return string(buf[:vallen-1]), nil + return ByteSliceToString(buf[:vallen]), nil } func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { diff --git a/vendor/golang.org/x/sys/unix/sysvshm_linux.go b/vendor/golang.org/x/sys/unix/sysvshm_linux.go index 2c3a4437..4fcd38de 100644 --- a/vendor/golang.org/x/sys/unix/sysvshm_linux.go +++ b/vendor/golang.org/x/sys/unix/sysvshm_linux.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build linux -// +build linux package unix diff --git a/vendor/golang.org/x/sys/unix/sysvshm_unix.go b/vendor/golang.org/x/sys/unix/sysvshm_unix.go index 5bb41d17..79a84f18 100644 --- a/vendor/golang.org/x/sys/unix/sysvshm_unix.go +++ b/vendor/golang.org/x/sys/unix/sysvshm_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build (darwin && !ios) || linux -// +build darwin,!ios linux package unix diff --git a/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go b/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go index 71bddefd..9eb0db66 100644 --- a/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go +++ b/vendor/golang.org/x/sys/unix/sysvshm_unix_other.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin && !ios -// +build darwin,!ios package unix diff --git a/vendor/golang.org/x/sys/unix/timestruct.go b/vendor/golang.org/x/sys/unix/timestruct.go index 616b1b28..7997b190 100644 --- a/vendor/golang.org/x/sys/unix/timestruct.go +++ b/vendor/golang.org/x/sys/unix/timestruct.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package unix diff --git a/vendor/golang.org/x/sys/unix/unveil_openbsd.go b/vendor/golang.org/x/sys/unix/unveil_openbsd.go index 168d5ae7..cb7e598c 100644 --- a/vendor/golang.org/x/sys/unix/unveil_openbsd.go +++ b/vendor/golang.org/x/sys/unix/unveil_openbsd.go @@ -4,39 +4,48 @@ package unix -import ( - "syscall" - "unsafe" -) +import "fmt" // Unveil implements the unveil syscall. // For more information see unveil(2). // Note that the special case of blocking further // unveil calls is handled by UnveilBlock. func Unveil(path string, flags string) error { - pathPtr, err := syscall.BytePtrFromString(path) - if err != nil { + if err := supportsUnveil(); err != nil { return err } - flagsPtr, err := syscall.BytePtrFromString(flags) + pathPtr, err := BytePtrFromString(path) if err != nil { return err } - _, _, e := syscall.Syscall(SYS_UNVEIL, uintptr(unsafe.Pointer(pathPtr)), uintptr(unsafe.Pointer(flagsPtr)), 0) - if e != 0 { - return e + flagsPtr, err := BytePtrFromString(flags) + if err != nil { + return err } - return nil + return unveil(pathPtr, flagsPtr) } // UnveilBlock blocks future unveil calls. // For more information see unveil(2). func UnveilBlock() error { - // Both pointers must be nil. - var pathUnsafe, flagsUnsafe unsafe.Pointer - _, _, e := syscall.Syscall(SYS_UNVEIL, uintptr(pathUnsafe), uintptr(flagsUnsafe), 0) - if e != 0 { - return e + if err := supportsUnveil(); err != nil { + return err } + return unveil(nil, nil) +} + +// supportsUnveil checks for availability of the unveil(2) system call based +// on the running OpenBSD version. +func supportsUnveil() error { + maj, min, err := majmin() + if err != nil { + return err + } + + // unveil is not available before 6.4 + if maj < 6 || (maj == 6 && min <= 3) { + return fmt.Errorf("cannot call Unveil on OpenBSD %d.%d", maj, min) + } + return nil } diff --git a/vendor/golang.org/x/sys/unix/xattr_bsd.go b/vendor/golang.org/x/sys/unix/xattr_bsd.go index f5f8e9f3..e1687939 100644 --- a/vendor/golang.org/x/sys/unix/xattr_bsd.go +++ b/vendor/golang.org/x/sys/unix/xattr_bsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build freebsd || netbsd -// +build freebsd netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go index ca9799b7..2fb219d7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && aix -// +build ppc,aix // Created by cgo -godefs - DO NOT EDIT // cgo -godefs -- -maix32 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go index 200c8c26..b0e6f5c8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && aix -// +build ppc64,aix // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
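[Editor's note] unveil_openbsd.go above gets the same treatment as pledge earlier in the patch: the raw SYS_UNVEIL syscall is replaced by a generated libc wrapper plus a supportsUnveil gate requiring OpenBSD 6.4+. A short usage sketch:

    package main

    import (
    	"log"

    	"golang.org/x/sys/unix"
    )

    func main() {
    	// Limit the filesystem view to read-only access under /etc,
    	// then lock the unveil configuration in place.
    	if err := unix.Unveil("/etc", "r"); err != nil {
    		log.Fatal(err)
    	}
    	if err := unix.UnveilBlock(); err != nil {
    		log.Fatal(err)
    	}
    }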
// cgo -godefs -- -maix64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index 14300762..e40fa852 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && darwin -// +build amd64,darwin // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index ab044a74..bb02aa6c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && darwin -// +build arm64,darwin // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go index 17bba0e4..c0e0f869 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && dragonfly -// +build amd64,dragonfly // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go index f8c2c513..6c692390 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && freebsd -// +build 386,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m32 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go index 96310c3b..dd9163f8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && freebsd -// +build amd64,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go index 777b69de..493a2a79 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && freebsd -// +build arm,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go index c557ac2d..8b437b30 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && freebsd -// +build arm64,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go index 341b4d96..67c02dd5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && freebsd -// +build riscv64,freebsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index f9c7f479..a5d3ff8d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1,7 +1,6 @@ // Code generated by mkmerge; DO NOT EDIT. //go:build linux -// +build linux package unix @@ -481,10 +480,13 @@ const ( BPF_FROM_BE = 0x8 BPF_FROM_LE = 0x0 BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_AFTER = 0x10 BPF_F_ALLOW_MULTI = 0x2 BPF_F_ALLOW_OVERRIDE = 0x1 BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_KPROBE_MULTI_RETURN = 0x1 + BPF_F_BEFORE = 0x8 + BPF_F_ID = 0x20 + BPF_F_NETFILTER_IP_DEFRAG = 0x1 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_REPLACE = 0x4 BPF_F_SLEEPABLE = 0x10 @@ -521,6 +523,7 @@ const ( BPF_MAJOR_VERSION = 0x1 BPF_MAXINSNS = 0x1000 BPF_MEM = 0x60 + BPF_MEMSX = 0x80 BPF_MEMWORDS = 0x10 BPF_MINOR_VERSION = 0x1 BPF_MISC = 0x7 @@ -776,6 +779,8 @@ const ( DEVLINK_GENL_MCGRP_CONFIG_NAME = "config" DEVLINK_GENL_NAME = "devlink" DEVLINK_GENL_VERSION = 0x1 + DEVLINK_PORT_FN_CAP_IPSEC_CRYPTO = 0x4 + DEVLINK_PORT_FN_CAP_IPSEC_PACKET = 0x8 DEVLINK_PORT_FN_CAP_MIGRATABLE = 0x2 DEVLINK_PORT_FN_CAP_ROCE = 0x1 DEVLINK_SB_THRESHOLD_TO_ALPHA_MAX = 0x14 @@ -1698,6 +1703,7 @@ const ( KEXEC_ON_CRASH = 0x1 KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 + KEXEC_UPDATE_ELFCOREHDR = 0x4 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CAPABILITIES = 0x1f KEYCTL_CAPS0_BIG_KEY = 0x10 @@ -1795,6 +1801,7 @@ const ( LOCK_SH = 0x1 LOCK_UN = 0x8 LOOP_CLR_FD = 0x4c01 + LOOP_CONFIGURE = 0x4c0a LOOP_CTL_ADD = 0x4c80 LOOP_CTL_GET_FREE = 0x4c82 LOOP_CTL_REMOVE = 0x4c81 @@ -2120,6 +2127,60 @@ const ( NFNL_SUBSYS_QUEUE = 0x3 NFNL_SUBSYS_ULOG = 0x4 NFS_SUPER_MAGIC = 0x6969 + NFT_CHAIN_FLAGS = 0x7 + NFT_CHAIN_MAXNAMELEN = 0x100 + NFT_CT_MAX = 0x17 + NFT_DATA_RESERVED_MASK = 0xffffff00 + NFT_DATA_VALUE_MAXLEN = 0x40 + NFT_EXTHDR_OP_MAX = 0x4 + NFT_FIB_RESULT_MAX = 0x3 + NFT_INNER_MASK = 0xf + NFT_LOGLEVEL_MAX = 0x8 + NFT_NAME_MAXLEN = 0x100 + NFT_NG_MAX = 0x1 + NFT_OBJECT_CONNLIMIT = 0x5 + NFT_OBJECT_COUNTER = 0x1 + NFT_OBJECT_CT_EXPECT = 0x9 + NFT_OBJECT_CT_HELPER = 0x3 + NFT_OBJECT_CT_TIMEOUT = 0x7 + NFT_OBJECT_LIMIT = 0x4 + NFT_OBJECT_MAX = 0xa + NFT_OBJECT_QUOTA = 0x2 + NFT_OBJECT_SECMARK = 0x8 + NFT_OBJECT_SYNPROXY = 0xa + NFT_OBJECT_TUNNEL = 0x6 + NFT_OBJECT_UNSPEC = 0x0 + NFT_OBJ_MAXNAMELEN = 0x100 + NFT_OSF_MAXGENRELEN = 0x10 + NFT_QUEUE_FLAG_BYPASS = 0x1 + NFT_QUEUE_FLAG_CPU_FANOUT = 0x2 + NFT_QUEUE_FLAG_MASK = 0x3 + NFT_REG32_COUNT = 0x10 + NFT_REG32_SIZE = 0x4 + NFT_REG_MAX = 0x4 + NFT_REG_SIZE = 0x10 + NFT_REJECT_ICMPX_MAX = 0x3 + NFT_RT_MAX = 0x4 + NFT_SECMARK_CTX_MAXLEN = 0x100 + NFT_SET_MAXNAMELEN = 0x100 + NFT_SOCKET_MAX = 0x3 + NFT_TABLE_F_MASK = 0x3 + NFT_TABLE_MAXNAMELEN = 0x100 + NFT_TRACETYPE_MAX = 0x3 + NFT_TUNNEL_F_MASK = 0x7 + NFT_TUNNEL_MAX = 0x1 + NFT_TUNNEL_MODE_MAX = 0x2 + NFT_USERDATA_MAXLEN = 0x100 + NFT_XFRM_KEY_MAX = 0x6 + NF_NAT_RANGE_MAP_IPS = 0x1 + NF_NAT_RANGE_MASK = 0x7f + NF_NAT_RANGE_NETMAP = 0x40 + 
NF_NAT_RANGE_PERSISTENT = 0x8 + NF_NAT_RANGE_PROTO_OFFSET = 0x20 + NF_NAT_RANGE_PROTO_RANDOM = 0x4 + NF_NAT_RANGE_PROTO_RANDOM_ALL = 0x14 + NF_NAT_RANGE_PROTO_RANDOM_FULLY = 0x10 + NF_NAT_RANGE_PROTO_SPECIFIED = 0x2 NILFS_SUPER_MAGIC = 0x3434 NL0 = 0x0 NL1 = 0x100 @@ -2275,6 +2336,7 @@ const ( PERF_MEM_LVLNUM_PMEM = 0xe PERF_MEM_LVLNUM_RAM = 0xd PERF_MEM_LVLNUM_SHIFT = 0x21 + PERF_MEM_LVLNUM_UNC = 0x8 PERF_MEM_LVL_HIT = 0x2 PERF_MEM_LVL_IO = 0x1000 PERF_MEM_LVL_L1 = 0x8 @@ -3461,6 +3523,7 @@ const ( XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 + XDP_PKT_CONTD = 0x1 XDP_RING_NEED_WAKEUP = 0x1 XDP_RX_RING = 0x2 XDP_SHARED_UMEM = 0x1 @@ -3473,6 +3536,7 @@ const ( XDP_UMEM_REG = 0x4 XDP_UMEM_UNALIGNED_CHUNK_FLAG = 0x1 XDP_USE_NEED_WAKEUP = 0x8 + XDP_USE_SG = 0x10 XDP_ZEROCOPY = 0x4 XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 30aee00a..4920821c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && linux -// +build 386,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/386/include -m32 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 8ebfa512..a0c1e411 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && linux -// +build amd64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/amd64/include -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 271a21cd..c6398556 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && linux -// +build arm,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/arm/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 910c330a..47cc62e2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && linux -// +build arm64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/arm64/include -fsigned-char _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index a640798c..27ac4a09 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build loong64 && linux -// +build loong64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
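[Editor's note] The zerrors_linux.go hunk above materializes the NFT_* and NF_NAT_* constants pulled in via the mkerrors.sh defines at the top of this section. As a quick sanity check, the generated NF_NAT_RANGE_MASK (0x7f) is exactly the union of the individual range flags, mirroring the copied kernel header:

    package main

    import (
    	"fmt"

    	"golang.org/x/sys/unix"
    )

    func main() {
    	mask := unix.NF_NAT_RANGE_MAP_IPS | unix.NF_NAT_RANGE_PROTO_SPECIFIED |
    		unix.NF_NAT_RANGE_PROTO_RANDOM | unix.NF_NAT_RANGE_PERSISTENT |
    		unix.NF_NAT_RANGE_PROTO_RANDOM_FULLY | unix.NF_NAT_RANGE_PROTO_OFFSET |
    		unix.NF_NAT_RANGE_NETMAP
    	fmt.Printf("%#x == %#x\n", mask, unix.NF_NAT_RANGE_MASK) // 0x7f == 0x7f
    }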
// cgo -godefs -- -Wall -Werror -static -I/tmp/loong64/include _const.go @@ -119,6 +118,7 @@ const ( IXOFF = 0x1000 IXON = 0x400 LASX_CTX_MAGIC = 0x41535801 + LBT_CTX_MAGIC = 0x42540001 LSX_CTX_MAGIC = 0x53580001 MAP_ANON = 0x20 MAP_ANONYMOUS = 0x20 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 0d5925d3..54694642 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips && linux -// +build mips,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/mips/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index d72a00e0..3adb81d7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && linux -// +build mips64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/mips64/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 02ba129f..2dfe98f0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64le && linux -// +build mips64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/mips64le/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 8daa6dd9..f5398f84 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mipsle && linux -// +build mipsle,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/mipsle/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 63c8fa2f..c54f152d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && linux -// +build ppc,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/ppc/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 930799ec..76057dc7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && linux -// +build ppc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -Wall -Werror -static -I/tmp/ppc64/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 8605a7dd..e0c3725e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64le && linux -// +build ppc64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/ppc64le/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 95a016f1..18f2813e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && linux -// +build riscv64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/riscv64/include _const.go @@ -228,6 +227,9 @@ const ( PPPIOCUNBRIDGECHAN = 0x7434 PPPIOCXFERUNIT = 0x744e PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTRACE_GETFDPIC = 0x21 + PTRACE_GETFDPIC_EXEC = 0x0 + PTRACE_GETFDPIC_INTERP = 0x1 RLIMIT_AS = 0x9 RLIMIT_MEMLOCK = 0x8 RLIMIT_NOFILE = 0x7 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 1ae0108f..11619d4e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build s390x && linux -// +build s390x,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/s390x/include -fsigned-char _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 1bb7c633..396d994d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build sparc64 && linux -// +build sparc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -Wall -Werror -static -I/tmp/sparc64/include _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go index 72f7420d..130085df 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && netbsd -// +build 386,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m32 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go index 8d4eb0c0..84769a1a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && netbsd -// +build amd64,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go index 9eef9749..602ded00 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && netbsd -// +build arm,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -marm _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go index 3b62ba19..efc0406e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && netbsd -// +build arm64,netbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go index af20e474..5a6500f8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && openbsd -// +build 386,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m32 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go index 6015fcb2..a5aeeb97 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && openbsd -// +build amd64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go index 8d44955e..0e9748a7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && openbsd -// +build arm,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go index ae16fe75..4f4449ab 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && openbsd -// +build arm64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go index 03d90fe3..76a363f0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && openbsd -// +build mips64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go index 8e2c51b1..43ca0cdf 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && openbsd -// +build ppc64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go index 13d40303..b1b8bb20 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && openbsd -// +build riscv64,openbsd // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go index 1afee6a0..d2ddd317 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && solaris -// +build amd64,solaris // Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go index fc7d0506..4dfd2e05 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x // Hand edited based on zerrors_linux_s390x.go // TODO: auto-generate. diff --git a/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go b/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go index 97f20ca2..586317c7 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go @@ -1,8 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("arm", "arm64"). DO NOT EDIT. //go:build linux && (arm || arm64) -// +build linux -// +build arm arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go b/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go index 0b5f7943..d7c881be 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go @@ -1,8 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("mips", "mips64"). DO NOT EDIT. //go:build linux && (mips || mips64) -// +build linux -// +build mips mips64 package unix diff --git a/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go b/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go index 2807f7e6..2d2de5d2 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go @@ -1,8 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("mipsle", "mips64le"). DO NOT EDIT. 
//go:build linux && (mipsle || mips64le) -// +build linux -// +build mipsle mips64le package unix diff --git a/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go b/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go index 281ea64e..5adc79fb 100644 --- a/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go @@ -1,8 +1,6 @@ // Code generated by linux/mkall.go generatePtracePair("386", "amd64"). DO NOT EDIT. //go:build linux && (386 || amd64) -// +build linux -// +build 386 amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go index d1d1d233..6ea64a3c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build aix && ppc -// +build aix,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go index f99a18ad..99ee4399 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build aix && ppc64 -// +build aix,ppc64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go index c4d50ae5..b68a7836 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build aix && ppc64 && gc -// +build aix,ppc64,gc package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go index 6903d3b0..0a87450b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64_gccgo.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build aix && ppc64 && gccgo -// +build aix,ppc64,gccgo package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index 1cad561e..ccb02f24 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build darwin && amd64 -// +build darwin,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index b18edbd0..1b40b997 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build darwin && arm64 -// +build darwin,arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index 0c67df64..aad65fc7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build dragonfly && amd64 -// +build dragonfly,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index e6e05d14..c0096391 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build freebsd && 386 -// +build freebsd,386 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index 7508acca..7664df74 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build freebsd && amd64 -// +build freebsd,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index 7b56aead..ae099182 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build freebsd && arm -// +build freebsd,arm package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go index cc623dca..11fd5d45 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build freebsd && arm64 -// +build freebsd,arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go index 58184919..c3d2d653 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build freebsd && riscv64 -// +build freebsd,riscv64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go index 6be25cd1..c698cbc0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build illumos && amd64 -// +build illumos,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 1ff3aec7..1488d271 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -1,7 +1,6 @@ // Code generated by mkmerge; DO NOT EDIT. 
//go:build linux -// +build linux package unix @@ -38,6 +37,21 @@ func fchmodat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fchmodat2(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT2, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -2195,3 +2209,13 @@ func schedGetattr(pid int, attr *SchedAttr, size uint, flags uint) (err error) { } return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error) { + _, _, e1 := Syscall6(SYS_CACHESTAT, uintptr(fd), uintptr(unsafe.Pointer(crange)), uintptr(unsafe.Pointer(cstat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index 07b549cc..4def3e9f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && 386 -// +build linux,386 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index 5f481bf8..fef2bc8b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && amd64 -// +build linux,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index 824cd52c..a9fd76a8 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && arm -// +build linux,arm package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index e77aecfe..46006502 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && arm64 -// +build linux,arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go index 806ffd1e..c8987d26 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
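The mkmerge hunk for zsyscall_linux.go above adds wrappers for two recent kernel interfaces: fchmodat2(2), which backs flag-aware fchmodat semantics, and the exported Cachestat, which reports page-cache residency for a byte range of an open file. A minimal sketch of calling Cachestat with the signature shown in the hunk; the CachestatRange and Cachestat_t field names are assumptions mirroring the kernel structs (the matching ztypes changes live elsewhere in this patch), and kernels before 6.5 return ENOSYS:

    // Hedged sketch: page-cache residency of a whole file via the new
    // Cachestat wrapper. Field names Off/Len and Cache/Dirty/Writeback are
    // assumptions based on the kernel's cachestat(2) structures.
    package main

    import (
        "fmt"
        "os"

        "golang.org/x/sys/unix"
    )

    func main() {
        f, err := os.Open("/etc/hosts")
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        defer f.Close()

        rng := unix.CachestatRange{Off: 0, Len: 0} // Len == 0 covers off..EOF
        var cs unix.Cachestat_t
        if err := unix.Cachestat(uint(f.Fd()), &rng, &cs, 0); err != nil {
            // ENOSYS on kernels older than 6.5, which introduced cachestat(2).
            fmt.Fprintln(os.Stderr, "cachestat:", err)
            os.Exit(1)
        }
        fmt.Printf("cached=%d dirty=%d writeback=%d\n", cs.Cache, cs.Dirty, cs.Writeback)
    }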
//go:build linux && loong64 -// +build linux,loong64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index 961a3afb..921f4306 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && mips -// +build linux,mips package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index ed05005e..44f06782 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && mips64 -// +build linux,mips64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index d365b718..e7fa0abf 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && mips64le -// +build linux,mips64le package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index c3f1b8bb..8c512567 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && mipsle -// +build linux,mipsle package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go index a6574cf9..7392fd45 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && ppc -// +build linux,ppc package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index f4099026..41180434 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && ppc64 -// +build linux,ppc64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index 9dfcc299..40c6ce7a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && ppc64le -// +build linux,ppc64le package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go index 0ab4f2ed..2cfe34ad 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build linux && riscv64 -// +build linux,riscv64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index 6cde3223..61e6f070 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && s390x -// +build linux,s390x package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index 5253d65b..834b8420 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build linux && sparc64 -// +build linux,sparc64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 2df3c5ba..e91ebc14 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build netbsd && 386 -// +build netbsd,386 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index a60556ba..be28babb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build netbsd && amd64 -// +build netbsd,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index 9f788917..fb587e82 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build netbsd && arm -// +build netbsd,arm package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index 82a4cb2d..d576438b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build netbsd && arm64 -// +build netbsd,arm64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 66b3b645..9dc42410 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && 386 -// +build openbsd,386 package unix @@ -585,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -2213,6 +2238,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2269,31 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s index 3dcacd30..41b56173 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s @@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $4 DATA ·libc_sysctl_trampoline_addr(SB)/4, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fcntl_trampoline_addr(SB)/4, $libc_fcntl_trampoline<>(SB) 
+ TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $4 @@ -668,7 +673,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $4 DATA ·libc_munmap_trampoline_addr(SB)/4, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getfsstat_trampoline_addr(SB)/4, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $4 DATA ·libc_utimensat_trampoline_addr(SB)/4, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pledge_trampoline_addr(SB)/4, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $4 +DATA ·libc_unveil_trampoline_addr(SB)/4, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index c5c4cc11..0d3a0751 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build openbsd && amd64 -// +build openbsd,amd64 package unix @@ -585,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -2213,6 +2238,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2269,31 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat 
utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s index 2763620b..4019a656 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s @@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 @@ -668,7 +673,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index 93bfbb32..c39f7776 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && arm -// +build openbsd,arm package unix @@ -585,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -2213,6 +2238,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2269,31 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s index c9223140..ac4af24f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s @@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $4 DATA ·libc_sysctl_trampoline_addr(SB)/4, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fcntl_trampoline_addr(SB)/4, $libc_fcntl_trampoline<>(SB) 
+ TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $4 @@ -668,7 +673,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $4 DATA ·libc_munmap_trampoline_addr(SB)/4, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $4 +DATA ·libc_getfsstat_trampoline_addr(SB)/4, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $4 DATA ·libc_utimensat_trampoline_addr(SB)/4, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $4 +DATA ·libc_pledge_trampoline_addr(SB)/4, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $4 +DATA ·libc_unveil_trampoline_addr(SB)/4, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index a107b8fd..57571d07 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build openbsd && arm64 -// +build openbsd,arm64 package unix @@ -585,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -2213,6 +2238,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2269,31 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat 
utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s index a6bc32c9..f77d5321 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s @@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 @@ -668,7 +673,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index c427de50..e62963e6 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build openbsd && mips64 -// +build openbsd,mips64 package unix @@ -585,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -2213,6 +2238,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2269,31 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s index b4e7bcea..fae140b6 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s @@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fcntl_trampoline_addr(SB)/8, 
$libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 @@ -668,7 +673,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go index 60c1a99a..00831354 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build openbsd && ppc64 -// +build openbsd,ppc64 package unix @@ -585,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -2213,6 +2238,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2269,31 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr 
//go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s index ca3f7660..9d1e0ff0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s @@ -213,6 +213,12 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_fcntl(SB) + RET +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 CALL libc_ppoll(SB) RET @@ -801,8 +807,26 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_getfsstat(SB) + RET +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 CALL libc_utimensat(SB) RET GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_pledge(SB) + RET +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_unveil(SB) + RET +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go index 52eba360..79029ed5 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
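One port-specific detail is visible above: the ppc64 trampolines use a CALL/RET pair where every other OpenBSD port emits a bare JMP. The pledge and unveil wrappers themselves take raw *byte promise strings; the string-based helpers Pledge, PledgePromises, Unveil, and UnveilBlock exported by x/sys/unix live outside these generated files, and the sketch below assumes their existing signatures:

    // Hedged sketch (OpenBSD): sandbox a process using the pledge/unveil
    // wrappers added above, via the exported string-based helpers.
    package main

    import (
        "fmt"
        "os"

        "golang.org/x/sys/unix"
    )

    func main() {
        // Only /etc stays visible, read-only, once the unveil view is locked.
        if err := unix.Unveil("/etc", "r"); err != nil {
            fmt.Fprintln(os.Stderr, "unveil:", err)
            os.Exit(1)
        }
        if err := unix.UnveilBlock(); err != nil {
            fmt.Fprintln(os.Stderr, "unveil block:", err)
            os.Exit(1)
        }
        // Drop to stdio plus read-only filesystem access.
        if err := unix.PledgePromises("stdio rpath"); err != nil {
            fmt.Fprintln(os.Stderr, "pledge:", err)
            os.Exit(1)
        }
        if _, err := os.ReadFile("/etc/hosts"); err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        fmt.Println("still alive inside the sandbox")
    }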
//go:build openbsd && riscv64 -// +build openbsd,riscv64 package unix @@ -585,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -2213,6 +2238,21 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getfsstat(stat *Statfs_t, bufsize uintptr, flags int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_getfsstat_trampoline_addr, uintptr(unsafe.Pointer(stat)), uintptr(bufsize), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_getfsstat_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_getfsstat getfsstat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -2229,3 +2269,31 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error var libc_utimensat_trampoline_addr uintptr //go:cgo_import_dynamic libc_utimensat utimensat "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pledge(promises *byte, execpromises *byte) (err error) { + _, _, e1 := syscall_syscall(libc_pledge_trampoline_addr, uintptr(unsafe.Pointer(promises)), uintptr(unsafe.Pointer(execpromises)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pledge_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pledge pledge "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func unveil(path *byte, flags *byte) (err error) { + _, _, e1 := syscall_syscall(libc_unveil_trampoline_addr, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(flags)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_unveil_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_unveil unveil "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s index 477a7d5b..da115f9a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s @@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fcntl_trampoline_addr(SB)/8, 
$libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 @@ -668,7 +673,22 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_getfsstat(SB) +GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 +DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) + TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) + +TEXT libc_pledge_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pledge(SB) +GLOBL ·libc_pledge_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pledge_trampoline_addr(SB)/8, $libc_pledge_trampoline<>(SB) + +TEXT libc_unveil_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_unveil(SB) +GLOBL ·libc_unveil_trampoline_addr(SB), RODATA, $8 +DATA ·libc_unveil_trampoline_addr(SB)/8, $libc_unveil_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index b4018946..829b87fe 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build solaris && amd64 -// +build solaris,amd64 package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go index 1d8fe1d4..94f01123 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build zos && s390x -// +build zos,s390x package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go index 55e04847..3a58ae81 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build 386 && openbsd -// +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go index d2243cf8..dcb7a0eb 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build amd64 && openbsd -// +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go index 82dc51bd..db5a7bf1 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build arm && openbsd -// +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go index cbdda1a4..7be575a7 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. 
//go:build arm64 && openbsd -// +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go index f55eae1a..d6e3174c 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build mips64 && openbsd -// +build mips64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go index e4405447..ee97157d 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build ppc64 && openbsd -// +build ppc64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go index a0db82fc..35c3b91d 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build riscv64 && openbsd -// +build riscv64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go index f8298ff9..5edda768 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && darwin -// +build amd64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go index 5eb433bb..0dc9e8b4 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && darwin -// +build arm64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go index 703675c0..308ddf3a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && dragonfly -// +build amd64,dragonfly package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go index 4e0d9610..418664e3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && freebsd -// +build 386,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go index 01636b83..34d0b86d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build amd64 && freebsd -// +build amd64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go index ad99bc10..b71cf45e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && freebsd -// +build arm,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go index 89dcc427..e32df1c1 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && freebsd -// +build arm64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go index ee37aaa0..15ad6111 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_freebsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && freebsd -// +build riscv64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 9862853d..fcf3ecbd 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && linux -// +build 386,linux package unix @@ -448,4 +447,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 8901f0f4..f56dc250 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && linux -// +build amd64,linux package unix @@ -370,4 +369,6 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 + SYS_MAP_SHADOW_STACK = 453 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 6902c37e..974bf246 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && linux -// +build arm,linux package unix @@ -412,4 +411,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index a6d3dff8..39a2739e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build arm64 && linux -// +build arm64,linux package unix @@ -315,4 +314,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index b18f3f71..cf9c9d77 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build loong64 && linux -// +build loong64,linux package unix @@ -309,4 +308,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 0302e5e3..10b7362e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips && linux -// +build mips,linux package unix @@ -432,4 +431,5 @@ const ( SYS_FUTEX_WAITV = 4449 SYS_SET_MEMPOLICY_HOME_NODE = 4450 SYS_CACHESTAT = 4451 + SYS_FCHMODAT2 = 4452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 6693ba4a..cd4d8b4f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && linux -// +build mips64,linux package unix @@ -362,4 +361,5 @@ const ( SYS_FUTEX_WAITV = 5449 SYS_SET_MEMPOLICY_HOME_NODE = 5450 SYS_CACHESTAT = 5451 + SYS_FCHMODAT2 = 5452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index fd93f498..2c0efca8 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64le && linux -// +build mips64le,linux package unix @@ -362,4 +361,5 @@ const ( SYS_FUTEX_WAITV = 5449 SYS_SET_MEMPOLICY_HOME_NODE = 5450 SYS_CACHESTAT = 5451 + SYS_FCHMODAT2 = 5452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 760ddcad..a72e31d3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mipsle && linux -// +build mipsle,linux package unix @@ -432,4 +431,5 @@ const ( SYS_FUTEX_WAITV = 4449 SYS_SET_MEMPOLICY_HOME_NODE = 4450 SYS_CACHESTAT = 4451 + SYS_FCHMODAT2 = 4452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index cff2b255..c7d1e374 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build ppc && linux -// +build ppc,linux package unix @@ -439,4 +438,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index a4b2405d..f4d4838c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && linux -// +build ppc64,linux package unix @@ -411,4 +410,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index aca54b4e..b64f0e59 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64le && linux -// +build ppc64le,linux package unix @@ -411,4 +410,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 9d1738d6..95711195 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && linux -// +build riscv64,linux package unix @@ -316,4 +315,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 022878dc..f94e943b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build s390x && linux -// +build s390x,linux package unix @@ -377,4 +376,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 4100a761..ba0c2bc5 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build sparc64 && linux -// +build sparc64,linux package unix @@ -390,4 +389,5 @@ const ( SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 SYS_CACHESTAT = 451 + SYS_FCHMODAT2 = 452 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go index 3a6699eb..b2aa8cd4 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build 386 && netbsd -// +build 386,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go index 5677cd4f..524a1b1c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && netbsd -// +build amd64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go index e784cb6d..d59b943a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && netbsd -// +build arm,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go index bd4952ef..31e771d5 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; DO NOT EDIT. //go:build arm64 && netbsd -// +build arm64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go index 59773381..9fd77c6c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && openbsd -// +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go index 16af2918..af10af28 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && openbsd -// +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go index f59b18a9..cc2028af 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && openbsd -// +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go index 721ef591..c06dd441 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && openbsd -// +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go index 01c43a01..9ddbf3e0 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build mips64 && openbsd -// +build mips64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go index f258cfa2..19a6ee41 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && openbsd -// +build ppc64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go index 07919e0e..05192a78 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && openbsd -// +build riscv64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go index 073daad4..b2e30858 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_zos_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go index 7a8161c1..3e6d57ca 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && aix -// +build ppc,aix package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go index 07ed733c..3a219bdc 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && aix -// +build ppc64,aix package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 690cefc3..091d107f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && darwin -// +build amd64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 5bffc10e..28ff4ef7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && darwin -// +build arm64,darwin package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go index d0ba8e9b..30e405bb 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build amd64 && dragonfly -// +build amd64,dragonfly package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 29dc4833..6cbd094a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && freebsd -// +build 386,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 0a89b289..7c03b6ee 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && freebsd -// +build amd64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index c8666bb1..422107ee 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && freebsd -// +build arm,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 88fb48a8..505a12ac 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && freebsd -// +build arm64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go index 698dc975..cc986c79 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && freebsd -// +build riscv64,freebsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 18aa70b4..bbf8399f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -1,7 +1,6 @@ // Code generated by mkmerge; DO NOT EDIT. 
//go:build linux -// +build linux package unix @@ -2672,6 +2671,7 @@ const ( BPF_PROG_TYPE_LSM = 0x1d BPF_PROG_TYPE_SK_LOOKUP = 0x1e BPF_PROG_TYPE_SYSCALL = 0x1f + BPF_PROG_TYPE_NETFILTER = 0x20 BPF_CGROUP_INET_INGRESS = 0x0 BPF_CGROUP_INET_EGRESS = 0x1 BPF_CGROUP_INET_SOCK_CREATE = 0x2 @@ -2716,6 +2716,11 @@ const ( BPF_PERF_EVENT = 0x29 BPF_TRACE_KPROBE_MULTI = 0x2a BPF_LSM_CGROUP = 0x2b + BPF_STRUCT_OPS = 0x2c + BPF_NETFILTER = 0x2d + BPF_TCX_INGRESS = 0x2e + BPF_TCX_EGRESS = 0x2f + BPF_TRACE_UPROBE_MULTI = 0x30 BPF_LINK_TYPE_UNSPEC = 0x0 BPF_LINK_TYPE_RAW_TRACEPOINT = 0x1 BPF_LINK_TYPE_TRACING = 0x2 @@ -2726,6 +2731,18 @@ const ( BPF_LINK_TYPE_PERF_EVENT = 0x7 BPF_LINK_TYPE_KPROBE_MULTI = 0x8 BPF_LINK_TYPE_STRUCT_OPS = 0x9 + BPF_LINK_TYPE_NETFILTER = 0xa + BPF_LINK_TYPE_TCX = 0xb + BPF_LINK_TYPE_UPROBE_MULTI = 0xc + BPF_PERF_EVENT_UNSPEC = 0x0 + BPF_PERF_EVENT_UPROBE = 0x1 + BPF_PERF_EVENT_URETPROBE = 0x2 + BPF_PERF_EVENT_KPROBE = 0x3 + BPF_PERF_EVENT_KRETPROBE = 0x4 + BPF_PERF_EVENT_TRACEPOINT = 0x5 + BPF_PERF_EVENT_EVENT = 0x6 + BPF_F_KPROBE_MULTI_RETURN = 0x1 + BPF_F_UPROBE_MULTI_RETURN = 0x1 BPF_ANY = 0x0 BPF_NOEXIST = 0x1 BPF_EXIST = 0x2 @@ -2743,6 +2760,8 @@ const ( BPF_F_MMAPABLE = 0x400 BPF_F_PRESERVE_ELEMS = 0x800 BPF_F_INNER_MAP = 0x1000 + BPF_F_LINK = 0x2000 + BPF_F_PATH_FD = 0x4000 BPF_STATS_RUN_TIME = 0x0 BPF_STACK_BUILD_ID_EMPTY = 0x0 BPF_STACK_BUILD_ID_VALID = 0x1 @@ -2763,6 +2782,7 @@ const ( BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_DONT_FRAGMENT = 0x4 BPF_F_SEQ_NUMBER = 0x8 + BPF_F_NO_TUNNEL_KEY = 0x10 BPF_F_TUNINFO_FLAGS = 0x10 BPF_F_INDEX_MASK = 0xffffffff BPF_F_CURRENT_CPU = 0xffffffff @@ -2779,6 +2799,8 @@ const ( BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 BPF_F_ADJ_ROOM_NO_CSUM_RESET = 0x20 BPF_F_ADJ_ROOM_ENCAP_L2_ETH = 0x40 + BPF_F_ADJ_ROOM_DECAP_L3_IPV4 = 0x80 + BPF_F_ADJ_ROOM_DECAP_L3_IPV6 = 0x100 BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_F_SYSCTL_BASE_NAME = 0x1 @@ -2867,6 +2889,8 @@ const ( BPF_DEVCG_DEV_CHAR = 0x2 BPF_FIB_LOOKUP_DIRECT = 0x1 BPF_FIB_LOOKUP_OUTPUT = 0x2 + BPF_FIB_LOOKUP_SKIP_NEIGH = 0x4 + BPF_FIB_LOOKUP_TBID = 0x8 BPF_FIB_LKUP_RET_SUCCESS = 0x0 BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 @@ -2902,6 +2926,7 @@ const ( BPF_CORE_ENUMVAL_EXISTS = 0xa BPF_CORE_ENUMVAL_VALUE = 0xb BPF_CORE_TYPE_MATCHES = 0xc + BPF_F_TIMER_ABS = 0x1 ) const ( @@ -2980,6 +3005,12 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } +type LoopConfig struct { + Fd uint32 + Size uint32 + Info LoopInfo64 + _ [8]uint64 +} type TIPCSocketAddr struct { Ref uint32 @@ -5883,3 +5914,15 @@ type SchedAttr struct { } const SizeofSchedAttr = 0x38 + +type Cachestat_t struct { + Cache uint64 + Dirty uint64 + Writeback uint64 + Evicted uint64 + Recently_evicted uint64 +} +type CachestatRange struct { + Off uint64 + Len uint64 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 6d8acbcc..438a30af 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build 386 && linux -// +build 386,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 59293c68..adceca35 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && linux -// +build amd64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 40cfa38c..eeaa00a3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && linux -// +build arm,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 055bc421..6739aa91 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && linux -// +build arm64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go index f28affbc..9920ef63 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build loong64 && linux -// +build loong64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 9d71e7cc..2923b799 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips && linux -// +build mips,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index fd5ccd33..ce2750ee 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && linux -// +build mips64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 7704de77..3038811d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64le && linux -// +build mips64le,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index df00b875..efc6fed1 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build mipsle && linux -// +build mipsle,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index 0942840d..9a654b75 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc && linux -// +build ppc,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 03487439..40d358e3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64 && linux -// +build ppc64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index bad06704..148c6ceb 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build ppc64le && linux -// +build ppc64le,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 1b4c97c3..72ba8154 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && linux -// +build riscv64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index aa268d02..71e76550 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build s390x && linux -// +build s390x,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 444045b6..4abbdb9d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build sparc64 && linux -// +build sparc64,linux package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go index 9bc4c8f9..f22e7947 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && netbsd -// +build 386,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go index bb05f655..066a7d83 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build amd64 && netbsd -// +build amd64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index db40e3a1..439548ec 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && netbsd -// +build arm,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go index 11121151..16085d3b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && netbsd -// +build arm64,netbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go index 26eba23b..afd13a3a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build 386 && openbsd -// +build 386,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go index 5a547988..5d97f1f9 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && openbsd -// +build amd64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go index be58c4e1..34871cdc 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm && openbsd -// +build arm,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go index 52338266..5911bceb 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build arm64 && openbsd -// +build arm64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go index 605cfdb1..e4f24f3b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build mips64 && openbsd -// +build mips64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go index d6724c01..ca50a793 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_ppc64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. 
//go:build ppc64 && openbsd -// +build ppc64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go index ddfd27a4..d7d7f790 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_riscv64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build riscv64 && openbsd -// +build riscv64,openbsd package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go index 0400747c..14160576 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go @@ -2,7 +2,6 @@ // Code generated by the command above; see README.md. DO NOT EDIT. //go:build amd64 && solaris -// +build amd64,solaris package unix diff --git a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go index aec1efcb..54f31be6 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build zos && s390x -// +build zos,s390x // Hand edited based on ztypes_linux_s390x.go // TODO: auto-generate. diff --git a/vendor/golang.org/x/sys/windows/aliases.go b/vendor/golang.org/x/sys/windows/aliases.go index a20ebea6..ce2d713d 100644 --- a/vendor/golang.org/x/sys/windows/aliases.go +++ b/vendor/golang.org/x/sys/windows/aliases.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows && go1.9 -// +build windows,go1.9 package windows diff --git a/vendor/golang.org/x/sys/windows/empty.s b/vendor/golang.org/x/sys/windows/empty.s index fdbbbcd3..ba64caca 100644 --- a/vendor/golang.org/x/sys/windows/empty.s +++ b/vendor/golang.org/x/sys/windows/empty.s @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !go1.12 -// +build !go1.12 // This file is here to allow bodyless functions with go:linkname for Go 1.11 // and earlier (see https://golang.org/issue/23311). diff --git a/vendor/golang.org/x/sys/windows/eventlog.go b/vendor/golang.org/x/sys/windows/eventlog.go index 2cd60645..6c366955 100644 --- a/vendor/golang.org/x/sys/windows/eventlog.go +++ b/vendor/golang.org/x/sys/windows/eventlog.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows -// +build windows package windows diff --git a/vendor/golang.org/x/sys/windows/mksyscall.go b/vendor/golang.org/x/sys/windows/mksyscall.go index 8563f79c..dbcdb090 100644 --- a/vendor/golang.org/x/sys/windows/mksyscall.go +++ b/vendor/golang.org/x/sys/windows/mksyscall.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build generate -// +build generate package windows diff --git a/vendor/golang.org/x/sys/windows/race.go b/vendor/golang.org/x/sys/windows/race.go index 9196b089..0f1bdc38 100644 --- a/vendor/golang.org/x/sys/windows/race.go +++ b/vendor/golang.org/x/sys/windows/race.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows && race -// +build windows,race package windows diff --git a/vendor/golang.org/x/sys/windows/race0.go b/vendor/golang.org/x/sys/windows/race0.go index 7bae4817..0c78da78 100644 --- a/vendor/golang.org/x/sys/windows/race0.go +++ b/vendor/golang.org/x/sys/windows/race0.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build windows && !race -// +build windows,!race package windows diff --git a/vendor/golang.org/x/sys/windows/registry/key.go b/vendor/golang.org/x/sys/windows/registry/key.go index 6c8d97b6..fd863244 100644 --- a/vendor/golang.org/x/sys/windows/registry/key.go +++ b/vendor/golang.org/x/sys/windows/registry/key.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows -// +build windows // Package registry provides access to the Windows registry. // diff --git a/vendor/golang.org/x/sys/windows/registry/mksyscall.go b/vendor/golang.org/x/sys/windows/registry/mksyscall.go index ee74927d..bbf86ccf 100644 --- a/vendor/golang.org/x/sys/windows/registry/mksyscall.go +++ b/vendor/golang.org/x/sys/windows/registry/mksyscall.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build generate -// +build generate package registry diff --git a/vendor/golang.org/x/sys/windows/registry/syscall.go b/vendor/golang.org/x/sys/windows/registry/syscall.go index 41733512..f533091c 100644 --- a/vendor/golang.org/x/sys/windows/registry/syscall.go +++ b/vendor/golang.org/x/sys/windows/registry/syscall.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows -// +build windows package registry diff --git a/vendor/golang.org/x/sys/windows/registry/value.go b/vendor/golang.org/x/sys/windows/registry/value.go index 2789f6f1..74db26b9 100644 --- a/vendor/golang.org/x/sys/windows/registry/value.go +++ b/vendor/golang.org/x/sys/windows/registry/value.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows -// +build windows package registry diff --git a/vendor/golang.org/x/sys/windows/service.go b/vendor/golang.org/x/sys/windows/service.go index c44a1b96..a9dc6308 100644 --- a/vendor/golang.org/x/sys/windows/service.go +++ b/vendor/golang.org/x/sys/windows/service.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows -// +build windows package windows diff --git a/vendor/golang.org/x/sys/windows/str.go b/vendor/golang.org/x/sys/windows/str.go index 4fc01434..6a4f9ce6 100644 --- a/vendor/golang.org/x/sys/windows/str.go +++ b/vendor/golang.org/x/sys/windows/str.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows -// +build windows package windows diff --git a/vendor/golang.org/x/sys/windows/syscall.go b/vendor/golang.org/x/sys/windows/syscall.go index 8732cdb9..e85ed6b9 100644 --- a/vendor/golang.org/x/sys/windows/syscall.go +++ b/vendor/golang.org/x/sys/windows/syscall.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build windows -// +build windows // Package windows contains an interface to the low-level operating system // primitives. 
OS details vary depending on the underlying system, and diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 35cfc57c..ffb8708c 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -155,6 +155,8 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) = kernel32.GetModuleFileNameW //sys GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) = kernel32.GetModuleHandleExW //sys SetDefaultDllDirectories(directoryFlags uint32) (err error) +//sys AddDllDirectory(path *uint16) (cookie uintptr, err error) = kernel32.AddDllDirectory +//sys RemoveDllDirectory(cookie uintptr) (err error) = kernel32.RemoveDllDirectory //sys SetDllDirectory(path string) (err error) = kernel32.SetDllDirectoryW //sys GetVersion() (ver uint32, err error) //sys FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) = FormatMessageW @@ -192,6 +194,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys GetComputerName(buf *uint16, n *uint32) (err error) = GetComputerNameW //sys GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) = GetComputerNameExW //sys SetEndOfFile(handle Handle) (err error) +//sys SetFileValidData(handle Handle, validDataLength int64) (err error) //sys GetSystemTimeAsFileTime(time *Filetime) //sys GetSystemTimePreciseAsFileTime(time *Filetime) //sys GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) [failretval==0xffffffff] @@ -233,6 +236,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) = userenv.CreateEnvironmentBlock //sys DestroyEnvironmentBlock(block *uint16) (err error) = userenv.DestroyEnvironmentBlock //sys getTickCount64() (ms uint64) = kernel32.GetTickCount64 +//sys GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) //sys SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) //sys GetFileAttributes(name *uint16) (attrs uint32, err error) [failretval==INVALID_FILE_ATTRIBUTES] = kernel32.GetFileAttributesW //sys SetFileAttributes(name *uint16, attrs uint32) (err error) = kernel32.SetFileAttributesW @@ -969,7 +973,8 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, int32, error) { if n > 0 { sl += int32(n) + 1 } - if sa.raw.Path[0] == '@' { + if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) { + // Check sl > 3 so we don't change unnamed socket behavior. sa.raw.Path[0] = 0 // Don't count trailing NUL for abstract address. 
sl-- diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index b88dc7c8..359780f6 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -1094,7 +1094,33 @@ const ( SOMAXCONN = 0x7fffffff - TCP_NODELAY = 1 + TCP_NODELAY = 1 + TCP_EXPEDITED_1122 = 2 + TCP_KEEPALIVE = 3 + TCP_MAXSEG = 4 + TCP_MAXRT = 5 + TCP_STDURG = 6 + TCP_NOURG = 7 + TCP_ATMARK = 8 + TCP_NOSYNRETRIES = 9 + TCP_TIMESTAMPS = 10 + TCP_OFFLOAD_PREFERENCE = 11 + TCP_CONGESTION_ALGORITHM = 12 + TCP_DELAY_FIN_ACK = 13 + TCP_MAXRTMS = 14 + TCP_FASTOPEN = 15 + TCP_KEEPCNT = 16 + TCP_KEEPIDLE = TCP_KEEPALIVE + TCP_KEEPINTVL = 17 + TCP_FAIL_CONNECT_ON_ICMP_ERROR = 18 + TCP_ICMP_ERROR_INFO = 19 + + UDP_NOCHECKSUM = 1 + UDP_SEND_MSG_SIZE = 2 + UDP_RECV_MAX_COALESCED_SIZE = 3 + UDP_CHECKSUM_COVERAGE = 20 + + UDP_COALESCED_INFO = 3 SHUT_RD = 0 SHUT_WR = 1 diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 8b1688de..e8791c82 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -184,6 +184,7 @@ var ( procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx") procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") + procAddDllDirectory = modkernel32.NewProc("AddDllDirectory") procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") procCancelIo = modkernel32.NewProc("CancelIo") procCancelIoEx = modkernel32.NewProc("CancelIoEx") @@ -253,6 +254,7 @@ var ( procGetFileAttributesW = modkernel32.NewProc("GetFileAttributesW") procGetFileInformationByHandle = modkernel32.NewProc("GetFileInformationByHandle") procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") + procGetFileTime = modkernel32.NewProc("GetFileTime") procGetFileType = modkernel32.NewProc("GetFileType") procGetFinalPathNameByHandleW = modkernel32.NewProc("GetFinalPathNameByHandleW") procGetFullPathNameW = modkernel32.NewProc("GetFullPathNameW") @@ -329,6 +331,7 @@ var ( procReadProcessMemory = modkernel32.NewProc("ReadProcessMemory") procReleaseMutex = modkernel32.NewProc("ReleaseMutex") procRemoveDirectoryW = modkernel32.NewProc("RemoveDirectoryW") + procRemoveDllDirectory = modkernel32.NewProc("RemoveDllDirectory") procResetEvent = modkernel32.NewProc("ResetEvent") procResizePseudoConsole = modkernel32.NewProc("ResizePseudoConsole") procResumeThread = modkernel32.NewProc("ResumeThread") @@ -339,6 +342,7 @@ var ( procSetDefaultDllDirectories = modkernel32.NewProc("SetDefaultDllDirectories") procSetDllDirectoryW = modkernel32.NewProc("SetDllDirectoryW") procSetEndOfFile = modkernel32.NewProc("SetEndOfFile") + procSetFileValidData = modkernel32.NewProc("SetFileValidData") procSetEnvironmentVariableW = modkernel32.NewProc("SetEnvironmentVariableW") procSetErrorMode = modkernel32.NewProc("SetErrorMode") procSetEvent = modkernel32.NewProc("SetEvent") @@ -1604,6 +1608,15 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) { return } +func AddDllDirectory(path *uint16) (cookie uintptr, err error) { + r0, _, e1 := syscall.Syscall(procAddDllDirectory.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + cookie = uintptr(r0) + if cookie == 0 { + err = errnoErr(e1) + } + return +} + func AssignProcessToJobObject(job Handle, process Handle) (err error) { r1, _, e1 := 
syscall.Syscall(procAssignProcessToJobObject.Addr(), 2, uintptr(job), uintptr(process), 0) if r1 == 0 { @@ -2185,6 +2198,14 @@ func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, return } +func GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { + r1, _, e1 := syscall.Syscall6(procGetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetFileType(filehandle Handle) (n uint32, err error) { r0, _, e1 := syscall.Syscall(procGetFileType.Addr(), 1, uintptr(filehandle), 0, 0) n = uint32(r0) @@ -2870,6 +2891,14 @@ func RemoveDirectory(path *uint16) (err error) { return } +func RemoveDllDirectory(cookie uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procRemoveDllDirectory.Addr(), 1, uintptr(cookie), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func ResetEvent(event Handle) (err error) { r1, _, e1 := syscall.Syscall(procResetEvent.Addr(), 1, uintptr(event), 0, 0) if r1 == 0 { @@ -2960,6 +2989,14 @@ func SetEndOfFile(handle Handle) (err error) { return } +func SetFileValidData(handle Handle, validDataLength int64) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0) if r1 == 0 { diff --git a/vendor/golang.org/x/term/term_unix.go b/vendor/golang.org/x/term/term_unix.go index 62c2b3f4..1ad0ddfe 100644 --- a/vendor/golang.org/x/term/term_unix.go +++ b/vendor/golang.org/x/term/term_unix.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos package term diff --git a/vendor/golang.org/x/term/term_unix_bsd.go b/vendor/golang.org/x/term/term_unix_bsd.go index 853b3d69..9dbf5462 100644 --- a/vendor/golang.org/x/term/term_unix_bsd.go +++ b/vendor/golang.org/x/term/term_unix_bsd.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build darwin || dragonfly || freebsd || netbsd || openbsd -// +build darwin dragonfly freebsd netbsd openbsd package term diff --git a/vendor/golang.org/x/term/term_unix_other.go b/vendor/golang.org/x/term/term_unix_other.go index 1e8955c9..1b36de79 100644 --- a/vendor/golang.org/x/term/term_unix_other.go +++ b/vendor/golang.org/x/term/term_unix_other.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build aix || linux || solaris || zos -// +build aix linux solaris zos package term diff --git a/vendor/golang.org/x/term/term_unsupported.go b/vendor/golang.org/x/term/term_unsupported.go index f1df8506..3c409e58 100644 --- a/vendor/golang.org/x/term/term_unsupported.go +++ b/vendor/golang.org/x/term/term_unsupported.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !zos && !windows && !solaris && !plan9 -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!zos,!windows,!solaris,!plan9 package term diff --git a/vendor/golang.org/x/text/cases/icu.go b/vendor/golang.org/x/text/cases/icu.go index 2dc84b39..db7c237c 100644 --- a/vendor/golang.org/x/text/cases/icu.go +++ b/vendor/golang.org/x/text/cases/icu.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build icu -// +build icu package cases diff --git a/vendor/golang.org/x/text/cases/tables10.0.0.go b/vendor/golang.org/x/text/cases/tables10.0.0.go index ca992310..bd28ae14 100644 --- a/vendor/golang.org/x/text/cases/tables10.0.0.go +++ b/vendor/golang.org/x/text/cases/tables10.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.10 && !go1.13 -// +build go1.10,!go1.13 package cases diff --git a/vendor/golang.org/x/text/cases/tables11.0.0.go b/vendor/golang.org/x/text/cases/tables11.0.0.go index b1106b41..ce00ce37 100644 --- a/vendor/golang.org/x/text/cases/tables11.0.0.go +++ b/vendor/golang.org/x/text/cases/tables11.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.13 && !go1.14 -// +build go1.13,!go1.14 package cases diff --git a/vendor/golang.org/x/text/cases/tables12.0.0.go b/vendor/golang.org/x/text/cases/tables12.0.0.go index ae7dc240..84d841b1 100644 --- a/vendor/golang.org/x/text/cases/tables12.0.0.go +++ b/vendor/golang.org/x/text/cases/tables12.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.14 && !go1.16 -// +build go1.14,!go1.16 package cases diff --git a/vendor/golang.org/x/text/cases/tables13.0.0.go b/vendor/golang.org/x/text/cases/tables13.0.0.go index 68d2981d..6187e6b4 100644 --- a/vendor/golang.org/x/text/cases/tables13.0.0.go +++ b/vendor/golang.org/x/text/cases/tables13.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.16 && !go1.21 -// +build go1.16,!go1.21 package cases diff --git a/vendor/golang.org/x/text/cases/tables15.0.0.go b/vendor/golang.org/x/text/cases/tables15.0.0.go index e431b995..aee0f310 100644 --- a/vendor/golang.org/x/text/cases/tables15.0.0.go +++ b/vendor/golang.org/x/text/cases/tables15.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.21 -// +build go1.21 package cases diff --git a/vendor/golang.org/x/text/cases/tables9.0.0.go b/vendor/golang.org/x/text/cases/tables9.0.0.go index 636d5d14..3aeb7be6 100644 --- a/vendor/golang.org/x/text/cases/tables9.0.0.go +++ b/vendor/golang.org/x/text/cases/tables9.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build !go1.10 -// +build !go1.10 package cases diff --git a/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go b/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go index 8a7392c4..784bb880 100644 --- a/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go +++ b/vendor/golang.org/x/text/secure/bidirule/bidirule10.0.0.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build go1.10 -// +build go1.10 package bidirule diff --git a/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go b/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go index bb0a9200..8e1e9439 100644 --- a/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go +++ b/vendor/golang.org/x/text/secure/bidirule/bidirule9.0.0.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !go1.10 -// +build !go1.10 package bidirule diff --git a/vendor/golang.org/x/text/secure/precis/tables10.0.0.go b/vendor/golang.org/x/text/secure/precis/tables10.0.0.go index 81647495..93cbffac 100644 --- a/vendor/golang.org/x/text/secure/precis/tables10.0.0.go +++ b/vendor/golang.org/x/text/secure/precis/tables10.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.10 && !go1.13 -// +build go1.10,!go1.13 package precis diff --git a/vendor/golang.org/x/text/secure/precis/tables11.0.0.go b/vendor/golang.org/x/text/secure/precis/tables11.0.0.go index a40e55d6..6cea210e 100644 --- a/vendor/golang.org/x/text/secure/precis/tables11.0.0.go +++ b/vendor/golang.org/x/text/secure/precis/tables11.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.13 && !go1.14 -// +build go1.13,!go1.14 package precis diff --git a/vendor/golang.org/x/text/secure/precis/tables12.0.0.go b/vendor/golang.org/x/text/secure/precis/tables12.0.0.go index 254bbc79..1b506cdf 100644 --- a/vendor/golang.org/x/text/secure/precis/tables12.0.0.go +++ b/vendor/golang.org/x/text/secure/precis/tables12.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.14 && !go1.16 -// +build go1.14,!go1.16 package precis diff --git a/vendor/golang.org/x/text/secure/precis/tables13.0.0.go b/vendor/golang.org/x/text/secure/precis/tables13.0.0.go index 7bc1a162..0a467f59 100644 --- a/vendor/golang.org/x/text/secure/precis/tables13.0.0.go +++ b/vendor/golang.org/x/text/secure/precis/tables13.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.16 && !go1.21 -// +build go1.16,!go1.21 package precis diff --git a/vendor/golang.org/x/text/secure/precis/tables15.0.0.go b/vendor/golang.org/x/text/secure/precis/tables15.0.0.go index 48c32277..06286061 100644 --- a/vendor/golang.org/x/text/secure/precis/tables15.0.0.go +++ b/vendor/golang.org/x/text/secure/precis/tables15.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.21 -// +build go1.21 package precis diff --git a/vendor/golang.org/x/text/secure/precis/tables9.0.0.go b/vendor/golang.org/x/text/secure/precis/tables9.0.0.go index 2292b7cb..0a104f79 100644 --- a/vendor/golang.org/x/text/secure/precis/tables9.0.0.go +++ b/vendor/golang.org/x/text/secure/precis/tables9.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build !go1.10 -// +build !go1.10 package precis diff --git a/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go index 42fa8d72..d2bd7118 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
//go:build go1.10 && !go1.13 -// +build go1.10,!go1.13 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go index 56a0e1ea..f76bdca2 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.13 && !go1.14 -// +build go1.13,!go1.14 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go index baacf32b..3aa2c3bd 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.14 && !go1.16 -// +build go1.14,!go1.16 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go index ffadb7be..a7137579 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables13.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.16 && !go1.21 -// +build go1.16,!go1.21 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go index 92cce580..f15746f7 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables15.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.21 -// +build go1.21 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go index f517fdb2..c164d379 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables9.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build !go1.10 -// +build !go1.10 package bidi diff --git a/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go index f5a07882..1af161c7 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.10 && !go1.13 -// +build go1.10,!go1.13 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go index cb7239c4..eb73ecc3 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.13 && !go1.14 -// +build go1.13,!go1.14 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go index 11b27330..276cb8d8 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
//go:build go1.14 && !go1.16 -// +build go1.14,!go1.16 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go index f65785e8..0cceffd7 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables13.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.16 && !go1.21 -// +build go1.16,!go1.21 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go index e1858b87..b0819e42 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables15.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.21 -// +build go1.21 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go index 0175eae5..bf65457d 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build !go1.10 -// +build !go1.10 package norm diff --git a/vendor/golang.org/x/text/width/tables10.0.0.go b/vendor/golang.org/x/text/width/tables10.0.0.go index cd9d91ca..07c1cb17 100644 --- a/vendor/golang.org/x/text/width/tables10.0.0.go +++ b/vendor/golang.org/x/text/width/tables10.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.10 && !go1.13 -// +build go1.10,!go1.13 package width diff --git a/vendor/golang.org/x/text/width/tables11.0.0.go b/vendor/golang.org/x/text/width/tables11.0.0.go index 327eaef9..89288b3d 100644 --- a/vendor/golang.org/x/text/width/tables11.0.0.go +++ b/vendor/golang.org/x/text/width/tables11.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.13 && !go1.14 -// +build go1.13,!go1.14 package width diff --git a/vendor/golang.org/x/text/width/tables12.0.0.go b/vendor/golang.org/x/text/width/tables12.0.0.go index 5c14ade6..755ee912 100644 --- a/vendor/golang.org/x/text/width/tables12.0.0.go +++ b/vendor/golang.org/x/text/width/tables12.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.14 && !go1.16 -// +build go1.14,!go1.16 package width diff --git a/vendor/golang.org/x/text/width/tables13.0.0.go b/vendor/golang.org/x/text/width/tables13.0.0.go index b1fcb522..40c169ed 100644 --- a/vendor/golang.org/x/text/width/tables13.0.0.go +++ b/vendor/golang.org/x/text/width/tables13.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.16 && !go1.21 -// +build go1.16,!go1.21 package width diff --git a/vendor/golang.org/x/text/width/tables15.0.0.go b/vendor/golang.org/x/text/width/tables15.0.0.go index 4b91e338..2b852896 100644 --- a/vendor/golang.org/x/text/width/tables15.0.0.go +++ b/vendor/golang.org/x/text/width/tables15.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
//go:build go1.21 -// +build go1.21 package width diff --git a/vendor/golang.org/x/text/width/tables9.0.0.go b/vendor/golang.org/x/text/width/tables9.0.0.go index 6781f3d9..d981330a 100644 --- a/vendor/golang.org/x/text/width/tables9.0.0.go +++ b/vendor/golang.org/x/text/width/tables9.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build !go1.10 -// +build !go1.10 package width diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index f0e0cf3c..8f6c7f49 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -52,6 +52,8 @@ func Every(interval time.Duration) Limit { // or its associated context.Context is canceled. // // The methods AllowN, ReserveN, and WaitN consume n tokens. +// +// Limiter is safe for simultaneous use by multiple goroutines. type Limiter struct { mu sync.Mutex limit Limit diff --git a/vendor/golang.org/x/tools/cmd/goimports/goimports.go b/vendor/golang.org/x/tools/cmd/goimports/goimports.go index b354c9e8..dcb5023a 100644 --- a/vendor/golang.org/x/tools/cmd/goimports/goimports.go +++ b/vendor/golang.org/x/tools/cmd/goimports/goimports.go @@ -11,11 +11,10 @@ import ( "flag" "fmt" "go/scanner" - exec "golang.org/x/sys/execabs" "io" - "io/ioutil" "log" "os" + "os/exec" "path/filepath" "runtime" "runtime/pprof" @@ -106,7 +105,7 @@ func processFile(filename string, in io.Reader, out io.Writer, argType argumentT in = f } - src, err := ioutil.ReadAll(in) + src, err := io.ReadAll(in) if err != nil { return err } @@ -159,7 +158,7 @@ func processFile(filename string, in io.Reader, out io.Writer, argType argumentT if fi, err := os.Stat(filename); err == nil { perms = fi.Mode() & os.ModePerm } - err = ioutil.WriteFile(filename, res, perms) + err = os.WriteFile(filename, res, perms) if err != nil { return err } @@ -296,7 +295,7 @@ func gofmtMain() { } func writeTempFile(dir, prefix string, data []byte) (string, error) { - file, err := ioutil.TempFile(dir, prefix) + file, err := os.CreateTemp(dir, prefix) if err != nil { return "", err } diff --git a/vendor/golang.org/x/tools/cmd/goimports/goimports_gc.go b/vendor/golang.org/x/tools/cmd/goimports/goimports_gc.go index 190a5653..3326646d 100644 --- a/vendor/golang.org/x/tools/cmd/goimports/goimports_gc.go +++ b/vendor/golang.org/x/tools/cmd/goimports/goimports_gc.go @@ -19,8 +19,8 @@ func doTrace() func() { bw, flush := bufferedFileWriter(*traceProfile) trace.Start(bw) return func() { - flush() trace.Stop() + flush() } } return func() {} diff --git a/vendor/golang.org/x/tools/cmd/stringer/stringer.go b/vendor/golang.org/x/tools/cmd/stringer/stringer.go index 998d1a51..2b19c93e 100644 --- a/vendor/golang.org/x/tools/cmd/stringer/stringer.go +++ b/vendor/golang.org/x/tools/cmd/stringer/stringer.go @@ -188,6 +188,8 @@ type Generator struct { trimPrefix string lineComment bool + + logf func(format string, args ...interface{}) // test logging hook; nil when not testing } func (g *Generator) Printf(format string, args ...interface{}) { @@ -221,13 +223,14 @@ func (g *Generator) parsePackage(patterns []string, tags []string) { // in a separate pass? For later. Tests: false, BuildFlags: []string{fmt.Sprintf("-tags=%s", strings.Join(tags, " "))}, + Logf: g.logf, } pkgs, err := packages.Load(cfg, patterns...) 
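The x/time hunk above only adds a doc sentence, but it documents a real API contract: a single rate.Limiter may be shared by many goroutines. A minimal usage sketch of that contract (the limit and burst values are arbitrary):

package main

import (
	"context"
	"fmt"
	"sync"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// One token every 100ms with a burst of 3, shared by five
	// goroutines; no extra locking is needed around the Limiter.
	lim := rate.NewLimiter(rate.Every(100*time.Millisecond), 3)

	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			if err := lim.Wait(context.Background()); err != nil {
				return // context canceled
			}
			fmt.Println("goroutine", id, "got a token")
		}(i)
	}
	wg.Wait()
}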
if err != nil { log.Fatal(err) } if len(pkgs) != 1 { - log.Fatalf("error: %d packages found", len(pkgs)) + log.Fatalf("error: %d packages matching %v", len(pkgs), strings.Join(patterns, " ")) } g.addPackage(pkgs[0]) } diff --git a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go index 9fa5aa19..2c4c4e23 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go @@ -11,8 +11,6 @@ import ( "go/ast" "go/token" "sort" - - "golang.org/x/tools/internal/typeparams" ) // PathEnclosingInterval returns the node that encloses the source @@ -322,7 +320,7 @@ func childrenOf(n ast.Node) []ast.Node { children = append(children, n.Recv) } children = append(children, n.Name) - if tparams := typeparams.ForFuncType(n.Type); tparams != nil { + if tparams := n.Type.TypeParams; tparams != nil { children = append(children, tparams) } if n.Type.Params != nil { @@ -377,7 +375,7 @@ func childrenOf(n ast.Node) []ast.Node { tok(n.Lbrack, len("[")), tok(n.Rbrack, len("]"))) - case *typeparams.IndexListExpr: + case *ast.IndexListExpr: children = append(children, tok(n.Lbrack, len("[")), tok(n.Rbrack, len("]"))) @@ -588,7 +586,7 @@ func NodeDescription(n ast.Node) string { return "decrement statement" case *ast.IndexExpr: return "index expression" - case *typeparams.IndexListExpr: + case *ast.IndexListExpr: return "index list expression" case *ast.InterfaceType: return "interface type" diff --git a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go index f430b21b..58934f76 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go @@ -9,8 +9,6 @@ import ( "go/ast" "reflect" "sort" - - "golang.org/x/tools/internal/typeparams" ) // An ApplyFunc is invoked by Apply for each node n, even if n is nil, @@ -252,7 +250,7 @@ func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast. a.apply(n, "X", nil, n.X) a.apply(n, "Index", nil, n.Index) - case *typeparams.IndexListExpr: + case *ast.IndexListExpr: a.apply(n, "X", nil, n.X) a.applyList(n, "Indices") @@ -293,7 +291,7 @@ func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast. a.apply(n, "Fields", nil, n.Fields) case *ast.FuncType: - if tparams := typeparams.ForFuncType(n); tparams != nil { + if tparams := n.TypeParams; tparams != nil { a.apply(n, "TypeParams", nil, tparams) } a.apply(n, "Params", nil, n.Params) @@ -408,7 +406,7 @@ func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast. case *ast.TypeSpec: a.apply(n, "Doc", nil, n.Doc) a.apply(n, "Name", nil, n.Name) - if tparams := typeparams.ForTypeSpec(n); tparams != nil { + if tparams := n.TypeParams; tparams != nil { a.apply(n, "TypeParams", nil, tparams) } a.apply(n, "Type", nil, n.Type) diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go index 165ede0f..03543bd4 100644 --- a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go +++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -128,15 +128,14 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, // (from "version"). Select appropriate importer. 
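The astutil hunks above drop the internal typeparams shims because, from Go 1.18 on, go/ast exposes generics directly: FuncType.TypeParams, TypeSpec.TypeParams, and ast.IndexListExpr for instantiations with two or more type arguments. A small sketch of inspecting those fields:

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
)

const src = `package p

func F[K comparable, V any](m map[K]V) {}

var _ = F[string, int]
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	ast.Inspect(f, func(n ast.Node) bool {
		switch n := n.(type) {
		case *ast.FuncDecl:
			if tp := n.Type.TypeParams; tp != nil {
				fmt.Println("func", n.Name.Name, "has", len(tp.List), "type parameter fields")
			}
		case *ast.IndexListExpr: // F[string, int]: two or more indices
			fmt.Println("index list with", len(n.Indices), "indices")
		}
		return true
	})
}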
if len(data) > 0 { switch data[0] { - case 'i': - _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) - return pkg, err + case 'v', 'c', 'd': // binary, till go1.10 + return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - case 'v', 'c', 'd': - _, pkg, err := gcimporter.BImportData(fset, imports, data, path) + case 'i': // indexed, till go1.19 + _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) return pkg, err - case 'u': + case 'u': // unified, from go1.20 _, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path) return pkg, err diff --git a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go index 18a002f8..333676b7 100644 --- a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go +++ b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go @@ -8,42 +8,46 @@ package packagesdriver import ( "context" "fmt" - "go/types" "strings" "golang.org/x/tools/internal/gocommand" ) -var debug = false - -func GetSizesGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (types.Sizes, error) { +func GetSizesForArgsGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (string, string, error) { inv.Verb = "list" inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"} stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv) var goarch, compiler string if rawErr != nil { - if rawErrMsg := rawErr.Error(); strings.Contains(rawErrMsg, "cannot find main module") || strings.Contains(rawErrMsg, "go.mod file not found") { - // User's running outside of a module. All bets are off. Get GOARCH and guess compiler is gc. + rawErrMsg := rawErr.Error() + if strings.Contains(rawErrMsg, "cannot find main module") || + strings.Contains(rawErrMsg, "go.mod file not found") { + // User's running outside of a module. + // All bets are off. Get GOARCH and guess compiler is gc. // TODO(matloob): Is this a problem in practice? inv.Verb = "env" inv.Args = []string{"GOARCH"} envout, enverr := gocmdRunner.Run(ctx, inv) if enverr != nil { - return nil, enverr + return "", "", enverr } goarch = strings.TrimSpace(envout.String()) compiler = "gc" + } else if friendlyErr != nil { + return "", "", friendlyErr } else { - return nil, friendlyErr + // This should be unreachable, but be defensive + // in case RunRaw's error results are inconsistent. + return "", "", rawErr } } else { fields := strings.Fields(stdout.String()) if len(fields) < 2 { - return nil, fmt.Errorf("could not parse GOARCH and Go compiler in format \" \":\nstdout: <<%s>>\nstderr: <<%s>>", + return "", "", fmt.Errorf("could not parse GOARCH and Go compiler in format \" \":\nstdout: <<%s>>\nstderr: <<%s>>", stdout.String(), stderr.String()) } goarch = fields[0] compiler = fields[1] } - return types.SizesFor(compiler, goarch), nil + return compiler, goarch, nil } diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go index da4ab89f..b2a0b7c6 100644 --- a/vendor/golang.org/x/tools/go/packages/doc.go +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -5,12 +5,32 @@ /* Package packages loads Go packages for inspection and analysis. -The Load function takes as input a list of patterns and return a list of Package -structs describing individual packages matched by those patterns. -The LoadMode controls the amount of detail in the loaded packages. 
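The gcexportdata hunk above turns the first byte of the export data into a format dispatch: 'v'/'c'/'d' (the pre-go1.11 binary format) is now rejected, 'i' selects the indexed decoder, and 'u' the unified one. A sketch of the public entry points a caller would use (gcexportdata.Find only succeeds in GOPATH/GOROOT layouts, so the sketch simply exits when it finds nothing):

package main

import (
	"fmt"
	"go/token"
	"go/types"
	"os"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	filename, path := gcexportdata.Find("fmt", "")
	if filename == "" {
		return // no export data found (e.g. module mode)
	}
	f, err := os.Open(filename)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	r, err := gcexportdata.NewReader(f) // strips the object-file wrapping
	if err != nil {
		panic(err)
	}
	fset := token.NewFileSet()
	imports := make(map[string]*types.Package)
	pkg, err := gcexportdata.Read(r, fset, imports, path) // byte dispatch happens here
	if err != nil {
		panic(err)
	}
	fmt.Println("loaded:", pkg.Path())
}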
- -Load passes most patterns directly to the underlying build tool, -but all patterns with the prefix "query=", where query is a +The [Load] function takes as input a list of patterns and returns a +list of [Package] values describing individual packages matched by those +patterns. +A [Config] specifies configuration options, the most important of which is +the [LoadMode], which controls the amount of detail in the loaded packages. + +Load passes most patterns directly to the underlying build tool. +The default build tool is the go command. +Its supported patterns are described at +https://pkg.go.dev/cmd/go#hdr-Package_lists_and_patterns. + +Load may be used in Go projects that use alternative build systems, by +installing an appropriate "driver" program for the build system and +specifying its location in the GOPACKAGESDRIVER environment variable. +For example, +https://github.com/bazelbuild/rules_go/wiki/Editor-and-tool-integration +explains how to use the driver for Bazel. +The driver program is responsible for interpreting patterns in its +preferred notation and reporting information about the packages that +they identify. +(See driverRequest and driverResponse types for the JSON +schema used by the protocol. +Though the protocol is supported, these types are currently unexported; +see #64608 for a proposal to publish them.) + +Regardless of driver, all patterns with the prefix "query=", where query is a non-empty string of letters from [a-z], are reserved and may be interpreted as query operators. @@ -35,7 +55,7 @@ The Package struct provides basic information about the package, including - Imports, a map from source import strings to the Packages they name; - Types, the type information for the package's exported symbols; - Syntax, the parsed syntax trees for the package's source code; and - - TypeInfo, the result of a complete type-check of the package syntax trees. + - TypesInfo, the result of a complete type-check of the package syntax trees. (See the documentation for type Package for the complete list of fields and more detailed descriptions.) @@ -64,7 +84,7 @@ reported about the loaded packages. See the documentation for type LoadMode for details. Most tools should pass their command-line arguments (after any flags) -uninterpreted to the loader, so that the loader can interpret them +uninterpreted to [Load], so that it can interpret them according to the conventions of the underlying build system. See the Example function for typical usage. 
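The rewritten doc comment above points callers at the package's Example function; for reference, a minimal Load call in the style it describes looks like this (the pattern "fmt" stands in for real command-line arguments):

package main

import (
	"fmt"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		Mode: packages.NeedName | packages.NeedTypes | packages.NeedTypesInfo,
	}
	pkgs, err := packages.Load(cfg, "fmt")
	if err != nil {
		panic(err) // Load itself failed
	}
	if packages.PrintErrors(pkgs) > 0 {
		return // per-package errors were printed to stderr
	}
	for _, p := range pkgs {
		fmt.Println(p.PkgPath, "type-checked:", p.TypesInfo != nil)
	}
}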
*/ diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go index 7242a0a7..7db1d129 100644 --- a/vendor/golang.org/x/tools/go/packages/external.go +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -12,8 +12,8 @@ import ( "bytes" "encoding/json" "fmt" - exec "golang.org/x/sys/execabs" "os" + "os/exec" "strings" ) diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index 6bb7168d..cd375fbc 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -9,10 +9,9 @@ import ( "context" "encoding/json" "fmt" - "go/types" - "io/ioutil" "log" "os" + "os/exec" "path" "path/filepath" "reflect" @@ -22,7 +21,6 @@ import ( "sync" "unicode" - exec "golang.org/x/sys/execabs" "golang.org/x/tools/go/internal/packagesdriver" "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/packagesinternal" @@ -153,10 +151,10 @@ func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { sizeswg.Add(1) go func() { - var sizes types.Sizes - sizes, sizeserr = packagesdriver.GetSizesGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner) - // types.SizesFor always returns nil or a *types.StdSizes. - response.dr.Sizes, _ = sizes.(*types.StdSizes) + compiler, arch, err := packagesdriver.GetSizesForArgsGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner) + sizeserr = err + response.dr.Compiler = compiler + response.dr.Arch = arch sizeswg.Done() }() } @@ -210,62 +208,6 @@ extractQueries: } } - // Only use go/packages' overlay processing if we're using a Go version - // below 1.16. Otherwise, go list handles it. - if goVersion, err := state.getGoVersion(); err == nil && goVersion < 16 { - modifiedPkgs, needPkgs, err := state.processGolistOverlay(response) - if err != nil { - return nil, err - } - - var containsCandidates []string - if len(containFiles) > 0 { - containsCandidates = append(containsCandidates, modifiedPkgs...) - containsCandidates = append(containsCandidates, needPkgs...) - } - if err := state.addNeededOverlayPackages(response, needPkgs); err != nil { - return nil, err - } - // Check candidate packages for containFiles. - if len(containFiles) > 0 { - for _, id := range containsCandidates { - pkg, ok := response.seenPackages[id] - if !ok { - response.addPackage(&Package{ - ID: id, - Errors: []Error{{ - Kind: ListError, - Msg: fmt.Sprintf("package %s expected but not seen", id), - }}, - }) - continue - } - for _, f := range containFiles { - for _, g := range pkg.GoFiles { - if sameFile(f, g) { - response.addRoot(id) - } - } - } - } - } - // Add root for any package that matches a pattern. This applies only to - // packages that are modified by overlays, since they are not added as - // roots automatically. - for _, pattern := range restPatterns { - match := matchPattern(pattern) - for _, pkgID := range modifiedPkgs { - pkg, ok := response.seenPackages[pkgID] - if !ok { - continue - } - if match(pkg.PkgPath) { - response.addRoot(pkg.ID) - } - } - } - } - sizeswg.Wait() if sizeserr != nil { return nil, sizeserr @@ -273,24 +215,6 @@ extractQueries: return response.dr, nil } -func (state *golistState) addNeededOverlayPackages(response *responseDeduper, pkgs []string) error { - if len(pkgs) == 0 { - return nil - } - dr, err := state.createDriverResponse(pkgs...) 
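Both external.go and golist.go swap golang.org/x/sys/execabs back to plain os/exec. That is safe because, from Go 1.19 on, os/exec itself refuses to resolve an executable found via a relative PATH entry and reports exec.ErrDot, which is the hardening execabs existed to provide. A sketch of the behavior:

package main

import (
	"errors"
	"fmt"
	"os/exec"
)

func main() {
	path, err := exec.LookPath("go")
	if errors.Is(err, exec.ErrDot) {
		// Go 1.19+ returns ErrDot rather than silently using a
		// binary found relative to the current directory.
		fmt.Println("refusing relative-path resolution:", err)
		return
	}
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println("go found at:", path)
}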
- if err != nil { - return err - } - for _, pkg := range dr.Packages { - response.addPackage(pkg) - } - _, needPkgs, err := state.processGolistOverlay(response) - if err != nil { - return err - } - return state.addNeededOverlayPackages(response, needPkgs) -} - func (state *golistState) runContainsQueries(response *responseDeduper, queries []string) error { for _, query := range queries { // TODO(matloob): Do only one query per directory. @@ -625,7 +549,12 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse } if pkg.PkgPath == "unsafe" { - pkg.GoFiles = nil // ignore fake unsafe.go file + pkg.CompiledGoFiles = nil // ignore fake unsafe.go file (#59929) + } else if len(pkg.CompiledGoFiles) == 0 { + // Work around for pre-go.1.11 versions of go list. + // TODO(matloob): they should be handled by the fallback. + // Can we delete this? + pkg.CompiledGoFiles = pkg.GoFiles } // Assume go list emits only absolute paths for Dir. @@ -663,16 +592,12 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse response.Roots = append(response.Roots, pkg.ID) } - // Work around for pre-go.1.11 versions of go list. - // TODO(matloob): they should be handled by the fallback. - // Can we delete this? - if len(pkg.CompiledGoFiles) == 0 { - pkg.CompiledGoFiles = pkg.GoFiles - } - // Temporary work-around for golang/go#39986. Parse filenames out of // error messages. This happens if there are unrecoverable syntax // errors in the source, so we can't match on a specific error message. + // + // TODO(rfindley): remove this heuristic, in favor of considering + // InvalidGoFiles from the list driver. if err := p.Error; err != nil && state.shouldAddFilenameFromError(p) { addFilenameFromPos := func(pos string) bool { split := strings.Split(pos, ":") @@ -891,6 +816,15 @@ func golistargs(cfg *Config, words []string, goVersion int) []string { // probably because you'd just get the TestMain. fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0 && !usesExportData(cfg)), } + + // golang/go#60456: with go1.21 and later, go list serves pgo variants, which + // can be costly to compute and may result in redundant processing for the + // caller. Disable these variants. If someone wants to add e.g. a NeedPGO + // mode flag, that should be a separate proposal. + if goVersion >= 21 { + fullargs = append(fullargs, "-pgo=off") + } + fullargs = append(fullargs, cfg.BuildFlags...) fullargs = append(fullargs, "--") fullargs = append(fullargs, words...) @@ -1100,7 +1034,7 @@ func (state *golistState) writeOverlays() (filename string, cleanup func(), err if len(state.cfg.Overlay) == 0 { return "", func() {}, nil } - dir, err := ioutil.TempDir("", "gopackages-*") + dir, err := os.MkdirTemp("", "gopackages-*") if err != nil { return "", nil, err } @@ -1119,7 +1053,7 @@ func (state *golistState) writeOverlays() (filename string, cleanup func(), err // Create a unique filename for the overlaid files, to avoid // creating nested directories. noSeparator := strings.Join(strings.Split(filepath.ToSlash(k), "/"), "") - f, err := ioutil.TempFile(dir, fmt.Sprintf("*-%s", noSeparator)) + f, err := os.CreateTemp(dir, fmt.Sprintf("*-%s", noSeparator)) if err != nil { return "", func() {}, err } @@ -1137,7 +1071,7 @@ func (state *golistState) writeOverlays() (filename string, cleanup func(), err } // Write out the overlay file that contains the filepath mappings. 
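writeOverlays above serializes the Config.Overlay map into the JSON file consumed by the go command's -overlay flag: an object with a single Replace field mapping real paths to replacement paths. A sketch producing a file of that shape (the /real/project path is hypothetical):

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
)

// overlayJSON mirrors the shape the -overlay flag expects:
// {"Replace": {<real path>: <replacement path>}}.
type overlayJSON struct {
	Replace map[string]string
}

func main() {
	dir, err := os.MkdirTemp("", "gopackages-*")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	repl := filepath.Join(dir, "main.go")
	if err := os.WriteFile(repl, []byte("package main\n"), 0666); err != nil {
		panic(err)
	}

	b, err := json.Marshal(overlayJSON{Replace: map[string]string{
		"/real/project/main.go": repl, // hypothetical on-disk source path
	}})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // pass via: go list -overlay=<file>
}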
filename = filepath.Join(dir, "overlay.json") - if err := ioutil.WriteFile(filename, b, 0665); err != nil { + if err := os.WriteFile(filename, b, 0665); err != nil { return "", func() {}, err } return filename, cleanup, nil diff --git a/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/vendor/golang.org/x/tools/go/packages/golist_overlay.go index 9576b472..d823c474 100644 --- a/vendor/golang.org/x/tools/go/packages/golist_overlay.go +++ b/vendor/golang.org/x/tools/go/packages/golist_overlay.go @@ -6,314 +6,11 @@ package packages import ( "encoding/json" - "fmt" - "go/parser" - "go/token" - "os" "path/filepath" - "regexp" - "sort" - "strconv" - "strings" "golang.org/x/tools/internal/gocommand" ) -// processGolistOverlay provides rudimentary support for adding -// files that don't exist on disk to an overlay. The results can be -// sometimes incorrect. -// TODO(matloob): Handle unsupported cases, including the following: -// - determining the correct package to add given a new import path -func (state *golistState) processGolistOverlay(response *responseDeduper) (modifiedPkgs, needPkgs []string, err error) { - havePkgs := make(map[string]string) // importPath -> non-test package ID - needPkgsSet := make(map[string]bool) - modifiedPkgsSet := make(map[string]bool) - - pkgOfDir := make(map[string][]*Package) - for _, pkg := range response.dr.Packages { - // This is an approximation of import path to id. This can be - // wrong for tests, vendored packages, and a number of other cases. - havePkgs[pkg.PkgPath] = pkg.ID - dir, err := commonDir(pkg.GoFiles) - if err != nil { - return nil, nil, err - } - if dir != "" { - pkgOfDir[dir] = append(pkgOfDir[dir], pkg) - } - } - - // If no new imports are added, it is safe to avoid loading any needPkgs. - // Otherwise, it's hard to tell which package is actually being loaded - // (due to vendoring) and whether any modified package will show up - // in the transitive set of dependencies (because new imports are added, - // potentially modifying the transitive set of dependencies). - var overlayAddsImports bool - - // If both a package and its test package are created by the overlay, we - // need the real package first. Process all non-test files before test - // files, and make the whole process deterministic while we're at it. - var overlayFiles []string - for opath := range state.cfg.Overlay { - overlayFiles = append(overlayFiles, opath) - } - sort.Slice(overlayFiles, func(i, j int) bool { - iTest := strings.HasSuffix(overlayFiles[i], "_test.go") - jTest := strings.HasSuffix(overlayFiles[j], "_test.go") - if iTest != jTest { - return !iTest // non-tests are before tests. - } - return overlayFiles[i] < overlayFiles[j] - }) - for _, opath := range overlayFiles { - contents := state.cfg.Overlay[opath] - base := filepath.Base(opath) - dir := filepath.Dir(opath) - var pkg *Package // if opath belongs to both a package and its test variant, this will be the test variant - var testVariantOf *Package // if opath is a test file, this is the package it is testing - var fileExists bool - isTestFile := strings.HasSuffix(opath, "_test.go") - pkgName, ok := extractPackageName(opath, contents) - if !ok { - // Don't bother adding a file that doesn't even have a parsable package statement - // to the overlay. - continue - } - // If all the overlay files belong to a different package, change the - // package name to that package. 
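The deleted processGolistOverlay sorted overlay files so that non-test files were handled before test files, lexically within each group, making the whole pass deterministic. The ordering trick is reusable on its own:

package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	// Same composite key as the deleted code: non-tests first,
	// then tests, lexical order within each group.
	files := []string{"b_test.go", "a.go", "a_test.go", "b.go"}
	sort.Slice(files, func(i, j int) bool {
		iTest := strings.HasSuffix(files[i], "_test.go")
		jTest := strings.HasSuffix(files[j], "_test.go")
		if iTest != jTest {
			return !iTest
		}
		return files[i] < files[j]
	})
	fmt.Println(files) // [a.go b.go a_test.go b_test.go]
}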
- maybeFixPackageName(pkgName, isTestFile, pkgOfDir[dir]) - nextPackage: - for _, p := range response.dr.Packages { - if pkgName != p.Name && p.ID != "command-line-arguments" { - continue - } - for _, f := range p.GoFiles { - if !sameFile(filepath.Dir(f), dir) { - continue - } - // Make sure to capture information on the package's test variant, if needed. - if isTestFile && !hasTestFiles(p) { - // TODO(matloob): Are there packages other than the 'production' variant - // of a package that this can match? This shouldn't match the test main package - // because the file is generated in another directory. - testVariantOf = p - continue nextPackage - } else if !isTestFile && hasTestFiles(p) { - // We're examining a test variant, but the overlaid file is - // a non-test file. Because the overlay implementation - // (currently) only adds a file to one package, skip this - // package, so that we can add the file to the production - // variant of the package. (https://golang.org/issue/36857 - // tracks handling overlays on both the production and test - // variant of a package). - continue nextPackage - } - if pkg != nil && p != pkg && pkg.PkgPath == p.PkgPath { - // We have already seen the production version of the - // for which p is a test variant. - if hasTestFiles(p) { - testVariantOf = pkg - } - } - pkg = p - if filepath.Base(f) == base { - fileExists = true - } - } - } - // The overlay could have included an entirely new package or an - // ad-hoc package. An ad-hoc package is one that we have manually - // constructed from inadequate `go list` results for a file= query. - // It will have the ID command-line-arguments. - if pkg == nil || pkg.ID == "command-line-arguments" { - // Try to find the module or gopath dir the file is contained in. - // Then for modules, add the module opath to the beginning. - pkgPath, ok, err := state.getPkgPath(dir) - if err != nil { - return nil, nil, err - } - if !ok { - break - } - var forTest string // only set for x tests - isXTest := strings.HasSuffix(pkgName, "_test") - if isXTest { - forTest = pkgPath - pkgPath += "_test" - } - id := pkgPath - if isTestFile { - if isXTest { - id = fmt.Sprintf("%s [%s.test]", pkgPath, forTest) - } else { - id = fmt.Sprintf("%s [%s.test]", pkgPath, pkgPath) - } - } - if pkg != nil { - // TODO(rstambler): We should change the package's path and ID - // here. The only issue is that this messes with the roots. - } else { - // Try to reclaim a package with the same ID, if it exists in the response. - for _, p := range response.dr.Packages { - if reclaimPackage(p, id, opath, contents) { - pkg = p - break - } - } - // Otherwise, create a new package. - if pkg == nil { - pkg = &Package{ - PkgPath: pkgPath, - ID: id, - Name: pkgName, - Imports: make(map[string]*Package), - } - response.addPackage(pkg) - havePkgs[pkg.PkgPath] = id - // Add the production package's sources for a test variant. - if isTestFile && !isXTest && testVariantOf != nil { - pkg.GoFiles = append(pkg.GoFiles, testVariantOf.GoFiles...) - pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, testVariantOf.CompiledGoFiles...) - // Add the package under test and its imports to the test variant. 
- pkg.forTest = testVariantOf.PkgPath - for k, v := range testVariantOf.Imports { - pkg.Imports[k] = &Package{ID: v.ID} - } - } - if isXTest { - pkg.forTest = forTest - } - } - } - } - if !fileExists { - pkg.GoFiles = append(pkg.GoFiles, opath) - // TODO(matloob): Adding the file to CompiledGoFiles can exhibit the wrong behavior - // if the file will be ignored due to its build tags. - pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, opath) - modifiedPkgsSet[pkg.ID] = true - } - imports, err := extractImports(opath, contents) - if err != nil { - // Let the parser or type checker report errors later. - continue - } - for _, imp := range imports { - // TODO(rstambler): If the package is an x test and the import has - // a test variant, make sure to replace it. - if _, found := pkg.Imports[imp]; found { - continue - } - overlayAddsImports = true - id, ok := havePkgs[imp] - if !ok { - var err error - id, err = state.resolveImport(dir, imp) - if err != nil { - return nil, nil, err - } - } - pkg.Imports[imp] = &Package{ID: id} - // Add dependencies to the non-test variant version of this package as well. - if testVariantOf != nil { - testVariantOf.Imports[imp] = &Package{ID: id} - } - } - } - - // toPkgPath guesses the package path given the id. - toPkgPath := func(sourceDir, id string) (string, error) { - if i := strings.IndexByte(id, ' '); i >= 0 { - return state.resolveImport(sourceDir, id[:i]) - } - return state.resolveImport(sourceDir, id) - } - - // Now that new packages have been created, do another pass to determine - // the new set of missing packages. - for _, pkg := range response.dr.Packages { - for _, imp := range pkg.Imports { - if len(pkg.GoFiles) == 0 { - return nil, nil, fmt.Errorf("cannot resolve imports for package %q with no Go files", pkg.PkgPath) - } - pkgPath, err := toPkgPath(filepath.Dir(pkg.GoFiles[0]), imp.ID) - if err != nil { - return nil, nil, err - } - if _, ok := havePkgs[pkgPath]; !ok { - needPkgsSet[pkgPath] = true - } - } - } - - if overlayAddsImports { - needPkgs = make([]string, 0, len(needPkgsSet)) - for pkg := range needPkgsSet { - needPkgs = append(needPkgs, pkg) - } - } - modifiedPkgs = make([]string, 0, len(modifiedPkgsSet)) - for pkg := range modifiedPkgsSet { - modifiedPkgs = append(modifiedPkgs, pkg) - } - return modifiedPkgs, needPkgs, err -} - -// resolveImport finds the ID of a package given its import path. -// In particular, it will find the right vendored copy when in GOPATH mode. -func (state *golistState) resolveImport(sourceDir, importPath string) (string, error) { - env, err := state.getEnv() - if err != nil { - return "", err - } - if env["GOMOD"] != "" { - return importPath, nil - } - - searchDir := sourceDir - for { - vendorDir := filepath.Join(searchDir, "vendor") - exists, ok := state.vendorDirs[vendorDir] - if !ok { - info, err := os.Stat(vendorDir) - exists = err == nil && info.IsDir() - state.vendorDirs[vendorDir] = exists - } - - if exists { - vendoredPath := filepath.Join(vendorDir, importPath) - if info, err := os.Stat(vendoredPath); err == nil && info.IsDir() { - // We should probably check for .go files here, but shame on anyone who fools us. - path, ok, err := state.getPkgPath(vendoredPath) - if err != nil { - return "", err - } - if ok { - return path, nil - } - } - } - - // We know we've hit the top of the filesystem when we Dir / and get /, - // or C:\ and get C:\, etc. 
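The deleted resolveImport walked upward from the source directory looking for vendor directories, stopping when filepath.Dir reaches a fixpoint (/ stays /, C:\ stays C:\). The same walk-up idiom, extracted into a standalone helper (findUp is illustrative, not from the patch):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// findUp walks from dir toward the filesystem root looking for a
// directory called name, using the fixpoint test described above.
func findUp(dir, name string) (string, bool) {
	for {
		candidate := filepath.Join(dir, name)
		if info, err := os.Stat(candidate); err == nil && info.IsDir() {
			return candidate, true
		}
		next := filepath.Dir(dir)
		if next == dir { // reached the top of the filesystem
			return "", false
		}
		dir = next
	}
}

func main() {
	wd, _ := os.Getwd()
	if v, ok := findUp(wd, "vendor"); ok {
		fmt.Println("found:", v)
	} else {
		fmt.Println("no vendor directory above", wd)
	}
}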
- next := filepath.Dir(searchDir) - if next == searchDir { - break - } - searchDir = next - } - return importPath, nil -} - -func hasTestFiles(p *Package) bool { - for _, f := range p.GoFiles { - if strings.HasSuffix(f, "_test.go") { - return true - } - } - return false -} - // determineRootDirs returns a mapping from absolute directories that could // contain code to their corresponding import path prefixes. func (state *golistState) determineRootDirs() (map[string]string, error) { @@ -384,192 +81,3 @@ func (state *golistState) determineRootDirsGOPATH() (map[string]string, error) { } return m, nil } - -func extractImports(filename string, contents []byte) ([]string, error) { - f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.ImportsOnly) // TODO(matloob): reuse fileset? - if err != nil { - return nil, err - } - var res []string - for _, imp := range f.Imports { - quotedPath := imp.Path.Value - path, err := strconv.Unquote(quotedPath) - if err != nil { - return nil, err - } - res = append(res, path) - } - return res, nil -} - -// reclaimPackage attempts to reuse a package that failed to load in an overlay. -// -// If the package has errors and has no Name, GoFiles, or Imports, -// then it's possible that it doesn't yet exist on disk. -func reclaimPackage(pkg *Package, id string, filename string, contents []byte) bool { - // TODO(rstambler): Check the message of the actual error? - // It differs between $GOPATH and module mode. - if pkg.ID != id { - return false - } - if len(pkg.Errors) != 1 { - return false - } - if pkg.Name != "" || pkg.ExportFile != "" { - return false - } - if len(pkg.GoFiles) > 0 || len(pkg.CompiledGoFiles) > 0 || len(pkg.OtherFiles) > 0 { - return false - } - if len(pkg.Imports) > 0 { - return false - } - pkgName, ok := extractPackageName(filename, contents) - if !ok { - return false - } - pkg.Name = pkgName - pkg.Errors = nil - return true -} - -func extractPackageName(filename string, contents []byte) (string, bool) { - // TODO(rstambler): Check the message of the actual error? - // It differs between $GOPATH and module mode. - f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.PackageClauseOnly) // TODO(matloob): reuse fileset? - if err != nil { - return "", false - } - return f.Name.Name, true -} - -// commonDir returns the directory that all files are in, "" if files is empty, -// or an error if they aren't in the same directory. -func commonDir(files []string) (string, error) { - seen := make(map[string]bool) - for _, f := range files { - seen[filepath.Dir(f)] = true - } - if len(seen) > 1 { - return "", fmt.Errorf("files (%v) are in more than one directory: %v", files, seen) - } - for k := range seen { - // seen has only one element; return it. - return k, nil - } - return "", nil // no files -} - -// It is possible that the files in the disk directory dir have a different package -// name from newName, which is deduced from the overlays. If they all have a different -// package name, and they all have the same package name, then that name becomes -// the package name. -// It returns true if it changes the package name, false otherwise. 
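The deleted extractImports and extractPackageName leaned on the parser's cheap partial modes: parser.ImportsOnly stops after the import declarations, and parser.PackageClauseOnly stops right after the package statement. A small sketch of the same technique:

package main

import (
	"fmt"
	"go/parser"
	"go/token"
)

const src = `package demo

import (
	"fmt"
	"strings"
)
`

func main() {
	fset := token.NewFileSet()
	// ImportsOnly skips function bodies and everything after the
	// imports, so this parse stays cheap even for large files.
	f, err := parser.ParseFile(fset, "demo.go", src, parser.ImportsOnly)
	if err != nil {
		panic(err)
	}
	fmt.Println("package:", f.Name.Name)
	for _, imp := range f.Imports {
		fmt.Println("import:", imp.Path.Value)
	}
}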
-func maybeFixPackageName(newName string, isTestFile bool, pkgsOfDir []*Package) { - names := make(map[string]int) - for _, p := range pkgsOfDir { - names[p.Name]++ - } - if len(names) != 1 { - // some files are in different packages - return - } - var oldName string - for k := range names { - oldName = k - } - if newName == oldName { - return - } - // We might have a case where all of the package names in the directory are - // the same, but the overlay file is for an x test, which belongs to its - // own package. If the x test does not yet exist on disk, we may not yet - // have its package name on disk, but we should not rename the packages. - // - // We use a heuristic to determine if this file belongs to an x test: - // The test file should have a package name whose package name has a _test - // suffix or looks like "newName_test". - maybeXTest := strings.HasPrefix(oldName+"_test", newName) || strings.HasSuffix(newName, "_test") - if isTestFile && maybeXTest { - return - } - for _, p := range pkgsOfDir { - p.Name = newName - } -} - -// This function is copy-pasted from -// https://github.com/golang/go/blob/9706f510a5e2754595d716bd64be8375997311fb/src/cmd/go/internal/search/search.go#L360. -// It should be deleted when we remove support for overlays from go/packages. -// -// NOTE: This does not handle any ./... or ./ style queries, as this function -// doesn't know the working directory. -// -// matchPattern(pattern)(name) reports whether -// name matches pattern. Pattern is a limited glob -// pattern in which '...' means 'any string' and there -// is no other special syntax. -// Unfortunately, there are two special cases. Quoting "go help packages": -// -// First, /... at the end of the pattern can match an empty string, -// so that net/... matches both net and packages in its subdirectories, like net/http. -// Second, any slash-separated pattern element containing a wildcard never -// participates in a match of the "vendor" element in the path of a vendored -// package, so that ./... does not match packages in subdirectories of -// ./vendor or ./mycode/vendor, but ./vendor/... and ./mycode/vendor/... do. -// Note, however, that a directory named vendor that itself contains code -// is not a vendored package: cmd/vendor would be a command named vendor, -// and the pattern cmd/... matches it. -func matchPattern(pattern string) func(name string) bool { - // Convert pattern to regular expression. - // The strategy for the trailing /... is to nest it in an explicit ? expression. - // The strategy for the vendor exclusion is to change the unmatchable - // vendor strings to a disallowed code point (vendorChar) and to use - // "(anything but that codepoint)*" as the implementation of the ... wildcard. - // This is a bit complicated but the obvious alternative, - // namely a hand-written search like in most shell glob matchers, - // is too easy to make accidentally exponential. - // Using package regexp guarantees linear-time matching. 
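The comment above describes the deleted matchPattern's strategy: quote the pattern with regexp.QuoteMeta, turn the "..." wildcard into a regular expression, and rely on package regexp for linear-time matching. A stripped-down sketch of that translation (the vendor-exclusion and trailing /... special cases are omitted, so this is not a faithful reimplementation):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// globToRegexp translates the limited glob language in which "..."
// means "any string" into an anchored regular expression.
func globToRegexp(pattern string) *regexp.Regexp {
	re := regexp.QuoteMeta(pattern)
	re = strings.ReplaceAll(re, `\.\.\.`, `.*`)
	return regexp.MustCompile(`^` + re + `$`)
}

func main() {
	m := globToRegexp("net/...")
	fmt.Println(m.MatchString("net/http")) // true
	fmt.Println(m.MatchString("network"))  // false
}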
- - const vendorChar = "\x00" - - if strings.Contains(pattern, vendorChar) { - return func(name string) bool { return false } - } - - re := regexp.QuoteMeta(pattern) - re = replaceVendor(re, vendorChar) - switch { - case strings.HasSuffix(re, `/`+vendorChar+`/\.\.\.`): - re = strings.TrimSuffix(re, `/`+vendorChar+`/\.\.\.`) + `(/vendor|/` + vendorChar + `/\.\.\.)` - case re == vendorChar+`/\.\.\.`: - re = `(/vendor|/` + vendorChar + `/\.\.\.)` - case strings.HasSuffix(re, `/\.\.\.`): - re = strings.TrimSuffix(re, `/\.\.\.`) + `(/\.\.\.)?` - } - re = strings.ReplaceAll(re, `\.\.\.`, `[^`+vendorChar+`]*`) - - reg := regexp.MustCompile(`^` + re + `$`) - - return func(name string) bool { - if strings.Contains(name, vendorChar) { - return false - } - return reg.MatchString(replaceVendor(name, vendorChar)) - } -} - -// replaceVendor returns the result of replacing -// non-trailing vendor path elements in x with repl. -func replaceVendor(x, repl string) string { - if !strings.Contains(x, "vendor") { - return x - } - elem := strings.Split(x, "/") - for i := 0; i < len(elem)-1; i++ { - if elem[i] == "vendor" { - elem[i] = repl - } - } - return strings.Join(elem, "/") -} diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 0f1505b8..81e9e6a7 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -16,7 +16,6 @@ import ( "go/token" "go/types" "io" - "io/ioutil" "log" "os" "path/filepath" @@ -28,8 +27,8 @@ import ( "golang.org/x/tools/go/gcexportdata" "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/packagesinternal" - "golang.org/x/tools/internal/typeparams" "golang.org/x/tools/internal/typesinternal" + "golang.org/x/tools/internal/versions" ) // A LoadMode controls the amount of detail to return when loading. @@ -220,8 +219,10 @@ type driverResponse struct { // lists of multiple drivers, go/packages will fall back to the next driver. NotHandled bool - // Sizes, if not nil, is the types.Sizes to use when type checking. - Sizes *types.StdSizes + // Compiler and Arch are the arguments pass of types.SizesFor + // to get a types.Sizes to use when type checking. + Compiler string + Arch string // Roots is the set of package IDs that make up the root packages. // We have to encode this separately because when we encode a single package @@ -257,31 +258,52 @@ type driverResponse struct { // proceeding with further analysis. The PrintErrors function is // provided for convenient display of all errors. func Load(cfg *Config, patterns ...string) ([]*Package, error) { - l := newLoader(cfg) - response, err := defaultDriver(&l.Config, patterns...) + ld := newLoader(cfg) + response, external, err := defaultDriver(&ld.Config, patterns...) if err != nil { return nil, err } - l.sizes = response.Sizes - return l.refine(response) + + ld.sizes = types.SizesFor(response.Compiler, response.Arch) + if ld.sizes == nil && ld.Config.Mode&(NeedTypes|NeedTypesSizes|NeedTypesInfo) != 0 { + // Type size information is needed but unavailable. + if external { + // An external driver may fail to populate the Compiler/GOARCH fields, + // especially since they are relatively new (see #63700). + // Provide a sensible fallback in this case. + ld.sizes = types.SizesFor("gc", runtime.GOARCH) + if ld.sizes == nil { // gccgo-only arch + ld.sizes = types.SizesFor("gc", "amd64") + } + } else { + // Go list should never fail to deliver accurate size information. 
+ // Reject the whole Load since the error is the same for every package. + return nil, fmt.Errorf("can't determine type sizes for compiler %q on GOARCH %q", + response.Compiler, response.Arch) + } + } + + return ld.refine(response) } // defaultDriver is a driver that implements go/packages' fallback behavior. // It will try to request to an external driver, if one exists. If there's // no external driver, or the driver returns a response with NotHandled set, // defaultDriver will fall back to the go list driver. -func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, error) { - driver := findExternalDriver(cfg) - if driver == nil { - driver = goListDriver - } - response, err := driver(cfg, patterns...) - if err != nil { - return response, err - } else if response.NotHandled { - return goListDriver(cfg, patterns...) +// The boolean result indicates that an external driver handled the request. +func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, bool, error) { + if driver := findExternalDriver(cfg); driver != nil { + response, err := driver(cfg, patterns...) + if err != nil { + return nil, false, err + } else if !response.NotHandled { + return response, true, nil + } + // (fall through) } - return response, nil + + response, err := goListDriver(cfg, patterns...) + return response, false, err } // A Package describes a loaded Go package. @@ -308,6 +330,9 @@ type Package struct { TypeErrors []types.Error // GoFiles lists the absolute file paths of the package's Go source files. + // It may include files that should not be compiled, for example because + // they contain non-matching build tags, are documentary pseudo-files such as + // unsafe/unsafe.go or builtin/builtin.go, or are subject to cgo preprocessing. GoFiles []string // CompiledGoFiles lists the absolute file paths of the package's source @@ -407,12 +432,6 @@ func init() { packagesinternal.GetDepsErrors = func(p interface{}) []*packagesinternal.PackageError { return p.(*Package).depsErrors } - packagesinternal.GetGoCmdRunner = func(config interface{}) *gocommand.Runner { - return config.(*Config).gocmdRunner - } - packagesinternal.SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) { - config.(*Config).gocmdRunner = runner - } packagesinternal.SetModFile = func(config interface{}, value string) { config.(*Config).modFile = value } @@ -549,7 +568,7 @@ type loaderPackage struct { type loader struct { pkgs map[string]*loaderPackage Config - sizes types.Sizes + sizes types.Sizes // non-nil if needed by mode parseCache map[string]*parseValue parseCacheMu sync.Mutex exportMu sync.Mutex // enforces mutual exclusion of exportdata operations @@ -627,7 +646,7 @@ func newLoader(cfg *Config) *loader { return ld } -// refine connects the supplied packages into a graph and then adds type and +// refine connects the supplied packages into a graph and then adds type // and syntax information as requested by the LoadMode. func (ld *loader) refine(response *driverResponse) ([]*Package, error) { roots := response.Roots @@ -674,39 +693,38 @@ func (ld *loader) refine(response *driverResponse) ([]*Package, error) { } } - // Materialize the import graph. - - const ( - white = 0 // new - grey = 1 // in progress - black = 2 // complete - ) - - // visit traverses the import graph, depth-first, - // and materializes the graph as Packages.Imports. - // - // Valid imports are saved in the Packages.Import map. - // Invalid imports (cycles and missing nodes) are saved in the importErrors map. 
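With driverResponse now carrying Compiler and Arch strings instead of a *types.StdSizes, Load reconstructs the sizes itself via types.SizesFor, falling back to gc on the host architecture for external drivers that leave the fields empty. The conversion in isolation:

package main

import (
	"fmt"
	"go/types"
	"runtime"
)

func main() {
	// Mirrors the fallback in Load above: prefer the reported
	// compiler/arch, then gc on the host arch, then gc/amd64.
	sizes := types.SizesFor("gc", runtime.GOARCH)
	if sizes == nil { // gccgo-only architecture
		sizes = types.SizesFor("gc", "amd64")
	}
	fmt.Println("sizeof(int64):", sizes.Sizeof(types.Typ[types.Int64]))
}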
- // Thus, even in the presence of both kinds of errors, the Import graph remains a DAG. - // - // visit returns whether the package needs src or has a transitive - // dependency on a package that does. These are the only packages - // for which we load source code. - var stack []*loaderPackage - var visit func(lpkg *loaderPackage) bool - var srcPkgs []*loaderPackage - visit = func(lpkg *loaderPackage) bool { - switch lpkg.color { - case black: - return lpkg.needsrc - case grey: - panic("internal error: grey node") - } - lpkg.color = grey - stack = append(stack, lpkg) // push - stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports - // If NeedImports isn't set, the imports fields will all be zeroed out. - if ld.Mode&NeedImports != 0 { + if ld.Mode&NeedImports != 0 { + // Materialize the import graph. + + const ( + white = 0 // new + grey = 1 // in progress + black = 2 // complete + ) + + // visit traverses the import graph, depth-first, + // and materializes the graph as Packages.Imports. + // + // Valid imports are saved in the Packages.Import map. + // Invalid imports (cycles and missing nodes) are saved in the importErrors map. + // Thus, even in the presence of both kinds of errors, + // the Import graph remains a DAG. + // + // visit returns whether the package needs src or has a transitive + // dependency on a package that does. These are the only packages + // for which we load source code. + var stack []*loaderPackage + var visit func(lpkg *loaderPackage) bool + visit = func(lpkg *loaderPackage) bool { + switch lpkg.color { + case black: + return lpkg.needsrc + case grey: + panic("internal error: grey node") + } + lpkg.color = grey + stack = append(stack, lpkg) // push + stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports lpkg.Imports = make(map[string]*Package, len(stubs)) for importPath, ipkg := range stubs { var importErr error @@ -730,40 +748,39 @@ func (ld *loader) refine(response *driverResponse) ([]*Package, error) { } lpkg.Imports[importPath] = imp.Package } - } - if lpkg.needsrc { - srcPkgs = append(srcPkgs, lpkg) - } - if ld.Mode&NeedTypesSizes != 0 { - lpkg.TypesSizes = ld.sizes - } - stack = stack[:len(stack)-1] // pop - lpkg.color = black - return lpkg.needsrc - } + // Complete type information is required for the + // immediate dependencies of each source package. + if lpkg.needsrc && ld.Mode&NeedTypes != 0 { + for _, ipkg := range lpkg.Imports { + ld.pkgs[ipkg.ID].needtypes = true + } + } - if ld.Mode&NeedImports == 0 { - // We do this to drop the stub import packages that we are not even going to try to resolve. - for _, lpkg := range initial { - lpkg.Imports = nil + // NeedTypeSizes causes TypeSizes to be set even + // on packages for which types aren't needed. + if ld.Mode&NeedTypesSizes != 0 { + lpkg.TypesSizes = ld.sizes + } + stack = stack[:len(stack)-1] // pop + lpkg.color = black + + return lpkg.needsrc } - } else { + // For each initial package, create its import DAG. for _, lpkg := range initial { visit(lpkg) } - } - if ld.Mode&NeedImports != 0 && ld.Mode&NeedTypes != 0 { - for _, lpkg := range srcPkgs { - // Complete type information is required for the - // immediate dependencies of each source package. - for _, ipkg := range lpkg.Imports { - imp := ld.pkgs[ipkg.ID] - imp.needtypes = true - } + + } else { + // !NeedImports: drop the stub (ID-only) import packages + // that we are not even going to try to resolve. 
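The refine changes above move the import-graph materialization under the NeedImports check, but keep the classic white/grey/black depth-first traversal: grey marks a node currently on the stack, so meeting it again means a cycle. The coloring scheme in miniature:

package main

import "fmt"

const (
	white = iota // unvisited
	grey         // in progress: on the stack, so a revisit is a cycle
	black        // complete
)

func visit(g map[string][]string, color map[string]int, n string) {
	switch color[n] {
	case black:
		return
	case grey:
		fmt.Println("import cycle through", n)
		return
	}
	color[n] = grey
	for _, m := range g[n] {
		visit(g, color, m)
	}
	color[n] = black
}

func main() {
	g := map[string][]string{"a": {"b"}, "b": {"c"}, "c": {"a"}}
	visit(g, map[string]int{}, "a") // reports the a -> b -> c -> a cycle
}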
+ for _, lpkg := range initial { + lpkg.Imports = nil } } + // Load type data and syntax if needed, starting at // the initial packages (roots of the import DAG). if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { @@ -997,10 +1014,11 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { Defs: make(map[*ast.Ident]types.Object), Uses: make(map[*ast.Ident]types.Object), Implicits: make(map[ast.Node]types.Object), + Instances: make(map[*ast.Ident]types.Instance), Scopes: make(map[ast.Node]*types.Scope), Selections: make(map[*ast.SelectorExpr]*types.Selection), } - typeparams.InitInstanceInfo(lpkg.TypesInfo) + versions.InitFileVersions(lpkg.TypesInfo) lpkg.TypesSizes = ld.sizes importer := importerFunc(func(path string) (*types.Package, error) { @@ -1038,7 +1056,10 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { IgnoreFuncBodies: ld.Mode&NeedDeps == 0 && !lpkg.initial, Error: appendError, - Sizes: ld.sizes, + Sizes: ld.sizes, // may be nil + } + if lpkg.Module != nil && lpkg.Module.GoVersion != "" { + typesinternal.SetGoVersion(tc, "go"+lpkg.Module.GoVersion) } if (ld.Mode & typecheckCgo) != 0 { if !typesinternal.SetUsesCgo(tc) { @@ -1119,7 +1140,7 @@ func (ld *loader) parseFile(filename string) (*ast.File, error) { var err error if src == nil { ioLimit <- true // wait - src, err = ioutil.ReadFile(filename) + src, err = os.ReadFile(filename) <-ioLimit // signal } if err != nil { diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go new file mode 100644 index 00000000..11d5c8c3 --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -0,0 +1,752 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package objectpath defines a naming scheme for types.Objects +// (that is, named entities in Go programs) relative to their enclosing +// package. +// +// Type-checker objects are canonical, so they are usually identified by +// their address in memory (a pointer), but a pointer has meaning only +// within one address space. By contrast, objectpath names allow the +// identity of an object to be sent from one program to another, +// establishing a correspondence between types.Object variables that are +// distinct but logically equivalent. +// +// A single object may have multiple paths. In this example, +// +// type A struct{ X int } +// type B A +// +// the field X has two paths due to its membership of both A and B. +// The For(obj) function always returns one of these paths, arbitrarily +// but consistently. +package objectpath + +import ( + "fmt" + "go/types" + "strconv" + "strings" + + "golang.org/x/tools/internal/typeparams" +) + +// A Path is an opaque name that identifies a types.Object +// relative to its package. Conceptually, the name consists of a +// sequence of destructuring operations applied to the package scope +// to obtain the original object. +// The name does not include the package itself. +type Path string + +// Encoding +// +// An object path is a textual and (with training) human-readable encoding +// of a sequence of destructuring operators, starting from a types.Package. +// The sequences represent a path through the package/object/type graph. 
+// We classify these operators by their type: +// +// PO package->object Package.Scope.Lookup +// OT object->type Object.Type +// TT type->type Type.{Elem,Key,Params,Results,Underlying} [EKPRU] +// TO type->object Type.{At,Field,Method,Obj} [AFMO] +// +// All valid paths start with a package and end at an object +// and thus may be defined by the regular language: +// +// objectpath = PO (OT TT* TO)* +// +// The concrete encoding follows directly: +// - The only PO operator is Package.Scope.Lookup, which requires an identifier. +// - The only OT operator is Object.Type, +// which we encode as '.' because dot cannot appear in an identifier. +// - The TT operators are encoded as [EKPRUTC]; +// one of these (TypeParam) requires an integer operand, +// which is encoded as a string of decimal digits. +// - The TO operators are encoded as [AFMO]; +// three of these (At,Field,Method) require an integer operand, +// which is encoded as a string of decimal digits. +// These indices are stable across different representations +// of the same package, even source and export data. +// The indices used are implementation specific and may not correspond to +// the argument to the go/types function. +// +// In the example below, +// +// package p +// +// type T interface { +// f() (a string, b struct{ X int }) +// } +// +// field X has the path "T.UM0.RA1.F0", +// representing the following sequence of operations: +// +// p.Lookup("T") T +// .Type().Underlying().Method(0). f +// .Type().Results().At(1) b +// .Type().Field(0) X +// +// The encoding is not maximally compact---every R or P is +// followed by an A, for example---but this simplifies the +// encoder and decoder. +const ( + // object->type operators + opType = '.' // .Type() (Object) + + // type->type operators + opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map) + opKey = 'K' // .Key() (Map) + opParams = 'P' // .Params() (Signature) + opResults = 'R' // .Results() (Signature) + opUnderlying = 'U' // .Underlying() (Named) + opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature) + opConstraint = 'C' // .Constraint() (TypeParam) + + // type->object operators + opAt = 'A' // .At(i) (Tuple) + opField = 'F' // .Field(i) (Struct) + opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored) + opObj = 'O' // .Obj() (Named, TypeParam) +) + +// For is equivalent to new(Encoder).For(obj). +// +// It may be more efficient to reuse a single Encoder across several calls. +func For(obj types.Object) (Path, error) { + return new(Encoder).For(obj) +} + +// An Encoder amortizes the cost of encoding the paths of multiple objects. +// The zero value of an Encoder is ready to use. +type Encoder struct { + scopeMemo map[*types.Scope][]types.Object // memoization of scopeObjects +} + +// For returns the path to an object relative to its package, +// or an error if the object is not accessible from the package's Scope. +// +// The For function guarantees to return a path only for the following objects: +// - package-level types +// - exported package-level non-types +// - methods +// - parameter and result variables +// - struct fields +// These objects are sufficient to define the API of their package. +// The objects described by a package's export data are drawn from this set. 
+// +// The set of objects accessible from a package's Scope depends on +// whether the package was produced by type-checking syntax, or +// reading export data; the latter may have a smaller Scope since +// export data trims objects that are not reachable from an exported +// declaration. For example, the For function will return a path for +// an exported method of an unexported type that is not reachable +// from any public declaration; this path will cause the Object +// function to fail if called on a package loaded from export data. +// TODO(adonovan): is this a bug or feature? Should this package +// compute accessibility in the same way? +// +// For does not return a path for predeclared names, imported package +// names, local names, and unexported package-level names (except +// types). +// +// Example: given this definition, +// +// package p +// +// type T interface { +// f() (a string, b struct{ X int }) +// } +// +// For(X) would return a path that denotes the following sequence of operations: +// +// p.Scope().Lookup("T") (TypeName T) +// .Type().Underlying().Method(0). (method Func f) +// .Type().Results().At(1) (field Var b) +// .Type().Field(0) (field Var X) +// +// where p is the package (*types.Package) to which X belongs. +func (enc *Encoder) For(obj types.Object) (Path, error) { + pkg := obj.Pkg() + + // This table lists the cases of interest. + // + // Object Action + // ------ ------ + // nil reject + // builtin reject + // pkgname reject + // label reject + // var + // package-level accept + // func param/result accept + // local reject + // struct field accept + // const + // package-level accept + // local reject + // func + // package-level accept + // init functions reject + // concrete method accept + // interface method accept + // type + // package-level accept + // local reject + // + // The only accessible package-level objects are members of pkg itself. + // + // The cases are handled in four steps: + // + // 1. reject nil and builtin + // 2. accept package-level objects + // 3. reject obviously invalid objects + // 4. search the API for the path to the param/result/field/method. + + // 1. reference to nil or builtin? + if pkg == nil { + return "", fmt.Errorf("predeclared %s has no path", obj) + } + scope := pkg.Scope() + + // 2. package-level object? + if scope.Lookup(obj.Name()) == obj { + // Only exported objects (and non-exported types) have a path. + // Non-exported types may be referenced by other objects. + if _, ok := obj.(*types.TypeName); !ok && !obj.Exported() { + return "", fmt.Errorf("no path for non-exported %v", obj) + } + return Path(obj.Name()), nil + } + + // 3. Not a package-level object. + // Reject obviously non-viable cases. + switch obj := obj.(type) { + case *types.TypeName: + if _, ok := obj.Type().(*types.TypeParam); !ok { + // With the exception of type parameters, only package-level type names + // have a path. + return "", fmt.Errorf("no path for %v", obj) + } + case *types.Const, // Only package-level constants have a path. + *types.Label, // Labels are function-local. + *types.PkgName: // PkgNames are file-local. + return "", fmt.Errorf("no path for %v", obj) + + case *types.Var: + // Could be: + // - a field (obj.IsField()) + // - a func parameter or result + // - a local var. + // Sadly there is no way to distinguish + // a param/result from a local + // so we must proceed to the find. + + case *types.Func: + // A func, if not package-level, must be a method. 
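Putting the For documentation above into practice: a path produced for an object can be decoded back with objectpath.Object, defined later in this file, even against a different realization of the same package. A round-trip sketch using the source importer (a package-level object's path is simply its name):

package main

import (
	"fmt"
	"go/importer"
	"go/token"

	"golang.org/x/tools/go/types/objectpath"
)

func main() {
	pkg, err := importer.ForCompiler(token.NewFileSet(), "source", nil).Import("fmt")
	if err != nil {
		panic(err)
	}
	obj := pkg.Scope().Lookup("Stringer")

	p, err := objectpath.For(obj)
	if err != nil {
		panic(err)
	}
	fmt.Println(p) // "Stringer": a package-level object's path is its name

	back, err := objectpath.Object(pkg, p)
	if err != nil {
		panic(err)
	}
	fmt.Println(back == obj) // true: the path round-trips
}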
+ if recv := obj.Type().(*types.Signature).Recv(); recv == nil { + return "", fmt.Errorf("func is not a method: %v", obj) + } + + if path, ok := enc.concreteMethod(obj); ok { + // Fast path for concrete methods that avoids looping over scope. + return path, nil + } + + default: + panic(obj) + } + + // 4. Search the API for the path to the var (field/param/result) or method. + + // First inspect package-level named types. + // In the presence of path aliases, these give + // the best paths because non-types may + // refer to types, but not the reverse. + empty := make([]byte, 0, 48) // initial space + objs := enc.scopeObjects(scope) + for _, o := range objs { + tname, ok := o.(*types.TypeName) + if !ok { + continue // handle non-types in second pass + } + + path := append(empty, o.Name()...) + path = append(path, opType) + + T := o.Type() + + if tname.IsAlias() { + // type alias + if r := find(obj, T, path, nil); r != nil { + return Path(r), nil + } + } else { + if named, _ := T.(*types.Named); named != nil { + if r := findTypeParam(obj, named.TypeParams(), path, nil); r != nil { + // generic named type + return Path(r), nil + } + } + // defined (named) type + if r := find(obj, T.Underlying(), append(path, opUnderlying), nil); r != nil { + return Path(r), nil + } + } + } + + // Then inspect everything else: + // non-types, and declared methods of defined types. + for _, o := range objs { + path := append(empty, o.Name()...) + if _, ok := o.(*types.TypeName); !ok { + if o.Exported() { + // exported non-type (const, var, func) + if r := find(obj, o.Type(), append(path, opType), nil); r != nil { + return Path(r), nil + } + } + continue + } + + // Inspect declared methods of defined types. + if T, ok := o.Type().(*types.Named); ok { + path = append(path, opType) + // The method index here is always with respect + // to the underlying go/types data structures, + // which ultimately derives from source order + // and must be preserved by export data. + for i := 0; i < T.NumMethods(); i++ { + m := T.Method(i) + path2 := appendOpArg(path, opMethod, i) + if m == obj { + return Path(path2), nil // found declared method + } + if r := find(obj, m.Type(), append(path2, opType), nil); r != nil { + return Path(r), nil + } + } + } + } + + return "", fmt.Errorf("can't find path for %v in %s", obj, pkg.Path()) +} + +func appendOpArg(path []byte, op byte, arg int) []byte { + path = append(path, op) + path = strconv.AppendInt(path, int64(arg), 10) + return path +} + +// concreteMethod returns the path for meth, which must have a non-nil receiver. +// The second return value indicates success and may be false if the method is +// an interface method or if it is an instantiated method. +// +// This function is just an optimization that avoids the general scope walking +// approach. You are expected to fall back to the general approach if this +// function fails. +func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { + // Concrete methods can only be declared on package-scoped named types. For + // that reason we can skip the expensive walk over the package scope: the + // path will always be package -> named type -> method. We can trivially get + // the type name from the receiver, and only have to look over the type's + // methods to find the method index. + // + // Methods on generic types require special consideration, however. 
Consider + // the following package: + // + // L1: type S[T any] struct{} + // L2: func (recv S[A]) Foo() { recv.Bar() } + // L3: func (recv S[B]) Bar() { } + // L4: type Alias = S[int] + // L5: func _[T any]() { var s S[int]; s.Foo() } + // + // The receivers of methods on generic types are instantiations. L2 and L3 + // instantiate S with the type-parameters A and B, which are scoped to the + // respective methods. L4 and L5 each instantiate S with int. Each of these + // instantiations has its own method set, full of methods (and thus objects) + // with receivers whose types are the respective instantiations. In other + // words, we have + // + // S[A].Foo, S[A].Bar + // S[B].Foo, S[B].Bar + // S[int].Foo, S[int].Bar + // + // We may thus be trying to produce object paths for any of these objects. + // + // S[A].Foo and S[B].Bar are the origin methods, and their paths are S.Foo + // and S.Bar, which are the paths that this function naturally produces. + // + // S[A].Bar, S[B].Foo, and both methods on S[int] are instantiations that + // don't correspond to the origin methods. For S[int], this is significant. + // The most precise object path for S[int].Foo, for example, is Alias.Foo, + // not S.Foo. Our function, however, would produce S.Foo, which would + // resolve to a different object. + // + // For S[A].Bar and S[B].Foo it could be argued that S.Bar and S.Foo are + // still the correct paths, since only the origin methods have meaningful + // paths. But this is likely only true for trivial cases and has edge cases. + // Since this function is only an optimization, we err on the side of giving + // up, deferring to the slower but definitely correct algorithm. Most users + // of objectpath will only be giving us origin methods, anyway, as referring + // to instantiated methods is usually not useful. + + if typeparams.OriginMethod(meth) != meth { + return "", false + } + + recvT := meth.Type().(*types.Signature).Recv().Type() + if ptr, ok := recvT.(*types.Pointer); ok { + recvT = ptr.Elem() + } + + named, ok := recvT.(*types.Named) + if !ok { + return "", false + } + + if types.IsInterface(named) { + // Named interfaces don't have to be package-scoped + // + // TODO(dominikh): opt: if scope.Lookup(name) == named, then we can apply this optimization to interface + // methods, too, I think. + return "", false + } + + // Preallocate space for the name, opType, opMethod, and some digits. + name := named.Obj().Name() + path := make([]byte, 0, len(name)+8) + path = append(path, name...) + path = append(path, opType) + + // Method indices are w.r.t. the go/types data structures, + // ultimately deriving from source order, + // which is preserved by export data. + for i := 0; i < named.NumMethods(); i++ { + if named.Method(i) == meth { + path = appendOpArg(path, opMethod, i) + return Path(path), true + } + } + + // Due to golang/go#59944, go/types fails to associate the receiver with + // certain methods on cgo types. + // + // TODO(rfindley): replace this panic once golang/go#59944 is fixed in all Go + // versions gopls supports. + return "", false + // panic(fmt.Sprintf("couldn't find method %s on type %s; methods: %#v", meth, named, enc.namedMethods(named))) +} + +// find finds obj within type T, returning the path to it, or nil if not found. +// +// The seen map is used to short circuit cycles through type parameters. If +// nil, it will be allocated as necessary. 
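For review purposes, a minimal self-contained sketch of the round trip the comments above describe. It assumes the public golang.org/x/tools/go/types/objectpath package (which this vendored copy tracks); the package source and variable names are illustrative only:

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/types/objectpath"
)

const src = `package p

type T interface {
	f() (a string, b struct{ X int })
}
`

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	pkg, err := new(types.Config).Check("p", fset, []*ast.File{file}, nil)
	if err != nil {
		panic(err)
	}

	// Walk to field X by hand: T -> method f -> result b -> field X.
	iface := pkg.Scope().Lookup("T").Type().Underlying().(*types.Interface)
	f := iface.Method(0)
	b := f.Type().(*types.Signature).Results().At(1)
	x := b.Type().(*types.Struct).Field(0)

	path, err := objectpath.For(x)
	if err != nil {
		panic(err)
	}
	// Per the encoding above: Lookup("T"), then .Type(), Underlying,
	// Method 0, .Type(), Results, At 1, .Type(), Field 0.
	fmt.Println(path) // "T.UM0.RA1.F0"

	obj, err := objectpath.Object(pkg, path)
	if err != nil {
		panic(err)
	}
	fmt.Println(obj == x) // true: the path round-trips
}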
+func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte { + switch T := T.(type) { + case *types.Basic, *types.Named: + // Named types belonging to pkg were handled already, + // so T must belong to another package. No path. + return nil + case *types.Pointer: + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Slice: + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Array: + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Chan: + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Map: + if r := find(obj, T.Key(), append(path, opKey), seen); r != nil { + return r + } + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Signature: + if r := findTypeParam(obj, T.TypeParams(), path, seen); r != nil { + return r + } + if r := find(obj, T.Params(), append(path, opParams), seen); r != nil { + return r + } + return find(obj, T.Results(), append(path, opResults), seen) + case *types.Struct: + for i := 0; i < T.NumFields(); i++ { + fld := T.Field(i) + path2 := appendOpArg(path, opField, i) + if fld == obj { + return path2 // found field var + } + if r := find(obj, fld.Type(), append(path2, opType), seen); r != nil { + return r + } + } + return nil + case *types.Tuple: + for i := 0; i < T.Len(); i++ { + v := T.At(i) + path2 := appendOpArg(path, opAt, i) + if v == obj { + return path2 // found param/result var + } + if r := find(obj, v.Type(), append(path2, opType), seen); r != nil { + return r + } + } + return nil + case *types.Interface: + for i := 0; i < T.NumMethods(); i++ { + m := T.Method(i) + path2 := appendOpArg(path, opMethod, i) + if m == obj { + return path2 // found interface method + } + if r := find(obj, m.Type(), append(path2, opType), seen); r != nil { + return r + } + } + return nil + case *types.TypeParam: + name := T.Obj() + if name == obj { + return append(path, opObj) + } + if seen[name] { + return nil + } + if seen == nil { + seen = make(map[*types.TypeName]bool) + } + seen[name] = true + if r := find(obj, T.Constraint(), append(path, opConstraint), seen); r != nil { + return r + } + return nil + } + panic(T) +} + +func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte { + for i := 0; i < list.Len(); i++ { + tparam := list.At(i) + path2 := appendOpArg(path, opTypeParam, i) + if r := find(obj, tparam, path2, seen); r != nil { + return r + } + } + return nil +} + +// Object returns the object denoted by path p within the package pkg. +func Object(pkg *types.Package, p Path) (types.Object, error) { + pathstr := string(p) + if pathstr == "" { + return nil, fmt.Errorf("empty path") + } + + var pkgobj, suffix string + if dot := strings.IndexByte(pathstr, opType); dot < 0 { + pkgobj = pathstr + } else { + pkgobj = pathstr[:dot] + suffix = pathstr[dot:] // suffix starts with "." + } + + obj := pkg.Scope().Lookup(pkgobj) + if obj == nil { + return nil, fmt.Errorf("package %s does not contain %q", pkg.Path(), pkgobj) + } + + // abstraction of *types.{Pointer,Slice,Array,Chan,Map} + type hasElem interface { + Elem() types.Type + } + // abstraction of *types.{Named,Signature} + type hasTypeParams interface { + TypeParams() *types.TypeParamList + } + // abstraction of *types.{Named,TypeParam} + type hasObj interface { + Obj() *types.TypeName + } + + // The loop state is the pair (t, obj), + // exactly one of which is non-nil, initially obj. + // All suffixes start with '.' 
(the only object->type operation),
+	// followed by optional type->type operations,
+	// then a type->object operation.
+	// The cycle then repeats.
+	var t types.Type
+	for suffix != "" {
+		code := suffix[0]
+		suffix = suffix[1:]
+
+		// Codes [AFMT] have an integer operand.
+		var index int
+		switch code {
+		case opAt, opField, opMethod, opTypeParam:
+			rest := strings.TrimLeft(suffix, "0123456789")
+			numerals := suffix[:len(suffix)-len(rest)]
+			suffix = rest
+			i, err := strconv.Atoi(numerals)
+			if err != nil {
+				return nil, fmt.Errorf("invalid path: bad numeric operand %q for code %q", numerals, code)
+			}
+			index = int(i)
+		case opObj:
+			// no operand
+		default:
+			// The suffix must end with a type->object operation.
+			if suffix == "" {
+				return nil, fmt.Errorf("invalid path: ends with %q, want [AFMO]", code)
+			}
+		}
+
+		if code == opType {
+			if t != nil {
+				return nil, fmt.Errorf("invalid path: unexpected %q in type context", opType)
+			}
+			t = obj.Type()
+			obj = nil
+			continue
+		}
+
+		if t == nil {
+			return nil, fmt.Errorf("invalid path: code %q in object context", code)
+		}
+
+		// Inv: t != nil, obj == nil
+
+		switch code {
+		case opElem:
+			hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map
+			if !ok {
+				return nil, fmt.Errorf("cannot apply %q to %s (got %T, want pointer, slice, array, chan or map)", code, t, t)
+			}
+			t = hasElem.Elem()
+
+		case opKey:
+			mapType, ok := t.(*types.Map)
+			if !ok {
+				return nil, fmt.Errorf("cannot apply %q to %s (got %T, want map)", code, t, t)
+			}
+			t = mapType.Key()
+
+		case opParams:
+			sig, ok := t.(*types.Signature)
+			if !ok {
+				return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t)
+			}
+			t = sig.Params()
+
+		case opResults:
+			sig, ok := t.(*types.Signature)
+			if !ok {
+				return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t)
+			}
+			t = sig.Results()
+
+		case opUnderlying:
+			named, ok := t.(*types.Named)
+			if !ok {
+				return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named)", code, t, t)
+			}
+			t = named.Underlying()
+
+		case opTypeParam:
+			hasTypeParams, ok := t.(hasTypeParams) // Named, Signature
+			if !ok {
+				return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or signature)", code, t, t)
+			}
+			tparams := hasTypeParams.TypeParams()
+			if n := tparams.Len(); index >= n {
+				return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n)
+			}
+			t = tparams.At(index)
+
+		case opConstraint:
+			tparam, ok := t.(*types.TypeParam)
+			if !ok {
+				return nil, fmt.Errorf("cannot apply %q to %s (got %T, want type parameter)", code, t, t)
+			}
+			t = tparam.Constraint()
+
+		case opAt:
+			tuple, ok := t.(*types.Tuple)
+			if !ok {
+				return nil, fmt.Errorf("cannot apply %q to %s (got %T, want tuple)", code, t, t)
+			}
+			if n := tuple.Len(); index >= n {
+				return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n)
+			}
+			obj = tuple.At(index)
+			t = nil
+
+		case opField:
+			structType, ok := t.(*types.Struct)
+			if !ok {
+				return nil, fmt.Errorf("cannot apply %q to %s (got %T, want struct)", code, t, t)
+			}
+			if n := structType.NumFields(); index >= n {
+				return nil, fmt.Errorf("field index %d out of range [0-%d)", index, n)
+			}
+			obj = structType.Field(index)
+			t = nil
+
+		case opMethod:
+			switch t := t.(type) {
+			case *types.Interface:
+				if index >= t.NumMethods() {
+					return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods())
+				}
+				obj = t.Method(index) // Id-ordered
+
+			case *types.Named:
+				if index >= t.NumMethods() {
+					return nil,
fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods()) + } + obj = t.Method(index) + + default: + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want interface or named)", code, t, t) + } + t = nil + + case opObj: + hasObj, ok := t.(hasObj) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or type param)", code, t, t) + } + obj = hasObj.Obj() + t = nil + + default: + return nil, fmt.Errorf("invalid path: unknown code %q", code) + } + } + + if obj.Pkg() != pkg { + return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj) + } + + return obj, nil // success +} + +// scopeObjects is a memoization of scope objects. +// Callers must not modify the result. +func (enc *Encoder) scopeObjects(scope *types.Scope) []types.Object { + m := enc.scopeMemo + if m == nil { + m = make(map[*types.Scope][]types.Object) + enc.scopeMemo = m + } + objs, ok := m[scope] + if !ok { + names := scope.Names() // allocates and sorts + objs = make([]types.Object, len(names)) + for i, name := range names { + objs[i] = scope.Lookup(name) + } + m[scope] = objs + } + return objs +} diff --git a/vendor/golang.org/x/tools/internal/event/keys/util.go b/vendor/golang.org/x/tools/internal/event/keys/util.go new file mode 100644 index 00000000..c0e8e731 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/keys/util.go @@ -0,0 +1,21 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package keys + +import ( + "sort" + "strings" +) + +// Join returns a canonical join of the keys in S: +// a sorted comma-separated string list. +func Join[S ~[]T, T ~string](s S) string { + strs := make([]string, 0, len(s)) + for _, v := range s { + strs = append(strs, string(v)) + } + sort.Strings(strs) + return strings.Join(strs, ",") +} diff --git a/vendor/golang.org/x/tools/internal/event/tag/tag.go b/vendor/golang.org/x/tools/internal/event/tag/tag.go new file mode 100644 index 00000000..581b26c2 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/tag/tag.go @@ -0,0 +1,59 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package tag provides the labels used for telemetry throughout gopls. 
+package tag + +import ( + "golang.org/x/tools/internal/event/keys" +) + +var ( + // create the label keys we use + Method = keys.NewString("method", "") + StatusCode = keys.NewString("status.code", "") + StatusMessage = keys.NewString("status.message", "") + RPCID = keys.NewString("id", "") + RPCDirection = keys.NewString("direction", "") + File = keys.NewString("file", "") + Directory = keys.New("directory", "") + URI = keys.New("URI", "") + Package = keys.NewString("package", "") // sorted comma-separated list of Package IDs + PackagePath = keys.NewString("package_path", "") + Query = keys.New("query", "") + Snapshot = keys.NewUInt64("snapshot", "") + Operation = keys.NewString("operation", "") + + Position = keys.New("position", "") + Category = keys.NewString("category", "") + PackageCount = keys.NewInt("packages", "") + Files = keys.New("files", "") + Port = keys.NewInt("port", "") + Type = keys.New("type", "") + HoverKind = keys.NewString("hoverkind", "") + + NewServer = keys.NewString("new_server", "A new server was added") + EndServer = keys.NewString("end_server", "A server was shut down") + + ServerID = keys.NewString("server", "The server ID an event is related to") + Logfile = keys.NewString("logfile", "") + DebugAddress = keys.NewString("debug_address", "") + GoplsPath = keys.NewString("gopls_path", "") + ClientID = keys.NewString("client_id", "") + + Level = keys.NewInt("level", "The logging level") +) + +var ( + // create the stats we measure + Started = keys.NewInt64("started", "Count of started RPCs.") + ReceivedBytes = keys.NewInt64("received_bytes", "Bytes received.") //, unit.Bytes) + SentBytes = keys.NewInt64("sent_bytes", "Bytes sent.") //, unit.Bytes) + Latency = keys.NewFloat64("latency_ms", "Elapsed time in milliseconds") //, unit.Milliseconds) +) + +const ( + Inbound = "in" + Outbound = "out" +) diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go deleted file mode 100644 index 798fe599..00000000 --- a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package fastwalk provides a faster version of filepath.Walk for file system -// scanning tools. -package fastwalk - -import ( - "errors" - "os" - "path/filepath" - "runtime" - "sync" -) - -// ErrTraverseLink is used as a return value from WalkFuncs to indicate that the -// symlink named in the call may be traversed. -var ErrTraverseLink = errors.New("fastwalk: traverse symlink, assuming target is a directory") - -// ErrSkipFiles is a used as a return value from WalkFuncs to indicate that the -// callback should not be called for any other files in the current directory. -// Child directories will still be traversed. -var ErrSkipFiles = errors.New("fastwalk: skip remaining files in directory") - -// Walk is a faster implementation of filepath.Walk. -// -// filepath.Walk's design necessarily calls os.Lstat on each file, -// even if the caller needs less info. -// Many tools need only the type of each file. -// On some platforms, this information is provided directly by the readdir -// system call, avoiding the need to stat each file individually. -// fastwalk_unix.go contains a fork of the syscall routines. 
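The Package label above is documented as a sorted comma-separated list of Package IDs, which is exactly what the new keys.Join helper produces. A hedged usage sketch follows; PackageID is a hypothetical stand-in for gopls's identifier type, the import paths are internal to the x/tools module, and event.Log is assumed to take (ctx, message, ...label.Label):

package main

import (
	"context"
	"fmt"

	"golang.org/x/tools/internal/event"
	"golang.org/x/tools/internal/event/keys"
	"golang.org/x/tools/internal/event/tag"
)

// PackageID is any string-derived type; Join accepts ~[]~string.
type PackageID string

func main() {
	ids := []PackageID{"fmt", "bytes", "errors"}

	// Join copies, sorts, and comma-joins, so the label value is
	// canonical regardless of input order.
	joined := keys.Join(ids)
	fmt.Println(joined) // "bytes,errors,fmt"

	// Labels are attached to events via each key's Of method.
	event.Log(context.Background(), "loaded packages",
		tag.Method.Of("textDocument/didOpen"),
		tag.Package.Of(joined))
}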
-// -// See golang.org/issue/16399 -// -// Walk walks the file tree rooted at root, calling walkFn for -// each file or directory in the tree, including root. -// -// If fastWalk returns filepath.SkipDir, the directory is skipped. -// -// Unlike filepath.Walk: -// - file stat calls must be done by the user. -// The only provided metadata is the file type, which does not include -// any permission bits. -// - multiple goroutines stat the filesystem concurrently. The provided -// walkFn must be safe for concurrent use. -// - fastWalk can follow symlinks if walkFn returns the TraverseLink -// sentinel error. It is the walkFn's responsibility to prevent -// fastWalk from going into symlink cycles. -func Walk(root string, walkFn func(path string, typ os.FileMode) error) error { - // TODO(bradfitz): make numWorkers configurable? We used a - // minimum of 4 to give the kernel more info about multiple - // things we want, in hopes its I/O scheduling can take - // advantage of that. Hopefully most are in cache. Maybe 4 is - // even too low of a minimum. Profile more. - numWorkers := 4 - if n := runtime.NumCPU(); n > numWorkers { - numWorkers = n - } - - // Make sure to wait for all workers to finish, otherwise - // walkFn could still be called after returning. This Wait call - // runs after close(e.donec) below. - var wg sync.WaitGroup - defer wg.Wait() - - w := &walker{ - fn: walkFn, - enqueuec: make(chan walkItem, numWorkers), // buffered for performance - workc: make(chan walkItem, numWorkers), // buffered for performance - donec: make(chan struct{}), - - // buffered for correctness & not leaking goroutines: - resc: make(chan error, numWorkers), - } - defer close(w.donec) - - for i := 0; i < numWorkers; i++ { - wg.Add(1) - go w.doWork(&wg) - } - todo := []walkItem{{dir: root}} - out := 0 - for { - workc := w.workc - var workItem walkItem - if len(todo) == 0 { - workc = nil - } else { - workItem = todo[len(todo)-1] - } - select { - case workc <- workItem: - todo = todo[:len(todo)-1] - out++ - case it := <-w.enqueuec: - todo = append(todo, it) - case err := <-w.resc: - out-- - if err != nil { - return err - } - if out == 0 && len(todo) == 0 { - // It's safe to quit here, as long as the buffered - // enqueue channel isn't also readable, which might - // happen if the worker sends both another unit of - // work and its result before the other select was - // scheduled and both w.resc and w.enqueuec were - // readable. - select { - case it := <-w.enqueuec: - todo = append(todo, it) - default: - return nil - } - } - } - } -} - -// doWork reads directories as instructed (via workc) and runs the -// user's callback function. 
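Since this file is removed by the patch, a short sketch of the contract its doc comment spells out may help reviewers of downstream callers: walkFn receives only a path and a file type, may be invoked from several goroutines at once, and steers traversal with filepath.SkipDir and the sentinel errors above. The directory name and filter here are illustrative:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"sync"

	"golang.org/x/tools/internal/fastwalk"
)

func main() {
	var (
		mu      sync.Mutex // walkFn runs concurrently; guard shared state
		goFiles []string
	)
	err := fastwalk.Walk(".", func(path string, typ os.FileMode) error {
		if typ.IsDir() && filepath.Base(path) == "testdata" {
			return filepath.SkipDir // prune this whole directory
		}
		if typ.IsRegular() && filepath.Ext(path) == ".go" {
			mu.Lock()
			goFiles = append(goFiles, path)
			mu.Unlock()
		}
		return nil
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("found %d Go files\n", len(goFiles))
}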
-func (w *walker) doWork(wg *sync.WaitGroup) { - defer wg.Done() - for { - select { - case <-w.donec: - return - case it := <-w.workc: - select { - case <-w.donec: - return - case w.resc <- w.walk(it.dir, !it.callbackDone): - } - } - } -} - -type walker struct { - fn func(path string, typ os.FileMode) error - - donec chan struct{} // closed on fastWalk's return - workc chan walkItem // to workers - enqueuec chan walkItem // from workers - resc chan error // from workers -} - -type walkItem struct { - dir string - callbackDone bool // callback already called; don't do it again -} - -func (w *walker) enqueue(it walkItem) { - select { - case w.enqueuec <- it: - case <-w.donec: - } -} - -func (w *walker) onDirEnt(dirName, baseName string, typ os.FileMode) error { - joined := dirName + string(os.PathSeparator) + baseName - if typ == os.ModeDir { - w.enqueue(walkItem{dir: joined}) - return nil - } - - err := w.fn(joined, typ) - if typ == os.ModeSymlink { - if err == ErrTraverseLink { - // Set callbackDone so we don't call it twice for both the - // symlink-as-symlink and the symlink-as-directory later: - w.enqueue(walkItem{dir: joined, callbackDone: true}) - return nil - } - if err == filepath.SkipDir { - // Permit SkipDir on symlinks too. - return nil - } - } - return err -} - -func (w *walker) walk(root string, runUserCallback bool) error { - if runUserCallback { - err := w.fn(root, os.ModeDir) - if err == filepath.SkipDir { - return nil - } - if err != nil { - return err - } - } - - return readDir(root, w.onDirEnt) -} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_darwin.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_darwin.go deleted file mode 100644 index 0ca55e0d..00000000 --- a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_darwin.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build darwin && cgo -// +build darwin,cgo - -package fastwalk - -/* -#include - -// fastwalk_readdir_r wraps readdir_r so that we don't have to pass a dirent** -// result pointer which triggers CGO's "Go pointer to Go pointer" check unless -// we allocat the result dirent* with malloc. -// -// fastwalk_readdir_r returns 0 on success, -1 upon reaching the end of the -// directory, or a positive error number to indicate failure. 
-static int fastwalk_readdir_r(DIR *fd, struct dirent *entry) { - struct dirent *result; - int ret = readdir_r(fd, entry, &result); - if (ret == 0 && result == NULL) { - ret = -1; // EOF - } - return ret; -} -*/ -import "C" - -import ( - "os" - "syscall" - "unsafe" -) - -func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error { - fd, err := openDir(dirName) - if err != nil { - return &os.PathError{Op: "opendir", Path: dirName, Err: err} - } - defer C.closedir(fd) - - skipFiles := false - var dirent syscall.Dirent - for { - ret := int(C.fastwalk_readdir_r(fd, (*C.struct_dirent)(unsafe.Pointer(&dirent)))) - if ret != 0 { - if ret == -1 { - break // EOF - } - if ret == int(syscall.EINTR) { - continue - } - return &os.PathError{Op: "readdir", Path: dirName, Err: syscall.Errno(ret)} - } - if dirent.Ino == 0 { - continue - } - typ := dtToType(dirent.Type) - if skipFiles && typ.IsRegular() { - continue - } - name := (*[len(syscall.Dirent{}.Name)]byte)(unsafe.Pointer(&dirent.Name))[:] - name = name[:dirent.Namlen] - for i, c := range name { - if c == 0 { - name = name[:i] - break - } - } - // Check for useless names before allocating a string. - if string(name) == "." || string(name) == ".." { - continue - } - if err := fn(dirName, string(name), typ); err != nil { - if err != ErrSkipFiles { - return err - } - skipFiles = true - } - } - - return nil -} - -func dtToType(typ uint8) os.FileMode { - switch typ { - case syscall.DT_BLK: - return os.ModeDevice - case syscall.DT_CHR: - return os.ModeDevice | os.ModeCharDevice - case syscall.DT_DIR: - return os.ModeDir - case syscall.DT_FIFO: - return os.ModeNamedPipe - case syscall.DT_LNK: - return os.ModeSymlink - case syscall.DT_REG: - return 0 - case syscall.DT_SOCK: - return os.ModeSocket - } - return ^os.FileMode(0) -} - -// openDir wraps opendir(3) and handles any EINTR errors. The returned *DIR -// needs to be closed with closedir(3). -func openDir(path string) (*C.DIR, error) { - name, err := syscall.BytePtrFromString(path) - if err != nil { - return nil, err - } - for { - fd, err := C.opendir((*C.char)(unsafe.Pointer(name))) - if err != syscall.EINTR { - return fd, err - } - } -} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go deleted file mode 100644 index d58595db..00000000 --- a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build freebsd || openbsd || netbsd -// +build freebsd openbsd netbsd - -package fastwalk - -import "syscall" - -func direntInode(dirent *syscall.Dirent) uint64 { - return uint64(dirent.Fileno) -} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go deleted file mode 100644 index d3922890..00000000 --- a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build (linux || (darwin && !cgo)) && !appengine -// +build linux darwin,!cgo -// +build !appengine - -package fastwalk - -import "syscall" - -func direntInode(dirent *syscall.Dirent) uint64 { - return dirent.Ino -} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go deleted file mode 100644 index 38a4db6a..00000000 --- a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build (darwin && !cgo) || freebsd || openbsd || netbsd -// +build darwin,!cgo freebsd openbsd netbsd - -package fastwalk - -import "syscall" - -func direntNamlen(dirent *syscall.Dirent) uint64 { - return uint64(dirent.Namlen) -} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go deleted file mode 100644 index c82e57df..00000000 --- a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux && !appengine -// +build linux,!appengine - -package fastwalk - -import ( - "bytes" - "syscall" - "unsafe" -) - -func direntNamlen(dirent *syscall.Dirent) uint64 { - const fixedHdr = uint16(unsafe.Offsetof(syscall.Dirent{}.Name)) - nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0])) - const nameBufLen = uint16(len(nameBuf)) - limit := dirent.Reclen - fixedHdr - if limit > nameBufLen { - limit = nameBufLen - } - nameLen := bytes.IndexByte(nameBuf[:limit], 0) - if nameLen < 0 { - panic("failed to find terminating 0 byte in dirent") - } - return uint64(nameLen) -} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go deleted file mode 100644 index 085d3116..00000000 --- a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build appengine || (!linux && !darwin && !freebsd && !openbsd && !netbsd) -// +build appengine !linux,!darwin,!freebsd,!openbsd,!netbsd - -package fastwalk - -import ( - "io/ioutil" - "os" -) - -// readDir calls fn for each directory entry in dirName. -// It does not descend into directories or follow symlinks. -// If fn returns a non-nil error, readDir returns with that error -// immediately. 
-func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error { - fis, err := ioutil.ReadDir(dirName) - if err != nil { - return err - } - skipFiles := false - for _, fi := range fis { - if fi.Mode().IsRegular() && skipFiles { - continue - } - if err := fn(dirName, fi.Name(), fi.Mode()&os.ModeType); err != nil { - if err == ErrSkipFiles { - skipFiles = true - continue - } - return err - } - } - return nil -} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go deleted file mode 100644 index f12f1a73..00000000 --- a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build (linux || freebsd || openbsd || netbsd || (darwin && !cgo)) && !appengine -// +build linux freebsd openbsd netbsd darwin,!cgo -// +build !appengine - -package fastwalk - -import ( - "fmt" - "os" - "syscall" - "unsafe" -) - -const blockSize = 8 << 10 - -// unknownFileMode is a sentinel (and bogus) os.FileMode -// value used to represent a syscall.DT_UNKNOWN Dirent.Type. -const unknownFileMode os.FileMode = os.ModeNamedPipe | os.ModeSocket | os.ModeDevice - -func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error { - fd, err := open(dirName, 0, 0) - if err != nil { - return &os.PathError{Op: "open", Path: dirName, Err: err} - } - defer syscall.Close(fd) - - // The buffer must be at least a block long. - buf := make([]byte, blockSize) // stack-allocated; doesn't escape - bufp := 0 // starting read position in buf - nbuf := 0 // end valid data in buf - skipFiles := false - for { - if bufp >= nbuf { - bufp = 0 - nbuf, err = readDirent(fd, buf) - if err != nil { - return os.NewSyscallError("readdirent", err) - } - if nbuf <= 0 { - return nil - } - } - consumed, name, typ := parseDirEnt(buf[bufp:nbuf]) - bufp += consumed - if name == "" || name == "." || name == ".." { - continue - } - // Fallback for filesystems (like old XFS) that don't - // support Dirent.Type and have DT_UNKNOWN (0) there - // instead. - if typ == unknownFileMode { - fi, err := os.Lstat(dirName + "/" + name) - if err != nil { - // It got deleted in the meantime. - if os.IsNotExist(err) { - continue - } - return err - } - typ = fi.Mode() & os.ModeType - } - if skipFiles && typ.IsRegular() { - continue - } - if err := fn(dirName, name, typ); err != nil { - if err == ErrSkipFiles { - skipFiles = true - continue - } - return err - } - } -} - -func parseDirEnt(buf []byte) (consumed int, name string, typ os.FileMode) { - // golang.org/issue/37269 - dirent := &syscall.Dirent{} - copy((*[unsafe.Sizeof(syscall.Dirent{})]byte)(unsafe.Pointer(dirent))[:], buf) - if v := unsafe.Offsetof(dirent.Reclen) + unsafe.Sizeof(dirent.Reclen); uintptr(len(buf)) < v { - panic(fmt.Sprintf("buf size of %d smaller than dirent header size %d", len(buf), v)) - } - if len(buf) < int(dirent.Reclen) { - panic(fmt.Sprintf("buf size %d < record length %d", len(buf), dirent.Reclen)) - } - consumed = int(dirent.Reclen) - if direntInode(dirent) == 0 { // File absent in directory. 
- return - } - switch dirent.Type { - case syscall.DT_REG: - typ = 0 - case syscall.DT_DIR: - typ = os.ModeDir - case syscall.DT_LNK: - typ = os.ModeSymlink - case syscall.DT_BLK: - typ = os.ModeDevice - case syscall.DT_FIFO: - typ = os.ModeNamedPipe - case syscall.DT_SOCK: - typ = os.ModeSocket - case syscall.DT_UNKNOWN: - typ = unknownFileMode - default: - // Skip weird things. - // It's probably a DT_WHT (http://lwn.net/Articles/325369/) - // or something. Revisit if/when this package is moved outside - // of goimports. goimports only cares about regular files, - // symlinks, and directories. - return - } - - nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0])) - nameLen := direntNamlen(dirent) - - // Special cases for common things: - if nameLen == 1 && nameBuf[0] == '.' { - name = "." - } else if nameLen == 2 && nameBuf[0] == '.' && nameBuf[1] == '.' { - name = ".." - } else { - name = string(nameBuf[:nameLen]) - } - return -} - -// According to https://golang.org/doc/go1.14#runtime -// A consequence of the implementation of preemption is that on Unix systems, including Linux and macOS -// systems, programs built with Go 1.14 will receive more signals than programs built with earlier releases. -// -// This causes syscall.Open and syscall.ReadDirent sometimes fail with EINTR errors. -// We need to retry in this case. -func open(path string, mode int, perm uint32) (fd int, err error) { - for { - fd, err := syscall.Open(path, mode, perm) - if err != syscall.EINTR { - return fd, err - } - } -} - -func readDirent(fd int, buf []byte) (n int, err error) { - for { - nbuf, err := syscall.ReadDirent(fd, buf) - if err != syscall.EINTR { - return nbuf, err - } - } -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bexport.go b/vendor/golang.org/x/tools/internal/gcimporter/bexport.go deleted file mode 100644 index 30582ed6..00000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/bexport.go +++ /dev/null @@ -1,852 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Binary package export. -// This file was derived from $GOROOT/src/cmd/compile/internal/gc/bexport.go; -// see that file for specification of the format. - -package gcimporter - -import ( - "bytes" - "encoding/binary" - "fmt" - "go/constant" - "go/token" - "go/types" - "math" - "math/big" - "sort" - "strings" -) - -// If debugFormat is set, each integer and string value is preceded by a marker -// and position information in the encoding. This mechanism permits an importer -// to recognize immediately when it is out of sync. The importer recognizes this -// mode automatically (i.e., it can import export data produced with debugging -// support even if debugFormat is not set at the time of import). This mode will -// lead to massively larger export data (by a factor of 2 to 3) and should only -// be enabled during development and debugging. -// -// NOTE: This flag is the first flag to enable if importing dies because of -// (suspected) format errors, and whenever a change is made to the format. -const debugFormat = false // default: false - -// Current export format version. Increase with each format change. -// -// Note: The latest binary (non-indexed) export format is at version 6. -// This exporter is still at level 4, but it doesn't matter since -// the binary importer can handle older versions just fine. 
-// -// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE -// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMENTED HERE -// 4: type name objects support type aliases, uses aliasTag -// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used) -// 2: removed unused bool in ODCL export (compiler only) -// 1: header format change (more regular), export package for _ struct fields -// 0: Go1.7 encoding -const exportVersion = 4 - -// trackAllTypes enables cycle tracking for all types, not just named -// types. The existing compiler invariants assume that unnamed types -// that are not completely set up are not used, or else there are spurious -// errors. -// If disabled, only named types are tracked, possibly leading to slightly -// less efficient encoding in rare cases. It also prevents the export of -// some corner-case type declarations (but those are not handled correctly -// with with the textual export format either). -// TODO(gri) enable and remove once issues caused by it are fixed -const trackAllTypes = false - -type exporter struct { - fset *token.FileSet - out bytes.Buffer - - // object -> index maps, indexed in order of serialization - strIndex map[string]int - pkgIndex map[*types.Package]int - typIndex map[types.Type]int - - // position encoding - posInfoFormat bool - prevFile string - prevLine int - - // debugging support - written int // bytes written - indent int // for trace -} - -// internalError represents an error generated inside this package. -type internalError string - -func (e internalError) Error() string { return "gcimporter: " + string(e) } - -func internalErrorf(format string, args ...interface{}) error { - return internalError(fmt.Sprintf(format, args...)) -} - -// BExportData returns binary export data for pkg. -// If no file set is provided, position info will be missing. -func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) { - if !debug { - defer func() { - if e := recover(); e != nil { - if ierr, ok := e.(internalError); ok { - err = ierr - return - } - // Not an internal error; panic again. - panic(e) - } - }() - } - - p := exporter{ - fset: fset, - strIndex: map[string]int{"": 0}, // empty string is mapped to 0 - pkgIndex: make(map[*types.Package]int), - typIndex: make(map[types.Type]int), - posInfoFormat: true, // TODO(gri) might become a flag, eventually - } - - // write version info - // The version string must start with "version %d" where %d is the version - // number. Additional debugging information may follow after a blank; that - // text is ignored by the importer. 
- p.rawStringln(fmt.Sprintf("version %d", exportVersion)) - var debug string - if debugFormat { - debug = "debug" - } - p.rawStringln(debug) // cannot use p.bool since it's affected by debugFormat; also want to see this clearly - p.bool(trackAllTypes) - p.bool(p.posInfoFormat) - - // --- generic export data --- - - // populate type map with predeclared "known" types - for index, typ := range predeclared() { - p.typIndex[typ] = index - } - if len(p.typIndex) != len(predeclared()) { - return nil, internalError("duplicate entries in type map?") - } - - // write package data - p.pkg(pkg, true) - if trace { - p.tracef("\n") - } - - // write objects - objcount := 0 - scope := pkg.Scope() - for _, name := range scope.Names() { - if !token.IsExported(name) { - continue - } - if trace { - p.tracef("\n") - } - p.obj(scope.Lookup(name)) - objcount++ - } - - // indicate end of list - if trace { - p.tracef("\n") - } - p.tag(endTag) - - // for self-verification only (redundant) - p.int(objcount) - - if trace { - p.tracef("\n") - } - - // --- end of export data --- - - return p.out.Bytes(), nil -} - -func (p *exporter) pkg(pkg *types.Package, emptypath bool) { - if pkg == nil { - panic(internalError("unexpected nil pkg")) - } - - // if we saw the package before, write its index (>= 0) - if i, ok := p.pkgIndex[pkg]; ok { - p.index('P', i) - return - } - - // otherwise, remember the package, write the package tag (< 0) and package data - if trace { - p.tracef("P%d = { ", len(p.pkgIndex)) - defer p.tracef("} ") - } - p.pkgIndex[pkg] = len(p.pkgIndex) - - p.tag(packageTag) - p.string(pkg.Name()) - if emptypath { - p.string("") - } else { - p.string(pkg.Path()) - } -} - -func (p *exporter) obj(obj types.Object) { - switch obj := obj.(type) { - case *types.Const: - p.tag(constTag) - p.pos(obj) - p.qualifiedName(obj) - p.typ(obj.Type()) - p.value(obj.Val()) - - case *types.TypeName: - if obj.IsAlias() { - p.tag(aliasTag) - p.pos(obj) - p.qualifiedName(obj) - } else { - p.tag(typeTag) - } - p.typ(obj.Type()) - - case *types.Var: - p.tag(varTag) - p.pos(obj) - p.qualifiedName(obj) - p.typ(obj.Type()) - - case *types.Func: - p.tag(funcTag) - p.pos(obj) - p.qualifiedName(obj) - sig := obj.Type().(*types.Signature) - p.paramList(sig.Params(), sig.Variadic()) - p.paramList(sig.Results(), false) - - default: - panic(internalErrorf("unexpected object %v (%T)", obj, obj)) - } -} - -func (p *exporter) pos(obj types.Object) { - if !p.posInfoFormat { - return - } - - file, line := p.fileLine(obj) - if file == p.prevFile { - // common case: write line delta - // delta == 0 means different file or no line change - delta := line - p.prevLine - p.int(delta) - if delta == 0 { - p.int(-1) // -1 means no file change - } - } else { - // different file - p.int(0) - // Encode filename as length of common prefix with previous - // filename, followed by (possibly empty) suffix. Filenames - // frequently share path prefixes, so this can save a lot - // of space and make export data size less dependent on file - // path length. The suffix is unlikely to be empty because - // file names tend to end in ".go". 
- n := commonPrefixLen(p.prevFile, file) - p.int(n) // n >= 0 - p.string(file[n:]) // write suffix only - p.prevFile = file - p.int(line) - } - p.prevLine = line -} - -func (p *exporter) fileLine(obj types.Object) (file string, line int) { - if p.fset != nil { - pos := p.fset.Position(obj.Pos()) - file = pos.Filename - line = pos.Line - } - return -} - -func commonPrefixLen(a, b string) int { - if len(a) > len(b) { - a, b = b, a - } - // len(a) <= len(b) - i := 0 - for i < len(a) && a[i] == b[i] { - i++ - } - return i -} - -func (p *exporter) qualifiedName(obj types.Object) { - p.string(obj.Name()) - p.pkg(obj.Pkg(), false) -} - -func (p *exporter) typ(t types.Type) { - if t == nil { - panic(internalError("nil type")) - } - - // Possible optimization: Anonymous pointer types *T where - // T is a named type are common. We could canonicalize all - // such types *T to a single type PT = *T. This would lead - // to at most one *T entry in typIndex, and all future *T's - // would be encoded as the respective index directly. Would - // save 1 byte (pointerTag) per *T and reduce the typIndex - // size (at the cost of a canonicalization map). We can do - // this later, without encoding format change. - - // if we saw the type before, write its index (>= 0) - if i, ok := p.typIndex[t]; ok { - p.index('T', i) - return - } - - // otherwise, remember the type, write the type tag (< 0) and type data - if trackAllTypes { - if trace { - p.tracef("T%d = {>\n", len(p.typIndex)) - defer p.tracef("<\n} ") - } - p.typIndex[t] = len(p.typIndex) - } - - switch t := t.(type) { - case *types.Named: - if !trackAllTypes { - // if we don't track all types, track named types now - p.typIndex[t] = len(p.typIndex) - } - - p.tag(namedTag) - p.pos(t.Obj()) - p.qualifiedName(t.Obj()) - p.typ(t.Underlying()) - if !types.IsInterface(t) { - p.assocMethods(t) - } - - case *types.Array: - p.tag(arrayTag) - p.int64(t.Len()) - p.typ(t.Elem()) - - case *types.Slice: - p.tag(sliceTag) - p.typ(t.Elem()) - - case *dddSlice: - p.tag(dddTag) - p.typ(t.elem) - - case *types.Struct: - p.tag(structTag) - p.fieldList(t) - - case *types.Pointer: - p.tag(pointerTag) - p.typ(t.Elem()) - - case *types.Signature: - p.tag(signatureTag) - p.paramList(t.Params(), t.Variadic()) - p.paramList(t.Results(), false) - - case *types.Interface: - p.tag(interfaceTag) - p.iface(t) - - case *types.Map: - p.tag(mapTag) - p.typ(t.Key()) - p.typ(t.Elem()) - - case *types.Chan: - p.tag(chanTag) - p.int(int(3 - t.Dir())) // hack - p.typ(t.Elem()) - - default: - panic(internalErrorf("unexpected type %T: %s", t, t)) - } -} - -func (p *exporter) assocMethods(named *types.Named) { - // Sort methods (for determinism). 
- var methods []*types.Func - for i := 0; i < named.NumMethods(); i++ { - methods = append(methods, named.Method(i)) - } - sort.Sort(methodsByName(methods)) - - p.int(len(methods)) - - if trace && methods != nil { - p.tracef("associated methods {>\n") - } - - for i, m := range methods { - if trace && i > 0 { - p.tracef("\n") - } - - p.pos(m) - name := m.Name() - p.string(name) - if !exported(name) { - p.pkg(m.Pkg(), false) - } - - sig := m.Type().(*types.Signature) - p.paramList(types.NewTuple(sig.Recv()), false) - p.paramList(sig.Params(), sig.Variadic()) - p.paramList(sig.Results(), false) - p.int(0) // dummy value for go:nointerface pragma - ignored by importer - } - - if trace && methods != nil { - p.tracef("<\n} ") - } -} - -type methodsByName []*types.Func - -func (x methodsByName) Len() int { return len(x) } -func (x methodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } -func (x methodsByName) Less(i, j int) bool { return x[i].Name() < x[j].Name() } - -func (p *exporter) fieldList(t *types.Struct) { - if trace && t.NumFields() > 0 { - p.tracef("fields {>\n") - defer p.tracef("<\n} ") - } - - p.int(t.NumFields()) - for i := 0; i < t.NumFields(); i++ { - if trace && i > 0 { - p.tracef("\n") - } - p.field(t.Field(i)) - p.string(t.Tag(i)) - } -} - -func (p *exporter) field(f *types.Var) { - if !f.IsField() { - panic(internalError("field expected")) - } - - p.pos(f) - p.fieldName(f) - p.typ(f.Type()) -} - -func (p *exporter) iface(t *types.Interface) { - // TODO(gri): enable importer to load embedded interfaces, - // then emit Embeddeds and ExplicitMethods separately here. - p.int(0) - - n := t.NumMethods() - if trace && n > 0 { - p.tracef("methods {>\n") - defer p.tracef("<\n} ") - } - p.int(n) - for i := 0; i < n; i++ { - if trace && i > 0 { - p.tracef("\n") - } - p.method(t.Method(i)) - } -} - -func (p *exporter) method(m *types.Func) { - sig := m.Type().(*types.Signature) - if sig.Recv() == nil { - panic(internalError("method expected")) - } - - p.pos(m) - p.string(m.Name()) - if m.Name() != "_" && !token.IsExported(m.Name()) { - p.pkg(m.Pkg(), false) - } - - // interface method; no need to encode receiver. - p.paramList(sig.Params(), sig.Variadic()) - p.paramList(sig.Results(), false) -} - -func (p *exporter) fieldName(f *types.Var) { - name := f.Name() - - if f.Anonymous() { - // anonymous field - we distinguish between 3 cases: - // 1) field name matches base type name and is exported - // 2) field name matches base type name and is not exported - // 3) field name doesn't match base type name (alias name) - bname := basetypeName(f.Type()) - if name == bname { - if token.IsExported(name) { - name = "" // 1) we don't need to know the field name or package - } else { - name = "?" // 2) use unexported name "?" 
to force package export - } - } else { - // 3) indicate alias and export name as is - // (this requires an extra "@" but this is a rare case) - p.string("@") - } - } - - p.string(name) - if name != "" && !token.IsExported(name) { - p.pkg(f.Pkg(), false) - } -} - -func basetypeName(typ types.Type) string { - switch typ := deref(typ).(type) { - case *types.Basic: - return typ.Name() - case *types.Named: - return typ.Obj().Name() - default: - return "" // unnamed type - } -} - -func (p *exporter) paramList(params *types.Tuple, variadic bool) { - // use negative length to indicate unnamed parameters - // (look at the first parameter only since either all - // names are present or all are absent) - n := params.Len() - if n > 0 && params.At(0).Name() == "" { - n = -n - } - p.int(n) - for i := 0; i < params.Len(); i++ { - q := params.At(i) - t := q.Type() - if variadic && i == params.Len()-1 { - t = &dddSlice{t.(*types.Slice).Elem()} - } - p.typ(t) - if n > 0 { - name := q.Name() - p.string(name) - if name != "_" { - p.pkg(q.Pkg(), false) - } - } - p.string("") // no compiler-specific info - } -} - -func (p *exporter) value(x constant.Value) { - if trace { - p.tracef("= ") - } - - switch x.Kind() { - case constant.Bool: - tag := falseTag - if constant.BoolVal(x) { - tag = trueTag - } - p.tag(tag) - - case constant.Int: - if v, exact := constant.Int64Val(x); exact { - // common case: x fits into an int64 - use compact encoding - p.tag(int64Tag) - p.int64(v) - return - } - // uncommon case: large x - use float encoding - // (powers of 2 will be encoded efficiently with exponent) - p.tag(floatTag) - p.float(constant.ToFloat(x)) - - case constant.Float: - p.tag(floatTag) - p.float(x) - - case constant.Complex: - p.tag(complexTag) - p.float(constant.Real(x)) - p.float(constant.Imag(x)) - - case constant.String: - p.tag(stringTag) - p.string(constant.StringVal(x)) - - case constant.Unknown: - // package contains type errors - p.tag(unknownTag) - - default: - panic(internalErrorf("unexpected value %v (%T)", x, x)) - } -} - -func (p *exporter) float(x constant.Value) { - if x.Kind() != constant.Float { - panic(internalErrorf("unexpected constant %v, want float", x)) - } - // extract sign (there is no -0) - sign := constant.Sign(x) - if sign == 0 { - // x == 0 - p.int(0) - return - } - // x != 0 - - var f big.Float - if v, exact := constant.Float64Val(x); exact { - // float64 - f.SetFloat64(v) - } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int { - // TODO(gri): add big.Rat accessor to constant.Value. - r := valueToRat(num) - f.SetRat(r.Quo(r, valueToRat(denom))) - } else { - // Value too large to represent as a fraction => inaccessible. - // TODO(gri): add big.Float accessor to constant.Value. - f.SetFloat64(math.MaxFloat64) // FIXME - } - - // extract exponent such that 0.5 <= m < 1.0 - var m big.Float - exp := f.MantExp(&m) - - // extract mantissa as *big.Int - // - set exponent large enough so mant satisfies mant.IsInt() - // - get *big.Int from mant - m.SetMantExp(&m, int(m.MinPrec())) - mant, acc := m.Int(nil) - if acc != big.Exact { - panic(internalError("internal error")) - } - - p.int(sign) - p.int(exp) - p.string(string(mant.Bytes())) -} - -func valueToRat(x constant.Value) *big.Rat { - // Convert little-endian to big-endian. - // I can't believe this is necessary. 
- bytes := constant.Bytes(x) - for i := 0; i < len(bytes)/2; i++ { - bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i] - } - return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes)) -} - -func (p *exporter) bool(b bool) bool { - if trace { - p.tracef("[") - defer p.tracef("= %v] ", b) - } - - x := 0 - if b { - x = 1 - } - p.int(x) - return b -} - -// ---------------------------------------------------------------------------- -// Low-level encoders - -func (p *exporter) index(marker byte, index int) { - if index < 0 { - panic(internalError("invalid index < 0")) - } - if debugFormat { - p.marker('t') - } - if trace { - p.tracef("%c%d ", marker, index) - } - p.rawInt64(int64(index)) -} - -func (p *exporter) tag(tag int) { - if tag >= 0 { - panic(internalError("invalid tag >= 0")) - } - if debugFormat { - p.marker('t') - } - if trace { - p.tracef("%s ", tagString[-tag]) - } - p.rawInt64(int64(tag)) -} - -func (p *exporter) int(x int) { - p.int64(int64(x)) -} - -func (p *exporter) int64(x int64) { - if debugFormat { - p.marker('i') - } - if trace { - p.tracef("%d ", x) - } - p.rawInt64(x) -} - -func (p *exporter) string(s string) { - if debugFormat { - p.marker('s') - } - if trace { - p.tracef("%q ", s) - } - // if we saw the string before, write its index (>= 0) - // (the empty string is mapped to 0) - if i, ok := p.strIndex[s]; ok { - p.rawInt64(int64(i)) - return - } - // otherwise, remember string and write its negative length and bytes - p.strIndex[s] = len(p.strIndex) - p.rawInt64(-int64(len(s))) - for i := 0; i < len(s); i++ { - p.rawByte(s[i]) - } -} - -// marker emits a marker byte and position information which makes -// it easy for a reader to detect if it is "out of sync". Used for -// debugFormat format only. -func (p *exporter) marker(m byte) { - p.rawByte(m) - // Enable this for help tracking down the location - // of an incorrect marker when running in debugFormat. - if false && trace { - p.tracef("#%d ", p.written) - } - p.rawInt64(int64(p.written)) -} - -// rawInt64 should only be used by low-level encoders. -func (p *exporter) rawInt64(x int64) { - var tmp [binary.MaxVarintLen64]byte - n := binary.PutVarint(tmp[:], x) - for i := 0; i < n; i++ { - p.rawByte(tmp[i]) - } -} - -// rawStringln should only be used to emit the initial version string. -func (p *exporter) rawStringln(s string) { - for i := 0; i < len(s); i++ { - p.rawByte(s[i]) - } - p.rawByte('\n') -} - -// rawByte is the bottleneck interface to write to p.out. -// rawByte escapes b as follows (any encoding does that -// hides '$'): -// -// '$' => '|' 'S' -// '|' => '|' '|' -// -// Necessary so other tools can find the end of the -// export data by searching for "$$". -// rawByte should only be used by low-level encoders. -func (p *exporter) rawByte(b byte) { - switch b { - case '$': - // write '$' as '|' 'S' - b = 'S' - fallthrough - case '|': - // write '|' as '|' '|' - p.out.WriteByte('|') - p.written++ - } - p.out.WriteByte(b) - p.written++ -} - -// tracef is like fmt.Printf but it rewrites the format string -// to take care of indentation. -func (p *exporter) tracef(format string, args ...interface{}) { - if strings.ContainsAny(format, "<>\n") { - var buf bytes.Buffer - for i := 0; i < len(format); i++ { - // no need to deal with runes - ch := format[i] - switch ch { - case '>': - p.indent++ - continue - case '<': - p.indent-- - continue - } - buf.WriteByte(ch) - if ch == '\n' { - for j := p.indent; j > 0; j-- { - buf.WriteString(". 
") - } - } - } - format = buf.String() - } - fmt.Printf(format, args...) -} - -// Debugging support. -// (tagString is only used when tracing is enabled) -var tagString = [...]string{ - // Packages - -packageTag: "package", - - // Types - -namedTag: "named type", - -arrayTag: "array", - -sliceTag: "slice", - -dddTag: "ddd", - -structTag: "struct", - -pointerTag: "pointer", - -signatureTag: "signature", - -interfaceTag: "interface", - -mapTag: "map", - -chanTag: "chan", - - // Values - -falseTag: "false", - -trueTag: "true", - -int64Tag: "int64", - -floatTag: "float", - -fractionTag: "fraction", - -complexTag: "complex", - -stringTag: "string", - -unknownTag: "unknown", - - // Type aliases - -aliasTag: "alias", -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go index b85de014..d98b0db2 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go @@ -2,340 +2,24 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// This file is a copy of $GOROOT/src/go/internal/gcimporter/bimport.go. +// This file contains the remaining vestiges of +// $GOROOT/src/go/internal/gcimporter/bimport.go. package gcimporter import ( - "encoding/binary" "fmt" - "go/constant" "go/token" "go/types" - "sort" - "strconv" - "strings" "sync" - "unicode" - "unicode/utf8" ) -type importer struct { - imports map[string]*types.Package - data []byte - importpath string - buf []byte // for reading strings - version int // export format version - - // object lists - strList []string // in order of appearance - pathList []string // in order of appearance - pkgList []*types.Package // in order of appearance - typList []types.Type // in order of appearance - interfaceList []*types.Interface // for delayed completion only - trackAllTypes bool - - // position encoding - posInfoFormat bool - prevFile string - prevLine int - fake fakeFileSet - - // debugging support - debugFormat bool - read int // bytes read -} - -// BImportData imports a package from the serialized package data -// and returns the number of bytes consumed and a reference to the package. -// If the export data version is not recognized or the format is otherwise -// compromised, an error is returned. -func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { - // catch panics and return them as errors - const currentVersion = 6 - version := -1 // unknown version - defer func() { - if e := recover(); e != nil { - // Return a (possibly nil or incomplete) package unchanged (see #16088). - if version > currentVersion { - err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) - } else { - err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) - } - } - }() - - p := importer{ - imports: imports, - data: data, - importpath: path, - version: version, - strList: []string{""}, // empty string is mapped to 0 - pathList: []string{""}, // empty string is mapped to 0 - fake: fakeFileSet{ - fset: fset, - files: make(map[string]*fileInfo), - }, - } - defer p.fake.setLines() // set lines for files in fset - - // read version info - var versionstr string - if b := p.rawByte(); b == 'c' || b == 'd' { - // Go1.7 encoding; first byte encodes low-level - // encoding format (compact vs debug). 
- // For backward-compatibility only (avoid problems with - // old installed packages). Newly compiled packages use - // the extensible format string. - // TODO(gri) Remove this support eventually; after Go1.8. - if b == 'd' { - p.debugFormat = true - } - p.trackAllTypes = p.rawByte() == 'a' - p.posInfoFormat = p.int() != 0 - versionstr = p.string() - if versionstr == "v1" { - version = 0 - } - } else { - // Go1.8 extensible encoding - // read version string and extract version number (ignore anything after the version number) - versionstr = p.rawStringln(b) - if s := strings.SplitN(versionstr, " ", 3); len(s) >= 2 && s[0] == "version" { - if v, err := strconv.Atoi(s[1]); err == nil && v > 0 { - version = v - } - } - } - p.version = version - - // read version specific flags - extend as necessary - switch p.version { - // case currentVersion: - // ... - // fallthrough - case currentVersion, 5, 4, 3, 2, 1: - p.debugFormat = p.rawStringln(p.rawByte()) == "debug" - p.trackAllTypes = p.int() != 0 - p.posInfoFormat = p.int() != 0 - case 0: - // Go1.7 encoding format - nothing to do here - default: - errorf("unknown bexport format version %d (%q)", p.version, versionstr) - } - - // --- generic export data --- - - // populate typList with predeclared "known" types - p.typList = append(p.typList, predeclared()...) - - // read package data - pkg = p.pkg() - - // read objects of phase 1 only (see cmd/compile/internal/gc/bexport.go) - objcount := 0 - for { - tag := p.tagOrIndex() - if tag == endTag { - break - } - p.obj(tag) - objcount++ - } - - // self-verification - if count := p.int(); count != objcount { - errorf("got %d objects; want %d", objcount, count) - } - - // ignore compiler-specific import data - - // complete interfaces - // TODO(gri) re-investigate if we still need to do this in a delayed fashion - for _, typ := range p.interfaceList { - typ.Complete() - } - - // record all referenced packages as imports - list := append(([]*types.Package)(nil), p.pkgList[1:]...) 
- sort.Sort(byPath(list)) - pkg.SetImports(list) - - // package was imported completely and without errors - pkg.MarkComplete() - - return p.read, pkg, nil -} - func errorf(format string, args ...interface{}) { panic(fmt.Sprintf(format, args...)) } -func (p *importer) pkg() *types.Package { - // if the package was seen before, i is its index (>= 0) - i := p.tagOrIndex() - if i >= 0 { - return p.pkgList[i] - } - - // otherwise, i is the package tag (< 0) - if i != packageTag { - errorf("unexpected package tag %d version %d", i, p.version) - } - - // read package data - name := p.string() - var path string - if p.version >= 5 { - path = p.path() - } else { - path = p.string() - } - if p.version >= 6 { - p.int() // package height; unused by go/types - } - - // we should never see an empty package name - if name == "" { - errorf("empty package name in import") - } - - // an empty path denotes the package we are currently importing; - // it must be the first package we see - if (path == "") != (len(p.pkgList) == 0) { - errorf("package path %q for pkg index %d", path, len(p.pkgList)) - } - - // if the package was imported before, use that one; otherwise create a new one - if path == "" { - path = p.importpath - } - pkg := p.imports[path] - if pkg == nil { - pkg = types.NewPackage(path, name) - p.imports[path] = pkg - } else if pkg.Name() != name { - errorf("conflicting names %s and %s for package %q", pkg.Name(), name, path) - } - p.pkgList = append(p.pkgList, pkg) - - return pkg -} - -// objTag returns the tag value for each object kind. -func objTag(obj types.Object) int { - switch obj.(type) { - case *types.Const: - return constTag - case *types.TypeName: - return typeTag - case *types.Var: - return varTag - case *types.Func: - return funcTag - default: - errorf("unexpected object: %v (%T)", obj, obj) // panics - panic("unreachable") - } -} - -func sameObj(a, b types.Object) bool { - // Because unnamed types are not canonicalized, we cannot simply compare types for - // (pointer) identity. - // Ideally we'd check equality of constant values as well, but this is good enough. - return objTag(a) == objTag(b) && types.Identical(a.Type(), b.Type()) -} - -func (p *importer) declare(obj types.Object) { - pkg := obj.Pkg() - if alt := pkg.Scope().Insert(obj); alt != nil { - // This can only trigger if we import a (non-type) object a second time. - // Excluding type aliases, this cannot happen because 1) we only import a package - // once; and b) we ignore compiler-specific export data which may contain - // functions whose inlined function bodies refer to other functions that - // were already imported. - // However, type aliases require reexporting the original type, so we need - // to allow it (see also the comment in cmd/compile/internal/gc/bimport.go, - // method importer.obj, switch case importing functions). - // TODO(gri) review/update this comment once the gc compiler handles type aliases. 
- if !sameObj(obj, alt) { - errorf("inconsistent import:\n\t%v\npreviously imported as:\n\t%v\n", obj, alt) - } - } -} - -func (p *importer) obj(tag int) { - switch tag { - case constTag: - pos := p.pos() - pkg, name := p.qualifiedName() - typ := p.typ(nil, nil) - val := p.value() - p.declare(types.NewConst(pos, pkg, name, typ, val)) - - case aliasTag: - // TODO(gri) verify type alias hookup is correct - pos := p.pos() - pkg, name := p.qualifiedName() - typ := p.typ(nil, nil) - p.declare(types.NewTypeName(pos, pkg, name, typ)) - - case typeTag: - p.typ(nil, nil) - - case varTag: - pos := p.pos() - pkg, name := p.qualifiedName() - typ := p.typ(nil, nil) - p.declare(types.NewVar(pos, pkg, name, typ)) - - case funcTag: - pos := p.pos() - pkg, name := p.qualifiedName() - params, isddd := p.paramList() - result, _ := p.paramList() - sig := types.NewSignature(nil, params, result, isddd) - p.declare(types.NewFunc(pos, pkg, name, sig)) - - default: - errorf("unexpected object tag %d", tag) - } -} - const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go -func (p *importer) pos() token.Pos { - if !p.posInfoFormat { - return token.NoPos - } - - file := p.prevFile - line := p.prevLine - delta := p.int() - line += delta - if p.version >= 5 { - if delta == deltaNewFile { - if n := p.int(); n >= 0 { - // file changed - file = p.path() - line = n - } - } - } else { - if delta == 0 { - if n := p.int(); n >= 0 { - // file changed - file = p.prevFile[:n] + p.string() - line = p.int() - } - } - } - p.prevFile = file - p.prevLine = line - - return p.fake.pos(file, line, 0) -} - // Synthesize a token.Pos type fakeFileSet struct { fset *token.FileSet @@ -389,205 +73,6 @@ var ( fakeLinesOnce sync.Once ) -func (p *importer) qualifiedName() (pkg *types.Package, name string) { - name = p.string() - pkg = p.pkg() - return -} - -func (p *importer) record(t types.Type) { - p.typList = append(p.typList, t) -} - -// A dddSlice is a types.Type representing ...T parameters. -// It only appears for parameter types and does not escape -// the importer. -type dddSlice struct { - elem types.Type -} - -func (t *dddSlice) Underlying() types.Type { return t } -func (t *dddSlice) String() string { return "..." + t.elem.String() } - -// parent is the package which declared the type; parent == nil means -// the package currently imported. The parent package is needed for -// exported struct fields and interface methods which don't contain -// explicit package information in the export data. -// -// A non-nil tname is used as the "owner" of the result type; i.e., -// the result type is the underlying type of tname. tname is used -// to give interface methods a named receiver type where possible. 
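// Editorial sketch, not part of the vendored patch: one concrete case of
// the parent/tname contract described above. Given
//
//	package p
//	type T struct{ F int }
//
// the exported field F carries no explicit package in the binary export
// data, so the importer qualifies it with parent (here p); tname names the
// type T being decoded, so that interface methods read along the way can
// be given T as their named receiver type where possible.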
-func (p *importer) typ(parent *types.Package, tname *types.Named) types.Type { - // if the type was seen before, i is its index (>= 0) - i := p.tagOrIndex() - if i >= 0 { - return p.typList[i] - } - - // otherwise, i is the type tag (< 0) - switch i { - case namedTag: - // read type object - pos := p.pos() - parent, name := p.qualifiedName() - scope := parent.Scope() - obj := scope.Lookup(name) - - // if the object doesn't exist yet, create and insert it - if obj == nil { - obj = types.NewTypeName(pos, parent, name, nil) - scope.Insert(obj) - } - - if _, ok := obj.(*types.TypeName); !ok { - errorf("pkg = %s, name = %s => %s", parent, name, obj) - } - - // associate new named type with obj if it doesn't exist yet - t0 := types.NewNamed(obj.(*types.TypeName), nil, nil) - - // but record the existing type, if any - tname := obj.Type().(*types.Named) // tname is either t0 or the existing type - p.record(tname) - - // read underlying type - t0.SetUnderlying(p.typ(parent, t0)) - - // interfaces don't have associated methods - if types.IsInterface(t0) { - return tname - } - - // read associated methods - for i := p.int(); i > 0; i-- { - // TODO(gri) replace this with something closer to fieldName - pos := p.pos() - name := p.string() - if !exported(name) { - p.pkg() - } - - recv, _ := p.paramList() // TODO(gri) do we need a full param list for the receiver? - params, isddd := p.paramList() - result, _ := p.paramList() - p.int() // go:nointerface pragma - discarded - - sig := types.NewSignature(recv.At(0), params, result, isddd) - t0.AddMethod(types.NewFunc(pos, parent, name, sig)) - } - - return tname - - case arrayTag: - t := new(types.Array) - if p.trackAllTypes { - p.record(t) - } - - n := p.int64() - *t = *types.NewArray(p.typ(parent, nil), n) - return t - - case sliceTag: - t := new(types.Slice) - if p.trackAllTypes { - p.record(t) - } - - *t = *types.NewSlice(p.typ(parent, nil)) - return t - - case dddTag: - t := new(dddSlice) - if p.trackAllTypes { - p.record(t) - } - - t.elem = p.typ(parent, nil) - return t - - case structTag: - t := new(types.Struct) - if p.trackAllTypes { - p.record(t) - } - - *t = *types.NewStruct(p.fieldList(parent)) - return t - - case pointerTag: - t := new(types.Pointer) - if p.trackAllTypes { - p.record(t) - } - - *t = *types.NewPointer(p.typ(parent, nil)) - return t - - case signatureTag: - t := new(types.Signature) - if p.trackAllTypes { - p.record(t) - } - - params, isddd := p.paramList() - result, _ := p.paramList() - *t = *types.NewSignature(nil, params, result, isddd) - return t - - case interfaceTag: - // Create a dummy entry in the type list. This is safe because we - // cannot expect the interface type to appear in a cycle, as any - // such cycle must contain a named type which would have been - // first defined earlier. - // TODO(gri) Is this still true now that we have type aliases? - // See issue #23225. 
- n := len(p.typList) - if p.trackAllTypes { - p.record(nil) - } - - var embeddeds []types.Type - for n := p.int(); n > 0; n-- { - p.pos() - embeddeds = append(embeddeds, p.typ(parent, nil)) - } - - t := newInterface(p.methodList(parent, tname), embeddeds) - p.interfaceList = append(p.interfaceList, t) - if p.trackAllTypes { - p.typList[n] = t - } - return t - - case mapTag: - t := new(types.Map) - if p.trackAllTypes { - p.record(t) - } - - key := p.typ(parent, nil) - val := p.typ(parent, nil) - *t = *types.NewMap(key, val) - return t - - case chanTag: - t := new(types.Chan) - if p.trackAllTypes { - p.record(t) - } - - dir := chanDir(p.int()) - val := p.typ(parent, nil) - *t = *types.NewChan(dir, val) - return t - - default: - errorf("unexpected type tag %d", i) // panics - panic("unreachable") - } -} - func chanDir(d int) types.ChanDir { // tag values must match the constants in cmd/compile/internal/gc/go.go switch d { @@ -603,394 +88,6 @@ func chanDir(d int) types.ChanDir { } } -func (p *importer) fieldList(parent *types.Package) (fields []*types.Var, tags []string) { - if n := p.int(); n > 0 { - fields = make([]*types.Var, n) - tags = make([]string, n) - for i := range fields { - fields[i], tags[i] = p.field(parent) - } - } - return -} - -func (p *importer) field(parent *types.Package) (*types.Var, string) { - pos := p.pos() - pkg, name, alias := p.fieldName(parent) - typ := p.typ(parent, nil) - tag := p.string() - - anonymous := false - if name == "" { - // anonymous field - typ must be T or *T and T must be a type name - switch typ := deref(typ).(type) { - case *types.Basic: // basic types are named types - pkg = nil // // objects defined in Universe scope have no package - name = typ.Name() - case *types.Named: - name = typ.Obj().Name() - default: - errorf("named base type expected") - } - anonymous = true - } else if alias { - // anonymous field: we have an explicit name because it's an alias - anonymous = true - } - - return types.NewField(pos, pkg, name, typ, anonymous), tag -} - -func (p *importer) methodList(parent *types.Package, baseType *types.Named) (methods []*types.Func) { - if n := p.int(); n > 0 { - methods = make([]*types.Func, n) - for i := range methods { - methods[i] = p.method(parent, baseType) - } - } - return -} - -func (p *importer) method(parent *types.Package, baseType *types.Named) *types.Func { - pos := p.pos() - pkg, name, _ := p.fieldName(parent) - // If we don't have a baseType, use a nil receiver. - // A receiver using the actual interface type (which - // we don't know yet) will be filled in when we call - // types.Interface.Complete. 
- var recv *types.Var - if baseType != nil { - recv = types.NewVar(token.NoPos, parent, "", baseType) - } - params, isddd := p.paramList() - result, _ := p.paramList() - sig := types.NewSignature(recv, params, result, isddd) - return types.NewFunc(pos, pkg, name, sig) -} - -func (p *importer) fieldName(parent *types.Package) (pkg *types.Package, name string, alias bool) { - name = p.string() - pkg = parent - if pkg == nil { - // use the imported package instead - pkg = p.pkgList[0] - } - if p.version == 0 && name == "_" { - // version 0 didn't export a package for _ fields - return - } - switch name { - case "": - // 1) field name matches base type name and is exported: nothing to do - case "?": - // 2) field name matches base type name and is not exported: need package - name = "" - pkg = p.pkg() - case "@": - // 3) field name doesn't match type name (alias) - name = p.string() - alias = true - fallthrough - default: - if !exported(name) { - pkg = p.pkg() - } - } - return -} - -func (p *importer) paramList() (*types.Tuple, bool) { - n := p.int() - if n == 0 { - return nil, false - } - // negative length indicates unnamed parameters - named := true - if n < 0 { - n = -n - named = false - } - // n > 0 - params := make([]*types.Var, n) - isddd := false - for i := range params { - params[i], isddd = p.param(named) - } - return types.NewTuple(params...), isddd -} - -func (p *importer) param(named bool) (*types.Var, bool) { - t := p.typ(nil, nil) - td, isddd := t.(*dddSlice) - if isddd { - t = types.NewSlice(td.elem) - } - - var pkg *types.Package - var name string - if named { - name = p.string() - if name == "" { - errorf("expected named parameter") - } - if name != "_" { - pkg = p.pkg() - } - if i := strings.Index(name, "·"); i > 0 { - name = name[:i] // cut off gc-specific parameter numbering - } - } - - // read and discard compiler-specific info - p.string() - - return types.NewVar(token.NoPos, pkg, name, t), isddd -} - -func exported(name string) bool { - ch, _ := utf8.DecodeRuneInString(name) - return unicode.IsUpper(ch) -} - -func (p *importer) value() constant.Value { - switch tag := p.tagOrIndex(); tag { - case falseTag: - return constant.MakeBool(false) - case trueTag: - return constant.MakeBool(true) - case int64Tag: - return constant.MakeInt64(p.int64()) - case floatTag: - return p.float() - case complexTag: - re := p.float() - im := p.float() - return constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) - case stringTag: - return constant.MakeString(p.string()) - case unknownTag: - return constant.MakeUnknown() - default: - errorf("unexpected value tag %d", tag) // panics - panic("unreachable") - } -} - -func (p *importer) float() constant.Value { - sign := p.int() - if sign == 0 { - return constant.MakeInt64(0) - } - - exp := p.int() - mant := []byte(p.string()) // big endian - - // remove leading 0's if any - for len(mant) > 0 && mant[0] == 0 { - mant = mant[1:] - } - - // convert to little endian - // TODO(gri) go/constant should have a more direct conversion function - // (e.g., once it supports a big.Float based implementation) - for i, j := 0, len(mant)-1; i < j; i, j = i+1, j-1 { - mant[i], mant[j] = mant[j], mant[i] - } - - // adjust exponent (constant.MakeFromBytes creates an integer value, - // but mant represents the mantissa bits such that 0.5 <= mant < 1.0) - exp -= len(mant) << 3 - if len(mant) > 0 { - for msd := mant[len(mant)-1]; msd&0x80 == 0; msd <<= 1 { - exp++ - } - } - - x := constant.MakeFromBytes(mant) - switch { - case exp < 0: - d := 
constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp)) - x = constant.BinaryOp(x, token.QUO, d) - case exp > 0: - x = constant.Shift(x, token.SHL, uint(exp)) - } - - if sign < 0 { - x = constant.UnaryOp(token.SUB, x, 0) - } - return x -} - -// ---------------------------------------------------------------------------- -// Low-level decoders - -func (p *importer) tagOrIndex() int { - if p.debugFormat { - p.marker('t') - } - - return int(p.rawInt64()) -} - -func (p *importer) int() int { - x := p.int64() - if int64(int(x)) != x { - errorf("exported integer too large") - } - return int(x) -} - -func (p *importer) int64() int64 { - if p.debugFormat { - p.marker('i') - } - - return p.rawInt64() -} - -func (p *importer) path() string { - if p.debugFormat { - p.marker('p') - } - // if the path was seen before, i is its index (>= 0) - // (the empty string is at index 0) - i := p.rawInt64() - if i >= 0 { - return p.pathList[i] - } - // otherwise, i is the negative path length (< 0) - a := make([]string, -i) - for n := range a { - a[n] = p.string() - } - s := strings.Join(a, "/") - p.pathList = append(p.pathList, s) - return s -} - -func (p *importer) string() string { - if p.debugFormat { - p.marker('s') - } - // if the string was seen before, i is its index (>= 0) - // (the empty string is at index 0) - i := p.rawInt64() - if i >= 0 { - return p.strList[i] - } - // otherwise, i is the negative string length (< 0) - if n := int(-i); n <= cap(p.buf) { - p.buf = p.buf[:n] - } else { - p.buf = make([]byte, n) - } - for i := range p.buf { - p.buf[i] = p.rawByte() - } - s := string(p.buf) - p.strList = append(p.strList, s) - return s -} - -func (p *importer) marker(want byte) { - if got := p.rawByte(); got != want { - errorf("incorrect marker: got %c; want %c (pos = %d)", got, want, p.read) - } - - pos := p.read - if n := int(p.rawInt64()); n != pos { - errorf("incorrect position: got %d; want %d", n, pos) - } -} - -// rawInt64 should only be used by low-level decoders. -func (p *importer) rawInt64() int64 { - i, err := binary.ReadVarint(p) - if err != nil { - errorf("read error: %v", err) - } - return i -} - -// rawStringln should only be used to read the initial version string. -func (p *importer) rawStringln(b byte) string { - p.buf = p.buf[:0] - for b != '\n' { - p.buf = append(p.buf, b) - b = p.rawByte() - } - return string(p.buf) -} - -// needed for binary.ReadVarint in rawInt64 -func (p *importer) ReadByte() (byte, error) { - return p.rawByte(), nil -} - -// byte is the bottleneck interface for reading p.data. -// It unescapes '|' 'S' to '$' and '|' '|' to '|'. -// rawByte should only be used by low-level decoders. -func (p *importer) rawByte() byte { - b := p.data[0] - r := 1 - if b == '|' { - b = p.data[1] - r = 2 - switch b { - case 'S': - b = '$' - case '|': - // nothing to do - default: - errorf("unexpected escape sequence in export data") - } - } - p.data = p.data[r:] - p.read += r - return b - -} - -// ---------------------------------------------------------------------------- -// Export format - -// Tags. Must be < 0. 
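// Editorial sketch, not part of the vendored patch, of the convention the
// comment above states: a value read back by tagOrIndex is an index into a
// previously recorded list when it is >= 0, and one of the negative tags
// below otherwise, e.g.
//
//	i := p.tagOrIndex()
//	if i >= 0 {
//		return p.typList[i] // a type seen before
//	}
//	// otherwise i is a tag such as namedTag, sliceTag, ...
//
// which is why every tag constant must be negative.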
-const ( - // Objects - packageTag = -(iota + 1) - constTag - typeTag - varTag - funcTag - endTag - - // Types - namedTag - arrayTag - sliceTag - dddTag - structTag - pointerTag - signatureTag - interfaceTag - mapTag - chanTag - - // Values - falseTag - trueTag - int64Tag - floatTag - fractionTag // not used by gc - complexTag - stringTag - nilTag // only used by gc (appears in exported inlined function bodies) - unknownTag // not used by gc (only appears in packages with errors) - - // Type aliases - aliasTag -) - var predeclOnce sync.Once var predecl []types.Type // initialized lazily diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go index 0372fb3a..2d078ccb 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go @@ -7,6 +7,18 @@ // Package gcimporter provides various functions for reading // gc-generated object files that can be used to implement the // Importer interface defined by the Go 1.5 standard library package. +// +// The encoding is deterministic: if the encoder is applied twice to +// the same types.Package data structure, both encodings are equal. +// This property may be important to avoid spurious changes in +// applications such as build systems. +// +// However, the encoder is not necessarily idempotent. Importing an +// exported package may yield a types.Package that, while it +// represents the same set of Go types as the original, may differ in +// the details of its internal representation. Because of these +// differences, re-encoding the imported package may yield a +// different, but equally valid, encoding of the package. package gcimporter // import "golang.org/x/tools/internal/gcimporter" import ( @@ -17,7 +29,6 @@ import ( "go/token" "go/types" "io" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -209,7 +220,7 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func switch hdr { case "$$B\n": var data []byte - data, err = ioutil.ReadAll(buf) + data, err = io.ReadAll(buf) if err != nil { break } @@ -218,20 +229,17 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func // Or, define a new standard go/types/gcexportdata package. fset := token.NewFileSet() - // The indexed export format starts with an 'i'; the older - // binary export format starts with a 'c', 'd', or 'v' - // (from "version"). Select appropriate importer. + // Select appropriate importer. 
 		if len(data) > 0 {
 			switch data[0] {
-			case 'i':
-				_, pkg, err := IImportData(fset, packages, data[1:], id)
-				return pkg, err
+			case 'v', 'c', 'd': // binary, till go1.10
+				return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0])
 
-			case 'v', 'c', 'd':
-				_, pkg, err := BImportData(fset, packages, data, id)
+			case 'i': // indexed, till go1.19
+				_, pkg, err := IImportData(fset, packages, data[1:], id)
 				return pkg, err
 
-			case 'u':
+			case 'u': // unified, from go1.20
 				_, pkg, err := UImportData(fset, packages, data[1:size], id)
 				return pkg, err
 
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
index ba53cdcd..2ee8c701 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
@@ -22,17 +22,22 @@ import (
 	"strconv"
 	"strings"
 
+	"golang.org/x/tools/go/types/objectpath"
 	"golang.org/x/tools/internal/tokeninternal"
-	"golang.org/x/tools/internal/typeparams"
 )
 
 // IExportShallow encodes "shallow" export data for the specified package.
 //
-// No promises are made about the encoding other than that it can be
-// decoded by the same version of IIExportShallow. If you plan to save
-// export data in the file system, be sure to include a cryptographic
-// digest of the executable in the key to avoid version skew.
-func IExportShallow(fset *token.FileSet, pkg *types.Package) ([]byte, error) {
+// No promises are made about the encoding other than that it can be decoded by
+// the same version of IExportShallow. If you plan to save export data in the
+// file system, be sure to include a cryptographic digest of the executable in
+// the key to avoid version skew.
+//
+// If the provided reportf func is non-nil, it will be used for reporting bugs
+// encountered during export.
+// TODO(rfindley): remove reportf when we are confident enough in the new
+// objectpath encoding.
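// Editorial sketch, not part of the vendored patch, of the cache key the
// comment above recommends; cacheKey and its use of crypto/sha256, fmt and
// os are illustrative assumptions:
//
//	func cacheKey(pkgPath string) (string, error) {
//		exe, err := os.Executable()
//		if err != nil {
//			return "", err
//		}
//		data, err := os.ReadFile(exe)
//		if err != nil {
//			return "", err
//		}
//		return fmt.Sprintf("%x-%s", sha256.Sum256(data), pkgPath), nil
//	}
//
// Keying saved export data on the executable's digest ensures that stale
// entries written by a different tool build are never decoded.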
+func IImportShallow(fset *token.FileSet, getPackages GetPackagesFunc, data []byte, path string, reportf ReportFunc) (*types.Package, error) { const bundle = false - pkgs, err := iimportCommon(fset, imports, data, bundle, path, insert) + const shallow = true + pkgs, err := iimportCommon(fset, getPackages, data, bundle, path, shallow, reportf) if err != nil { return nil, err } return pkgs[0], nil } -// InsertType is the type of a function that creates a types.TypeName -// object for a named type and inserts it into the scope of the -// specified Package. -type InsertType = func(pkg *types.Package, name string) +// ReportFunc is the type of a function used to report formatted bugs. +type ReportFunc = func(string, ...interface{}) // Current bundled export format version. Increase with each format change. // 0: initial implementation @@ -313,8 +326,9 @@ type iexporter struct { out *bytes.Buffer version int - shallow bool // don't put types from other packages in the index - localpkg *types.Package // (nil in bundle mode) + shallow bool // don't put types from other packages in the index + objEncoder *objectpath.Encoder // encodes objects from other packages in shallow mode; lazily allocated + localpkg *types.Package // (nil in bundle mode) // allPkgs tracks all packages that have been referenced by // the export data, so we can ensure to include them in the @@ -354,6 +368,17 @@ func (p *iexporter) trace(format string, args ...interface{}) { fmt.Printf(strings.Repeat("..", p.indent)+format+"\n", args...) } +// objectpathEncoder returns the lazily allocated objectpath.Encoder to use +// when encoding objects in other packages during shallow export. +// +// Using a shared Encoder amortizes some of cost of objectpath search. +func (p *iexporter) objectpathEncoder() *objectpath.Encoder { + if p.objEncoder == nil { + p.objEncoder = new(objectpath.Encoder) + } + return p.objEncoder +} + // stringOff returns the offset of s within the string section. // If not already present, it's added to the end. func (p *iexporter) stringOff(s string) uint64 { @@ -413,7 +438,6 @@ type exportWriter struct { p *iexporter data intWriter - currPkg *types.Package prevFile string prevLine int64 prevColumn int64 @@ -436,7 +460,6 @@ func (p *iexporter) doDecl(obj types.Object) { }() } w := p.newWriter() - w.setPkg(obj.Pkg(), false) switch obj := obj.(type) { case *types.Var: @@ -457,7 +480,7 @@ func (p *iexporter) doDecl(obj types.Object) { } // Function. - if typeparams.ForSignature(sig).Len() == 0 { + if sig.TypeParams().Len() == 0 { w.tag('F') } else { w.tag('G') @@ -470,7 +493,7 @@ func (p *iexporter) doDecl(obj types.Object) { // // While importing the type parameters, tparamList computes and records // their export name, so that it can be later used when writing the index. 
- if tparams := typeparams.ForSignature(sig); tparams.Len() > 0 { + if tparams := sig.TypeParams(); tparams.Len() > 0 { w.tparamList(obj.Name(), tparams, obj.Pkg()) } w.signature(sig) @@ -483,14 +506,14 @@ func (p *iexporter) doDecl(obj types.Object) { case *types.TypeName: t := obj.Type() - if tparam, ok := t.(*typeparams.TypeParam); ok { + if tparam, ok := t.(*types.TypeParam); ok { w.tag('P') w.pos(obj.Pos()) constraint := tparam.Constraint() if p.version >= iexportVersionGo1_18 { implicit := false if iface, _ := constraint.(*types.Interface); iface != nil { - implicit = typeparams.IsImplicit(iface) + implicit = iface.IsImplicit() } w.bool(implicit) } @@ -511,17 +534,17 @@ func (p *iexporter) doDecl(obj types.Object) { panic(internalErrorf("%s is not a defined type", t)) } - if typeparams.ForNamed(named).Len() == 0 { + if named.TypeParams().Len() == 0 { w.tag('T') } else { w.tag('U') } w.pos(obj.Pos()) - if typeparams.ForNamed(named).Len() > 0 { + if named.TypeParams().Len() > 0 { // While importing the type parameters, tparamList computes and records // their export name, so that it can be later used when writing the index. - w.tparamList(obj.Name(), typeparams.ForNamed(named), obj.Pkg()) + w.tparamList(obj.Name(), named.TypeParams(), obj.Pkg()) } underlying := obj.Type().Underlying() @@ -541,7 +564,7 @@ func (p *iexporter) doDecl(obj types.Object) { // Receiver type parameters are type arguments of the receiver type, so // their name must be qualified before exporting recv. - if rparams := typeparams.RecvTypeParams(sig); rparams.Len() > 0 { + if rparams := sig.RecvTypeParams(); rparams.Len() > 0 { prefix := obj.Name() + "." + m.Name() for i := 0; i < rparams.Len(); i++ { rparam := rparams.At(i) @@ -673,6 +696,9 @@ func (w *exportWriter) qualifiedType(obj *types.TypeName) { w.pkg(obj.Pkg()) } +// TODO(rfindley): what does 'pkg' even mean here? It would be better to pass +// it in explicitly into signatures and structs that may use it for +// constructing fields. func (w *exportWriter) typ(t types.Type, pkg *types.Package) { w.data.uint64(w.p.typOff(t, pkg)) } @@ -713,19 +739,19 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { } switch t := t.(type) { case *types.Named: - if targs := typeparams.NamedTypeArgs(t); targs.Len() > 0 { + if targs := t.TypeArgs(); targs.Len() > 0 { w.startType(instanceType) // TODO(rfindley): investigate if this position is correct, and if it // matters. w.pos(t.Obj().Pos()) w.typeList(targs, pkg) - w.typ(typeparams.NamedTypeOrigin(t), pkg) + w.typ(t.Origin(), pkg) return } w.startType(definedType) w.qualifiedType(t.Obj()) - case *typeparams.TypeParam: + case *types.TypeParam: w.startType(typeParamType) w.qualifiedType(t.Obj()) @@ -764,30 +790,53 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { case *types.Signature: w.startType(signatureType) - w.setPkg(pkg, true) + w.pkg(pkg) w.signature(t) case *types.Struct: w.startType(structType) n := t.NumFields() + // Even for struct{} we must emit some qualifying package, because that's + // what the compiler does, and thus that's what the importer expects. + fieldPkg := pkg if n > 0 { - w.setPkg(t.Field(0).Pkg(), true) // qualifying package for field objects - } else { - w.setPkg(pkg, true) + fieldPkg = t.Field(0).Pkg() + } + if fieldPkg == nil { + // TODO(rfindley): improve this very hacky logic. + // + // The importer expects a package to be set for all struct types, even + // those with no fields. A better encoding might be to set NumFields + // before pkg. 
setPkg panics with a nil package, which may be possible + // to reach with invalid packages (and perhaps valid packages, too?), so + // (arbitrarily) set the localpkg if available. + // + // Alternatively, we may be able to simply guarantee that pkg != nil, by + // reconsidering the encoding of constant values. + if w.p.shallow { + fieldPkg = w.p.localpkg + } else { + panic(internalErrorf("no package to set for empty struct")) + } } + w.pkg(fieldPkg) w.uint64(uint64(n)) + for i := 0; i < n; i++ { f := t.Field(i) + if w.p.shallow { + w.objectPath(f) + } w.pos(f.Pos()) w.string(f.Name()) // unexported fields implicitly qualified by prior setPkg - w.typ(f.Type(), pkg) + w.typ(f.Type(), fieldPkg) w.bool(f.Anonymous()) w.string(t.Tag(i)) // note (or tag) } case *types.Interface: w.startType(interfaceType) - w.setPkg(pkg, true) + w.pkg(pkg) n := t.NumEmbeddeds() w.uint64(uint64(n)) @@ -802,17 +851,23 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { w.typ(ft, tPkg) } + // See comment for struct fields. In shallow mode we change the encoding + // for interface methods that are promoted from other packages. + n = t.NumExplicitMethods() w.uint64(uint64(n)) for i := 0; i < n; i++ { m := t.ExplicitMethod(i) + if w.p.shallow { + w.objectPath(m) + } w.pos(m.Pos()) w.string(m.Name()) sig, _ := m.Type().(*types.Signature) w.signature(sig) } - case *typeparams.Union: + case *types.Union: w.startType(unionType) nt := t.Len() w.uint64(uint64(nt)) @@ -827,12 +882,61 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { } } -func (w *exportWriter) setPkg(pkg *types.Package, write bool) { - if write { - w.pkg(pkg) +// objectPath writes the package and objectPath to use to look up obj in a +// different package, when encoding in "shallow" mode. +// +// When doing a shallow import, the importer creates only the local package, +// and requests package symbols for dependencies from the client. +// However, certain types defined in the local package may hold objects defined +// (perhaps deeply) within another package. +// +// For example, consider the following: +// +// package a +// func F() chan * map[string] struct { X int } +// +// package b +// import "a" +// var B = a.F() +// +// In this example, the type of b.B holds fields defined in package a. +// In order to have the correct canonical objects for the field defined in the +// type of B, they are encoded as objectPaths and later looked up in the +// importer. The same problem applies to interface methods. +func (w *exportWriter) objectPath(obj types.Object) { + if obj.Pkg() == nil || obj.Pkg() == w.p.localpkg { + // obj.Pkg() may be nil for the builtin error.Error. + // In this case, or if obj is declared in the local package, no need to + // encode. + w.string("") + return } - - w.currPkg = pkg + objectPath, err := w.p.objectpathEncoder().For(obj) + if err != nil { + // Fall back to the empty string, which will cause the importer to create a + // new object, which matches earlier behavior. Creating a new object is + // sufficient for many purposes (such as type checking), but causes certain + // references algorithms to fail (golang/go#60819). However, we didn't + // notice this problem during months of gopls@v0.12.0 testing. 
+ // + // TODO(golang/go#61674): this workaround is insufficient, as in the case + // where the field forwarded from an instantiated type that may not appear + // in the export data of the original package: + // + // // package a + // type A[P any] struct{ F P } + // + // // package b + // type B a.A[int] + // + // We need to update references algorithms not to depend on this + // de-duplication, at which point we may want to simply remove the + // workaround here. + w.string("") + return + } + w.string(string(objectPath)) + w.pkg(obj.Pkg()) } func (w *exportWriter) signature(sig *types.Signature) { @@ -843,14 +947,14 @@ func (w *exportWriter) signature(sig *types.Signature) { } } -func (w *exportWriter) typeList(ts *typeparams.TypeList, pkg *types.Package) { +func (w *exportWriter) typeList(ts *types.TypeList, pkg *types.Package) { w.uint64(uint64(ts.Len())) for i := 0; i < ts.Len(); i++ { w.typ(ts.At(i), pkg) } } -func (w *exportWriter) tparamList(prefix string, list *typeparams.TypeParamList, pkg *types.Package) { +func (w *exportWriter) tparamList(prefix string, list *types.TypeParamList, pkg *types.Package) { ll := uint64(list.Len()) w.uint64(ll) for i := 0; i < list.Len(); i++ { @@ -868,7 +972,7 @@ const blankMarker = "$" // differs from its actual object name: it is prefixed with a qualifier, and // blank type parameter names are disambiguated by their index in the type // parameter list. -func tparamExportName(prefix string, tparam *typeparams.TypeParam) string { +func tparamExportName(prefix string, tparam *types.TypeParam) string { assert(prefix != "") name := tparam.Obj().Name() if name == "_" { @@ -913,6 +1017,17 @@ func (w *exportWriter) value(typ types.Type, v constant.Value) { w.int64(int64(v.Kind())) } + if v.Kind() == constant.Unknown { + // golang/go#60605: treat unknown constant values as if they have invalid type + // + // This loses some fidelity over the package type-checked from source, but that + // is acceptable. + // + // TODO(rfindley): we should switch on the recorded constant kind rather + // than the constant type + return + } + switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType { case types.IsBoolean: w.bool(constant.BoolVal(v)) @@ -969,6 +1084,16 @@ func constantToFloat(x constant.Value) *big.Float { return &f } +func valueToRat(x constant.Value) *big.Rat { + // Convert little-endian to big-endian. + // I can't believe this is necessary. + bytes := constant.Bytes(x) + for i := 0; i < len(bytes)/2; i++ { + bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i] + } + return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes)) +} + // mpint exports a multi-precision integer. // // For unsigned types, small values are written out as a single @@ -1178,3 +1303,19 @@ func (q *objQueue) popHead() types.Object { q.head++ return obj } + +// internalError represents an error generated inside this package. +type internalError string + +func (e internalError) Error() string { return "gcimporter: " + string(e) } + +// TODO(adonovan): make this call panic, so that it's symmetric with errorf. +// Otherwise it's easy to forget to do anything with the error. +// +// TODO(adonovan): also, consider switching the names "errorf" and +// "internalErrorf" as the former is used for bugs, whose cause is +// internal inconsistency, whereas the latter is used for ordinary +// situations like bad input, whose cause is external. 
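// Editorial illustration, not part of the vendored patch, of the asymmetry
// the TODO above describes:
//
//	errorf("bad tag %d", tag)             // panics; cannot be ignored
//	_ = internalErrorf("bad tag %d", tag) // returns an error; easily dropped
//
// Making internalErrorf panic as well would remove that hazard.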
+func internalErrorf(format string, args ...interface{}) error {
+	return internalError(fmt.Sprintf(format, args...))
+}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
index 448f903e..9bde15e3 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
@@ -21,7 +21,7 @@ import (
 	"sort"
 	"strings"
 
-	"golang.org/x/tools/internal/typeparams"
+	"golang.org/x/tools/go/types/objectpath"
 )
 
 type intReader struct {
@@ -85,7 +85,7 @@ const (
 // If the export data version is not recognized or the format is otherwise
 // compromised, an error is returned.
 func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (int, *types.Package, error) {
-	pkgs, err := iimportCommon(fset, imports, data, false, path, nil)
+	pkgs, err := iimportCommon(fset, GetPackagesFromMap(imports), data, false, path, false, nil)
 	if err != nil {
 		return 0, nil, err
 	}
@@ -94,10 +94,49 @@ func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []
 
 // IImportBundle imports a set of packages from the serialized package bundle.
 func IImportBundle(fset *token.FileSet, imports map[string]*types.Package, data []byte) ([]*types.Package, error) {
-	return iimportCommon(fset, imports, data, true, "", nil)
+	return iimportCommon(fset, GetPackagesFromMap(imports), data, true, "", false, nil)
}
 
-func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data []byte, bundle bool, path string, insert InsertType) (pkgs []*types.Package, err error) {
+// A GetPackagesFunc obtains the non-nil symbols for a set of
+// packages, creating and recursively importing them as needed. An
+// implementation should store each package symbol in the Pkg
+// field of the items array.
+//
+// Any error causes importing to fail. This can be used to quickly read
+// the import manifest of an export data file without fully decoding it.
+type GetPackagesFunc = func(items []GetPackagesItem) error
+
+// A GetPackagesItem is a request from the importer for the package
+// symbol of the specified name and path.
+type GetPackagesItem struct {
+	Name, Path string
+	Pkg        *types.Package // to be filled in by GetPackagesFunc call
+
+	// private importer state
+	pathOffset uint64
+	nameIndex  map[string]uint64
+}
+
+// GetPackagesFromMap returns a GetPackagesFunc that retrieves
+// packages from the given map of package path to package.
+//
+// The returned function may mutate m: each requested package that is not
+// found is created with types.NewPackage and inserted into m.
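// Editorial usage sketch, not part of the vendored patch: a classic
// map-based import table adapts to the new callback API through
// GetPackagesFromMap. Here fset and data are assumed to be in scope, and
// "example.com/p" is a hypothetical package path:
//
//	imports := make(map[string]*types.Package)
//	pkg, err := IImportShallow(fset, GetPackagesFromMap(imports), data, "example.com/p", nil)
//
// Each package named in the manifest that is missing from imports is
// created with types.NewPackage and recorded in the map.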
+func GetPackagesFromMap(m map[string]*types.Package) GetPackagesFunc { + return func(items []GetPackagesItem) error { + for i, item := range items { + pkg, ok := m[item.Path] + if !ok { + pkg = types.NewPackage(item.Path, item.Name) + m[item.Path] = pkg + } + items[i].Pkg = pkg + } + return nil + } +} + +func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte, bundle bool, path string, shallow bool, reportf ReportFunc) (pkgs []*types.Package, err error) { const currentVersion = iexportVersionCurrent version := int64(-1) if !debug { @@ -108,7 +147,7 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data } else if version > currentVersion { err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) } else { - err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) + err = fmt.Errorf("internal error while importing %q (%v); please report an issue", path, e) } } }() @@ -117,11 +156,8 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data r := &intReader{bytes.NewReader(data), path} if bundle { - bundleVersion := r.uint64() - switch bundleVersion { - case bundleVersion: - default: - errorf("unknown bundle format version %d", bundleVersion) + if v := r.uint64(); v != bundleVersion { + errorf("unknown bundle format version %d", v) } } @@ -139,7 +175,7 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data sLen := int64(r.uint64()) var fLen int64 var fileOffset []uint64 - if insert != nil { + if shallow { // Shallow mode uses a different position encoding. fLen = int64(r.uint64()) fileOffset = make([]uint64, r.uint64()) @@ -158,7 +194,8 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data p := iimporter{ version: int(version), ipath: path, - insert: insert, + shallow: shallow, + reportf: reportf, stringData: stringData, stringCache: make(map[uint64]string), @@ -185,8 +222,9 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data p.typCache[uint64(i)] = pt } - pkgList := make([]*types.Package, r.uint64()) - for i := range pkgList { + // Gather the relevant packages from the manifest. + items := make([]GetPackagesItem, r.uint64()) + for i := range items { pkgPathOff := r.uint64() pkgPath := p.stringAt(pkgPathOff) pkgName := p.stringAt(r.uint64()) @@ -195,30 +233,42 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data if pkgPath == "" { pkgPath = path } - pkg := imports[pkgPath] - if pkg == nil { - pkg = types.NewPackage(pkgPath, pkgName) - imports[pkgPath] = pkg - } else if pkg.Name() != pkgName { - errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path) - } - if i == 0 && !bundle { - p.localpkg = pkg - } - - p.pkgCache[pkgPathOff] = pkg + items[i].Name = pkgName + items[i].Path = pkgPath + items[i].pathOffset = pkgPathOff // Read index for package. nameIndex := make(map[string]uint64) nSyms := r.uint64() - // In shallow mode we don't expect an index for other packages. - assert(nSyms == 0 || p.localpkg == pkg || p.insert == nil) + // In shallow mode, only the current package (i=0) has an index. + assert(!(shallow && i > 0 && nSyms != 0)) for ; nSyms > 0; nSyms-- { name := p.stringAt(r.uint64()) nameIndex[name] = r.uint64() } - p.pkgIndex[pkg] = nameIndex + items[i].nameIndex = nameIndex + } + + // Request packages all at once from the client, + // enabling a parallel implementation. 
+ if err := getPackages(items); err != nil { + return nil, err // don't wrap this error + } + + // Check the results and complete the index. + pkgList := make([]*types.Package, len(items)) + for i, item := range items { + pkg := item.Pkg + if pkg == nil { + errorf("internal error: getPackages returned nil package for %q", item.Path) + } else if pkg.Path() != item.Path { + errorf("internal error: getPackages returned wrong path %q, want %q", pkg.Path(), item.Path) + } else if pkg.Name() != item.Name { + errorf("internal error: getPackages returned wrong name %s for package %q, want %s", pkg.Name(), item.Path, item.Name) + } + p.pkgCache[item.pathOffset] = pkg + p.pkgIndex[pkg] = item.nameIndex pkgList[i] = pkg } @@ -270,18 +320,25 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data // Therefore, we defer calling SetConstraint there, and call it here instead // after all types are complete. for _, d := range p.later { - typeparams.SetTypeParamConstraint(d.t, d.constraint) + d.t.SetConstraint(d.constraint) } for _, typ := range p.interfaceList { typ.Complete() } + // Workaround for golang/go#61561. See the doc for instanceList for details. + for _, typ := range p.instanceList { + if iface, _ := typ.Underlying().(*types.Interface); iface != nil { + iface.Complete() + } + } + return pkgs, nil } type setConstraintArgs struct { - t *typeparams.TypeParam + t *types.TypeParam constraint types.Type } @@ -289,8 +346,8 @@ type iimporter struct { version int ipath string - localpkg *types.Package - insert func(pkg *types.Package, name string) // "shallow" mode only + shallow bool + reportf ReportFunc // if non-nil, used to report bugs stringData []byte stringCache map[uint64]string @@ -307,6 +364,12 @@ type iimporter struct { fake fakeFileSet interfaceList []*types.Interface + // Workaround for the go/types bug golang/go#61561: instances produced during + // instantiation may contain incomplete interfaces. Here we only complete the + // underlying type of the instance, which is the most common case but doesn't + // handle parameterized interface literals defined deeper in the type. + instanceList []types.Type // instances for later completion (see golang/go#61561) + // Arguments for calls to SetConstraint that are deferred due to recursive types later []setConstraintArgs @@ -338,13 +401,9 @@ func (p *iimporter) doDecl(pkg *types.Package, name string) { off, ok := p.pkgIndex[pkg][name] if !ok { - // In "shallow" mode, call back to the application to - // find the object and insert it into the package scope. - if p.insert != nil { - assert(pkg != p.localpkg) - p.insert(pkg, name) // "can't fail" - return - } + // In deep mode, the index should be complete. In shallow + // mode, we should have already recursively loaded necessary + // dependencies so the above Lookup succeeds. errorf("%v.%v not in index", pkg, name) } @@ -489,7 +548,7 @@ func (r *importReader) obj(name string) { r.declare(types.NewConst(pos, r.currPkg, name, typ, val)) case 'F', 'G': - var tparams []*typeparams.TypeParam + var tparams []*types.TypeParam if tag == 'G' { tparams = r.tparamList() } @@ -506,7 +565,7 @@ func (r *importReader) obj(name string) { r.declare(obj) if tag == 'U' { tparams := r.tparamList() - typeparams.SetForNamed(named, tparams) + named.SetTypeParams(tparams) } underlying := r.p.typAt(r.uint64(), named).Underlying() @@ -523,12 +582,12 @@ func (r *importReader) obj(name string) { // typeparams being used in the method sig/body). 
base := baseType(recv.Type()) assert(base != nil) - targs := typeparams.NamedTypeArgs(base) - var rparams []*typeparams.TypeParam + targs := base.TypeArgs() + var rparams []*types.TypeParam if targs.Len() > 0 { - rparams = make([]*typeparams.TypeParam, targs.Len()) + rparams = make([]*types.TypeParam, targs.Len()) for i := range rparams { - rparams[i] = targs.At(i).(*typeparams.TypeParam) + rparams[i] = targs.At(i).(*types.TypeParam) } } msig := r.signature(recv, rparams, nil) @@ -546,7 +605,7 @@ func (r *importReader) obj(name string) { } name0 := tparamName(name) tn := types.NewTypeName(pos, r.currPkg, name0, nil) - t := typeparams.NewTypeParam(tn, nil) + t := types.NewTypeParam(tn, nil) // To handle recursive references to the typeparam within its // bound, save the partial type in tparamIndex before reading the bounds. @@ -562,7 +621,7 @@ func (r *importReader) obj(name string) { if iface == nil { errorf("non-interface constraint marked implicit") } - typeparams.MarkImplicit(iface) + iface.MarkImplicit() } // The constraint type may not be complete, if we // are in the middle of a type recursion involving type @@ -711,7 +770,8 @@ func (r *importReader) qualifiedIdent() (*types.Package, string) { } func (r *importReader) pos() token.Pos { - if r.p.insert != nil { // shallow mode + if r.p.shallow { + // precise offsets are encoded only in shallow mode return r.posv2() } if r.p.version >= iexportVersionPosCol { @@ -812,13 +872,28 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { fields := make([]*types.Var, r.uint64()) tags := make([]string, len(fields)) for i := range fields { + var field *types.Var + if r.p.shallow { + field, _ = r.objectPathObject().(*types.Var) + } + fpos := r.pos() fname := r.ident() ftyp := r.typ() emb := r.bool() tag := r.string() - fields[i] = types.NewField(fpos, r.currPkg, fname, ftyp, emb) + // Either this is not a shallow import, the field is local, or the + // encoded objectPath failed to produce an object (a bug). + // + // Even in this last, buggy case, fall back on creating a new field. As + // discussed in iexport.go, this is not correct, but mostly works and is + // preferable to failing (for now at least). + if field == nil { + field = types.NewField(fpos, r.currPkg, fname, ftyp, emb) + } + + fields[i] = field tags[i] = tag } return types.NewStruct(fields, tags) @@ -834,6 +909,11 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { methods := make([]*types.Func, r.uint64()) for i := range methods { + var method *types.Func + if r.p.shallow { + method, _ = r.objectPathObject().(*types.Func) + } + mpos := r.pos() mname := r.ident() @@ -843,9 +923,12 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { if base != nil { recv = types.NewVar(token.NoPos, r.currPkg, "", base) } - msig := r.signature(recv, nil, nil) - methods[i] = types.NewFunc(mpos, r.currPkg, mname, msig) + + if method == nil { + method = types.NewFunc(mpos, r.currPkg, mname, msig) + } + methods[i] = method } typ := newInterface(methods, embeddeds) @@ -882,18 +965,21 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { // The imported instantiated type doesn't include any methods, so // we must always use the methods of the base (orig) type. // TODO provide a non-nil *Environment - t, _ := typeparams.Instantiate(nil, baseType, targs, false) + t, _ := types.Instantiate(nil, baseType, targs, false) + + // Workaround for golang/go#61561. See the doc for instanceList for details. 
+ r.p.instanceList = append(r.p.instanceList, t) return t case unionType: if r.p.version < iexportVersionGenerics { errorf("unexpected instantiation type") } - terms := make([]*typeparams.Term, r.uint64()) + terms := make([]*types.Term, r.uint64()) for i := range terms { - terms[i] = typeparams.NewTerm(r.bool(), r.typ()) + terms[i] = types.NewTerm(r.bool(), r.typ()) } - return typeparams.NewUnion(terms) + return types.NewUnion(terms) } } @@ -901,23 +987,43 @@ func (r *importReader) kind() itag { return itag(r.uint64()) } -func (r *importReader) signature(recv *types.Var, rparams []*typeparams.TypeParam, tparams []*typeparams.TypeParam) *types.Signature { +// objectPathObject is the inverse of exportWriter.objectPath. +// +// In shallow mode, certain fields and methods may need to be looked up in an +// imported package. See the doc for exportWriter.objectPath for a full +// explanation. +func (r *importReader) objectPathObject() types.Object { + objPath := objectpath.Path(r.string()) + if objPath == "" { + return nil + } + pkg := r.pkg() + obj, err := objectpath.Object(pkg, objPath) + if err != nil { + if r.p.reportf != nil { + r.p.reportf("failed to find object for objectPath %q: %v", objPath, err) + } + } + return obj +} + +func (r *importReader) signature(recv *types.Var, rparams []*types.TypeParam, tparams []*types.TypeParam) *types.Signature { params := r.paramList() results := r.paramList() variadic := params.Len() > 0 && r.bool() - return typeparams.NewSignatureType(recv, rparams, tparams, params, results, variadic) + return types.NewSignatureType(recv, rparams, tparams, params, results, variadic) } -func (r *importReader) tparamList() []*typeparams.TypeParam { +func (r *importReader) tparamList() []*types.TypeParam { n := r.uint64() if n == 0 { return nil } - xs := make([]*typeparams.TypeParam, n) + xs := make([]*types.TypeParam, n) for i := range xs { // Note: the standard library importer is tolerant of nil types here, // though would panic in SetTypeParams. - xs[i] = r.typ().(*typeparams.TypeParam) + xs[i] = r.typ().(*types.TypeParam) } return xs } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go index b285a11c..b977435f 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go @@ -10,8 +10,10 @@ package gcimporter import ( + "fmt" "go/token" "go/types" + "sort" "strings" "golang.org/x/tools/internal/pkgbits" @@ -62,6 +64,14 @@ type typeInfo struct { } func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { + if !debug { + defer func() { + if x := recover(); x != nil { + err = fmt.Errorf("internal error in importing %q (%v); please report an issue", path, x) + } + }() + } + s := string(data) s = s[:strings.LastIndex(s, "\n$$\n")] input := pkgbits.NewPkgDecoder(path, s) @@ -121,6 +131,16 @@ func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[st iface.Complete() } + // Imports() of pkg are all of the transitive packages that were loaded. 
+ var imps []*types.Package + for _, imp := range pr.pkgs { + if imp != nil && imp != pkg { + imps = append(imps, imp) + } + } + sort.Sort(byPath(imps)) + pkg.SetImports(imps) + pkg.MarkComplete() return pkg } @@ -260,39 +280,9 @@ func (r *reader) doPkg() *types.Package { pkg := types.NewPackage(path, name) r.p.imports[path] = pkg - imports := make([]*types.Package, r.Len()) - for i := range imports { - imports[i] = r.pkg() - } - pkg.SetImports(flattenImports(imports)) - return pkg } -// flattenImports returns the transitive closure of all imported -// packages rooted from pkgs. -func flattenImports(pkgs []*types.Package) []*types.Package { - var res []*types.Package - seen := make(map[*types.Package]struct{}) - for _, pkg := range pkgs { - if _, ok := seen[pkg]; ok { - continue - } - seen[pkg] = struct{}{} - res = append(res, pkg) - - // pkg.Imports() is already flattened. - for _, pkg := range pkg.Imports() { - if _, ok := seen[pkg]; ok { - continue - } - seen[pkg] = struct{}{} - res = append(res, pkg) - } - } - return res -} - // @@@ Types func (r *reader) typ() types.Type { diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go index d5055169..55312522 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -8,10 +8,13 @@ package gocommand import ( "bytes" "context" + "errors" "fmt" "io" "log" "os" + "os/exec" + "reflect" "regexp" "runtime" "strconv" @@ -19,9 +22,10 @@ import ( "sync" "time" - exec "golang.org/x/sys/execabs" - "golang.org/x/tools/internal/event" + "golang.org/x/tools/internal/event/keys" + "golang.org/x/tools/internal/event/label" + "golang.org/x/tools/internal/event/tag" ) // An Runner will run go command invocations and serialize @@ -51,9 +55,19 @@ func (runner *Runner) initialize() { // 1.14: go: updating go.mod: existing contents have changed since last read var modConcurrencyError = regexp.MustCompile(`go:.*go.mod.*contents have changed`) +// verb is an event label for the go command verb. +var verb = keys.NewString("verb", "go command verb") + +func invLabels(inv Invocation) []label.Label { + return []label.Label{verb.Of(inv.Verb), tag.Directory.Of(inv.WorkingDir)} +} + // Run is a convenience wrapper around RunRaw. // It returns only stdout and a "friendly" error. func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, error) { + ctx, done := event.Start(ctx, "gocommand.Runner.Run", invLabels(inv)...) + defer done() + stdout, _, friendly, _ := runner.RunRaw(ctx, inv) return stdout, friendly } @@ -61,13 +75,19 @@ func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, e // RunPiped runs the invocation serially, always waiting for any concurrent // invocations to complete first. func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) error { + ctx, done := event.Start(ctx, "gocommand.Runner.RunPiped", invLabels(inv)...) + defer done() + _, err := runner.runPiped(ctx, inv, stdout, stderr) return err } // RunRaw runs the invocation, serializing requests only if they fight over // go.mod changes. +// Postcondition: both error results have same nilness. func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) { + ctx, done := event.Start(ctx, "gocommand.Runner.RunRaw", invLabels(inv)...) + defer done() // Make sure the runner is always initialized. 
runner.initialize() @@ -75,23 +95,24 @@ func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer stdout, stderr, friendlyErr, err := runner.runConcurrent(ctx, inv) // If we encounter a load concurrency error, we need to retry serially. - if friendlyErr == nil || !modConcurrencyError.MatchString(friendlyErr.Error()) { - return stdout, stderr, friendlyErr, err + if friendlyErr != nil && modConcurrencyError.MatchString(friendlyErr.Error()) { + event.Error(ctx, "Load concurrency error, will retry serially", err) + + // Run serially by calling runPiped. + stdout.Reset() + stderr.Reset() + friendlyErr, err = runner.runPiped(ctx, inv, stdout, stderr) } - event.Error(ctx, "Load concurrency error, will retry serially", err) - // Run serially by calling runPiped. - stdout.Reset() - stderr.Reset() - friendlyErr, err = runner.runPiped(ctx, inv, stdout, stderr) return stdout, stderr, friendlyErr, err } +// Postcondition: both error results have same nilness. func (runner *Runner) runConcurrent(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) { // Wait for 1 worker to become available. select { case <-ctx.Done(): - return nil, nil, nil, ctx.Err() + return nil, nil, ctx.Err(), ctx.Err() case runner.inFlight <- struct{}{}: defer func() { <-runner.inFlight }() } @@ -101,6 +122,7 @@ func (runner *Runner) runConcurrent(ctx context.Context, inv Invocation) (*bytes return stdout, stderr, friendlyErr, err } +// Postcondition: both error results have same nilness. func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) (error, error) { // Make sure the runner is always initialized. runner.initialize() @@ -109,7 +131,7 @@ func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stde // runPiped commands. select { case <-ctx.Done(): - return nil, ctx.Err() + return ctx.Err(), ctx.Err() case runner.serialized <- struct{}{}: defer func() { <-runner.serialized }() } @@ -119,7 +141,7 @@ func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stde for i := 0; i < maxInFlight; i++ { select { case <-ctx.Done(): - return nil, ctx.Err() + return ctx.Err(), ctx.Err() case runner.inFlight <- struct{}{}: // Make sure we always "return" any workers we took. defer func() { <-runner.inFlight }() @@ -152,6 +174,7 @@ type Invocation struct { Logf func(format string, args ...interface{}) } +// Postcondition: both error results have same nilness. func (i *Invocation) runWithFriendlyError(ctx context.Context, stdout, stderr io.Writer) (friendlyError error, rawError error) { rawError = i.run(ctx, stdout, stderr) if rawError != nil { @@ -215,6 +238,18 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { cmd := exec.Command("go", goArgs...) cmd.Stdout = stdout cmd.Stderr = stderr + + // cmd.WaitDelay was added only in go1.20 (see #50436). + if waitDelay := reflect.ValueOf(cmd).Elem().FieldByName("WaitDelay"); waitDelay.IsValid() { + // https://go.dev/issue/59541: don't wait forever copying stderr + // after the command has exited. + // After CL 484741 we copy stdout manually, so we'll stop reading that as + // soon as ctx is done. However, we also don't want to wait around forever + // for stderr. Give a much-longer-than-reasonable delay and then assume that + // something has wedged in the kernel or runtime. 
+ waitDelay.Set(reflect.ValueOf(30 * time.Second)) + } + // On darwin the cwd gets resolved to the real path, which breaks anything that // expects the working directory to keep the original path, including the // go command when dealing with modules. @@ -229,6 +264,7 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { cmd.Env = append(cmd.Env, "PWD="+i.WorkingDir) cmd.Dir = i.WorkingDir } + defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now()) return runCmdContext(ctx, cmd) @@ -242,10 +278,85 @@ var DebugHangingGoCommands = false // runCmdContext is like exec.CommandContext except it sends os.Interrupt // before os.Kill. -func runCmdContext(ctx context.Context, cmd *exec.Cmd) error { - if err := cmd.Start(); err != nil { +func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) { + // If cmd.Stdout is not an *os.File, the exec package will create a pipe and + // copy it to the Writer in a goroutine until the process has finished and + // either the pipe reaches EOF or command's WaitDelay expires. + // + // However, the output from 'go list' can be quite large, and we don't want to + // keep reading (and allocating buffers) if we've already decided we don't + // care about the output. We don't want to wait for the process to finish, and + // we don't want to wait for the WaitDelay to expire either. + // + // Instead, if cmd.Stdout requires a copying goroutine we explicitly replace + // it with a pipe (which is an *os.File), which we can close in order to stop + // copying output as soon as we realize we don't care about it. + var stdoutW *os.File + if cmd.Stdout != nil { + if _, ok := cmd.Stdout.(*os.File); !ok { + var stdoutR *os.File + stdoutR, stdoutW, err = os.Pipe() + if err != nil { + return err + } + prevStdout := cmd.Stdout + cmd.Stdout = stdoutW + + stdoutErr := make(chan error, 1) + go func() { + _, err := io.Copy(prevStdout, stdoutR) + if err != nil { + err = fmt.Errorf("copying stdout: %w", err) + } + stdoutErr <- err + }() + defer func() { + // We started a goroutine to copy a stdout pipe. + // Wait for it to finish, or terminate it if need be. + var err2 error + select { + case err2 = <-stdoutErr: + stdoutR.Close() + case <-ctx.Done(): + stdoutR.Close() + // Per https://pkg.go.dev/os#File.Close, the call to stdoutR.Close + // should cause the Read call in io.Copy to unblock and return + // immediately, but we still need to receive from stdoutErr to confirm + // that it has happened. + <-stdoutErr + err2 = ctx.Err() + } + if err == nil { + err = err2 + } + }() + + // Per https://pkg.go.dev/os/exec#Cmd, “If Stdout and Stderr are the + // same writer, and have a type that can be compared with ==, at most + // one goroutine at a time will call Write.” + // + // Since we're starting a goroutine that writes to cmd.Stdout, we must + // also update cmd.Stderr so that it still holds. + func() { + defer func() { recover() }() + if cmd.Stderr == prevStdout { + cmd.Stderr = cmd.Stdout + } + }() + } + } + + err = cmd.Start() + if stdoutW != nil { + // The child process has inherited the pipe file, + // so close the copy held in this process. + stdoutW.Close() + stdoutW = nil + } + if err != nil { return err } + resChan := make(chan error, 1) go func() { resChan <- cmd.Wait() @@ -253,11 +364,14 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) error { // If we're interested in debugging hanging Go commands, stop waiting after a // minute and panic with interesting information. 
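The runCmdContext rewrite above swaps a non-*os.File Stdout for an os.Pipe so that copying can be abandoned as soon as the caller loses interest. A stripped-down sketch of the substitution, with error handling reduced to panics:

    package main

    import (
    	"bytes"
    	"io"
    	"os"
    	"os/exec"
    )

    func main() {
    	var buf bytes.Buffer
    	cmd := exec.Command("go", "version")

    	// buf is not an *os.File, so os/exec would copy through an internal
    	// goroutine we cannot cancel; substitute a pipe we control instead.
    	r, w, err := os.Pipe()
    	if err != nil {
    		panic(err)
    	}
    	cmd.Stdout = w

    	done := make(chan error, 1)
    	go func() {
    		_, err := io.Copy(&buf, r) // closing r would abort this copy early
    		done <- err
    	}()

    	if err := cmd.Start(); err != nil {
    		panic(err)
    	}
    	w.Close() // the child holds its own inherited write end
    	_ = cmd.Wait()
    	<-done // io.Copy sees EOF once the child's stdout is closed
    	r.Close()
    }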
- if DebugHangingGoCommands { + debug := DebugHangingGoCommands + if debug { + timer := time.NewTimer(1 * time.Minute) + defer timer.Stop() select { case err := <-resChan: return err - case <-time.After(1 * time.Minute): + case <-timer.C: HandleHangingGoCommand(cmd.Process) case <-ctx.Done(): } @@ -270,30 +384,25 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) error { } // Cancelled. Interrupt and see if it ends voluntarily. - cmd.Process.Signal(os.Interrupt) - select { - case err := <-resChan: - return err - case <-time.After(time.Second): + if err := cmd.Process.Signal(os.Interrupt); err == nil { + // (We used to wait only 1s but this proved + // fragile on loaded builder machines.) + timer := time.NewTimer(5 * time.Second) + defer timer.Stop() + select { + case err := <-resChan: + return err + case <-timer.C: + } } // Didn't shut down in response to interrupt. Kill it hard. // TODO(rfindley): per advice from bcmills@, it may be better to send SIGQUIT // on certain platforms, such as unix. - if err := cmd.Process.Kill(); err != nil && DebugHangingGoCommands { - // Don't panic here as this reliably fails on windows with EINVAL. + if err := cmd.Process.Kill(); err != nil && !errors.Is(err, os.ErrProcessDone) && debug { log.Printf("error killing the Go command: %v", err) } - // See above: don't wait indefinitely if we're debugging hanging Go commands. - if DebugHangingGoCommands { - select { - case err := <-resChan: - return err - case <-time.After(10 * time.Second): // a shorter wait as resChan should return quickly following Kill - HandleHangingGoCommand(cmd.Process) - } - } return <-resChan } diff --git a/vendor/golang.org/x/tools/internal/gocommand/version.go b/vendor/golang.org/x/tools/internal/gocommand/version.go index 307a76d4..446c5846 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/version.go +++ b/vendor/golang.org/x/tools/internal/gocommand/version.go @@ -23,21 +23,11 @@ import ( func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) { inv.Verb = "list" inv.Args = []string{"-e", "-f", `{{context.ReleaseTags}}`, `--`, `unsafe`} - inv.Env = append(append([]string{}, inv.Env...), "GO111MODULE=off") - // Unset any unneeded flags, and remove them from BuildFlags, if they're - // present. - inv.ModFile = "" + inv.BuildFlags = nil // This is not a build command. inv.ModFlag = "" - var buildFlags []string - for _, flag := range inv.BuildFlags { - // Flags can be prefixed by one or two dashes. - f := strings.TrimPrefix(strings.TrimPrefix(flag, "-"), "-") - if strings.HasPrefix(f, "mod=") || strings.HasPrefix(f, "modfile=") { - continue - } - buildFlags = append(buildFlags, flag) - } - inv.BuildFlags = buildFlags + inv.ModFile = "" + inv.Env = append(inv.Env[:len(inv.Env):len(inv.Env)], "GO111MODULE=off") + stdoutBytes, err := r.Run(ctx, inv) if err != nil { return 0, err diff --git a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go index 16840532..52f74e64 100644 --- a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go +++ b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go @@ -9,15 +9,12 @@ package gopathwalk import ( "bufio" "bytes" - "fmt" - "io/ioutil" + "io/fs" "log" "os" "path/filepath" "strings" "time" - - "golang.org/x/tools/internal/fastwalk" ) // Options controls the behavior of a Walk call. @@ -47,21 +44,18 @@ type Root struct { } // Walk walks Go source directories ($GOROOT, $GOPATH, etc) to find packages. 
-// For each package found, add will be called (concurrently) with the absolute +// For each package found, add will be called with the absolute // paths of the containing source directory and the package directory. -// add will be called concurrently. func Walk(roots []Root, add func(root Root, dir string), opts Options) { WalkSkip(roots, add, func(Root, string) bool { return false }, opts) } // WalkSkip walks Go source directories ($GOROOT, $GOPATH, etc) to find packages. -// For each package found, add will be called (concurrently) with the absolute +// For each package found, add will be called with the absolute // paths of the containing source directory and the package directory. -// For each directory that will be scanned, skip will be called (concurrently) +// For each directory that will be scanned, skip will be called // with the absolute paths of the containing source directory and the directory. // If skip returns false on a directory it will be processed. -// add will be called concurrently. -// skip will be called concurrently. func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root, dir string) bool, opts Options) { for _, root := range roots { walkDir(root, add, skip, opts) @@ -78,21 +72,36 @@ func walkDir(root Root, add func(Root, string), skip func(root Root, dir string) } start := time.Now() if opts.Logf != nil { - opts.Logf("gopathwalk: scanning %s", root.Path) + opts.Logf("scanning %s", root.Path) } + w := &walker{ - root: root, - add: add, - skip: skip, - opts: opts, + root: root, + add: add, + skip: skip, + opts: opts, + added: make(map[string]bool), } w.init() - if err := fastwalk.Walk(root.Path, w.walk); err != nil { - log.Printf("gopathwalk: scanning directory %v: %v", root.Path, err) + + // Add a trailing path separator to cause filepath.WalkDir to traverse symlinks. + path := root.Path + if len(path) == 0 { + path = "." + string(filepath.Separator) + } else if !os.IsPathSeparator(path[len(path)-1]) { + path = path + string(filepath.Separator) + } + + if err := filepath.WalkDir(path, w.walk); err != nil { + logf := opts.Logf + if logf == nil { + logf = log.Printf + } + logf("scanning directory %v: %v", root.Path, err) } if opts.Logf != nil { - opts.Logf("gopathwalk: scanned %s in %v", root.Path, time.Since(start)) + opts.Logf("scanned %s in %v", root.Path, time.Since(start)) } } @@ -103,7 +112,10 @@ type walker struct { skip func(Root, string) bool // The callback that will be invoked for every dir. dir is skipped if it returns true. opts Options // Options passed to Walk by the user. - ignoredDirs []os.FileInfo // The ignored directories, loaded from .goimportsignore files. + pathSymlinks []os.FileInfo + ignoredDirs []string + + added map[string]bool } // init initializes the walker based on its Options @@ -119,13 +131,9 @@ func (w *walker) init() { for _, p := range ignoredPaths { full := filepath.Join(w.root.Path, p) - if fi, err := os.Stat(full); err == nil { - w.ignoredDirs = append(w.ignoredDirs, fi) - if w.opts.Logf != nil { - w.opts.Logf("Directory added to ignore list: %s", full) - } - } else if w.opts.Logf != nil { - w.opts.Logf("Error statting ignored directory: %v", err) + w.ignoredDirs = append(w.ignoredDirs, full) + if w.opts.Logf != nil { + w.opts.Logf("Directory added to ignore list: %s", full) } } } @@ -135,7 +143,7 @@ func (w *walker) init() { // The provided path is one of the $GOPATH entries with "src" appended. 
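walkDir above appends a trailing separator so that filepath.WalkDir resolves a symlinked root as a directory rather than stopping at the link itself. The same trick in isolation; using GOPATH as the root is only an example:

    package main

    import (
    	"fmt"
    	"io/fs"
    	"os"
    	"path/filepath"
    )

    func main() {
    	root := os.Getenv("GOPATH") // assumption: any root that might be a symlink
    	if root != "" && !os.IsPathSeparator(root[len(root)-1]) {
    		// "dir/" resolves as a directory even when "dir" is a symlink.
    		root += string(filepath.Separator)
    	}
    	_ = filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
    		if err != nil {
    			return nil // log-and-continue, as the walker above does
    		}
    		fmt.Println(path)
    		return fs.SkipDir // demo only: stop after the root directory
    	})
    }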
func (w *walker) getIgnoredDirs(path string) []string { file := filepath.Join(path, ".goimportsignore") - slurp, err := ioutil.ReadFile(file) + slurp, err := os.ReadFile(file) if w.opts.Logf != nil { if err != nil { w.opts.Logf("%v", err) @@ -160,9 +168,9 @@ func (w *walker) getIgnoredDirs(path string) []string { } // shouldSkipDir reports whether the file should be skipped or not. -func (w *walker) shouldSkipDir(fi os.FileInfo, dir string) bool { +func (w *walker) shouldSkipDir(dir string) bool { for _, ignoredDir := range w.ignoredDirs { - if os.SameFile(fi, ignoredDir) { + if dir == ignoredDir { return true } } @@ -174,81 +182,150 @@ func (w *walker) shouldSkipDir(fi os.FileInfo, dir string) bool { } // walk walks through the given path. -func (w *walker) walk(path string, typ os.FileMode) error { - if typ.IsRegular() { +// +// Errors are logged if w.opts.Logf is non-nil, but otherwise ignored: +// walk returns only nil or fs.SkipDir. +func (w *walker) walk(path string, d fs.DirEntry, err error) error { + if err != nil { + // We have no way to report errors back through Walk or WalkSkip, + // so just log and ignore them. + if w.opts.Logf != nil { + w.opts.Logf("%v", err) + } + if d == nil { + // Nothing more to do: the error prevents us from knowing + // what path even represents. + return nil + } + } + + if d.Type().IsRegular() { + if !strings.HasSuffix(path, ".go") { + return nil + } + dir := filepath.Dir(path) if dir == w.root.Path && (w.root.Type == RootGOROOT || w.root.Type == RootGOPATH) { // Doesn't make sense to have regular files // directly in your $GOPATH/src or $GOROOT/src. - return fastwalk.ErrSkipFiles - } - if !strings.HasSuffix(path, ".go") { return nil } - w.add(w.root, dir) - return fastwalk.ErrSkipFiles + if !w.added[dir] { + w.add(w.root, dir) + w.added[dir] = true + } + return nil } - if typ == os.ModeDir { + + if d.IsDir() { base := filepath.Base(path) if base == "" || base[0] == '.' || base[0] == '_' || base == "testdata" || (w.root.Type == RootGOROOT && w.opts.ModulesEnabled && base == "vendor") || (!w.opts.ModulesEnabled && base == "node_modules") { - return filepath.SkipDir + return fs.SkipDir } - fi, err := os.Lstat(path) - if err == nil && w.shouldSkipDir(fi, path) { - return filepath.SkipDir + if w.shouldSkipDir(path) { + return fs.SkipDir } return nil } - if typ == os.ModeSymlink { - base := filepath.Base(path) - if strings.HasPrefix(base, ".#") { - // Emacs noise. - return nil + + if d.Type()&os.ModeSymlink != 0 { + // TODO(bcmills): 'go list all' itself ignores symlinks within GOROOT/src + // and GOPATH/src. Do we really need to traverse them here? If so, why? + + fi, err := os.Stat(path) + if err != nil || !fi.IsDir() { + // Not a directory. Just walk the file (or broken link) and be done. + return w.walk(path, fs.FileInfoToDirEntry(fi), err) } - if w.shouldTraverse(path) { - return fastwalk.ErrTraverseLink + + // Avoid walking symlink cycles: if we have already followed a symlink to + // this directory as a parent of itself, don't follow it again. + // + // This doesn't catch the first time through a cycle, but it also minimizes + // the number of extra stat calls we make if we *don't* encounter a cycle. + // Since we don't actually expect to encounter symlink cycles in practice, + // this seems like the right tradeoff. + for _, parent := range w.pathSymlinks { + if os.SameFile(fi, parent) { + return nil + } } - } - return nil -} -// shouldTraverse reports whether the symlink fi, found in dir, -// should be followed. 
It makes sure symlinks were never visited -// before to avoid symlink loops. -func (w *walker) shouldTraverse(path string) bool { - ts, err := os.Stat(path) - if err != nil { - fmt.Fprintln(os.Stderr, err) - return false - } - if !ts.IsDir() { - return false - } - if w.shouldSkipDir(ts, filepath.Dir(path)) { - return false - } - // Check for symlink loops by statting each directory component - // and seeing if any are the same file as ts. - for { - parent := filepath.Dir(path) - if parent == path { - // Made it to the root without seeing a cycle. - // Use this symlink. - return true + w.pathSymlinks = append(w.pathSymlinks, fi) + defer func() { + w.pathSymlinks = w.pathSymlinks[:len(w.pathSymlinks)-1] + }() + + // On some platforms the OS (or the Go os package) sometimes fails to + // resolve directory symlinks before a trailing slash + // (even though POSIX requires it to do so). + // + // On macOS that failure may be caused by a known libc/kernel bug; + // see https://go.dev/issue/59586. + // + // On Windows before Go 1.21, it may be caused by a bug in + // os.Lstat (fixed in https://go.dev/cl/463177). + // + // Since we need to handle this explicitly on broken platforms anyway, + // it is simplest to just always do that and not rely on POSIX pathname + // resolution to walk the directory (such as by calling WalkDir with + // a trailing slash appended to the path). + // + // Instead, we make a sequence of walk calls — directly and through + // recursive calls to filepath.WalkDir — simulating what WalkDir would do + // if the symlink were a regular directory. + + // First we call walk on the path as a directory + // (instead of a symlink). + err = w.walk(path, fs.FileInfoToDirEntry(fi), nil) + if err == fs.SkipDir { + return nil + } else if err != nil { + // This should be impossible, but handle it anyway in case + // walk is changed to return other errors. + return err } - parentInfo, err := os.Stat(parent) + + // Now read the directory and walk its entries. + ents, err := os.ReadDir(path) if err != nil { - return false + // Report the ReadDir error, as filepath.WalkDir would do. + err = w.walk(path, fs.FileInfoToDirEntry(fi), err) + if err == fs.SkipDir { + return nil + } else if err != nil { + return err // Again, should be impossible. + } + // Fall through and iterate over whatever entries we did manage to get. } - if os.SameFile(ts, parentInfo) { - // Cycle. Don't traverse. - return false + + for _, d := range ents { + nextPath := filepath.Join(path, d.Name()) + if d.IsDir() { + // We want to walk the whole directory tree rooted at nextPath, + // not just the single entry for the directory. + err := filepath.WalkDir(nextPath, w.walk) + if err != nil && w.opts.Logf != nil { + w.opts.Logf("%v", err) + } + } else { + err := w.walk(nextPath, d, nil) + if err == fs.SkipDir { + // Skip the rest of the entries in the parent directory of nextPath + // (that is, path itself). + break + } else if err != nil { + return err // Again, should be impossible. + } + } } - path = parent + return nil } + // Not a file, regular directory, or symlink; skip. 
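The symlink handling above guards against cycles by remembering the os.FileInfo of every symlinked directory on the current path and comparing with os.SameFile. A minimal sketch of that bookkeeping; symlinkStack mirrors walker.pathSymlinks but is otherwise invented here:

    package main

    import (
    	"fmt"
    	"os"
    )

    // symlinkStack tracks the targets of symlinked directories on the current
    // walk path, mirroring walker.pathSymlinks above.
    type symlinkStack []os.FileInfo

    // push reports whether fi may be entered; it refuses when fi is the same
    // file as an ancestor already on the stack, which indicates a cycle.
    func (s *symlinkStack) push(fi os.FileInfo) bool {
    	for _, parent := range *s {
    		if os.SameFile(fi, parent) {
    			return false
    		}
    	}
    	*s = append(*s, fi)
    	return true
    }

    // pop is deferred by the caller after a successful push.
    func (s *symlinkStack) pop() { *s = (*s)[:len(*s)-1] }

    func main() {
    	var s symlinkStack
    	fi, err := os.Stat(".")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(s.push(fi)) // true: first time through this directory
    	fi2, _ := os.Stat(".")
    	fmt.Println(s.push(fi2)) // false: same file again, treat as a cycle
    	s.pop()
    }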
+ return nil } diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go index 642a5ac2..dd369c07 100644 --- a/vendor/golang.org/x/tools/internal/imports/fix.go +++ b/vendor/golang.org/x/tools/internal/imports/fix.go @@ -13,6 +13,7 @@ import ( "go/build" "go/parser" "go/token" + "io/fs" "io/ioutil" "os" "path" @@ -26,6 +27,7 @@ import ( "unicode/utf8" "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/internal/event" "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/gopathwalk" ) @@ -106,7 +108,7 @@ func parseOtherFiles(fset *token.FileSet, srcDir, filename string) []*ast.File { considerTests := strings.HasSuffix(filename, "_test.go") fileBase := filepath.Base(filename) - packageFileInfos, err := ioutil.ReadDir(srcDir) + packageFileInfos, err := os.ReadDir(srcDir) if err != nil { return nil } @@ -252,7 +254,7 @@ type pass struct { otherFiles []*ast.File // sibling files. // Intermediate state, generated by load. - existingImports map[string]*ImportInfo + existingImports map[string][]*ImportInfo allRefs references missingRefs references @@ -317,7 +319,7 @@ func (p *pass) importIdentifier(imp *ImportInfo) string { func (p *pass) load() ([]*ImportFix, bool) { p.knownPackages = map[string]*packageInfo{} p.missingRefs = references{} - p.existingImports = map[string]*ImportInfo{} + p.existingImports = map[string][]*ImportInfo{} // Load basic information about the file in question. p.allRefs = collectReferences(p.f) @@ -348,7 +350,7 @@ func (p *pass) load() ([]*ImportFix, bool) { } } for _, imp := range imports { - p.existingImports[p.importIdentifier(imp)] = imp + p.existingImports[p.importIdentifier(imp)] = append(p.existingImports[p.importIdentifier(imp)], imp) } // Find missing references. @@ -387,36 +389,45 @@ func (p *pass) fix() ([]*ImportFix, bool) { // Found everything, or giving up. Add the new imports and remove any unused. var fixes []*ImportFix - for _, imp := range p.existingImports { - // We deliberately ignore globals here, because we can't be sure - // they're in the same package. People do things like put multiple - // main packages in the same directory, and we don't want to - // remove imports if they happen to have the same name as a var in - // a different package. - if _, ok := p.allRefs[p.importIdentifier(imp)]; !ok { - fixes = append(fixes, &ImportFix{ - StmtInfo: *imp, - IdentName: p.importIdentifier(imp), - FixType: DeleteImport, - }) - continue - } + for _, identifierImports := range p.existingImports { + for _, imp := range identifierImports { + // We deliberately ignore globals here, because we can't be sure + // they're in the same package. People do things like put multiple + // main packages in the same directory, and we don't want to + // remove imports if they happen to have the same name as a var in + // a different package. + if _, ok := p.allRefs[p.importIdentifier(imp)]; !ok { + fixes = append(fixes, &ImportFix{ + StmtInfo: *imp, + IdentName: p.importIdentifier(imp), + FixType: DeleteImport, + }) + continue + } - // An existing import may need to update its import name to be correct. - if name := p.importSpecName(imp); name != imp.Name { - fixes = append(fixes, &ImportFix{ - StmtInfo: ImportInfo{ - Name: name, - ImportPath: imp.ImportPath, - }, - IdentName: p.importIdentifier(imp), - FixType: SetImportName, - }) + // An existing import may need to update its import name to be correct. 
+ if name := p.importSpecName(imp); name != imp.Name { + fixes = append(fixes, &ImportFix{ + StmtInfo: ImportInfo{ + Name: name, + ImportPath: imp.ImportPath, + }, + IdentName: p.importIdentifier(imp), + FixType: SetImportName, + }) + } } } + // Collecting fixes involved map iteration, so sort for stability. See + // golang/go#59976. + sortFixes(fixes) + // collect selected fixes in a separate slice, so that it can be sorted + // separately. Note that these fixes must occur after fixes to existing + // imports. TODO(rfindley): figure out why. + var selectedFixes []*ImportFix for _, imp := range selected { - fixes = append(fixes, &ImportFix{ + selectedFixes = append(selectedFixes, &ImportFix{ StmtInfo: ImportInfo{ Name: p.importSpecName(imp), ImportPath: imp.ImportPath, @@ -425,8 +436,25 @@ func (p *pass) fix() ([]*ImportFix, bool) { FixType: AddImport, }) } + sortFixes(selectedFixes) - return fixes, true + return append(fixes, selectedFixes...), true +} + +func sortFixes(fixes []*ImportFix) { + sort.Slice(fixes, func(i, j int) bool { + fi, fj := fixes[i], fixes[j] + if fi.StmtInfo.ImportPath != fj.StmtInfo.ImportPath { + return fi.StmtInfo.ImportPath < fj.StmtInfo.ImportPath + } + if fi.StmtInfo.Name != fj.StmtInfo.Name { + return fi.StmtInfo.Name < fj.StmtInfo.Name + } + if fi.IdentName != fj.IdentName { + return fi.IdentName < fj.IdentName + } + return fi.FixType < fj.FixType + }) } // importSpecName gets the import name of imp in the import spec. @@ -519,7 +547,7 @@ func (p *pass) addCandidate(imp *ImportInfo, pkg *packageInfo) { var fixImports = fixImportsDefault func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) error { - fixes, err := getFixes(fset, f, filename, env) + fixes, err := getFixes(context.Background(), fset, f, filename, env) if err != nil { return err } @@ -529,7 +557,7 @@ func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *P // getFixes gets the import fixes that need to be made to f in order to fix the imports. // It does not modify the ast. -func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) ([]*ImportFix, error) { +func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) ([]*ImportFix, error) { abs, err := filepath.Abs(filename) if err != nil { return nil, err @@ -583,7 +611,7 @@ func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv // Go look for candidates in $GOPATH, etc. We don't necessarily load // the real exports of sibling imports, so keep assuming their contents. - if err := addExternalCandidates(p, p.missingRefs, filename); err != nil { + if err := addExternalCandidates(ctx, p, p.missingRefs, filename); err != nil { return nil, err } @@ -1031,7 +1059,10 @@ type scanCallback struct { exportsLoaded func(pkg *pkg, exports []string) } -func addExternalCandidates(pass *pass, refs references, filename string) error { +func addExternalCandidates(ctx context.Context, pass *pass, refs references, filename string) error { + ctx, done := event.Start(ctx, "imports.addExternalCandidates") + defer done() + var mu sync.Mutex found := make(map[string][]pkgDistance) callback := &scanCallback{ @@ -1441,11 +1472,11 @@ func VendorlessPath(ipath string) string { func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, includeTest bool) (string, []string, error) { // Look for non-test, buildable .go files which could provide exports. 
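Because the fixes above are gathered by iterating maps, sortFixes imposes a total order to keep output reproducible (see golang/go#59976). The same idea in miniature:

    package main

    import (
    	"fmt"
    	"sort"
    )

    func main() {
    	m := map[string]int{"b": 2, "a": 1, "c": 3}

    	// Map iteration order is randomized, so collect and sort the keys
    	// before emitting anything a test or user might diff.
    	keys := make([]string, 0, len(m))
    	for k := range m {
    		keys = append(keys, k)
    	}
    	sort.Strings(keys)
    	for _, k := range keys {
    		fmt.Println(k, m[k])
    	}
    }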
- all, err := ioutil.ReadDir(dir) + all, err := os.ReadDir(dir) if err != nil { return "", nil, err } - var files []os.FileInfo + var files []fs.DirEntry for _, fi := range all { name := fi.Name() if !strings.HasSuffix(name, ".go") || (!includeTest && strings.HasSuffix(name, "_test.go")) { diff --git a/vendor/golang.org/x/tools/internal/imports/imports.go b/vendor/golang.org/x/tools/internal/imports/imports.go index 95a88383..58e637b9 100644 --- a/vendor/golang.org/x/tools/internal/imports/imports.go +++ b/vendor/golang.org/x/tools/internal/imports/imports.go @@ -11,6 +11,7 @@ package imports import ( "bufio" "bytes" + "context" "fmt" "go/ast" "go/format" @@ -23,6 +24,7 @@ import ( "strings" "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/internal/event" ) // Options is golang.org/x/tools/imports.Options with extra internal-only options. @@ -66,14 +68,17 @@ func Process(filename string, src []byte, opt *Options) (formatted []byte, err e // // Note that filename's directory influences which imports can be chosen, // so it is important that filename be accurate. -func FixImports(filename string, src []byte, opt *Options) (fixes []*ImportFix, err error) { +func FixImports(ctx context.Context, filename string, src []byte, opt *Options) (fixes []*ImportFix, err error) { + ctx, done := event.Start(ctx, "imports.FixImports") + defer done() + fileSet := token.NewFileSet() file, _, err := parse(fileSet, filename, src, opt) if err != nil { return nil, err } - return getFixes(fileSet, file, filename, opt.Env) + return getFixes(ctx, fileSet, file, filename, opt.Env) } // ApplyFixes applies all of the fixes to the file and formats it. extraMode diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/internal/imports/mod.go index 7d99d04c..5f4d435d 100644 --- a/vendor/golang.org/x/tools/internal/imports/mod.go +++ b/vendor/golang.org/x/tools/internal/imports/mod.go @@ -9,7 +9,6 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" "os" "path" "path/filepath" @@ -19,6 +18,7 @@ import ( "strings" "golang.org/x/mod/module" + "golang.org/x/tools/internal/event" "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/gopathwalk" ) @@ -37,7 +37,7 @@ type ModuleResolver struct { mains []*gocommand.ModuleJSON mainByDir map[string]*gocommand.ModuleJSON modsByModPath []*gocommand.ModuleJSON // All modules, ordered by # of path components in module Path... - modsByDir []*gocommand.ModuleJSON // ...or Dir. + modsByDir []*gocommand.ModuleJSON // ...or number of path components in their Dir. // moduleCacheCache stores information about the module cache. moduleCacheCache *dirInfoCache @@ -123,7 +123,7 @@ func (r *ModuleResolver) init() error { }) sort.Slice(r.modsByDir, func(i, j int) bool { count := func(x int) int { - return strings.Count(r.modsByDir[x].Dir, "/") + return strings.Count(r.modsByDir[x].Dir, string(filepath.Separator)) } return count(j) < count(i) // descending order }) @@ -264,7 +264,7 @@ func (r *ModuleResolver) findPackage(importPath string) (*gocommand.ModuleJSON, } // Not cached. Read the filesystem. - pkgFiles, err := ioutil.ReadDir(pkgDir) + pkgFiles, err := os.ReadDir(pkgDir) if err != nil { continue } @@ -327,6 +327,10 @@ func (r *ModuleResolver) findModuleByDir(dir string) *gocommand.ModuleJSON { // - in /vendor/ in -mod=vendor mode. // - nested module? Dunno. // Rumor has it that replace targets cannot contain other replace targets. + // + // Note that it is critical here that modsByDir is sorted to have deeper dirs + // first. 
This ensures that findModuleByDir finds the innermost module. + // See also golang/go#56291. for _, m := range r.modsByDir { if !strings.HasPrefix(dir, m.Dir) { continue @@ -365,7 +369,7 @@ func (r *ModuleResolver) dirIsNestedModule(dir string, mod *gocommand.ModuleJSON func (r *ModuleResolver) modInfo(dir string) (modDir string, modName string) { readModName := func(modFile string) string { - modBytes, err := ioutil.ReadFile(modFile) + modBytes, err := os.ReadFile(modFile) if err != nil { return "" } @@ -424,6 +428,9 @@ func (r *ModuleResolver) loadPackageNames(importPaths []string, srcDir string) ( } func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error { + ctx, done := event.Start(ctx, "imports.ModuleResolver.scan") + defer done() + if err := r.init(); err != nil { return err } diff --git a/vendor/golang.org/x/tools/internal/imports/mod_cache.go b/vendor/golang.org/x/tools/internal/imports/mod_cache.go index 18dada49..45690abb 100644 --- a/vendor/golang.org/x/tools/internal/imports/mod_cache.go +++ b/vendor/golang.org/x/tools/internal/imports/mod_cache.go @@ -12,7 +12,7 @@ import ( "golang.org/x/tools/internal/gopathwalk" ) -// To find packages to import, the resolver needs to know about all of the +// To find packages to import, the resolver needs to know about all of // the packages that could be imported. This includes packages that are // already in modules that are in (1) the current module, (2) replace targets, // and (3) packages in the module cache. Packages in (1) and (2) may change over diff --git a/vendor/golang.org/x/tools/internal/imports/zstdlib.go b/vendor/golang.org/x/tools/internal/imports/zstdlib.go index 31a75949..9f992c2b 100644 --- a/vendor/golang.org/x/tools/internal/imports/zstdlib.go +++ b/vendor/golang.org/x/tools/internal/imports/zstdlib.go @@ -93,6 +93,7 @@ var stdlib = map[string][]string{ "Compare", "Contains", "ContainsAny", + "ContainsFunc", "ContainsRune", "Count", "Cut", @@ -147,6 +148,11 @@ var stdlib = map[string][]string{ "TrimSpace", "TrimSuffix", }, + "cmp": { + "Compare", + "Less", + "Ordered", + }, "compress/bzip2": { "NewReader", "StructuralError", @@ -228,6 +234,7 @@ var stdlib = map[string][]string{ "Ring", }, "context": { + "AfterFunc", "Background", "CancelCauseFunc", "CancelFunc", @@ -239,8 +246,11 @@ var stdlib = map[string][]string{ "WithCancel", "WithCancelCause", "WithDeadline", + "WithDeadlineCause", "WithTimeout", + "WithTimeoutCause", "WithValue", + "WithoutCancel", }, "crypto": { "BLAKE2b_256", @@ -445,6 +455,7 @@ var stdlib = map[string][]string{ "XORBytes", }, "crypto/tls": { + "AlertError", "Certificate", "CertificateRequestInfo", "CertificateVerificationError", @@ -476,6 +487,7 @@ var stdlib = map[string][]string{ "LoadX509KeyPair", "NewLRUClientSessionCache", "NewListener", + "NewResumptionState", "NoClientCert", "PKCS1WithSHA1", "PKCS1WithSHA256", @@ -484,6 +496,27 @@ var stdlib = map[string][]string{ "PSSWithSHA256", "PSSWithSHA384", "PSSWithSHA512", + "ParseSessionState", + "QUICClient", + "QUICConfig", + "QUICConn", + "QUICEncryptionLevel", + "QUICEncryptionLevelApplication", + "QUICEncryptionLevelEarly", + "QUICEncryptionLevelHandshake", + "QUICEncryptionLevelInitial", + "QUICEvent", + "QUICEventKind", + "QUICHandshakeDone", + "QUICNoEvent", + "QUICRejectedEarlyData", + "QUICServer", + "QUICSessionTicketOptions", + "QUICSetReadSecret", + "QUICSetWriteSecret", + "QUICTransportParameters", + "QUICTransportParametersRequired", + "QUICWriteData", "RecordHeaderError", "RenegotiateFreelyAsClient", 
"RenegotiateNever", @@ -493,6 +526,7 @@ var stdlib = map[string][]string{ "RequireAndVerifyClientCert", "RequireAnyClientCert", "Server", + "SessionState", "SignatureScheme", "TLS_AES_128_GCM_SHA256", "TLS_AES_256_GCM_SHA384", @@ -523,6 +557,7 @@ var stdlib = map[string][]string{ "TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_RC4_128_SHA", "VerifyClientCertIfGiven", + "VersionName", "VersionSSL30", "VersionTLS10", "VersionTLS11", @@ -618,6 +653,7 @@ var stdlib = map[string][]string{ "PureEd25519", "RSA", "RevocationList", + "RevocationListEntry", "SHA1WithRSA", "SHA256WithRSA", "SHA256WithRSAPSS", @@ -1002,10 +1038,42 @@ var stdlib = map[string][]string{ "COMPRESS_LOOS", "COMPRESS_LOPROC", "COMPRESS_ZLIB", + "COMPRESS_ZSTD", "Chdr32", "Chdr64", "Class", "CompressionType", + "DF_1_CONFALT", + "DF_1_DIRECT", + "DF_1_DISPRELDNE", + "DF_1_DISPRELPND", + "DF_1_EDITED", + "DF_1_ENDFILTEE", + "DF_1_GLOBAL", + "DF_1_GLOBAUDIT", + "DF_1_GROUP", + "DF_1_IGNMULDEF", + "DF_1_INITFIRST", + "DF_1_INTERPOSE", + "DF_1_KMOD", + "DF_1_LOADFLTR", + "DF_1_NOCOMMON", + "DF_1_NODEFLIB", + "DF_1_NODELETE", + "DF_1_NODIRECT", + "DF_1_NODUMP", + "DF_1_NOHDR", + "DF_1_NOKSYMS", + "DF_1_NOOPEN", + "DF_1_NORELOC", + "DF_1_NOW", + "DF_1_ORIGIN", + "DF_1_PIE", + "DF_1_SINGLETON", + "DF_1_STUB", + "DF_1_SYMINTPOSE", + "DF_1_TRANS", + "DF_1_WEAKFILTER", "DF_BIND_NOW", "DF_ORIGIN", "DF_STATIC_TLS", @@ -1144,6 +1212,7 @@ var stdlib = map[string][]string{ "Dyn32", "Dyn64", "DynFlag", + "DynFlag1", "DynTag", "EI_ABIVERSION", "EI_CLASS", @@ -2111,6 +2180,7 @@ var stdlib = map[string][]string{ "R_PPC64_REL16_LO", "R_PPC64_REL24", "R_PPC64_REL24_NOTOC", + "R_PPC64_REL24_P9NOTOC", "R_PPC64_REL30", "R_PPC64_REL32", "R_PPC64_REL64", @@ -2848,6 +2918,7 @@ var stdlib = map[string][]string{ "MaxVarintLen16", "MaxVarintLen32", "MaxVarintLen64", + "NativeEndian", "PutUvarint", "PutVarint", "Read", @@ -2963,6 +3034,7 @@ var stdlib = map[string][]string{ }, "errors": { "As", + "ErrUnsupported", "Is", "Join", "New", @@ -2989,6 +3061,7 @@ var stdlib = map[string][]string{ "Arg", "Args", "Bool", + "BoolFunc", "BoolVar", "CommandLine", "ContinueOnError", @@ -3119,6 +3192,7 @@ var stdlib = map[string][]string{ "Inspect", "InterfaceType", "IsExported", + "IsGenerated", "KeyValueExpr", "LabeledStmt", "Lbl", @@ -3169,6 +3243,7 @@ var stdlib = map[string][]string{ "ArchChar", "Context", "Default", + "Directive", "FindOnly", "IgnoreVendor", "Import", @@ -3184,6 +3259,7 @@ var stdlib = map[string][]string{ "go/build/constraint": { "AndExpr", "Expr", + "GoVersion", "IsGoBuild", "IsPlusBuild", "NotExpr", @@ -3626,6 +3702,7 @@ var stdlib = map[string][]string{ "ErrBadHTML", "ErrBranchEnd", "ErrEndContext", + "ErrJSTemplate", "ErrNoSuchTemplate", "ErrOutputContext", "ErrPartialCharset", @@ -3870,6 +3947,8 @@ var stdlib = map[string][]string{ "FileInfo", "FileInfoToDirEntry", "FileMode", + "FormatDirEntry", + "FormatFileInfo", "Glob", "GlobFS", "ModeAppend", @@ -3942,6 +4021,78 @@ var stdlib = map[string][]string{ "SetPrefix", "Writer", }, + "log/slog": { + "Any", + "AnyValue", + "Attr", + "Bool", + "BoolValue", + "Debug", + "DebugContext", + "Default", + "Duration", + "DurationValue", + "Error", + "ErrorContext", + "Float64", + "Float64Value", + "Group", + "GroupValue", + "Handler", + "HandlerOptions", + "Info", + "InfoContext", + "Int", + "Int64", + "Int64Value", + "IntValue", + "JSONHandler", + "Kind", + "KindAny", + "KindBool", + "KindDuration", + "KindFloat64", + "KindGroup", + "KindInt64", + "KindLogValuer", + "KindString", + "KindTime", + 
"KindUint64", + "Level", + "LevelDebug", + "LevelError", + "LevelInfo", + "LevelKey", + "LevelVar", + "LevelWarn", + "Leveler", + "Log", + "LogAttrs", + "LogValuer", + "Logger", + "MessageKey", + "New", + "NewJSONHandler", + "NewLogLogger", + "NewRecord", + "NewTextHandler", + "Record", + "SetDefault", + "Source", + "SourceKey", + "String", + "StringValue", + "TextHandler", + "Time", + "TimeKey", + "TimeValue", + "Uint64", + "Uint64Value", + "Value", + "Warn", + "WarnContext", + "With", + }, "log/syslog": { "Dial", "LOG_ALERT", @@ -3977,6 +4128,13 @@ var stdlib = map[string][]string{ "Priority", "Writer", }, + "maps": { + "Clone", + "Copy", + "DeleteFunc", + "Equal", + "EqualFunc", + }, "math": { "Abs", "Acos", @@ -4371,6 +4529,7 @@ var stdlib = map[string][]string{ "ErrNoLocation", "ErrNotMultipart", "ErrNotSupported", + "ErrSchemeMismatch", "ErrServerClosed", "ErrShortBody", "ErrSkipAltProtocol", @@ -5084,6 +5243,8 @@ var stdlib = map[string][]string{ "NumCPU", "NumCgoCall", "NumGoroutine", + "PanicNilError", + "Pinner", "ReadMemStats", "ReadTrace", "SetBlockProfileRate", @@ -5172,6 +5333,37 @@ var stdlib = map[string][]string{ "Task", "WithRegion", }, + "slices": { + "BinarySearch", + "BinarySearchFunc", + "Clip", + "Clone", + "Compact", + "CompactFunc", + "Compare", + "CompareFunc", + "Contains", + "ContainsFunc", + "Delete", + "DeleteFunc", + "Equal", + "EqualFunc", + "Grow", + "Index", + "IndexFunc", + "Insert", + "IsSorted", + "IsSortedFunc", + "Max", + "MaxFunc", + "Min", + "MinFunc", + "Replace", + "Reverse", + "Sort", + "SortFunc", + "SortStableFunc", + }, "sort": { "Find", "Float64Slice", @@ -5242,6 +5434,7 @@ var stdlib = map[string][]string{ "Compare", "Contains", "ContainsAny", + "ContainsFunc", "ContainsRune", "Count", "Cut", @@ -5299,6 +5492,9 @@ var stdlib = map[string][]string{ "Mutex", "NewCond", "Once", + "OnceFunc", + "OnceValue", + "OnceValues", "Pool", "RWMutex", "WaitGroup", @@ -9135,10 +9331,12 @@ var stdlib = map[string][]string{ "SYS_AIO_CANCEL", "SYS_AIO_ERROR", "SYS_AIO_FSYNC", + "SYS_AIO_MLOCK", "SYS_AIO_READ", "SYS_AIO_RETURN", "SYS_AIO_SUSPEND", "SYS_AIO_SUSPEND_NOCANCEL", + "SYS_AIO_WAITCOMPLETE", "SYS_AIO_WRITE", "SYS_ALARM", "SYS_ARCH_PRCTL", @@ -9368,6 +9566,7 @@ var stdlib = map[string][]string{ "SYS_GET_MEMPOLICY", "SYS_GET_ROBUST_LIST", "SYS_GET_THREAD_AREA", + "SYS_GSSD_SYSCALL", "SYS_GTTY", "SYS_IDENTITYSVC", "SYS_IDLE", @@ -9411,8 +9610,24 @@ var stdlib = map[string][]string{ "SYS_KLDSYM", "SYS_KLDUNLOAD", "SYS_KLDUNLOADF", + "SYS_KMQ_NOTIFY", + "SYS_KMQ_OPEN", + "SYS_KMQ_SETATTR", + "SYS_KMQ_TIMEDRECEIVE", + "SYS_KMQ_TIMEDSEND", + "SYS_KMQ_UNLINK", "SYS_KQUEUE", "SYS_KQUEUE1", + "SYS_KSEM_CLOSE", + "SYS_KSEM_DESTROY", + "SYS_KSEM_GETVALUE", + "SYS_KSEM_INIT", + "SYS_KSEM_OPEN", + "SYS_KSEM_POST", + "SYS_KSEM_TIMEDWAIT", + "SYS_KSEM_TRYWAIT", + "SYS_KSEM_UNLINK", + "SYS_KSEM_WAIT", "SYS_KTIMER_CREATE", "SYS_KTIMER_DELETE", "SYS_KTIMER_GETOVERRUN", @@ -9504,11 +9719,14 @@ var stdlib = map[string][]string{ "SYS_NFSSVC", "SYS_NFSTAT", "SYS_NICE", + "SYS_NLM_SYSCALL", "SYS_NLSTAT", "SYS_NMOUNT", "SYS_NSTAT", "SYS_NTP_ADJTIME", "SYS_NTP_GETTIME", + "SYS_NUMA_GETAFFINITY", + "SYS_NUMA_SETAFFINITY", "SYS_OABI_SYSCALL_BASE", "SYS_OBREAK", "SYS_OLDFSTAT", @@ -9891,6 +10109,7 @@ var stdlib = map[string][]string{ "SYS___ACL_SET_FD", "SYS___ACL_SET_FILE", "SYS___ACL_SET_LINK", + "SYS___CAP_RIGHTS_GET", "SYS___CLONE", "SYS___DISABLE_THREADSIGNAL", "SYS___GETCWD", @@ -10574,6 +10793,7 @@ var stdlib = map[string][]string{ "Short", "T", "TB", + "Testing", 
"Verbose", }, "testing/fstest": { @@ -10603,6 +10823,9 @@ var stdlib = map[string][]string{ "SetupError", "Value", }, + "testing/slogtest": { + "TestHandler", + }, "text/scanner": { "Char", "Comment", @@ -10826,6 +11049,7 @@ var stdlib = map[string][]string{ "Cs", "Cuneiform", "Cypriot", + "Cypro_Minoan", "Cyrillic", "Dash", "Deprecated", @@ -10889,6 +11113,7 @@ var stdlib = map[string][]string{ "Kaithi", "Kannada", "Katakana", + "Kawi", "Kayah_Li", "Kharoshthi", "Khitan_Small_Script", @@ -10943,6 +11168,7 @@ var stdlib = map[string][]string{ "Myanmar", "N", "Nabataean", + "Nag_Mundari", "Nandinagari", "Nd", "New_Tai_Lue", @@ -10964,6 +11190,7 @@ var stdlib = map[string][]string{ "Old_Sogdian", "Old_South_Arabian", "Old_Turkic", + "Old_Uyghur", "Oriya", "Osage", "Osmanya", @@ -11038,6 +11265,7 @@ var stdlib = map[string][]string{ "Tai_Viet", "Takri", "Tamil", + "Tangsa", "Tangut", "Telugu", "Terminal_Punctuation", @@ -11052,6 +11280,7 @@ var stdlib = map[string][]string{ "ToLower", "ToTitle", "ToUpper", + "Toto", "TurkishCase", "Ugaritic", "Unified_Ideograph", @@ -11061,6 +11290,7 @@ var stdlib = map[string][]string{ "Vai", "Variation_Selector", "Version", + "Vithkuqi", "Wancho", "Warang_Citi", "White_Space", diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go index d9950b1f..44719de1 100644 --- a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go +++ b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go @@ -5,10 +5,6 @@ // Package packagesinternal exposes internal-only fields from go/packages. package packagesinternal -import ( - "golang.org/x/tools/internal/gocommand" -) - var GetForTest = func(p interface{}) string { return "" } var GetDepsErrors = func(p interface{}) []*PackageError { return nil } @@ -18,10 +14,6 @@ type PackageError struct { Err string // the error itself } -var GetGoCmdRunner = func(config interface{}) *gocommand.Runner { return nil } - -var SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) {} - var TypecheckCgo int var DepsErrors int // must be set as a LoadMode to call GetDepsErrors var ForTest int // must be set as a LoadMode to call GetForTest diff --git a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go index a3fb2d4f..7e638ec2 100644 --- a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go +++ b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go @@ -7,7 +7,9 @@ package tokeninternal import ( + "fmt" "go/token" + "sort" "sync" "unsafe" ) @@ -57,3 +59,93 @@ func GetLines(file *token.File) []int { panic("unexpected token.File size") } } + +// AddExistingFiles adds the specified files to the FileSet if they +// are not already present. It panics if any pair of files in the +// resulting FileSet would overlap. +func AddExistingFiles(fset *token.FileSet, files []*token.File) { + // Punch through the FileSet encapsulation. + type tokenFileSet struct { + // This type remained essentially consistent from go1.16 to go1.21. + mutex sync.RWMutex + base int + files []*token.File + _ *token.File // changed to atomic.Pointer[token.File] in go1.19 + } + + // If the size of token.FileSet changes, this will fail to compile. 
+ const delta = int64(unsafe.Sizeof(tokenFileSet{})) - int64(unsafe.Sizeof(token.FileSet{})) + var _ [-delta * delta]int + + type uP = unsafe.Pointer + var ptr *tokenFileSet + *(*uP)(uP(&ptr)) = uP(fset) + ptr.mutex.Lock() + defer ptr.mutex.Unlock() + + // Merge and sort. + newFiles := append(ptr.files, files...) + sort.Slice(newFiles, func(i, j int) bool { + return newFiles[i].Base() < newFiles[j].Base() + }) + + // Reject overlapping files. + // Discard adjacent identical files. + out := newFiles[:0] + for i, file := range newFiles { + if i > 0 { + prev := newFiles[i-1] + if file == prev { + continue + } + if prev.Base()+prev.Size()+1 > file.Base() { + panic(fmt.Sprintf("file %s (%d-%d) overlaps with file %s (%d-%d)", + prev.Name(), prev.Base(), prev.Base()+prev.Size(), + file.Name(), file.Base(), file.Base()+file.Size())) + } + } + out = append(out, file) + } + newFiles = out + + ptr.files = newFiles + + // Advance FileSet.Base(). + if len(newFiles) > 0 { + last := newFiles[len(newFiles)-1] + newBase := last.Base() + last.Size() + 1 + if ptr.base < newBase { + ptr.base = newBase + } + } +} + +// FileSetFor returns a new FileSet containing a sequence of new Files with +// the same base, size, and line as the input files, for use in APIs that +// require a FileSet. +// +// Precondition: the input files must be non-overlapping, and sorted in order +// of their Base. +func FileSetFor(files ...*token.File) *token.FileSet { + fset := token.NewFileSet() + for _, f := range files { + f2 := fset.AddFile(f.Name(), f.Base(), f.Size()) + lines := GetLines(f) + f2.SetLines(lines) + } + return fset +} + +// CloneFileSet creates a new FileSet holding all files in fset. It does not +// create copies of the token.Files in fset: they are added to the resulting +// FileSet unmodified. +func CloneFileSet(fset *token.FileSet) *token.FileSet { + var files []*token.File + fset.Iterate(func(f *token.File) bool { + files = append(files, f) + return true + }) + newFileSet := token.NewFileSet() + AddExistingFiles(newFileSet, files) + return newFileSet +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go index 25a1426d..cdab9885 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/common.go +++ b/vendor/golang.org/x/tools/internal/typeparams/common.go @@ -23,6 +23,7 @@ package typeparams import ( + "fmt" "go/ast" "go/token" "go/types" @@ -41,7 +42,7 @@ func UnpackIndexExpr(n ast.Node) (x ast.Expr, lbrack token.Pos, indices []ast.Ex switch e := n.(type) { case *ast.IndexExpr: return e.X, e.Lbrack, []ast.Expr{e.Index}, e.Rbrack - case *IndexListExpr: + case *ast.IndexListExpr: return e.X, e.Lbrack, e.Indices, e.Rbrack } return nil, token.NoPos, nil, token.NoPos @@ -62,7 +63,7 @@ func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack toke Rbrack: rbrack, } default: - return &IndexListExpr{ + return &ast.IndexListExpr{ X: x, Lbrack: lbrack, Indices: indices, @@ -73,7 +74,7 @@ func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack toke // IsTypeParam reports whether t is a type parameter. func IsTypeParam(t types.Type) bool { - _, ok := t.(*TypeParam) + _, ok := t.(*types.TypeParam) return ok } @@ -87,7 +88,6 @@ func IsTypeParam(t types.Type) bool { func OriginMethod(fn *types.Func) *types.Func { recv := fn.Type().(*types.Signature).Recv() if recv == nil { - return fn } base := recv.Type() @@ -100,12 +100,37 @@ func OriginMethod(fn *types.Func) *types.Func { // Receiver is a *types.Interface. 
return fn } - if ForNamed(named).Len() == 0 { + if named.TypeParams().Len() == 0 { // Receiver base has no type parameters, so we can avoid the lookup below. return fn } - orig := NamedTypeOrigin(named) + orig := named.Origin() gfn, _, _ := types.LookupFieldOrMethod(orig, true, fn.Pkg(), fn.Name()) + + // This is a fix for a gopls crash (#60628) due to a go/types bug (#60634). In: + // package p + // type T *int + // func (*T) f() {} + // LookupFieldOrMethod(T, true, p, f)=nil, but NewMethodSet(*T)={(*T).f}. + // Here we make them consistent by force. + // (The go/types bug is general, but this workaround is reached only + // for generic T thanks to the early return above.) + if gfn == nil { + mset := types.NewMethodSet(types.NewPointer(orig)) + for i := 0; i < mset.Len(); i++ { + m := mset.At(i) + if m.Obj().Id() == fn.Id() { + gfn = m.Obj() + break + } + } + } + + // In golang/go#61196, we observe another crash, this time inexplicable. + if gfn == nil { + panic(fmt.Sprintf("missing origin method for %s.%s; named == origin: %t, named.NumMethods(): %d, origin.NumMethods(): %d", named, fn, named == orig, named.NumMethods(), orig.NumMethods())) + } + return gfn.(*types.Func) } @@ -132,7 +157,7 @@ func OriginMethod(fn *types.Func) *types.Func { // // In this case, GenericAssignableTo reports that instantiations of Container // are assignable to the corresponding instantiation of Interface. -func GenericAssignableTo(ctxt *Context, V, T types.Type) bool { +func GenericAssignableTo(ctxt *types.Context, V, T types.Type) bool { // If V and T are not both named, or do not have matching non-empty type // parameter lists, fall back on types.AssignableTo. @@ -142,9 +167,9 @@ func GenericAssignableTo(ctxt *Context, V, T types.Type) bool { return types.AssignableTo(V, T) } - vtparams := ForNamed(VN) - ttparams := ForNamed(TN) - if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || NamedTypeArgs(VN).Len() != 0 || NamedTypeArgs(TN).Len() != 0 { + vtparams := VN.TypeParams() + ttparams := TN.TypeParams() + if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || VN.TypeArgs().Len() != 0 || TN.TypeArgs().Len() != 0 { return types.AssignableTo(V, T) } @@ -157,7 +182,7 @@ func GenericAssignableTo(ctxt *Context, V, T types.Type) bool { // Minor optimization: ensure we share a context across the two // instantiations below. if ctxt == nil { - ctxt = NewContext() + ctxt = types.NewContext() } var targs []types.Type @@ -165,12 +190,12 @@ func GenericAssignableTo(ctxt *Context, V, T types.Type) bool { targs = append(targs, vtparams.At(i)) } - vinst, err := Instantiate(ctxt, V, targs, true) + vinst, err := types.Instantiate(ctxt, V, targs, true) if err != nil { panic("type parameters should satisfy their own constraints") } - tinst, err := Instantiate(ctxt, T, targs, true) + tinst, err := types.Instantiate(ctxt, T, targs, true) if err != nil { return false } diff --git a/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/vendor/golang.org/x/tools/internal/typeparams/coretype.go index 993135ec..7ea8840e 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/coretype.go +++ b/vendor/golang.org/x/tools/internal/typeparams/coretype.go @@ -81,13 +81,13 @@ func CoreType(T types.Type) types.Type { // restrictions may be arbitrarily complex. 
For example, consider the // following: // -// type A interface{ ~string|~[]byte } +// type A interface{ ~string|~[]byte } // -// type B interface{ int|string } +// type B interface{ int|string } // -// type C interface { ~string|~int } +// type C interface { ~string|~int } // -// type T[P interface{ A|B; C }] int +// type T[P interface{ A|B; C }] int // // In this example, the structural type restriction of P is ~string|int: A|B // expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, @@ -108,15 +108,15 @@ func CoreType(T types.Type) types.Type { // // _NormalTerms makes no guarantees about the order of terms, except that it // is deterministic. -func _NormalTerms(typ types.Type) ([]*Term, error) { +func _NormalTerms(typ types.Type) ([]*types.Term, error) { switch typ := typ.(type) { - case *TypeParam: + case *types.TypeParam: return StructuralTerms(typ) - case *Union: + case *types.Union: return UnionTermSet(typ) case *types.Interface: return InterfaceTermSet(typ) default: - return []*Term{NewTerm(false, typ)}, nil + return []*types.Term{types.NewTerm(false, typ)}, nil } } diff --git a/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go b/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go deleted file mode 100644 index 18212390..00000000 --- a/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.18 -// +build !go1.18 - -package typeparams - -// Enabled reports whether type parameters are enabled in the current build -// environment. -const Enabled = false diff --git a/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go b/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go deleted file mode 100644 index d6714882..00000000 --- a/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.18 -// +build go1.18 - -package typeparams - -// Note: this constant is in a separate file as this is the only acceptable -// diff between the <1.18 API of this package and the 1.18 API. - -// Enabled reports whether type parameters are enabled in the current build -// environment. -const Enabled = true diff --git a/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/vendor/golang.org/x/tools/internal/typeparams/normalize.go index 9c631b65..93c80fdc 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/normalize.go +++ b/vendor/golang.org/x/tools/internal/typeparams/normalize.go @@ -60,7 +60,7 @@ var ErrEmptyTypeSet = errors.New("empty type set") // // StructuralTerms makes no guarantees about the order of terms, except that it // is deterministic. -func StructuralTerms(tparam *TypeParam) ([]*Term, error) { +func StructuralTerms(tparam *types.TypeParam) ([]*types.Term, error) { constraint := tparam.Constraint() if constraint == nil { return nil, fmt.Errorf("%s has nil constraint", tparam) @@ -78,7 +78,7 @@ func StructuralTerms(tparam *TypeParam) ([]*Term, error) { // // See the documentation of StructuralTerms for more information on // normalization. 
-func InterfaceTermSet(iface *types.Interface) ([]*Term, error) { +func InterfaceTermSet(iface *types.Interface) ([]*types.Term, error) { return computeTermSet(iface) } @@ -88,11 +88,11 @@ func InterfaceTermSet(iface *types.Interface) ([]*Term, error) { // // See the documentation of StructuralTerms for more information on // normalization. -func UnionTermSet(union *Union) ([]*Term, error) { +func UnionTermSet(union *types.Union) ([]*types.Term, error) { return computeTermSet(union) } -func computeTermSet(typ types.Type) ([]*Term, error) { +func computeTermSet(typ types.Type) ([]*types.Term, error) { tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0) if err != nil { return nil, err @@ -103,9 +103,9 @@ func computeTermSet(typ types.Type) ([]*Term, error) { if tset.terms.isAll() { return nil, nil } - var terms []*Term + var terms []*types.Term for _, term := range tset.terms { - terms = append(terms, NewTerm(term.tilde, term.typ)) + terms = append(terms, types.NewTerm(term.tilde, term.typ)) } return terms, nil } @@ -162,7 +162,7 @@ func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth in tset.terms = allTermlist for i := 0; i < u.NumEmbeddeds(); i++ { embedded := u.EmbeddedType(i) - if _, ok := embedded.Underlying().(*TypeParam); ok { + if _, ok := embedded.Underlying().(*types.TypeParam); ok { return nil, fmt.Errorf("invalid embedded type %T", embedded) } tset2, err := computeTermSetInternal(embedded, seen, depth+1) @@ -171,7 +171,7 @@ func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth in } tset.terms = tset.terms.intersect(tset2.terms) } - case *Union: + case *types.Union: // The term set of a union is the union of term sets of its terms. tset.terms = nil for i := 0; i < u.Len(); i++ { @@ -184,7 +184,7 @@ func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth in return nil, err } terms = tset2.terms - case *TypeParam, *Union: + case *types.TypeParam, *types.Union: // A stand-alone type parameter or union is not permitted as union // term. return nil, fmt.Errorf("invalid union term %T", t) @@ -199,7 +199,7 @@ func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth in return nil, fmt.Errorf("exceeded max term count %d", maxTermCount) } } - case *TypeParam: + case *types.TypeParam: panic("unreachable") default: // For all other types, the term set is just a single non-tilde term diff --git a/vendor/golang.org/x/tools/internal/typeparams/termlist.go b/vendor/golang.org/x/tools/internal/typeparams/termlist.go index 933106a2..cbd12f80 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/termlist.go +++ b/vendor/golang.org/x/tools/internal/typeparams/termlist.go @@ -30,7 +30,7 @@ func (xl termlist) String() string { var buf bytes.Buffer for i, x := range xl { if i > 0 { - buf.WriteString(" ∪ ") + buf.WriteString(" | ") } buf.WriteString(x.String()) } diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go deleted file mode 100644 index b4788978..00000000 --- a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go +++ /dev/null @@ -1,197 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !go1.18 -// +build !go1.18 - -package typeparams - -import ( - "go/ast" - "go/token" - "go/types" -) - -func unsupported() { - panic("type parameters are unsupported at this go version") -} - -// IndexListExpr is a placeholder type, as type parameters are not supported at -// this Go version. Its methods panic on use. -type IndexListExpr struct { - ast.Expr - X ast.Expr // expression - Lbrack token.Pos // position of "[" - Indices []ast.Expr // index expressions - Rbrack token.Pos // position of "]" -} - -// ForTypeSpec returns an empty field list, as type parameters on not supported -// at this Go version. -func ForTypeSpec(*ast.TypeSpec) *ast.FieldList { - return nil -} - -// ForFuncType returns an empty field list, as type parameters are not -// supported at this Go version. -func ForFuncType(*ast.FuncType) *ast.FieldList { - return nil -} - -// TypeParam is a placeholder type, as type parameters are not supported at -// this Go version. Its methods panic on use. -type TypeParam struct{ types.Type } - -func (*TypeParam) Index() int { unsupported(); return 0 } -func (*TypeParam) Constraint() types.Type { unsupported(); return nil } -func (*TypeParam) Obj() *types.TypeName { unsupported(); return nil } - -// TypeParamList is a placeholder for an empty type parameter list. -type TypeParamList struct{} - -func (*TypeParamList) Len() int { return 0 } -func (*TypeParamList) At(int) *TypeParam { unsupported(); return nil } - -// TypeList is a placeholder for an empty type list. -type TypeList struct{} - -func (*TypeList) Len() int { return 0 } -func (*TypeList) At(int) types.Type { unsupported(); return nil } - -// NewTypeParam is unsupported at this Go version, and panics. -func NewTypeParam(name *types.TypeName, constraint types.Type) *TypeParam { - unsupported() - return nil -} - -// SetTypeParamConstraint is unsupported at this Go version, and panics. -func SetTypeParamConstraint(tparam *TypeParam, constraint types.Type) { - unsupported() -} - -// NewSignatureType calls types.NewSignature, panicking if recvTypeParams or -// typeParams is non-empty. -func NewSignatureType(recv *types.Var, recvTypeParams, typeParams []*TypeParam, params, results *types.Tuple, variadic bool) *types.Signature { - if len(recvTypeParams) != 0 || len(typeParams) != 0 { - panic("signatures cannot have type parameters at this Go version") - } - return types.NewSignature(recv, params, results, variadic) -} - -// ForSignature returns an empty slice. -func ForSignature(*types.Signature) *TypeParamList { - return nil -} - -// RecvTypeParams returns a nil slice. -func RecvTypeParams(sig *types.Signature) *TypeParamList { - return nil -} - -// IsComparable returns false, as no interfaces are type-restricted at this Go -// version. -func IsComparable(*types.Interface) bool { - return false -} - -// IsMethodSet returns true, as no interfaces are type-restricted at this Go -// version. -func IsMethodSet(*types.Interface) bool { - return true -} - -// IsImplicit returns false, as no interfaces are implicit at this Go version. -func IsImplicit(*types.Interface) bool { - return false -} - -// MarkImplicit does nothing, because this Go version does not have implicit -// interfaces. -func MarkImplicit(*types.Interface) {} - -// ForNamed returns an empty type parameter list, as type parameters are not -// supported at this Go version. -func ForNamed(*types.Named) *TypeParamList { - return nil -} - -// SetForNamed panics if tparams is non-empty. 
-func SetForNamed(_ *types.Named, tparams []*TypeParam) { - if len(tparams) > 0 { - unsupported() - } -} - -// NamedTypeArgs returns nil. -func NamedTypeArgs(*types.Named) *TypeList { - return nil -} - -// NamedTypeOrigin is the identity method at this Go version. -func NamedTypeOrigin(named *types.Named) types.Type { - return named -} - -// Term holds information about a structural type restriction. -type Term struct { - tilde bool - typ types.Type -} - -func (m *Term) Tilde() bool { return m.tilde } -func (m *Term) Type() types.Type { return m.typ } -func (m *Term) String() string { - pre := "" - if m.tilde { - pre = "~" - } - return pre + m.typ.String() -} - -// NewTerm is unsupported at this Go version, and panics. -func NewTerm(tilde bool, typ types.Type) *Term { - return &Term{tilde, typ} -} - -// Union is a placeholder type, as type parameters are not supported at this Go -// version. Its methods panic on use. -type Union struct{ types.Type } - -func (*Union) Len() int { return 0 } -func (*Union) Term(i int) *Term { unsupported(); return nil } - -// NewUnion is unsupported at this Go version, and panics. -func NewUnion(terms []*Term) *Union { - unsupported() - return nil -} - -// InitInstanceInfo is a noop at this Go version. -func InitInstanceInfo(*types.Info) {} - -// Instance is a placeholder type, as type parameters are not supported at this -// Go version. -type Instance struct { - TypeArgs *TypeList - Type types.Type -} - -// GetInstances returns a nil map, as type parameters are not supported at this -// Go version. -func GetInstances(info *types.Info) map[*ast.Ident]Instance { return nil } - -// Context is a placeholder type, as type parameters are not supported at -// this Go version. -type Context struct{} - -// NewContext returns a placeholder Context instance. -func NewContext() *Context { - return &Context{} -} - -// Instantiate is unsupported on this Go version, and panics. -func Instantiate(ctxt *Context, typ types.Type, targs []types.Type, validate bool) (types.Type, error) { - unsupported() - return nil, nil -} diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go deleted file mode 100644 index 114a36b8..00000000 --- a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.18 -// +build go1.18 - -package typeparams - -import ( - "go/ast" - "go/types" -) - -// IndexListExpr is an alias for ast.IndexListExpr. -type IndexListExpr = ast.IndexListExpr - -// ForTypeSpec returns n.TypeParams. -func ForTypeSpec(n *ast.TypeSpec) *ast.FieldList { - if n == nil { - return nil - } - return n.TypeParams -} - -// ForFuncType returns n.TypeParams. -func ForFuncType(n *ast.FuncType) *ast.FieldList { - if n == nil { - return nil - } - return n.TypeParams -} - -// TypeParam is an alias for types.TypeParam -type TypeParam = types.TypeParam - -// TypeParamList is an alias for types.TypeParamList -type TypeParamList = types.TypeParamList - -// TypeList is an alias for types.TypeList -type TypeList = types.TypeList - -// NewTypeParam calls types.NewTypeParam. -func NewTypeParam(name *types.TypeName, constraint types.Type) *TypeParam { - return types.NewTypeParam(name, constraint) -} - -// SetTypeParamConstraint calls tparam.SetConstraint(constraint). 
-func SetTypeParamConstraint(tparam *TypeParam, constraint types.Type) { - tparam.SetConstraint(constraint) -} - -// NewSignatureType calls types.NewSignatureType. -func NewSignatureType(recv *types.Var, recvTypeParams, typeParams []*TypeParam, params, results *types.Tuple, variadic bool) *types.Signature { - return types.NewSignatureType(recv, recvTypeParams, typeParams, params, results, variadic) -} - -// ForSignature returns sig.TypeParams() -func ForSignature(sig *types.Signature) *TypeParamList { - return sig.TypeParams() -} - -// RecvTypeParams returns sig.RecvTypeParams(). -func RecvTypeParams(sig *types.Signature) *TypeParamList { - return sig.RecvTypeParams() -} - -// IsComparable calls iface.IsComparable(). -func IsComparable(iface *types.Interface) bool { - return iface.IsComparable() -} - -// IsMethodSet calls iface.IsMethodSet(). -func IsMethodSet(iface *types.Interface) bool { - return iface.IsMethodSet() -} - -// IsImplicit calls iface.IsImplicit(). -func IsImplicit(iface *types.Interface) bool { - return iface.IsImplicit() -} - -// MarkImplicit calls iface.MarkImplicit(). -func MarkImplicit(iface *types.Interface) { - iface.MarkImplicit() -} - -// ForNamed extracts the (possibly empty) type parameter object list from -// named. -func ForNamed(named *types.Named) *TypeParamList { - return named.TypeParams() -} - -// SetForNamed sets the type params tparams on n. Each tparam must be of -// dynamic type *types.TypeParam. -func SetForNamed(n *types.Named, tparams []*TypeParam) { - n.SetTypeParams(tparams) -} - -// NamedTypeArgs returns named.TypeArgs(). -func NamedTypeArgs(named *types.Named) *TypeList { - return named.TypeArgs() -} - -// NamedTypeOrigin returns named.Orig(). -func NamedTypeOrigin(named *types.Named) types.Type { - return named.Origin() -} - -// Term is an alias for types.Term. -type Term = types.Term - -// NewTerm calls types.NewTerm. -func NewTerm(tilde bool, typ types.Type) *Term { - return types.NewTerm(tilde, typ) -} - -// Union is an alias for types.Union -type Union = types.Union - -// NewUnion calls types.NewUnion. -func NewUnion(terms []*Term) *Union { - return types.NewUnion(terms) -} - -// InitInstanceInfo initializes info to record information about type and -// function instances. -func InitInstanceInfo(info *types.Info) { - info.Instances = make(map[*ast.Ident]types.Instance) -} - -// Instance is an alias for types.Instance. -type Instance = types.Instance - -// GetInstances returns info.Instances. -func GetInstances(info *types.Info) map[*ast.Ident]Instance { - return info.Instances -} - -// Context is an alias for types.Context. -type Context = types.Context - -// NewContext calls types.NewContext. -func NewContext() *Context { - return types.NewContext() -} - -// Instantiate calls types.Instantiate. 
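With these forwarding aliases deleted, callers reach for go/types directly. A self-contained sketch of the same Instantiate call against a tiny type-checked package (illustrative only; the package source and names are made up for the example):

	package main

	import (
		"fmt"
		"go/ast"
		"go/importer"
		"go/parser"
		"go/token"
		"go/types"
	)

	func main() {
		const src = `package p; type List[T any] struct{ items []T }`
		fset := token.NewFileSet()
		f, err := parser.ParseFile(fset, "p.go", src, 0)
		if err != nil {
			panic(err)
		}
		conf := types.Config{Importer: importer.Default()}
		pkg, err := conf.Check("p", fset, []*ast.File{f}, nil)
		if err != nil {
			panic(err)
		}
		// Instantiate List[T] as List[int] with go/types directly, which is
		// exactly what the deleted alias file forwarded to on go1.18+.
		generic := pkg.Scope().Lookup("List").Type()
		inst, err := types.Instantiate(types.NewContext(), generic, []types.Type{types.Typ[types.Int]}, true)
		if err != nil {
			panic(err)
		}
		fmt.Println(inst) // p.List[int]
	}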
-func Instantiate(ctxt *Context, typ types.Type, targs []types.Type, validate bool) (types.Type, error) {
-	return types.Instantiate(ctxt, typ, targs, validate)
-}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
index 7ddee28d..7350bb70 100644
--- a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
+++ b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
@@ -10,11 +10,10 @@ import "go/types"

 // A term describes elementary type sets:
 //
-// ∅: (*term)(nil) == ∅ // set of no types (empty set)
-// 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse)
-// T: &term{false, T} == {T} // set of type T
-// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t
-//
+// ∅: (*term)(nil) == ∅ // set of no types (empty set)
+// 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse)
+// T: &term{false, T} == {T} // set of type T
+// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t
 type term struct {
 	tilde bool // valid if typ != nil
 	typ   types.Type
diff --git a/vendor/golang.org/x/tools/internal/versions/gover.go b/vendor/golang.org/x/tools/internal/versions/gover.go
new file mode 100644
index 00000000..bbabcd22
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/versions/gover.go
@@ -0,0 +1,172 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This is a fork of internal/gover for use by x/tools until
+// go1.21 and earlier are no longer supported by x/tools.
+
+package versions
+
+import "strings"
+
+// A gover is a parsed Go version: major[.Minor[.Patch]][kind[pre]]
+// The numbers are the original decimal strings to avoid integer overflows
+// and since there is very little actual math. (Probably overflow doesn't matter in practice,
+// but at the time this code was written, there was an existing test that used
+// go1.99999999999, which does not fit in an int on 32-bit platforms.
+// The "big decimal" representation avoids the problem entirely.)
+type gover struct {
+	major string // decimal
+	minor string // decimal or ""
+	patch string // decimal or ""
+	kind  string // "", "alpha", "beta", "rc"
+	pre   string // decimal or ""
+}
+
+// compare returns -1, 0, or +1 depending on whether
+// x < y, x == y, or x > y, interpreted as toolchain versions.
+// The versions x and y must not begin with a "go" prefix: just "1.21" not "go1.21".
+// Malformed versions compare less than well-formed versions and equal to each other.
+// The language version "1.21" compares less than the release candidate and eventual releases "1.21rc1" and "1.21.0".
+func compare(x, y string) int {
+	vx := parse(x)
+	vy := parse(y)
+
+	if c := cmpInt(vx.major, vy.major); c != 0 {
+		return c
+	}
+	if c := cmpInt(vx.minor, vy.minor); c != 0 {
+		return c
+	}
+	if c := cmpInt(vx.patch, vy.patch); c != 0 {
+		return c
+	}
+	if c := strings.Compare(vx.kind, vy.kind); c != 0 { // "" < alpha < beta < rc
+		return c
+	}
+	if c := cmpInt(vx.pre, vy.pre); c != 0 {
+		return c
+	}
+	return 0
+}
+
+// lang returns the Go language version. For example, lang("1.2.3") == "1.2".
+func lang(x string) string {
+	v := parse(x)
+	if v.minor == "" || v.major == "1" && v.minor == "0" {
+		return v.major
+	}
+	return v.major + "." + v.minor
+}
+
+// isValid reports whether the version x is valid.
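The "big decimal" trick above deserves a standalone look: because the numerals carry no leading zeros, a shorter string is always the smaller number, so no integer parsing (and no overflow) is ever needed. A sketch mirroring the cmpInt helper defined further down in this file (local copy, not an import):

	package main

	import "fmt"

	// cmpDecimal compares two decimal numerals with no leading zeros the
	// same way the vendored cmpInt does: by length first, then bytewise.
	func cmpDecimal(x, y string) int {
		if x == y {
			return 0
		}
		if len(x) != len(y) {
			if len(x) < len(y) {
				return -1
			}
			return +1
		}
		if x < y {
			return -1
		}
		return +1
	}

	func main() {
		fmt.Println(cmpDecimal("9", "10"))           // -1
		fmt.Println(cmpDecimal("99999999999", "21")) // +1, no int overflow anywhere
	}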
+func isValid(x string) bool { + return parse(x) != gover{} +} + +// parse parses the Go version string x into a version. +// It returns the zero version if x is malformed. +func parse(x string) gover { + var v gover + + // Parse major version. + var ok bool + v.major, x, ok = cutInt(x) + if !ok { + return gover{} + } + if x == "" { + // Interpret "1" as "1.0.0". + v.minor = "0" + v.patch = "0" + return v + } + + // Parse . before minor version. + if x[0] != '.' { + return gover{} + } + + // Parse minor version. + v.minor, x, ok = cutInt(x[1:]) + if !ok { + return gover{} + } + if x == "" { + // Patch missing is same as "0" for older versions. + // Starting in Go 1.21, patch missing is different from explicit .0. + if cmpInt(v.minor, "21") < 0 { + v.patch = "0" + } + return v + } + + // Parse patch if present. + if x[0] == '.' { + v.patch, x, ok = cutInt(x[1:]) + if !ok || x != "" { + // Note that we are disallowing prereleases (alpha, beta, rc) for patch releases here (x != ""). + // Allowing them would be a bit confusing because we already have: + // 1.21 < 1.21rc1 + // But a prerelease of a patch would have the opposite effect: + // 1.21.3rc1 < 1.21.3 + // We've never needed them before, so let's not start now. + return gover{} + } + return v + } + + // Parse prerelease. + i := 0 + for i < len(x) && (x[i] < '0' || '9' < x[i]) { + if x[i] < 'a' || 'z' < x[i] { + return gover{} + } + i++ + } + if i == 0 { + return gover{} + } + v.kind, x = x[:i], x[i:] + if x == "" { + return v + } + v.pre, x, ok = cutInt(x) + if !ok || x != "" { + return gover{} + } + + return v +} + +// cutInt scans the leading decimal number at the start of x to an integer +// and returns that value and the rest of the string. +func cutInt(x string) (n, rest string, ok bool) { + i := 0 + for i < len(x) && '0' <= x[i] && x[i] <= '9' { + i++ + } + if i == 0 || x[0] == '0' && i != 1 { // no digits or unnecessary leading zero + return "", "", false + } + return x[:i], x[i:], true +} + +// cmpInt returns cmp.Compare(x, y) interpreting x and y as decimal numbers. +// (Copied from golang.org/x/mod/semver's compareInt.) +func cmpInt(x, y string) int { + if x == y { + return 0 + } + if len(x) < len(y) { + return -1 + } + if len(x) > len(y) { + return +1 + } + if x < y { + return -1 + } else { + return +1 + } +} diff --git a/vendor/golang.org/x/tools/internal/versions/types.go b/vendor/golang.org/x/tools/internal/versions/types.go new file mode 100644 index 00000000..562eef21 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/types.go @@ -0,0 +1,19 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package versions + +import ( + "go/types" +) + +// GoVersion returns the Go version of the type package. +// It returns zero if no version can be determined. +func GoVersion(pkg *types.Package) string { + // TODO(taking): x/tools can call GoVersion() [from 1.21] after 1.25. + if pkg, ok := any(pkg).(interface{ GoVersion() string }); ok { + return pkg.GoVersion() + } + return "" +} diff --git a/vendor/golang.org/x/tools/internal/versions/types_go121.go b/vendor/golang.org/x/tools/internal/versions/types_go121.go new file mode 100644 index 00000000..a7b79207 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/types_go121.go @@ -0,0 +1,20 @@ +// Copyright 2023 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.22
+// +build !go1.22
+
+package versions
+
+import (
+	"go/ast"
+	"go/types"
+)
+
+// FileVersions always reports a file's Go version as the
+// zero version at this Go version.
+func FileVersions(info *types.Info, file *ast.File) string { return "" }
+
+// InitFileVersions is a noop at this Go version.
+func InitFileVersions(*types.Info) {}
diff --git a/vendor/golang.org/x/tools/internal/versions/types_go122.go b/vendor/golang.org/x/tools/internal/versions/types_go122.go
new file mode 100644
index 00000000..7b9ba89a
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/versions/types_go122.go
@@ -0,0 +1,24 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.22
+// +build go1.22
+
+package versions
+
+import (
+	"go/ast"
+	"go/types"
+)
+
+// FileVersions maps a file to the file's semantic Go version.
+// The reported version is the zero version if a version cannot be determined.
+func FileVersions(info *types.Info, file *ast.File) string {
+	return info.FileVersions[file]
+}
+
+// InitFileVersions initializes info to record Go versions for Go files.
+func InitFileVersions(info *types.Info) {
+	info.FileVersions = make(map[*ast.File]string)
+}
diff --git a/vendor/golang.org/x/tools/internal/versions/versions.go b/vendor/golang.org/x/tools/internal/versions/versions.go
new file mode 100644
index 00000000..e16f6c33
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/versions/versions.go
@@ -0,0 +1,52 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package versions
+
+// Note: If we use build tags to use go/version when go >=1.22,
+// we run into go.dev/issue/53737. Under some operations users would see an
+// import of "go/version" even if they would not compile the file.
+// For example, during `go get -u ./...` (go.dev/issue/64490) we do not try to include it.
+// For this reason, this library is just a clone of go/version for the moment.
+
+// Lang returns the Go language version for version x.
+// If x is not a valid version, Lang returns the empty string.
+// For example:
+//
+//	Lang("go1.21rc2") = "go1.21"
+//	Lang("go1.21.2") = "go1.21"
+//	Lang("go1.21") = "go1.21"
+//	Lang("go1") = "go1"
+//	Lang("bad") = ""
+//	Lang("1.21") = ""
+func Lang(x string) string {
+	v := lang(stripGo(x))
+	if v == "" {
+		return ""
+	}
+	return x[:2+len(v)] // "go"+v without allocation
+}
+
+// Compare returns -1, 0, or +1 depending on whether
+// x < y, x == y, or x > y, interpreted as Go versions.
+// The versions x and y must begin with a "go" prefix: "go1.21" not "1.21".
+// Invalid versions, including the empty string, compare less than
+// valid versions and equal to each other.
+// The language version "go1.21" compares less than the
+// release candidate and eventual releases "go1.21rc1" and "go1.21.0".
+// Custom toolchain suffixes are ignored during comparison:
+// "go1.21.0" and "go1.21.0-bigcorp" are equal.
+func Compare(x, y string) int { return compare(stripGo(x), stripGo(y)) }
+
+// IsValid reports whether the version x is valid.
+func IsValid(x string) bool { return isValid(stripGo(x)) }
+
+// stripGo converts from a "go1.21" version to a "1.21" version.
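These exported wrappers have a standard-library counterpart in Go 1.22's go/version package, with the same comparison semantics. A small usage sketch of the documented behavior (requires Go 1.22+; the stdlib package, not this internal fork):

	package main

	import (
		"fmt"
		"go/version"
	)

	func main() {
		fmt.Println(version.Lang("go1.21rc2"))                // go1.21
		fmt.Println(version.Compare("go1.21", "go1.21rc1"))   // -1: language version sorts first
		fmt.Println(version.Compare("go1.21rc1", "go1.21.0")) // -1
		fmt.Println(version.IsValid("1.21"))                  // false: missing "go" prefix
	}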
+// If v does not start with "go", stripGo returns the empty string (a known invalid version). +func stripGo(v string) string { + if len(v) < 2 || v[:2] != "go" { + return "" + } + return v[2:] +} diff --git a/vendor/google.golang.org/api/internal/cba.go b/vendor/google.golang.org/api/internal/cba.go index cecbb9ba..73694e33 100644 --- a/vendor/google.golang.org/api/internal/cba.go +++ b/vendor/google.golang.org/api/internal/cba.go @@ -35,6 +35,7 @@ package internal import ( "context" "crypto/tls" + "errors" "net" "net/url" "os" @@ -53,6 +54,12 @@ const ( // Experimental: if true, the code will try MTLS with S2A as the default for transport security. Default value is false. googleAPIUseS2AEnv = "EXPERIMENTAL_GOOGLE_API_USE_S2A" + + universeDomainPlaceholder = "UNIVERSE_DOMAIN" +) + +var ( + errUniverseNotSupportedMTLS = errors.New("mTLS is not supported in any universe other than googleapis.com") ) // getClientCertificateSourceAndEndpoint is a convenience function that invokes @@ -67,6 +74,14 @@ func getClientCertificateSourceAndEndpoint(settings *DialSettings) (cert.Source, if err != nil { return nil, "", err } + // TODO(chrisdsmith): https://github.com/googleapis/google-api-go-client/issues/2359 + if settings.Endpoint == "" && !settings.IsUniverseDomainGDU() && settings.DefaultEndpointTemplate != "" { + // TODO(chrisdsmith): https://github.com/googleapis/google-api-go-client/issues/2359 + // if settings.DefaultEndpointTemplate == "" { + // return nil, "", errors.New("internaloption.WithDefaultEndpointTemplate is required if option.WithUniverseDomain is not googleapis.com") + // } + endpoint = resolvedDefaultEndpoint(settings) + } return clientCertSource, endpoint, nil } @@ -80,9 +95,7 @@ type transportConfig struct { func getTransportConfig(settings *DialSettings) (*transportConfig, error) { clientCertSource, endpoint, err := getClientCertificateSourceAndEndpoint(settings) if err != nil { - return &transportConfig{ - clientCertSource: nil, endpoint: "", s2aAddress: "", s2aMTLSEndpoint: "", - }, err + return nil, err } defaultTransportConfig := transportConfig{ clientCertSource: clientCertSource, @@ -91,21 +104,13 @@ func getTransportConfig(settings *DialSettings) (*transportConfig, error) { s2aMTLSEndpoint: "", } - // Check the env to determine whether to use S2A. - if !isGoogleS2AEnabled() { + if !shouldUseS2A(clientCertSource, settings) { return &defaultTransportConfig, nil } - - // If client cert is found, use that over S2A. - // If MTLS is not enabled for the endpoint, skip S2A. - if clientCertSource != nil || !mtlsEndpointEnabledForS2A() { - return &defaultTransportConfig, nil - } - s2aMTLSEndpoint := settings.DefaultMTLSEndpoint - // If there is endpoint override, honor it. - if settings.Endpoint != "" { - s2aMTLSEndpoint = endpoint + if !settings.IsUniverseDomainGDU() { + return nil, errUniverseNotSupportedMTLS } + s2aAddress := GetS2AAddress() if s2aAddress == "" { return &defaultTransportConfig, nil @@ -114,14 +119,10 @@ func getTransportConfig(settings *DialSettings) (*transportConfig, error) { clientCertSource: clientCertSource, endpoint: endpoint, s2aAddress: s2aAddress, - s2aMTLSEndpoint: s2aMTLSEndpoint, + s2aMTLSEndpoint: settings.DefaultMTLSEndpoint, }, nil } -func isGoogleS2AEnabled() bool { - return strings.ToLower(os.Getenv(googleAPIUseS2AEnv)) == "true" -} - // getClientCertificateSource returns a default client certificate source, if // not provided by the user. 
// @@ -163,24 +164,41 @@ func isClientCertificateEnabled() bool { // WithDefaultEndpoint("https://foo.com/bar/baz") will return "https://myhost:8080/bar/baz" func getEndpoint(settings *DialSettings, clientCertSource cert.Source) (string, error) { if settings.Endpoint == "" { - mtlsMode := getMTLSMode() - if mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) { + if isMTLS(clientCertSource) { + if !settings.IsUniverseDomainGDU() { + return "", errUniverseNotSupportedMTLS + } return settings.DefaultMTLSEndpoint, nil } - return settings.DefaultEndpoint, nil + return resolvedDefaultEndpoint(settings), nil } if strings.Contains(settings.Endpoint, "://") { // User passed in a full URL path, use it verbatim. return settings.Endpoint, nil } - if settings.DefaultEndpoint == "" { + if resolvedDefaultEndpoint(settings) == "" { // If DefaultEndpoint is not configured, use the user provided endpoint verbatim. // This allows a naked "host[:port]" URL to be used with GRPC Direct Path. return settings.Endpoint, nil } // Assume user-provided endpoint is host[:port], merge it with the default endpoint. - return mergeEndpoints(settings.DefaultEndpoint, settings.Endpoint) + return mergeEndpoints(resolvedDefaultEndpoint(settings), settings.Endpoint) +} + +func isMTLS(clientCertSource cert.Source) bool { + mtlsMode := getMTLSMode() + return mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) +} + +// resolvedDefaultEndpoint returns the DefaultEndpointTemplate merged with the +// Universe Domain if the DefaultEndpointTemplate is set, otherwise returns the +// deprecated DefaultEndpoint value. +func resolvedDefaultEndpoint(settings *DialSettings) string { + if settings.DefaultEndpointTemplate == "" { + return settings.DefaultEndpoint + } + return strings.Replace(settings.DefaultEndpointTemplate, universeDomainPlaceholder, settings.GetUniverseDomain(), 1) } func getMTLSMode() string { @@ -275,8 +293,26 @@ func GetHTTPTransportConfigAndEndpoint(settings *DialSettings) (cert.Source, fun return nil, dialTLSContextFunc, config.s2aMTLSEndpoint, nil } -// mtlsEndpointEnabledForS2A checks if the endpoint is indeed MTLS-enabled, so that we can use S2A for MTLS connection. -var mtlsEndpointEnabledForS2A = func() bool { - // TODO(xmenxk): determine this via discovery config. +func shouldUseS2A(clientCertSource cert.Source, settings *DialSettings) bool { + // If client cert is found, use that over S2A. + if clientCertSource != nil { + return false + } + // If EXPERIMENTAL_GOOGLE_API_USE_S2A is not set to true, skip S2A. + if !isGoogleS2AEnabled() { + return false + } + // If DefaultMTLSEndpoint is not set or has endpoint override, skip S2A. + if settings.DefaultMTLSEndpoint == "" || settings.Endpoint != "" { + return false + } + // If custom HTTP client is provided, skip S2A. 
+ if settings.HTTPClient != nil { + return false + } return true } + +func isGoogleS2AEnabled() bool { + return strings.ToLower(os.Getenv(googleAPIUseS2AEnv)) == "true" +} diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go index 92b3acf6..b6489309 100644 --- a/vendor/google.golang.org/api/internal/creds.go +++ b/vendor/google.golang.org/api/internal/creds.go @@ -16,6 +16,7 @@ import ( "time" "golang.org/x/oauth2" + "google.golang.org/api/internal/cert" "google.golang.org/api/internal/impersonate" "golang.org/x/oauth2/google" @@ -78,9 +79,8 @@ const ( // met: // // (1) At least one of the following is true: -// (a) No scope is provided -// (b) Scope for self-signed JWT flow is enabled -// (c) Audiences are explicitly provided by users +// (a) Scope for self-signed JWT flow is enabled +// (b) Audiences are explicitly provided by users // (2) No service account impersontation // // - Otherwise, executes standard OAuth 2.0 flow @@ -91,11 +91,11 @@ func credentialsFromJSON(ctx context.Context, data []byte, ds *DialSettings) (*g // Determine configurations for the OAuth2 transport, which is separate from the API transport. // The OAuth2 transport and endpoint will be configured for mTLS if applicable. - clientCertSource, oauth2Endpoint, err := getClientCertificateSourceAndEndpoint(oauth2DialSettings(ds)) + clientCertSource, err := getClientCertificateSource(ds) if err != nil { return nil, err } - params.TokenURL = oauth2Endpoint + params.TokenURL = oAuth2Endpoint(clientCertSource) if clientCertSource != nil { tlsConfig := &tls.Config{ GetClientCertificate: clientCertSource, @@ -125,22 +125,37 @@ func credentialsFromJSON(ctx context.Context, data []byte, ds *DialSettings) (*g return cred, err } +func oAuth2Endpoint(clientCertSource cert.Source) string { + if isMTLS(clientCertSource) { + return google.MTLSTokenURL + } + return google.Endpoint.TokenURL +} + func isSelfSignedJWTFlow(data []byte, ds *DialSettings) (bool, error) { - if (ds.EnableJwtWithScope || ds.HasCustomAudience()) && - ds.ImpersonationConfig == nil { - // Check if JSON is a service account and if so create a self-signed JWT. - var f struct { - Type string `json:"type"` - // The rest JSON fields are omitted because they are not used. - } - if err := json.Unmarshal(data, &f); err != nil { - return false, err - } - return f.Type == serviceAccountKey, nil + // For non-GDU universe domains, token exchange is impossible and services + // must support self-signed JWTs with scopes. + if !ds.IsUniverseDomainGDU() { + return typeServiceAccount(data) + } + if (ds.EnableJwtWithScope || ds.HasCustomAudience()) && ds.ImpersonationConfig == nil { + return typeServiceAccount(data) } return false, nil } +// typeServiceAccount checks if JSON data is for a service account. +func typeServiceAccount(data []byte) (bool, error) { + var f struct { + Type string `json:"type"` + // The remaining JSON fields are omitted because they are not used. 
+ } + if err := json.Unmarshal(data, &f); err != nil { + return false, err + } + return f.Type == serviceAccountKey, nil +} + func selfSignedJWTTokenSource(data []byte, ds *DialSettings) (oauth2.TokenSource, error) { if len(ds.GetScopes()) > 0 && !ds.HasCustomAudience() { // Scopes are preferred in self-signed JWT unless the scope is not available @@ -189,15 +204,6 @@ func impersonateCredentials(ctx context.Context, creds *google.Credentials, ds * }, nil } -// oauth2DialSettings returns the settings to be used by the OAuth2 transport, which is separate from the API transport. -func oauth2DialSettings(ds *DialSettings) *DialSettings { - var ods DialSettings - ods.DefaultEndpoint = google.Endpoint.TokenURL - ods.DefaultMTLSEndpoint = google.MTLSTokenURL - ods.ClientCertSource = ds.ClientCertSource - return &ods -} - // customHTTPClient constructs an HTTPClient using the provided tlsConfig, to support mTLS. func customHTTPClient(tlsConfig *tls.Config) *http.Client { trans := baseTransport() @@ -220,3 +226,14 @@ func baseTransport() *http.Transport { ExpectContinueTimeout: 1 * time.Second, } } + +// ErrUniverseNotMatch composes an error string from the provided universe +// domain sources (DialSettings and Credentials, respectively). +func ErrUniverseNotMatch(settingsUD, credsUD string) error { + return fmt.Errorf( + "the configured universe domain (%q) does not match the universe "+ + "domain found in the credentials (%q). If you haven't configured "+ + "WithUniverseDomain explicitly, \"googleapis.com\" is the default", + settingsUD, + credsUD) +} diff --git a/vendor/google.golang.org/api/internal/gensupport/send.go b/vendor/google.golang.org/api/internal/gensupport/send.go index 693a1b1a..f39dd00d 100644 --- a/vendor/google.golang.org/api/internal/gensupport/send.go +++ b/vendor/google.golang.org/api/internal/gensupport/send.go @@ -15,6 +15,7 @@ import ( "github.com/google/uuid" "github.com/googleapis/gax-go/v2" + "github.com/googleapis/gax-go/v2/callctx" ) // Use this error type to return an error which allows introspection of both @@ -43,6 +44,16 @@ func (e wrappedCallErr) Is(target error) bool { // req.WithContext, then calls any functions returned by the hooks in // reverse order. func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + // Add headers set in context metadata. + if ctx != nil { + headers := callctx.HeadersFromContext(ctx) + for k, vals := range headers { + for _, v := range vals { + req.Header.Add(k, v) + } + } + } + // Disallow Accept-Encoding because it interferes with the automatic gzip handling // done by the default http.Transport. See https://github.com/google/google-api-go-client/issues/219. if _, ok := req.Header["Accept-Encoding"]; ok { @@ -77,6 +88,16 @@ func send(ctx context.Context, client *http.Client, req *http.Request) (*http.Re // req.WithContext, then calls any functions returned by the hooks in // reverse order. func SendRequestWithRetry(ctx context.Context, client *http.Client, req *http.Request, retry *RetryConfig) (*http.Response, error) { + // Add headers set in context metadata. + if ctx != nil { + headers := callctx.HeadersFromContext(ctx) + for k, vals := range headers { + for _, v := range vals { + req.Header.Add(k, v) + } + } + } + // Disallow Accept-Encoding because it interferes with the automatic gzip handling // done by the default http.Transport. See https://github.com/google/google-api-go-client/issues/219. 
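The headers merged in here come from gax's callctx package; a short sketch of how a caller attaches them upstream of SendRequest (the header name is an arbitrary example):

	package main

	import (
		"context"
		"fmt"

		"github.com/googleapis/gax-go/v2/callctx"
	)

	func main() {
		// Attach a header via the context; SendRequest copies it onto the
		// outgoing *http.Request before dispatch.
		ctx := callctx.SetHeaders(context.Background(), "x-goog-request-reason", "audit")
		for k, v := range callctx.HeadersFromContext(ctx) {
			fmt.Println(k, v) // x-goog-request-reason [audit]
		}
	}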
if _, ok := req.Header["Accept-Encoding"]; ok { diff --git a/vendor/google.golang.org/api/internal/s2a.go b/vendor/google.golang.org/api/internal/s2a.go index c5b421f5..c70f2419 100644 --- a/vendor/google.golang.org/api/internal/s2a.go +++ b/vendor/google.golang.org/api/internal/s2a.go @@ -13,7 +13,7 @@ import ( "cloud.google.com/go/compute/metadata" ) -const configEndpointSuffix = "googleAutoMtlsConfiguration" +const configEndpointSuffix = "instance/platform-security/auto-mtls-configuration" // The period an MTLS config can be reused before needing refresh. var configExpiry = time.Hour diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go index 3a3874df..99210ebe 100644 --- a/vendor/google.golang.org/api/internal/settings.go +++ b/vendor/google.golang.org/api/internal/settings.go @@ -9,6 +9,8 @@ import ( "crypto/tls" "errors" "net/http" + "os" + "strconv" "golang.org/x/oauth2" "golang.org/x/oauth2/google" @@ -16,11 +18,17 @@ import ( "google.golang.org/grpc" ) +const ( + newAuthLibEnVar = "GOOGLE_API_GO_EXPERIMENTAL_USE_NEW_AUTH_LIB" + universeDomainDefault = "googleapis.com" +) + // DialSettings holds information needed to establish a connection with a // Google API service. type DialSettings struct { Endpoint string DefaultEndpoint string + DefaultEndpointTemplate string DefaultMTLSEndpoint string Scopes []string DefaultScopes []string @@ -47,7 +55,10 @@ type DialSettings struct { ImpersonationConfig *impersonate.Config EnableDirectPath bool EnableDirectPathXds bool + EnableNewAuthLibrary bool AllowNonDefaultServiceAccount bool + UniverseDomain string + DefaultUniverseDomain string // Google API system parameters. For more information please read: // https://cloud.google.com/apis/docs/system-parameters @@ -77,6 +88,16 @@ func (ds *DialSettings) HasCustomAudience() bool { return len(ds.Audiences) > 0 } +func (ds *DialSettings) IsNewAuthLibraryEnabled() bool { + if ds.EnableNewAuthLibrary { + return true + } + if b, err := strconv.ParseBool(os.Getenv(newAuthLibEnVar)); err == nil { + return b + } + return false +} + // Validate reports an error if ds is invalid. func (ds *DialSettings) Validate() error { if ds.SkipValidation { @@ -141,3 +162,28 @@ func (ds *DialSettings) Validate() error { } return nil } + +// GetDefaultUniverseDomain returns the default service domain for a given Cloud +// universe, as configured with internaloption.WithDefaultUniverseDomain. +// The default value is "googleapis.com". +func (ds *DialSettings) GetDefaultUniverseDomain() string { + if ds.DefaultUniverseDomain == "" { + return universeDomainDefault + } + return ds.DefaultUniverseDomain +} + +// GetUniverseDomain returns the default service domain for a given Cloud +// universe, as configured with option.WithUniverseDomain. +// The default value is the value of GetDefaultUniverseDomain, as configured +// with internaloption.WithDefaultUniverseDomain. 
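Read together with GetUniverseDomain below, the resolution order is: the user's option.WithUniverseDomain value, then the client's internaloption.WithDefaultUniverseDomain value, then "googleapis.com". A toy mirror of that fallback chain (local stand-in type, purely illustrative, not the internal DialSettings):

	package main

	import "fmt"

	type dialSettings struct {
		UniverseDomain        string // from option.WithUniverseDomain
		DefaultUniverseDomain string // from internaloption.WithDefaultUniverseDomain
	}

	func (ds dialSettings) universeDomain() string {
		if ds.UniverseDomain != "" {
			return ds.UniverseDomain
		}
		if ds.DefaultUniverseDomain != "" {
			return ds.DefaultUniverseDomain
		}
		return "googleapis.com"
	}

	func main() {
		fmt.Println(dialSettings{}.universeDomain())                              // googleapis.com
		fmt.Println(dialSettings{UniverseDomain: "example.edu"}.universeDomain()) // example.edu
	}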
+func (ds *DialSettings) GetUniverseDomain() string { + if ds.UniverseDomain == "" { + return ds.GetDefaultUniverseDomain() + } + return ds.UniverseDomain +} + +func (ds *DialSettings) IsUniverseDomainGDU() bool { + return ds.GetUniverseDomain() == ds.GetDefaultUniverseDomain() +} diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index 053e4ee2..64e84963 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. -const Version = "0.130.0" +const Version = "0.160.0" diff --git a/vendor/google.golang.org/api/option/internaloption/internaloption.go b/vendor/google.golang.org/api/option/internaloption/internaloption.go index 3b8461d1..e6b5c102 100644 --- a/vendor/google.golang.org/api/option/internaloption/internaloption.go +++ b/vendor/google.golang.org/api/option/internaloption/internaloption.go @@ -22,10 +22,32 @@ func (o defaultEndpointOption) Apply(settings *internal.DialSettings) { // It should only be used internally by generated clients. // // This is similar to WithEndpoint, but allows us to determine whether the user has overridden the default endpoint. +// +// Deprecated: WithDefaultEndpoint does not support setting the universe domain. +// Use WithDefaultEndpointTemplate and WithDefaultUniverseDomain to compose the +// default endpoint instead. func WithDefaultEndpoint(url string) option.ClientOption { return defaultEndpointOption(url) } +type defaultEndpointTemplateOption string + +func (o defaultEndpointTemplateOption) Apply(settings *internal.DialSettings) { + settings.DefaultEndpointTemplate = string(o) +} + +// WithDefaultEndpointTemplate provides a template for creating the endpoint +// using a universe domain. See also WithDefaultUniverseDomain and +// option.WithUniverseDomain. The placeholder UNIVERSE_DOMAIN should be used +// instead of a concrete universe domain such as "googleapis.com". +// +// Example: WithDefaultEndpointTemplate("https://logging.UNIVERSE_DOMAIN/") +// +// It should only be used internally by generated clients. +func WithDefaultEndpointTemplate(url string) option.ClientOption { + return defaultEndpointTemplateOption(url) +} + type defaultMTLSEndpointOption string func (o defaultMTLSEndpointOption) Apply(settings *internal.DialSettings) { @@ -126,8 +148,29 @@ func (w withDefaultScopes) Apply(o *internal.DialSettings) { copy(o.DefaultScopes, w) } +// WithDefaultUniverseDomain returns a ClientOption that sets the default universe domain. +// +// It should only be used internally by generated clients. +// +// This is similar to the public WithUniverse, but allows us to determine whether the user has +// overridden the default universe. +func WithDefaultUniverseDomain(ud string) option.ClientOption { + return withDefaultUniverseDomain(ud) +} + +type withDefaultUniverseDomain string + +func (w withDefaultUniverseDomain) Apply(o *internal.DialSettings) { + o.DefaultUniverseDomain = string(w) +} + // EnableJwtWithScope returns a ClientOption that specifies if scope can be used // with self-signed JWT. +// +// EnableJwtWithScope is ignored when option.WithUniverseDomain is set +// to a value other than the Google Default Universe (GDU) of "googleapis.com". +// For non-GDU domains, token exchange is impossible and services must +// support self-signed JWTs with scopes. 
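The UNIVERSE_DOMAIN placeholder in WithDefaultEndpointTemplate is substituted exactly once, the same way resolvedDefaultEndpoint does it in cba.go above. A one-liner sketch of that substitution (template taken from the doc example):

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		const tmpl = "https://logging.UNIVERSE_DOMAIN/"
		// Single substitution, mirroring resolvedDefaultEndpoint's strings.Replace(..., 1).
		fmt.Println(strings.Replace(tmpl, "UNIVERSE_DOMAIN", "googleapis.com", 1))
		// https://logging.googleapis.com/
	}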
func EnableJwtWithScope() option.ClientOption { return enableJwtWithScope(true) } @@ -150,6 +193,19 @@ func (w *withCreds) Apply(o *internal.DialSettings) { o.InternalCredentials = (*google.Credentials)(w) } +// EnableNewAuthLibrary returns a ClientOption that specifies if libraries in this +// module to delegate auth to our new library. This option will be removed in +// the future once all clients have been moved to the new auth layer. +func EnableNewAuthLibrary() option.ClientOption { + return enableNewAuthLibrary(true) +} + +type enableNewAuthLibrary bool + +func (w enableNewAuthLibrary) Apply(o *internal.DialSettings) { + o.EnableNewAuthLibrary = bool(w) +} + // EmbeddableAdapter is a no-op option.ClientOption that allow libraries to // create their own client options by embedding this type into their own // client-specific option wrapper. See example for usage. diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go index b2085a19..c882c1eb 100644 --- a/vendor/google.golang.org/api/option/option.go +++ b/vendor/google.golang.org/api/option/option.go @@ -343,3 +343,16 @@ func (w *withCreds) Apply(o *internal.DialSettings) { func WithCredentials(creds *google.Credentials) ClientOption { return (*withCreds)(creds) } + +// WithUniverseDomain returns a ClientOption that sets the universe domain. +// +// This is an EXPERIMENTAL API and may be changed or removed in the future. +func WithUniverseDomain(ud string) ClientOption { + return withUniverseDomain(ud) +} + +type withUniverseDomain string + +func (w withUniverseDomain) Apply(o *internal.DialSettings) { + o.UniverseDomain = string(w) +} diff --git a/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-api.json b/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-api.json index 7885a612..234a1620 100644 --- a/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-api.json +++ b/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-api.json @@ -707,6 +707,41 @@ "https://www.googleapis.com/auth/sqlservice.admin" ] }, + "demote": { + "description": "Demotes an existing standalone instance to be a Cloud SQL read replica for an external database server.", + "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/demote", + "httpMethod": "POST", + "id": "sql.instances.demote", + "parameterOrder": [ + "project", + "instance" + ], + "parameters": { + "instance": { + "description": "Required. The name of the Cloud SQL instance.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Required. The project ID of the project that contains the instance.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "sql/v1beta4/projects/{project}/instances/{instance}/demote", + "request": { + "$ref": "InstancesDemoteRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/sqlservice.admin" + ] + }, "demoteMaster": { "description": "Demotes the stand-alone instance to be a Cloud SQL read replica for an external database server.", "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/demoteMaster", @@ -753,7 +788,7 @@ ], "parameters": { "instance": { - "description": "Cloud SQL instance ID. This does not include the project ID.", + "description": "The Cloud SQL instance ID. 
This doesn't include the project ID.", "location": "path", "required": true, "type": "string" @@ -1023,6 +1058,11 @@ "instance" ], "parameters": { + "failover": { + "description": "Set to true if the promote operation should attempt to re-add the original primary as a replica when it comes back online. Otherwise, if this value is false or not set, the original primary will be a standalone instance.", + "location": "query", + "type": "boolean" + }, "instance": { "description": "Cloud SQL read replica instance name.", "location": "path", @@ -1278,6 +1318,44 @@ "https://www.googleapis.com/auth/sqlservice.admin" ] }, + "switchover": { + "description": "Switches over from the primary instance to a replica instance.", + "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/switchover", + "httpMethod": "POST", + "id": "sql.instances.switchover", + "parameterOrder": [ + "project", + "instance" + ], + "parameters": { + "dbTimeout": { + "description": "Optional. (MySQL only) Cloud SQL instance operations timeout, which is a sum of all database operations. Default value is 10 minutes and can be modified to a maximum value of 24 hours.", + "format": "google-duration", + "location": "query", + "type": "string" + }, + "instance": { + "description": "Cloud SQL read replica instance name.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "ID of the project that contains the replica.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "sql/v1beta4/projects/{project}/instances/{instance}/switchover", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/sqlservice.admin" + ] + }, "truncateLog": { "description": "Truncate MySQL general and slow query log tables MySQL only.", "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/truncateLog", @@ -2122,7 +2200,7 @@ } } }, - "revision": "20230627", + "revision": "20240115", "rootUrl": "https://sqladmin.googleapis.com/", "schemas": { "AclEntry": { @@ -2170,12 +2248,16 @@ "enum": [ "SQL_API_WARNING_CODE_UNSPECIFIED", "REGION_UNREACHABLE", - "MAX_RESULTS_EXCEEDS_LIMIT" + "MAX_RESULTS_EXCEEDS_LIMIT", + "COMPROMISED_CREDENTIALS", + "INTERNAL_STATE_FAILURE" ], "enumDescriptions": [ "An unknown or unset warning type from Cloud SQL API.", "Warning when one or more regions are not reachable. The returned result set may be incomplete.", - "Warning when user provided maxResults parameter exceeds the limit. The returned result set may be incomplete." + "Warning when user provided maxResults parameter exceeds the limit. The returned result set may be incomplete.", + "Warning when user tries to create/update a user with credentials that have previously been compromised by a public data breach.", + "Warning when the operation succeeds but some non-critical workflow state failed." ], "type": "string" }, @@ -2672,6 +2754,10 @@ "description": "This is always `sql#connectSettings`.", "type": "string" }, + "pscEnabled": { + "description": "Whether PSC connectivity is enabled for this instance.", + "type": "boolean" + }, "region": { "description": "The cloud region for the instance. e.g. `us-central1`, `europe-west1`. The region cannot be changed after instance creation.", "type": "string" @@ -2935,6 +3021,11 @@ "$ref": "DiskEncryptionStatus", "description": "Disk encryption status specific to an instance." }, + "dnsName": { + "description": "Output only. 
The dns name of the instance.", + "readOnly": true, + "type": "string" + }, "etag": { "description": "This field is deprecated and will be removed from a future version of the API. Use the `settings.settingsVersion` field instead.", "type": "string" @@ -3015,10 +3106,21 @@ "$ref": "SqlOutOfDiskReport", "description": "This field represents the report generated by the proactive database wellness job for OutOfDisk issues. * Writers: * the proactive database wellness job for OOD. * Readers: * the proactive database wellness job" }, + "primaryDnsName": { + "deprecated": true, + "description": "Output only. DEPRECATED: please use write_endpoint instead.", + "readOnly": true, + "type": "string" + }, "project": { "description": "The project ID of the project containing the Cloud SQL instance. The Google apps domain is prefixed if applicable.", "type": "string" }, + "pscServiceAttachmentLink": { + "description": "Output only. The link to service attachment of PSC instance.", + "readOnly": true, + "type": "string" + }, "region": { "description": "The geographical region. Can be: * `us-central` (`FIRST_GEN` instances only) * `us-central1` (`SECOND_GEN` instances only) * `asia-east1` or `europe-west1`. Defaults to `us-central` or `us-central1` depending on the instance type. The region cannot be changed after instance creation.", "type": "string" @@ -3066,6 +3168,20 @@ "$ref": "Settings", "description": "The user settings." }, + "sqlNetworkArchitecture": { + "description": "The SQL network architecture for the instance.", + "enum": [ + "SQL_NETWORK_ARCHITECTURE_UNSPECIFIED", + "NEW_NETWORK_ARCHITECTURE", + "OLD_NETWORK_ARCHITECTURE" + ], + "enumDescriptions": [ + "", + "The instance uses the new network architecture.", + "The instance uses the old network architecture." + ], + "type": "string" + }, "state": { "description": "The current serving state of the Cloud SQL instance.", "enum": [ @@ -3120,6 +3236,11 @@ "type": "string" }, "type": "array" + }, + "writeEndpoint": { + "description": "Output only. The dns name of the primary instance in a replication group.", + "readOnly": true, + "type": "string" } }, "type": "object" @@ -3142,6 +3263,21 @@ }, "type": "object" }, + "DemoteContext": { + "description": "This context is used to demote an existing standalone instance to be a Cloud SQL read replica for an external database server.", + "id": "DemoteContext", + "properties": { + "kind": { + "description": "This is always `sql#demoteContext`.", + "type": "string" + }, + "sourceRepresentativeInstanceName": { + "description": "Required. The name of the instance which acts as an on-premises primary instance in the replication setup.", + "type": "string" + } + }, + "type": "object" + }, "DemoteMasterConfiguration": { "description": "Read-replica configuration for connecting to the on-premises primary instance.", "id": "DemoteMasterConfiguration", @@ -3282,12 +3418,14 @@ "enum": [ "BAK_TYPE_UNSPECIFIED", "FULL", - "DIFF" + "DIFF", + "TLOG" ], "enumDescriptions": [ - "default type.", + "Default type.", "Full backup.", - "Differential backup." + "Differential backup.", + "SQL Server Transaction Log" ], "type": "string" }, @@ -3383,6 +3521,10 @@ }, "type": "object" }, + "parallel": { + "description": "Optional. Whether or not the export should be parallel.", + "type": "boolean" + }, "schemaOnly": { "description": "Export only schemas.", "type": "boolean" @@ -3393,6 +3535,11 @@ "type": "string" }, "type": "array" + }, + "threads": { + "description": "Optional. 
The number of threads to use for parallel export.", + "format": "int32", + "type": "integer" } }, "type": "object" @@ -3682,12 +3829,14 @@ "enum": [ "BAK_TYPE_UNSPECIFIED", "FULL", - "DIFF" + "DIFF", + "TLOG" ], "enumDescriptions": [ - "default type.", + "Default type.", "Full backup.", - "Differential backup." + "Differential backup.", + "SQL Server Transaction Log" ], "type": "string" }, @@ -3716,6 +3865,15 @@ "description": "Whether or not the backup importing request will just bring database online without downloading Bak content only one of \"no_recovery\" and \"recovery_only\" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server.", "type": "boolean" }, + "stopAt": { + "description": "Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only.", + "format": "google-datetime", + "type": "string" + }, + "stopAtMark": { + "description": "Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only.", + "type": "string" + }, "striped": { "description": "Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server.", "type": "boolean" @@ -3861,6 +4019,17 @@ }, "type": "object" }, + "InstancesDemoteRequest": { + "description": "This request is used to demote an existing standalone instance to be a Cloud SQL read replica for an external database server.", + "id": "InstancesDemoteRequest", + "properties": { + "demoteContext": { + "$ref": "DemoteContext", + "description": "Required. This context is used to demote an existing standalone instance to be a Cloud SQL read replica for an external database server." + } + }, + "type": "object" + }, "InstancesExportRequest": { "description": "Database instance export request.", "id": "InstancesExportRequest", @@ -4015,15 +4184,35 @@ "description": "The resource link for the VPC network from which the Cloud SQL instance is accessible for private IP. For example, `/projects/myProject/global/networks/default`. This setting can be updated, but it cannot be removed after it is set.", "type": "string" }, + "pscConfig": { + "$ref": "PscConfig", + "description": "PSC settings for this instance." + }, "requireSsl": { - "description": "Whether SSL connections over IP are enforced or not.", + "description": "Use `ssl_mode` instead for MySQL and PostgreSQL. SQL Server uses this flag. Whether SSL/TLS connections over IP are enforced. If set to false, then allow both non-SSL/non-TLS and SSL/TLS connections. For SSL/TLS connections, the client certificate won't be verified. If set to true, then only allow connections encrypted with SSL/TLS and with valid client certificates. If you want to enforce SSL/TLS without enforcing the requirement for valid client certificates, then use the `ssl_mode` flag instead of the legacy `require_ssl` flag.", "type": "boolean" + }, + "sslMode": { + "description": "Specify how SSL/TLS is enforced in database connections. MySQL and PostgreSQL use the `ssl_mode` flag. 
If you must use the `require_ssl` flag for backward compatibility, then only the following value pairs are valid: * `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false` * `ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED` and `require_ssl=true` The value of `ssl_mode` gets priority over the value of `require_ssl`. For example, for the pair `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false`, the `ssl_mode=ENCRYPTED_ONLY` means only accept SSL connections, while the `require_ssl=false` means accept both non-SSL and SSL connections. MySQL and PostgreSQL databases respect `ssl_mode` in this case and accept only SSL connections. SQL Server uses the `require_ssl` flag. You can set the value for this flag to `true` or `false`.", + "enum": [ + "SSL_MODE_UNSPECIFIED", + "ALLOW_UNENCRYPTED_AND_ENCRYPTED", + "ENCRYPTED_ONLY", + "TRUSTED_CLIENT_CERTIFICATE_REQUIRED" + ], + "enumDescriptions": [ + "The SSL mode is unknown.", + "Allow non-SSL/non-TLS and SSL/TLS connections. For SSL/TLS connections, the client certificate won't be verified. When this value is used, the legacy `require_ssl` flag must be false or cleared to avoid the conflict between values of two flags.", + "Only allow connections encrypted with SSL/TLS. When this value is used, the legacy `require_ssl` flag must be false or cleared to avoid the conflict between values of two flags.", + "Only allow connections encrypted with SSL/TLS and with valid client certificates. When this value is used, the legacy `require_ssl` flag must be true or cleared to avoid the conflict between values of two flags." + ], + "type": "string" } }, "type": "object" }, "IpMapping": { - "description": "Database instance IP Mapping.", + "description": "Database instance IP mapping", "id": "IpMapping", "properties": { "ipAddress": { @@ -4070,7 +4259,7 @@ "type": "string" }, "secondaryZone": { - "description": "The preferred Compute Engine zone for the secondary/failover (for example: us-central1-a, us-central1-b, etc.).", + "description": "The preferred Compute Engine zone for the secondary/failover (for example: us-central1-a, us-central1-b, etc.). To disable this field, set it to 'no_secondary_zone'.", "type": "string" }, "zone": { @@ -4103,12 +4292,14 @@ "enum": [ "SQL_UPDATE_TRACK_UNSPECIFIED", "canary", - "stable" + "stable", + "week5" ], "enumDescriptions": [ "This is an unknown maintenance timing preference.", "For instance update that requires a restart, this update track indicates your instance prefer to restart for new version early in maintenance window.", - "For instance update that requires a restart, this update track indicates your instance prefer to let Cloud SQL choose the timing of restart (within its Maintenance window, if applicable)." + "For instance update that requires a restart, this update track indicates your instance prefer to let Cloud SQL choose the timing of restart (within its Maintenance window, if applicable).", + "For instance update that requires a restart, this update track indicates your instance prefer to let Cloud SQL choose the timing of restart (within its Maintenance window, if applicable) to be at least 5 weeks after the notification." ], "type": "string" } @@ -4229,6 +4420,10 @@ "description": "An Operation resource. 
For successful operations that return an Operation resource, only the fields relevant to the operation are populated in the resource.", "id": "Operation", "properties": { + "apiWarning": { + "$ref": "ApiWarning", + "description": "An Admin API warning message." + }, "backupContext": { "$ref": "BackupContext", "description": "The context for backup operation, if applicable." @@ -4568,6 +4763,10 @@ ], "type": "string" }, + "disallowCompromisedCredentials": { + "description": "Disallow credentials that have been previously compromised by a public data breach.", + "type": "boolean" + }, "disallowUsernameSubstring": { "description": "Disallow username as a part of the password.", "type": "boolean" @@ -4606,10 +4805,32 @@ }, "type": "object" }, + "PscConfig": { + "description": "PSC settings for a Cloud SQL instance.", + "id": "PscConfig", + "properties": { + "allowedConsumerProjects": { + "description": "Optional. The list of consumer projects that are allow-listed for PSC connections to this instance. This instance can be connected to with PSC from any network in these projects. Each consumer project in this list may be represented by a project number (numeric) or by a project id (alphanumeric).", + "items": { + "type": "string" + }, + "type": "array" + }, + "pscEnabled": { + "description": "Whether PSC connectivity is enabled for this instance.", + "type": "boolean" + } + }, + "type": "object" + }, "ReplicaConfiguration": { "description": "Read-replica configuration for connecting to the primary instance.", "id": "ReplicaConfiguration", "properties": { + "cascadableReplica": { + "description": "Optional. Specifies if a SQL Server replica is a cascadable replica. A cascadable replica is a SQL Server cross region replica that supports replica(s) under it.", + "type": "boolean" + }, "failoverTarget": { "description": "Specifies if the replica is the failover target. If the field is set to `true` the replica will be designated as a failover replica. In case the primary instance fails, the replica instance will be promoted as the new primary instance. Only one replica can be specified as failover target, and the replica has to be in different zone with the primary instance.", "type": "boolean" @@ -4994,7 +5215,13 @@ "INSUFFICIENT_GCS_PERMISSIONS", "INVALID_FILE_INFO", "UNSUPPORTED_DATABASE_SETTINGS", - "MYSQL_PARALLEL_IMPORT_INSUFFICIENT_PRIVILEGE" + "MYSQL_PARALLEL_IMPORT_INSUFFICIENT_PRIVILEGE", + "LOCAL_INFILE_OFF", + "TURN_ON_PITR_AFTER_PROMOTE", + "INCOMPATIBLE_DATABASE_MINOR_VERSION", + "SOURCE_MAX_SUBSCRIPTIONS", + "UNABLE_TO_VERIFY_DEFINERS", + "SUBSCRIPTION_CALCULATION_STATUS" ], "enumDescriptions": [ "", @@ -5031,7 +5258,13 @@ "The Cloud Storage bucket is missing necessary permissions.", "The Cloud Storage bucket has an error in the file or contains invalid file information.", "The source instance has unsupported database settings for migration.", - "The replication user is missing parallel import specific privileges. (e.g. LOCK TABLES) for MySQL." + "The replication user is missing parallel import specific privileges. (e.g. 
LOCK TABLES) for MySQL.", + "The global variable local_infile is off on external server replica.", + "This code instructs customers to turn on point-in-time recovery manually for the instance after promoting the Cloud SQL for PostgreSQL instance.", + "The minor version of replica database is incompatible with the source.", + "This warning message indicates that Cloud SQL uses the maximum number of subscriptions to migrate data from the source to the destination.", + "Unable to verify definers on the source for MySQL.", + "If a time out occurs while the subscription counts are calculated, then this value is set to 1. Otherwise, this value is set to 2." ], "type": "string" } @@ -5569,12 +5802,18 @@ "enum": [ "BUILT_IN", "CLOUD_IAM_USER", - "CLOUD_IAM_SERVICE_ACCOUNT" + "CLOUD_IAM_SERVICE_ACCOUNT", + "CLOUD_IAM_GROUP", + "CLOUD_IAM_GROUP_USER", + "CLOUD_IAM_GROUP_SERVICE_ACCOUNT" ], "enumDescriptions": [ "The database's built-in user type.", "Cloud IAM user.", - "Cloud IAM service account." + "Cloud IAM service account.", + "Cloud IAM group non-login user.", + "Cloud IAM group login user.", + "Cloud IAM group service account." ], "type": "string" } diff --git a/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-gen.go b/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-gen.go index b66ec18c..0b9f1d85 100644 --- a/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-gen.go +++ b/vendor/google.golang.org/api/sqladmin/v1beta4/sqladmin-gen.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC. +// Copyright 2024 Google LLC. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -8,6 +8,17 @@ // // For product documentation, see: https://developers.google.com/cloud-sql/ // +// # Library status +// +// These client libraries are officially supported by Google. However, this +// library is considered complete and is in maintenance mode. This means +// that we will address critical bugs and security issues but will not add +// any new features. +// +// When possible, we recommend using our newer +// [Cloud Client Libraries for Go](https://pkg.go.dev/cloud.google.com/go) +// that are still actively being worked and iterated on. +// // # Creating a client // // Usage example: @@ -17,28 +28,31 @@ // ctx := context.Background() // sqladminService, err := sqladmin.NewService(ctx) // -// In this example, Google Application Default Credentials are used for authentication. -// -// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. +// In this example, Google Application Default Credentials are used for +// authentication. For information on how to create and obtain Application +// Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. // // # Other authentication options // -// By default, all available scopes (see "Constants") are used to authenticate. To restrict scopes, use option.WithScopes: +// By default, all available scopes (see "Constants") are used to authenticate. 
+// To restrict scopes, use [google.golang.org/api/option.WithScopes]:
//
//	sqladminService, err := sqladmin.NewService(ctx, option.WithScopes(sqladmin.SqlserviceAdminScope))
//
-// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey:
+// To use an API key for authentication (note: some APIs do not support API
+// keys), use [google.golang.org/api/option.WithAPIKey]:
//
//	sqladminService, err := sqladmin.NewService(ctx, option.WithAPIKey("AIza..."))
//
-// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource:
+// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth
+// flow), use [google.golang.org/api/option.WithTokenSource]:
//
//	config := &oauth2.Config{...}
//	// ...
//	token, err := config.Exchange(ctx, ...)
//	sqladminService, err := sqladmin.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token)))
//
-// See https://godoc.org/google.golang.org/api/option/ for details on options.
+// See [google.golang.org/api/option.ClientOption] for details on options.
package sqladmin // import "google.golang.org/api/sqladmin/v1beta4"

import (
@@ -81,7 +95,9 @@ const apiId = "sqladmin:v1beta4"
const apiName = "sqladmin"
const apiVersion = "v1beta4"
const basePath = "https://sqladmin.googleapis.com/"
+const basePathTemplate = "https://sqladmin.UNIVERSE_DOMAIN/"
const mtlsBasePath = "https://sqladmin.mtls.googleapis.com/"
+const defaultUniverseDomain = "googleapis.com"

// OAuth2 scopes used by this API.
const (
@@ -102,7 +118,9 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err
	// NOTE: prepend, so we don't override user-specified scopes.
	opts = append([]option.ClientOption{scopesOption}, opts...)
	opts = append(opts, internaloption.WithDefaultEndpoint(basePath))
+	opts = append(opts, internaloption.WithDefaultEndpointTemplate(basePathTemplate))
	opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath))
+	opts = append(opts, internaloption.WithDefaultUniverseDomain(defaultUniverseDomain))
	client, endpoint, err := htransport.NewClient(ctx, opts...)
	if err != nil {
		return nil, err
@@ -357,6 +375,11 @@ type ApiWarning struct {
	// "MAX_RESULTS_EXCEEDS_LIMIT" - Warning when user provided maxResults
	// parameter exceeds the limit. The returned result set may be
	// incomplete.
+	// "COMPROMISED_CREDENTIALS" - Warning when user tries to
+	// create/update a user with credentials that have previously been
+	// compromised by a public data breach.
+	// "INTERNAL_STATE_FAILURE" - Warning when the operation succeeds but
+	// some non-critical workflow state failed.
	Code string `json:"code,omitempty"`

	// Message: The warning message.
@@ -916,6 +939,9 @@ type ConnectSettings struct {
	// Kind: This is always `sql#connectSettings`.
	Kind string `json:"kind,omitempty"`

+	// PscEnabled: Whether PSC connectivity is enabled for this instance.
+	PscEnabled bool `json:"pscEnabled,omitempty"`
+
	// Region: The cloud region for the instance. e.g. `us-central1`,
	// `europe-west1`. The region cannot be changed after instance creation.
	Region string `json:"region,omitempty"`
@@ -1189,6 +1215,9 @@ type DatabaseInstance struct {
	// DiskEncryptionStatus: Disk encryption status specific to an instance.
	DiskEncryptionStatus *DiskEncryptionStatus `json:"diskEncryptionStatus,omitempty"`

+	// DnsName: Output only. The dns name of the instance.
+ DnsName string `json:"dnsName,omitempty"` + // Etag: This field is deprecated and will be removed from a future // version of the API. Use the `settings.settingsVersion` field instead. Etag string `json:"etag,omitempty"` @@ -1250,10 +1279,18 @@ type DatabaseInstance struct { // proactive database wellness job OutOfDiskReport *SqlOutOfDiskReport `json:"outOfDiskReport,omitempty"` + // PrimaryDnsName: Output only. DEPRECATED: please use write_endpoint + // instead. + PrimaryDnsName string `json:"primaryDnsName,omitempty"` + // Project: The project ID of the project containing the Cloud SQL // instance. The Google apps domain is prefixed if applicable. Project string `json:"project,omitempty"` + // PscServiceAttachmentLink: Output only. The link to service attachment + // of PSC instance. + PscServiceAttachmentLink string `json:"pscServiceAttachmentLink,omitempty"` + // Region: The geographical region. Can be: * `us-central` (`FIRST_GEN` // instances only) * `us-central1` (`SECOND_GEN` instances only) * // `asia-east1` or `europe-west1`. Defaults to `us-central` or @@ -1300,6 +1337,17 @@ type DatabaseInstance struct { // Settings: The user settings. Settings *Settings `json:"settings,omitempty"` + // SqlNetworkArchitecture: The SQL network architecture for the + // instance. + // + // Possible values: + // "SQL_NETWORK_ARCHITECTURE_UNSPECIFIED" + // "NEW_NETWORK_ARCHITECTURE" - The instance uses the new network + // architecture. + // "OLD_NETWORK_ARCHITECTURE" - The instance uses the old network + // architecture. + SqlNetworkArchitecture string `json:"sqlNetworkArchitecture,omitempty"` + // State: The current serving state of the Cloud SQL instance. // // Possible values: @@ -1332,6 +1380,10 @@ type DatabaseInstance struct { // revoked or denied access to SuspensionReason []string `json:"suspensionReason,omitempty"` + // WriteEndpoint: Output only. The dns name of the primary instance in a + // replication group. + WriteEndpoint string `json:"writeEndpoint,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -1433,6 +1485,41 @@ func (s *DatabasesListResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// DemoteContext: This context is used to demote an existing standalone +// instance to be a Cloud SQL read replica for an external database +// server. +type DemoteContext struct { + // Kind: This is always `sql#demoteContext`. + Kind string `json:"kind,omitempty"` + + // SourceRepresentativeInstanceName: Required. The name of the instance + // which acts as an on-premises primary instance in the replication + // setup. + SourceRepresentativeInstanceName string `json:"sourceRepresentativeInstanceName,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. 
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DemoteContext) MarshalJSON() ([]byte, error) { + type NoMethod DemoteContext + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // DemoteMasterConfiguration: Read-replica configuration for connecting // to the on-premises primary instance. type DemoteMasterConfiguration struct { @@ -1765,9 +1852,10 @@ type ExportContextBakExportOptions struct { // Server only // // Possible values: - // "BAK_TYPE_UNSPECIFIED" - default type. + // "BAK_TYPE_UNSPECIFIED" - Default type. // "FULL" - Full backup. // "DIFF" - Differential backup. + // "TLOG" - SQL Server Transaction Log BakType string `json:"bakType,omitempty"` // CopyOnly: Deprecated: copy_only is deprecated. Use differential_base @@ -1863,6 +1951,9 @@ type ExportContextSqlExportOptions struct { // MysqlExportOptions: Options for exporting from MySQL. MysqlExportOptions *ExportContextSqlExportOptionsMysqlExportOptions `json:"mysqlExportOptions,omitempty"` + // Parallel: Optional. Whether or not the export should be parallel. + Parallel bool `json:"parallel,omitempty"` + // SchemaOnly: Export only schemas. SchemaOnly bool `json:"schemaOnly,omitempty"` @@ -1871,6 +1962,9 @@ type ExportContextSqlExportOptions struct { // For PostgreSQL instances, you can specify only one table. Tables []string `json:"tables,omitempty"` + // Threads: Optional. The number of threads to use for parallel export. + Threads int64 `json:"threads,omitempty"` + // ForceSendFields is a list of field names (e.g. "MysqlExportOptions") // to unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any @@ -2284,9 +2378,10 @@ type ImportContextBakImportOptions struct { // BakType: Type of the bak content, FULL or DIFF. // // Possible values: - // "BAK_TYPE_UNSPECIFIED" - default type. + // "BAK_TYPE_UNSPECIFIED" - Default type. // "FULL" - Full backup. // "DIFF" - Differential backup. + // "TLOG" - SQL Server Transaction Log BakType string `json:"bakType,omitempty"` EncryptionOptions *ImportContextBakImportOptionsEncryptionOptions `json:"encryptionOptions,omitempty"` @@ -2301,6 +2396,18 @@ type ImportContextBakImportOptions struct { // return. Applies only to Cloud SQL for SQL Server. RecoveryOnly bool `json:"recoveryOnly,omitempty"` + // StopAt: Optional. The timestamp when the import should stop. This + // timestamp is in the RFC 3339 (https://tools.ietf.org/html/rfc3339) + // format (for example, `2023-10-01T16:19:00.094`). This field is + // equivalent to the STOPAT keyword and applies to Cloud SQL for SQL + // Server only. + StopAt string `json:"stopAt,omitempty"` + + // StopAtMark: Optional. The marked transaction where the import should + // stop. This field is equivalent to the STOPATMARK keyword and applies + // to Cloud SQL for SQL Server only. + StopAtMark string `json:"stopAtMark,omitempty"` + // Striped: Whether or not the backup set being restored is striped. // Applies only to Cloud SQL for SQL Server. 
Striped bool `json:"striped,omitempty"` @@ -2559,6 +2666,38 @@ func (s *InstancesDemoteMasterRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InstancesDemoteRequest: This request is used to demote an existing +// standalone instance to be a Cloud SQL read replica for an external +// database server. +type InstancesDemoteRequest struct { + // DemoteContext: Required. This context is used to demote an existing + // standalone instance to be a Cloud SQL read replica for an external + // database server. + DemoteContext *DemoteContext `json:"demoteContext,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DemoteContext") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DemoteContext") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstancesDemoteRequest) MarshalJSON() ([]byte, error) { + type NoMethod InstancesDemoteRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // InstancesExportRequest: Database instance export request. type InstancesExportRequest struct { // ExportContext: Contains details about the export operation. @@ -2878,9 +3017,52 @@ type IpConfiguration struct { // updated, but it cannot be removed after it is set. PrivateNetwork string `json:"privateNetwork,omitempty"` - // RequireSsl: Whether SSL connections over IP are enforced or not. + // PscConfig: PSC settings for this instance. + PscConfig *PscConfig `json:"pscConfig,omitempty"` + + // RequireSsl: Use `ssl_mode` instead for MySQL and PostgreSQL. SQL + // Server uses this flag. Whether SSL/TLS connections over IP are + // enforced. If set to false, then allow both non-SSL/non-TLS and + // SSL/TLS connections. For SSL/TLS connections, the client certificate + // won't be verified. If set to true, then only allow connections + // encrypted with SSL/TLS and with valid client certificates. If you + // want to enforce SSL/TLS without enforcing the requirement for valid + // client certificates, then use the `ssl_mode` flag instead of the + // legacy `require_ssl` flag. RequireSsl bool `json:"requireSsl,omitempty"` + // SslMode: Specify how SSL/TLS is enforced in database connections. + // MySQL and PostgreSQL use the `ssl_mode` flag. If you must use the + // `require_ssl` flag for backward compatibility, then only the + // following value pairs are valid: * + // `ssl_mode=ALLOW_UNENCRYPTED_AND_ENCRYPTED` and `require_ssl=false` * + // `ssl_mode=ENCRYPTED_ONLY` and `require_ssl=false` * + // `ssl_mode=TRUSTED_CLIENT_CERTIFICATE_REQUIRED` and `require_ssl=true` + // The value of `ssl_mode` gets priority over the value of + // `require_ssl`. 
For example, for the pair `ssl_mode=ENCRYPTED_ONLY` + // and `require_ssl=false`, the `ssl_mode=ENCRYPTED_ONLY` means only + // accept SSL connections, while the `require_ssl=false` means accept + // both non-SSL and SSL connections. MySQL and PostgreSQL databases + // respect `ssl_mode` in this case and accept only SSL connections. SQL + // Server uses the `require_ssl` flag. You can set the value for this + // flag to `true` or `false`. + // + // Possible values: + // "SSL_MODE_UNSPECIFIED" - The SSL mode is unknown. + // "ALLOW_UNENCRYPTED_AND_ENCRYPTED" - Allow non-SSL/non-TLS and + // SSL/TLS connections. For SSL/TLS connections, the client certificate + // won't be verified. When this value is used, the legacy `require_ssl` + // flag must be false or cleared to avoid the conflict between values of + // two flags. + // "ENCRYPTED_ONLY" - Only allow connections encrypted with SSL/TLS. + // When this value is used, the legacy `require_ssl` flag must be false + // or cleared to avoid the conflict between values of two flags. + // "TRUSTED_CLIENT_CERTIFICATE_REQUIRED" - Only allow connections + // encrypted with SSL/TLS and with valid client certificates. When this + // value is used, the legacy `require_ssl` flag must be true or cleared + // to avoid the conflict between values of two flags. + SslMode string `json:"sslMode,omitempty"` + // ForceSendFields is a list of field names (e.g. "AllocatedIpRange") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any @@ -2905,7 +3087,7 @@ func (s *IpConfiguration) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// IpMapping: Database instance IP Mapping. +// IpMapping: Database instance IP mapping type IpMapping struct { // IpAddress: The IP address assigned. IpAddress string `json:"ipAddress,omitempty"` @@ -2976,6 +3158,7 @@ type LocationPreference struct { // SecondaryZone: The preferred Compute Engine zone for the // secondary/failover (for example: us-central1-a, us-central1-b, etc.). + // To disable this field, set it to 'no_secondary_zone'. SecondaryZone string `json:"secondaryZone,omitempty"` // Zone: The preferred Compute Engine zone (for example: us-central1-a, @@ -3033,6 +3216,10 @@ type MaintenanceWindow struct { // "stable" - For instance update that requires a restart, this update // track indicates your instance prefer to let Cloud SQL choose the // timing of restart (within its Maintenance window, if applicable). + // "week5" - For instance update that requires a restart, this update + // track indicates your instance prefer to let Cloud SQL choose the + // timing of restart (within its Maintenance window, if applicable) to + // be at least 5 weeks after the notification. UpdateTrack string `json:"updateTrack,omitempty"` // ForceSendFields is a list of field names (e.g. "Day") to @@ -3219,6 +3406,9 @@ func (s *OnPremisesConfiguration) MarshalJSON() ([]byte, error) { // return an Operation resource, only the fields relevant to the // operation are populated in the resource. type Operation struct { + // ApiWarning: An Admin API warning message. + ApiWarning *ApiWarning `json:"apiWarning,omitempty"` + // BackupContext: The context for backup operation, if applicable. BackupContext *BackupContext `json:"backupContext,omitempty"` @@ -3342,7 +3532,7 @@ type Operation struct { // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. 
"BackupContext") to + // ForceSendFields is a list of field names (e.g. "ApiWarning") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -3350,10 +3540,10 @@ type Operation struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "BackupContext") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "ApiWarning") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` @@ -3565,6 +3755,10 @@ type PasswordValidationPolicy struct { // numeric, and non-alphanumeric characters. Complexity string `json:"complexity,omitempty"` + // DisallowCompromisedCredentials: Disallow credentials that have been + // previously compromised by a public data breach. + DisallowCompromisedCredentials bool `json:"disallowCompromisedCredentials,omitempty"` + // DisallowUsernameSubstring: Disallow username as a part of the // password. DisallowUsernameSubstring bool `json:"disallowUsernameSubstring,omitempty"` @@ -3633,9 +3827,51 @@ func (s *PerformDiskShrinkContext) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// PscConfig: PSC settings for a Cloud SQL instance. +type PscConfig struct { + // AllowedConsumerProjects: Optional. The list of consumer projects that + // are allow-listed for PSC connections to this instance. This instance + // can be connected to with PSC from any network in these projects. Each + // consumer project in this list may be represented by a project number + // (numeric) or by a project id (alphanumeric). + AllowedConsumerProjects []string `json:"allowedConsumerProjects,omitempty"` + + // PscEnabled: Whether PSC connectivity is enabled for this instance. + PscEnabled bool `json:"pscEnabled,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "AllowedConsumerProjects") to unconditionally include in API + // requests. By default, fields with empty or default values are omitted + // from API requests. However, any non-pointer, non-interface field + // appearing in ForceSendFields will be sent to the server regardless of + // whether the field is empty or not. This may be used to include empty + // fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AllowedConsumerProjects") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *PscConfig) MarshalJSON() ([]byte, error) { + type NoMethod PscConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ReplicaConfiguration: Read-replica configuration for connecting to // the primary instance. type ReplicaConfiguration struct { + // CascadableReplica: Optional. Specifies if a SQL Server replica is a + // cascadable replica. A cascadable replica is a SQL Server cross region + // replica that supports replica(s) under it. + CascadableReplica bool `json:"cascadableReplica,omitempty"` + // FailoverTarget: Specifies if the replica is the failover target. If // the field is set to `true` the replica will be designated as a // failover replica. In case the primary instance fails, the replica @@ -3656,15 +3892,15 @@ type ReplicaConfiguration struct { // the data directory. MysqlReplicaConfiguration *MySqlReplicaConfiguration `json:"mysqlReplicaConfiguration,omitempty"` - // ForceSendFields is a list of field names (e.g. "FailoverTarget") to - // unconditionally include in API requests. By default, fields with + // ForceSendFields is a list of field names (e.g. "CascadableReplica") + // to unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "FailoverTarget") to + // NullFields is a list of field names (e.g. "CascadableReplica") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the @@ -4128,6 +4364,21 @@ type SqlExternalSyncSettingError struct { // "MYSQL_PARALLEL_IMPORT_INSUFFICIENT_PRIVILEGE" - The replication // user is missing parallel import specific privileges. (e.g. LOCK // TABLES) for MySQL. + // "LOCAL_INFILE_OFF" - The global variable local_infile is off on + // external server replica. + // "TURN_ON_PITR_AFTER_PROMOTE" - This code instructs customers to + // turn on point-in-time recovery manually for the instance after + // promoting the Cloud SQL for PostgreSQL instance. + // "INCOMPATIBLE_DATABASE_MINOR_VERSION" - The minor version of + // replica database is incompatible with the source. + // "SOURCE_MAX_SUBSCRIPTIONS" - This warning message indicates that + // Cloud SQL uses the maximum number of subscriptions to migrate data + // from the source to the destination. + // "UNABLE_TO_VERIFY_DEFINERS" - Unable to verify definers on the + // source for MySQL. + // "SUBSCRIPTION_CALCULATION_STATUS" - If a time out occurs while the + // subscription counts are calculated, then this value is set to 1. + // Otherwise, this value is set to 2. Type string `json:"type,omitempty"` // ForceSendFields is a list of field names (e.g. "Detail") to @@ -5020,6 +5271,10 @@ type User struct { // "BUILT_IN" - The database's built-in user type. // "CLOUD_IAM_USER" - Cloud IAM user. // "CLOUD_IAM_SERVICE_ACCOUNT" - Cloud IAM service account. + // "CLOUD_IAM_GROUP" - Cloud IAM group non-login user. + // "CLOUD_IAM_GROUP_USER" - Cloud IAM group login user. + // "CLOUD_IAM_GROUP_SERVICE_ACCOUNT" - Cloud IAM group service + // account. 
Type string `json:"type,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -7718,6 +7973,160 @@ func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro } +// method id "sql.instances.demote": + +type InstancesDemoteCall struct { + s *Service + project string + instance string + instancesdemoterequest *InstancesDemoteRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Demote: Demotes an existing standalone instance to be a Cloud SQL +// read replica for an external database server. +// +// - instance: The name of the Cloud SQL instance. +// - project: The project ID of the project that contains the instance. +func (r *InstancesService) Demote(project string, instance string, instancesdemoterequest *InstancesDemoteRequest) *InstancesDemoteCall { + c := &InstancesDemoteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instance = instance + c.instancesdemoterequest = instancesdemoterequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesDemoteCall) Fields(s ...googleapi.Field) *InstancesDemoteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesDemoteCall) Context(ctx context.Context) *InstancesDemoteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstancesDemoteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesDemoteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesdemoterequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "sql/v1beta4/projects/{project}/instances/{instance}/demote") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "instance": c.instance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "sql.instances.demote" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesDemoteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Demotes an existing standalone instance to be a Cloud SQL read replica for an external database server.", + // "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/demote", + // "httpMethod": "POST", + // "id": "sql.instances.demote", + // "parameterOrder": [ + // "project", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Required. The name of the Cloud SQL instance.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Required. The project ID of the project that contains the instance.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "sql/v1beta4/projects/{project}/instances/{instance}/demote", + // "request": { + // "$ref": "InstancesDemoteRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/sqlservice.admin" + // ] + // } + +} + // method id "sql.instances.demoteMaster": type InstancesDemoteMasterCall struct { @@ -7887,8 +8296,8 @@ type InstancesExportCall struct { // Export: Exports data from a Cloud SQL instance to a Cloud Storage // bucket as a SQL dump or CSV file. // -// - instance: Cloud SQL instance ID. This does not include the project -// ID. +// - instance: The Cloud SQL instance ID. This doesn't include the +// project ID. // - project: Project ID of the project that contains the instance to be // exported. func (r *InstancesService) Export(project string, instance string, instancesexportrequest *InstancesExportRequest) *InstancesExportCall { @@ -8001,7 +8410,7 @@ func (c *InstancesExportCall) Do(opts ...googleapi.CallOption) (*Operation, erro // ], // "parameters": { // "instance": { - // "description": "Cloud SQL instance ID. This does not include the project ID.", + // "description": "The Cloud SQL instance ID. This doesn't include the project ID.", // "location": "path", // "required": true, // "type": "string" @@ -9200,6 +9609,15 @@ func (r *InstancesService) PromoteReplica(project string, instance string) *Inst return c } +// Failover sets the optional parameter "failover": Set to true if the +// promote operation should attempt to re-add the original primary as a +// replica when it comes back online. Otherwise, if this value is false +// or not set, the original primary will be a standalone instance. +func (c *InstancesPromoteReplicaCall) Failover(failover bool) *InstancesPromoteReplicaCall { + c.urlParams_.Set("failover", fmt.Sprint(failover)) + return c +} + // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
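The hunks above extend the generated instance-lifecycle surface: a new `sql.instances.demote` method and a `failover` query parameter on `sql.instances.promoteReplica`. The following is a minimal sketch of how a caller might drive both, assuming Application Default Credentials; the project, instance, and source-representation names are placeholders:

```go
package main

import (
	"context"
	"log"

	sqladmin "google.golang.org/api/sqladmin/v1beta4"
)

func main() {
	ctx := context.Background()
	svc, err := sqladmin.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}

	// Demote a standalone instance to a read replica of an external server.
	// "my-source-representation" is a placeholder for the source
	// representation instance that stands in for the on-premises primary.
	op, err := svc.Instances.Demote("my-project", "my-instance",
		&sqladmin.InstancesDemoteRequest{
			DemoteContext: &sqladmin.DemoteContext{
				SourceRepresentativeInstanceName: "my-source-representation",
			},
		}).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("demote operation %s: %s", op.Name, op.Status)

	// Promote a read replica. Failover(true) asks Cloud SQL to try to
	// re-add the original primary as a replica once it comes back online.
	op, err = svc.Instances.PromoteReplica("my-project", "my-replica").
		Failover(true).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("promote operation %s: %s", op.Name, op.Status)
}
```

Both calls return a long-running `Operation`; production code would typically poll `svc.Operations.Get` until the operation reports `DONE` before acting on the result.
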
@@ -9296,6 +9714,11 @@ func (c *InstancesPromoteReplicaCall) Do(opts ...googleapi.CallOption) (*Operati // "instance" // ], // "parameters": { + // "failover": { + // "description": "Set to true if the promote operation should attempt to re-add the original primary as a replica when it comes back online. Otherwise, if this value is false or not set, the original primary will be a standalone instance.", + // "location": "query", + // "type": "boolean" + // }, // "instance": { // "description": "Cloud SQL read replica instance name.", // "location": "path", @@ -10362,6 +10785,165 @@ func (c *InstancesStopReplicaCall) Do(opts ...googleapi.CallOption) (*Operation, } +// method id "sql.instances.switchover": + +type InstancesSwitchoverCall struct { + s *Service + project string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Switchover: Switches over from the primary instance to a replica +// instance. +// +// - instance: Cloud SQL read replica instance name. +// - project: ID of the project that contains the replica. +func (r *InstancesService) Switchover(project string, instance string) *InstancesSwitchoverCall { + c := &InstancesSwitchoverCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.instance = instance + return c +} + +// DbTimeout sets the optional parameter "dbTimeout": (MySQL only) Cloud +// SQL instance operations timeout, which is a sum of all database +// operations. Default value is 10 minutes and can be modified to a +// maximum value of 24 hours. +func (c *InstancesSwitchoverCall) DbTimeout(dbTimeout string) *InstancesSwitchoverCall { + c.urlParams_.Set("dbTimeout", dbTimeout) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesSwitchoverCall) Fields(s ...googleapi.Field) *InstancesSwitchoverCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesSwitchoverCall) Context(ctx context.Context) *InstancesSwitchoverCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstancesSwitchoverCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesSwitchoverCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "sql/v1beta4/projects/{project}/instances/{instance}/switchover") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "instance": c.instance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "sql.instances.switchover" call. +// Exactly one of *Operation or error will be non-nil. 
Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesSwitchoverCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Switches over from the primary instance to a replica instance.", + // "flatPath": "sql/v1beta4/projects/{project}/instances/{instance}/switchover", + // "httpMethod": "POST", + // "id": "sql.instances.switchover", + // "parameterOrder": [ + // "project", + // "instance" + // ], + // "parameters": { + // "dbTimeout": { + // "description": "Optional. (MySQL only) Cloud SQL instance operations timeout, which is a sum of all database operations. Default value is 10 minutes and can be modified to a maximum value of 24 hours.", + // "format": "google-duration", + // "location": "query", + // "type": "string" + // }, + // "instance": { + // "description": "Cloud SQL read replica instance name.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "ID of the project that contains the replica.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "sql/v1beta4/projects/{project}/instances/{instance}/switchover", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/sqlservice.admin" + // ] + // } + +} + // method id "sql.instances.truncateLog": type InstancesTruncateLogCall struct { diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go index eca0c3ba..75b266ac 100644 --- a/vendor/google.golang.org/api/transport/http/dial.go +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -16,6 +16,7 @@ import ( "time" "go.opencensus.io/plugin/ochttp" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" "golang.org/x/net/http2" "golang.org/x/oauth2" "google.golang.org/api/googleapi/transport" @@ -69,6 +70,9 @@ func newTransport(ctx context.Context, base http.RoundTripper, settings *interna requestReason: settings.RequestReason, } var trans http.RoundTripper = paramTransport + // Give OpenTelemetry precedence over OpenCensus in case user configuration + // causes both to write the same header (`X-Cloud-Trace-Context`). 
+ trans = addOpenTelemetryTransport(trans, settings) trans = addOCTransport(trans, settings) switch { case settings.NoAuth: @@ -84,6 +88,17 @@ func newTransport(ctx context.Context, base http.RoundTripper, settings *interna if err != nil { return nil, err } + credsUniverseDomain, err := creds.GetUniverseDomain() + if err != nil { + return nil, err + } + if settings.TokenSource == nil { + // We only validate non-tokensource creds, as TokenSource-based credentials + // don't propagate universe. + if settings.GetUniverseDomain() != credsUniverseDomain { + return nil, internal.ErrUniverseNotMatch(settings.GetUniverseDomain(), credsUniverseDomain) + } + } paramTransport.quotaProject = internal.GetQuotaProject(creds, settings.QuotaProject) ts := creds.TokenSource if settings.ImpersonationConfig == nil && settings.TokenSource != nil { @@ -145,22 +160,13 @@ func (t *parameterTransport) RoundTrip(req *http.Request) (*http.Response, error return rt.RoundTrip(&newReq) } -// Set at init time by dial_appengine.go. If nil, we're not on App Engine. -var appengineUrlfetchHook func(context.Context) http.RoundTripper - -// defaultBaseTransport returns the base HTTP transport. -// On App Engine, this is urlfetch.Transport. -// Otherwise, use a default transport, taking most defaults from -// http.DefaultTransport. +// defaultBaseTransport returns the base HTTP transport. It uses a default +// transport, taking most defaults from http.DefaultTransport. // If TLSCertificate is available, set TLSClientConfig as well. func defaultBaseTransport(ctx context.Context, clientCertSource cert.Source, dialTLSContext func(context.Context, string, string) (net.Conn, error)) http.RoundTripper { - if appengineUrlfetchHook != nil { - return appengineUrlfetchHook(ctx) - } - // Copy http.DefaultTransport except for MaxIdleConnsPerHost setting, - // which is increased due to reported performance issues under load in the GCS - // client. Transport.Clone is only available in Go 1.13 and up. + // which is increased due to reported performance issues under load in the + // GCS client. Transport.Clone is only available in Go 1.13 and up. trans := clonedTransport(http.DefaultTransport) if trans == nil { trans = fallbackBaseTransport() @@ -212,6 +218,13 @@ func fallbackBaseTransport() *http.Transport { } } +func addOpenTelemetryTransport(trans http.RoundTripper, settings *internal.DialSettings) http.RoundTripper { + if settings.TelemetryDisabled { + return trans + } + return otelhttp.NewTransport(trans) +} + func addOCTransport(trans http.RoundTripper, settings *internal.DialSettings) http.RoundTripper { if settings.TelemetryDisabled { return trans diff --git a/vendor/google.golang.org/api/transport/http/dial_appengine.go b/vendor/google.golang.org/api/transport/http/dial_appengine.go deleted file mode 100644 index f064e133..00000000 --- a/vendor/google.golang.org/api/transport/http/dial_appengine.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2016 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build appengine -// +build appengine - -package http - -import ( - "context" - "net/http" - - "google.golang.org/appengine/urlfetch" -) - -func init() { - appengineUrlfetchHook = func(ctx context.Context) http.RoundTripper { - return &urlfetch.Transport{Context: ctx} - } -} diff --git a/vendor/google.golang.org/appengine/.travis.yml b/vendor/google.golang.org/appengine/.travis.yml deleted file mode 100644 index 6d03f4d3..00000000 --- a/vendor/google.golang.org/appengine/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -language: go - -go_import_path: google.golang.org/appengine - -install: - - ./travis_install.sh - -script: - - ./travis_test.sh - -matrix: - include: - - go: 1.9.x - env: GOAPP=true - - go: 1.10.x - env: GOAPP=false - - go: 1.11.x - env: GO111MODULE=on diff --git a/vendor/google.golang.org/appengine/CONTRIBUTING.md b/vendor/google.golang.org/appengine/CONTRIBUTING.md index ffc29852..28969361 100644 --- a/vendor/google.golang.org/appengine/CONTRIBUTING.md +++ b/vendor/google.golang.org/appengine/CONTRIBUTING.md @@ -19,14 +19,12 @@ ## Running system tests -Download and install the [Go App Engine SDK](https://cloud.google.com/appengine/docs/go/download). Make sure the `go_appengine` dir is in your `PATH`. - Set the `APPENGINE_DEV_APPSERVER` environment variable to `/path/to/go_appengine/dev_appserver.py`. -Run tests with `goapp test`: +Run tests with `go test`: ``` -goapp test -v google.golang.org/appengine/... +go test -v google.golang.org/appengine/... ``` ## Contributor License Agreements diff --git a/vendor/google.golang.org/appengine/README.md b/vendor/google.golang.org/appengine/README.md index 9fdbacd3..5ccddd99 100644 --- a/vendor/google.golang.org/appengine/README.md +++ b/vendor/google.golang.org/appengine/README.md @@ -1,6 +1,6 @@ # Go App Engine packages -[![Build Status](https://travis-ci.org/golang/appengine.svg)](https://travis-ci.org/golang/appengine) +[![CI Status](https://github.com/golang/appengine/actions/workflows/ci.yml/badge.svg)](https://github.com/golang/appengine/actions/workflows/ci.yml) This repository supports the Go runtime on *App Engine standard*. It provides APIs for interacting with App Engine services. @@ -51,7 +51,7 @@ code importing `appengine/datastore` will now need to import `google.golang.org/ Most App Engine services are available with exactly the same API. A few APIs were cleaned up, and there are some differences: -* `appengine.Context` has been replaced with the `Context` type from `golang.org/x/net/context`. +* `appengine.Context` has been replaced with the `Context` type from `context`. * Logging methods that were on `appengine.Context` are now functions in `google.golang.org/appengine/log`. * `appengine.Timeout` has been removed. Use `context.WithTimeout` instead. * `appengine.Datacenter` now takes a `context.Context` argument. @@ -72,7 +72,7 @@ A few APIs were cleaned up, and there are some differences: * `appengine/socket` is not required on App Engine flexible environment / Managed VMs. Use the standard `net` package instead. -## Key Encode/Decode compatibiltiy to help with datastore library migrations +## Key Encode/Decode compatibility to help with datastore library migrations Key compatibility updates have been added to help customers transition from google.golang.org/appengine/datastore to cloud.google.com/go/datastore. The `EnableKeyConversion` enables automatic conversion from a key encoded with cloud.google.com/go/datastore to google.golang.org/appengine/datastore key type. 
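Since the README paragraph above only names `EnableKeyConversion`, here is a hedged sketch of the migration pattern it enables. The handler, route, and `key` query parameter are hypothetical, and an App Engine standard app using `appengine.Main` is assumed:

```go
package main

import (
	"fmt"
	"net/http"

	"google.golang.org/appengine"
	"google.golang.org/appengine/datastore"
)

func handler(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)

	// Opt in to key conversion so keys encoded by
	// cloud.google.com/go/datastore can be decoded by this package.
	datastore.EnableKeyConversion(ctx)

	// "key" is a hypothetical query parameter carrying a Cloud-encoded key.
	k, err := datastore.DecodeKey(r.URL.Query().Get("key"))
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	fmt.Fprintf(w, "decoded key kind: %s", k.Kind())
}

func main() {
	http.HandleFunc("/", handler)
	appengine.Main()
}
```
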
diff --git a/vendor/google.golang.org/appengine/appengine.go b/vendor/google.golang.org/appengine/appengine.go index 8c969767..35ba9c89 100644 --- a/vendor/google.golang.org/appengine/appengine.go +++ b/vendor/google.golang.org/appengine/appengine.go @@ -9,10 +9,10 @@ package appengine // import "google.golang.org/appengine" import ( + "context" "net/http" "github.com/golang/protobuf/proto" - "golang.org/x/net/context" "google.golang.org/appengine/internal" ) @@ -35,18 +35,18 @@ import ( // // Main is designed so that the app's main package looks like this: // -// package main +// package main // -// import ( -// "google.golang.org/appengine" +// import ( +// "google.golang.org/appengine" // -// _ "myapp/package0" -// _ "myapp/package1" -// ) +// _ "myapp/package0" +// _ "myapp/package1" +// ) // -// func main() { -// appengine.Main() -// } +// func main() { +// appengine.Main() +// } // // The "myapp/packageX" packages are expected to register HTTP handlers // in their init functions. @@ -54,6 +54,9 @@ func Main() { internal.Main() } +// Middleware wraps an http handler so that it can make GAE API calls +var Middleware func(http.Handler) http.Handler = internal.Middleware + // IsDevAppServer reports whether the App Engine app is running in the // development App Server. func IsDevAppServer() bool { diff --git a/vendor/google.golang.org/appengine/appengine_vm.go b/vendor/google.golang.org/appengine/appengine_vm.go index f4b645aa..6e1d041c 100644 --- a/vendor/google.golang.org/appengine/appengine_vm.go +++ b/vendor/google.golang.org/appengine/appengine_vm.go @@ -2,19 +2,19 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build !appengine // +build !appengine package appengine import ( - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" + "context" ) // BackgroundContext returns a context not associated with a request. -// This should only be used when not servicing a request. -// This only works in App Engine "flexible environment". +// +// Deprecated: App Engine no longer has a special background context. +// Just use context.Background(). func BackgroundContext() context.Context { - return internal.BackgroundContext() + return context.Background() } diff --git a/vendor/google.golang.org/appengine/identity.go b/vendor/google.golang.org/appengine/identity.go index b8dcf8f3..1202fc1a 100644 --- a/vendor/google.golang.org/appengine/identity.go +++ b/vendor/google.golang.org/appengine/identity.go @@ -5,10 +5,9 @@ package appengine import ( + "context" "time" - "golang.org/x/net/context" - "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/app_identity" modpb "google.golang.org/appengine/internal/modules" diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go index 721053c2..0569f5dd 100644 --- a/vendor/google.golang.org/appengine/internal/api.go +++ b/vendor/google.golang.org/appengine/internal/api.go @@ -2,12 +2,14 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
+//go:build !appengine // +build !appengine package internal import ( "bytes" + "context" "errors" "fmt" "io/ioutil" @@ -24,7 +26,6 @@ import ( "time" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" basepb "google.golang.org/appengine/internal/base" logpb "google.golang.org/appengine/internal/log" @@ -32,8 +33,7 @@ import ( ) const ( - apiPath = "/rpc_http" - defaultTicketSuffix = "/default.20150612t184001.0" + apiPath = "/rpc_http" ) var ( @@ -65,21 +65,22 @@ var ( IdleConnTimeout: 90 * time.Second, }, } - - defaultTicketOnce sync.Once - defaultTicket string - backgroundContextOnce sync.Once - backgroundContext netcontext.Context ) -func apiURL() *url.URL { +func apiURL(ctx context.Context) *url.URL { host, port := "appengine.googleapis.internal", "10001" if h := os.Getenv("API_HOST"); h != "" { host = h } + if hostOverride := ctx.Value(apiHostOverrideKey); hostOverride != nil { + host = hostOverride.(string) + } if p := os.Getenv("API_PORT"); p != "" { port = p } + if portOverride := ctx.Value(apiPortOverrideKey); portOverride != nil { + port = portOverride.(string) + } return &url.URL{ Scheme: "http", Host: host + ":" + port, @@ -87,82 +88,97 @@ func apiURL() *url.URL { } } -func handleHTTP(w http.ResponseWriter, r *http.Request) { - c := &context{ - req: r, - outHeader: w.Header(), - apiURL: apiURL(), - } - r = r.WithContext(withContext(r.Context(), c)) - c.req = r - - stopFlushing := make(chan int) +// Middleware wraps an http handler so that it can make GAE API calls +func Middleware(next http.Handler) http.Handler { + return handleHTTPMiddleware(executeRequestSafelyMiddleware(next)) +} - // Patch up RemoteAddr so it looks reasonable. - if addr := r.Header.Get(userIPHeader); addr != "" { - r.RemoteAddr = addr - } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { - r.RemoteAddr = addr - } else { - // Should not normally reach here, but pick a sensible default anyway. - r.RemoteAddr = "127.0.0.1" - } - // The address in the headers will most likely be of these forms: - // 123.123.123.123 - // 2001:db8::1 - // net/http.Request.RemoteAddr is specified to be in "IP:port" form. - if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { - // Assume the remote address is only a host; add a default port. - r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") - } +func handleHTTPMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + c := &aeContext{ + req: r, + outHeader: w.Header(), + } + r = r.WithContext(withContext(r.Context(), c)) + c.req = r + + stopFlushing := make(chan int) + + // Patch up RemoteAddr so it looks reasonable. + if addr := r.Header.Get(userIPHeader); addr != "" { + r.RemoteAddr = addr + } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { + r.RemoteAddr = addr + } else { + // Should not normally reach here, but pick a sensible default anyway. + r.RemoteAddr = "127.0.0.1" + } + // The address in the headers will most likely be of these forms: + // 123.123.123.123 + // 2001:db8::1 + // net/http.Request.RemoteAddr is specified to be in "IP:port" form. + if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { + // Assume the remote address is only a host; add a default port. + r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") + } - // Start goroutine responsible for flushing app logs. - // This is done after adding c to ctx.m (and stopped before removing it) - // because flushing logs requires making an API call. 
- go c.logFlusher(stopFlushing) + if logToLogservice() { + // Start goroutine responsible for flushing app logs. + // This is done after adding c to ctx.m (and stopped before removing it) + // because flushing logs requires making an API call. + go c.logFlusher(stopFlushing) + } - executeRequestSafely(c, r) - c.outHeader = nil // make sure header changes aren't respected any more + next.ServeHTTP(c, r) + c.outHeader = nil // make sure header changes aren't respected any more - stopFlushing <- 1 // any logging beyond this point will be dropped + flushed := make(chan struct{}) + if logToLogservice() { + stopFlushing <- 1 // any logging beyond this point will be dropped - // Flush any pending logs asynchronously. - c.pendingLogs.Lock() - flushes := c.pendingLogs.flushes - if len(c.pendingLogs.lines) > 0 { - flushes++ - } - c.pendingLogs.Unlock() - flushed := make(chan struct{}) - go func() { - defer close(flushed) - // Force a log flush, because with very short requests we - // may not ever flush logs. - c.flushLog(true) - }() - w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) + // Flush any pending logs asynchronously. + c.pendingLogs.Lock() + flushes := c.pendingLogs.flushes + if len(c.pendingLogs.lines) > 0 { + flushes++ + } + c.pendingLogs.Unlock() + go func() { + defer close(flushed) + // Force a log flush, because with very short requests we + // may not ever flush logs. + c.flushLog(true) + }() + w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) + } - // Avoid nil Write call if c.Write is never called. - if c.outCode != 0 { - w.WriteHeader(c.outCode) - } - if c.outBody != nil { - w.Write(c.outBody) - } - // Wait for the last flush to complete before returning, - // otherwise the security ticket will not be valid. - <-flushed + // Avoid nil Write call if c.Write is never called. + if c.outCode != 0 { + w.WriteHeader(c.outCode) + } + if c.outBody != nil { + w.Write(c.outBody) + } + if logToLogservice() { + // Wait for the last flush to complete before returning, + // otherwise the security ticket will not be valid. + <-flushed + } + }) } -func executeRequestSafely(c *context, r *http.Request) { - defer func() { - if x := recover(); x != nil { - logf(c, 4, "%s", renderPanic(x)) // 4 == critical - c.outCode = 500 - } - }() +func executeRequestSafelyMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer func() { + if x := recover(); x != nil { + c := w.(*aeContext) + logf(c, 4, "%s", renderPanic(x)) // 4 == critical + c.outCode = 500 + } + }() - http.DefaultServeMux.ServeHTTP(c, r) + next.ServeHTTP(w, r) + }) } func renderPanic(x interface{}) string { @@ -204,9 +220,9 @@ func renderPanic(x interface{}) string { return string(buf) } -// context represents the context of an in-flight HTTP request. +// aeContext represents the aeContext of an in-flight HTTP request. // It implements the appengine.Context and http.ResponseWriter interfaces. -type context struct { +type aeContext struct { req *http.Request outCode int @@ -218,8 +234,6 @@ type context struct { lines []*logpb.UserAppLogLine flushes int } - - apiURL *url.URL } var contextKey = "holds a *context" @@ -227,8 +241,8 @@ var contextKey = "holds a *context" // jointContext joins two contexts in a superficial way. // It takes values and timeouts from a base context, and only values from another context. 
type jointContext struct { - base netcontext.Context - valuesOnly netcontext.Context + base context.Context + valuesOnly context.Context } func (c jointContext) Deadline() (time.Time, bool) { @@ -252,94 +266,54 @@ func (c jointContext) Value(key interface{}) interface{} { // fromContext returns the App Engine context or nil if ctx is not // derived from an App Engine context. -func fromContext(ctx netcontext.Context) *context { - c, _ := ctx.Value(&contextKey).(*context) +func fromContext(ctx context.Context) *aeContext { + c, _ := ctx.Value(&contextKey).(*aeContext) return c } -func withContext(parent netcontext.Context, c *context) netcontext.Context { - ctx := netcontext.WithValue(parent, &contextKey, c) +func withContext(parent context.Context, c *aeContext) context.Context { + ctx := context.WithValue(parent, &contextKey, c) if ns := c.req.Header.Get(curNamespaceHeader); ns != "" { ctx = withNamespace(ctx, ns) } return ctx } -func toContext(c *context) netcontext.Context { - return withContext(netcontext.Background(), c) +func toContext(c *aeContext) context.Context { + return withContext(context.Background(), c) } -func IncomingHeaders(ctx netcontext.Context) http.Header { +func IncomingHeaders(ctx context.Context) http.Header { if c := fromContext(ctx); c != nil { return c.req.Header } return nil } -func ReqContext(req *http.Request) netcontext.Context { +func ReqContext(req *http.Request) context.Context { return req.Context() } -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { +func WithContext(parent context.Context, req *http.Request) context.Context { return jointContext{ base: parent, valuesOnly: req.Context(), } } -// DefaultTicket returns a ticket used for background context or dev_appserver. -func DefaultTicket() string { - defaultTicketOnce.Do(func() { - if IsDevAppServer() { - defaultTicket = "testapp" + defaultTicketSuffix - return - } - appID := partitionlessAppID() - escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1) - majVersion := VersionID(nil) - if i := strings.Index(majVersion, "."); i > 0 { - majVersion = majVersion[:i] - } - defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID()) - }) - return defaultTicket -} - -func BackgroundContext() netcontext.Context { - backgroundContextOnce.Do(func() { - // Compute background security ticket. - ticket := DefaultTicket() - - c := &context{ - req: &http.Request{ - Header: http.Header{ - ticketHeader: []string{ticket}, - }, - }, - apiURL: apiURL(), - } - backgroundContext = toContext(c) - - // TODO(dsymonds): Wire up the shutdown handler to do a final flush. - go c.logFlusher(make(chan int)) - }) - - return backgroundContext -} - // RegisterTestRequest registers the HTTP request req for testing, such that -// any API calls are sent to the provided URL. It returns a closure to delete -// the registration. +// any API calls are sent to the provided URL. // It should only be used by aetest package. 
-func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) (*http.Request, func()) { - c := &context{ - req: req, - apiURL: apiURL, - } - ctx := withContext(decorate(req.Context()), c) - req = req.WithContext(ctx) - c.req = req - return req, func() {} +func RegisterTestRequest(req *http.Request, apiURL *url.URL, appID string) *http.Request { + ctx := req.Context() + ctx = withAPIHostOverride(ctx, apiURL.Hostname()) + ctx = withAPIPortOverride(ctx, apiURL.Port()) + ctx = WithAppIDOverride(ctx, appID) + + // use the unregistered request as a placeholder so that withContext can read the headers + c := &aeContext{req: req} + c.req = req.WithContext(withContext(ctx, c)) + return c.req } var errTimeout = &CallError{ @@ -348,7 +322,7 @@ var errTimeout = &CallError{ Timeout: true, } -func (c *context) Header() http.Header { return c.outHeader } +func (c *aeContext) Header() http.Header { return c.outHeader } // Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status // codes do not permit a response body (nor response entity headers such as @@ -365,7 +339,7 @@ func bodyAllowedForStatus(status int) bool { return true } -func (c *context) Write(b []byte) (int, error) { +func (c *aeContext) Write(b []byte) (int, error) { if c.outCode == 0 { c.WriteHeader(http.StatusOK) } @@ -376,7 +350,7 @@ func (c *context) Write(b []byte) (int, error) { return len(b), nil } -func (c *context) WriteHeader(code int) { +func (c *aeContext) WriteHeader(code int) { if c.outCode != 0 { logf(c, 3, "WriteHeader called multiple times on request.") // error level return @@ -384,10 +358,11 @@ func (c *context) WriteHeader(code int) { c.outCode = code } -func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) { +func post(ctx context.Context, body []byte, timeout time.Duration) (b []byte, err error) { + apiURL := apiURL(ctx) hreq := &http.Request{ Method: "POST", - URL: c.apiURL, + URL: apiURL, Header: http.Header{ apiEndpointHeader: apiEndpointHeaderValue, apiMethodHeader: apiMethodHeaderValue, @@ -396,13 +371,16 @@ func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) }, Body: ioutil.NopCloser(bytes.NewReader(body)), ContentLength: int64(len(body)), - Host: c.apiURL.Host, - } - if info := c.req.Header.Get(dapperHeader); info != "" { - hreq.Header.Set(dapperHeader, info) + Host: apiURL.Host, } - if info := c.req.Header.Get(traceHeader); info != "" { - hreq.Header.Set(traceHeader, info) + c := fromContext(ctx) + if c != nil { + if info := c.req.Header.Get(dapperHeader); info != "" { + hreq.Header.Set(dapperHeader, info) + } + if info := c.req.Header.Get(traceHeader); info != "" { + hreq.Header.Set(traceHeader, info) + } } tr := apiHTTPClient.Transport.(*http.Transport) @@ -444,7 +422,7 @@ func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) return hrespBody, nil } -func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { +func Call(ctx context.Context, service, method string, in, out proto.Message) error { if ns := NamespaceFromContext(ctx); ns != "" { if fn, ok := NamespaceMods[service]; ok { fn(in, ns) @@ -463,15 +441,11 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) } c := fromContext(ctx) - if c == nil { - // Give a good error message rather than a panic lower down. - return errNotAppEngineContext - } // Apply transaction modifications if we're in a transaction. 
if t := transactionFromContext(ctx); t != nil { if t.finished { return errors.New("transaction context has expired") } applyTransaction(in, &t.transaction) } @@ -487,20 +461,13 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return err } - ticket := c.req.Header.Get(ticketHeader) - // Use a test ticket under test environment. - if ticket == "" { - if appid := ctx.Value(&appIDOverrideKey); appid != nil { - ticket = appid.(string) + defaultTicketSuffix + ticket := "" + if c != nil { + ticket = c.req.Header.Get(ticketHeader) + if dri := c.req.Header.Get(devRequestIdHeader); IsDevAppServer() && dri != "" { + ticket = dri } } - // Fall back to use background ticket when the request ticket is not available in Flex or dev_appserver. - if ticket == "" { - ticket = DefaultTicket() - } - if dri := c.req.Header.Get(devRequestIdHeader); IsDevAppServer() && dri != "" { - ticket = dri - } req := &remotepb.Request{ ServiceName: &service, Method: &method, @@ -512,7 +479,7 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return err } - hrespBody, err := c.post(hreqBody, timeout) + hrespBody, err := post(ctx, hreqBody, timeout) if err != nil { return err } @@ -549,11 +516,11 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return proto.Unmarshal(res.Response, out) } -func (c *context) Request() *http.Request { +func (c *aeContext) Request() *http.Request { return c.req } -func (c *context) addLogLine(ll *logpb.UserAppLogLine) { +func (c *aeContext) addLogLine(ll *logpb.UserAppLogLine) { // Truncate long log lines. // TODO(dsymonds): Check if this is still necessary. const lim = 8 << 10 @@ -575,18 +542,20 @@ var logLevelName = map[int64]string{ 4: "CRITICAL", } -func logf(c *context, level int64, format string, args ...interface{}) { +func logf(c *aeContext, level int64, format string, args ...interface{}) { if c == nil { panic("not an App Engine context") } s := fmt.Sprintf(format, args...) s = strings.TrimRight(s, "\n") // Remove any trailing newline characters. - c.addLogLine(&logpb.UserAppLogLine{ - TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), - Level: &level, - Message: &s, - }) - // Only duplicate log to stderr if not running on App Engine second generation + if logToLogservice() { + c.addLogLine(&logpb.UserAppLogLine{ + TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), + Level: &level, + Message: &s, + }) + } + // Log to stderr if not deployed if !IsSecondGen() { log.Print(logLevelName[level] + ": " + s) } @@ -594,7 +563,7 @@ func logf(c *context, level int64, format string, args ...interface{}) { // flushLog attempts to flush any pending logs to the appserver. // It should not be called concurrently. -func (c *context) flushLog(force bool) (flushed bool) { +func (c *aeContext) flushLog(force bool) (flushed bool) { c.pendingLogs.Lock() // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious.
n, rem := 0, 30<<20 @@ -655,7 +624,7 @@ const ( forceFlushInterval = 60 * time.Second ) -func (c *context) logFlusher(stop <-chan int) { +func (c *aeContext) logFlusher(stop <-chan int) { lastFlush := time.Now() tick := time.NewTicker(flushInterval) for { @@ -673,6 +642,12 @@ func (c *context) logFlusher(stop <-chan int) { } } -func ContextForTesting(req *http.Request) netcontext.Context { - return toContext(&context{req: req}) +func ContextForTesting(req *http.Request) context.Context { + return toContext(&aeContext{req: req}) +} + +func logToLogservice() bool { + // TODO: replace logservice with json structured logs to $LOG_DIR/app.log.json + // where $LOG_DIR is /var/log in prod and some tmpdir in dev + return os.Getenv("LOG_TO_LOGSERVICE") != "0" } diff --git a/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/google.golang.org/appengine/internal/api_classic.go index f0f40b2e..87c33c79 100644 --- a/vendor/google.golang.org/appengine/internal/api_classic.go +++ b/vendor/google.golang.org/appengine/internal/api_classic.go @@ -2,11 +2,13 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build appengine // +build appengine package internal import ( + "context" "errors" "fmt" "net/http" @@ -17,20 +19,19 @@ import ( basepb "appengine_internal/base" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" ) var contextKey = "holds an appengine.Context" // fromContext returns the App Engine context or nil if ctx is not // derived from an App Engine context. -func fromContext(ctx netcontext.Context) appengine.Context { +func fromContext(ctx context.Context) appengine.Context { c, _ := ctx.Value(&contextKey).(appengine.Context) return c } // This is only for classic App Engine adapters. 
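Taken together with the api.go changes above, the default (!appengine) build now bootstraps API access through ordinary net/http middleware, and the logservice path is opt-out via LOG_TO_LOGSERVICE. A minimal sketch of the resulting wiring, written as if inside package internal (which is not importable from outside google.golang.org/appengine); serveSketch is a hypothetical helper mirroring the Main change in main_vm.go further below:

package internal

import "net/http"

// serveSketch mirrors what Main in main_vm.go now does: wrap the app's mux
// in Middleware so every request context carries an *aeContext, which Call
// later recovers via fromContext when talking to the API server. Setting
// LOG_TO_LOGSERVICE=0 in the environment skips the logservice flush path.
func serveSketch(host, port string) error {
	return http.ListenAndServe(host+":"+port, Middleware(http.DefaultServeMux))
}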
-func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error) { +func ClassicContextFromContext(ctx context.Context) (appengine.Context, error) { c := fromContext(ctx) if c == nil { return nil, errNotAppEngineContext @@ -38,8 +39,8 @@ func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error return c, nil } -func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context { - ctx := netcontext.WithValue(parent, &contextKey, c) +func withContext(parent context.Context, c appengine.Context) context.Context { + ctx := context.WithValue(parent, &contextKey, c) s := &basepb.StringProto{} c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil) @@ -50,7 +51,7 @@ func withContext(parent netcontext.Context, c appengine.Context) netcontext.Cont return ctx } -func IncomingHeaders(ctx netcontext.Context) http.Header { +func IncomingHeaders(ctx context.Context) http.Header { if c := fromContext(ctx); c != nil { if req, ok := c.Request().(*http.Request); ok { return req.Header @@ -59,11 +60,11 @@ func IncomingHeaders(ctx netcontext.Context) http.Header { return nil } -func ReqContext(req *http.Request) netcontext.Context { - return WithContext(netcontext.Background(), req) +func ReqContext(req *http.Request) context.Context { + return WithContext(context.Background(), req) } -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { +func WithContext(parent context.Context, req *http.Request) context.Context { c := appengine.NewContext(req) return withContext(parent, c) } @@ -83,11 +84,11 @@ func (t *testingContext) Call(service, method string, _, _ appengine_internal.Pr } func (t *testingContext) Request() interface{} { return t.req } -func ContextForTesting(req *http.Request) netcontext.Context { - return withContext(netcontext.Background(), &testingContext{req: req}) +func ContextForTesting(req *http.Request) context.Context { + return withContext(context.Background(), &testingContext{req: req}) } -func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { +func Call(ctx context.Context, service, method string, in, out proto.Message) error { if ns := NamespaceFromContext(ctx); ns != "" { if fn, ok := NamespaceMods[service]; ok { fn(in, ns) @@ -144,8 +145,8 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return err } -func handleHTTP(w http.ResponseWriter, r *http.Request) { - panic("handleHTTP called; this should be impossible") +func Middleware(next http.Handler) http.Handler { + panic("Middleware called; this should be impossible") } func logf(c appengine.Context, level int64, format string, args ...interface{}) { diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go index e0c0b214..5b95c13d 100644 --- a/vendor/google.golang.org/appengine/internal/api_common.go +++ b/vendor/google.golang.org/appengine/internal/api_common.go @@ -5,20 +5,26 @@ package internal import ( + "context" "errors" "os" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" ) +type ctxKey string + +func (c ctxKey) String() string { + return "appengine context key: " + string(c) +} + var errNotAppEngineContext = errors.New("not an App Engine context") -type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error +type CallOverrideFunc func(ctx context.Context, service, method string, in, out proto.Message) error var callOverrideKey 
= "holds []CallOverrideFunc" -func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context { +func WithCallOverride(ctx context.Context, f CallOverrideFunc) context.Context { // We avoid appending to any existing call override // so we don't risk overwriting a popped stack below. var cofs []CallOverrideFunc @@ -26,10 +32,10 @@ func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Con cofs = append(cofs, uf...) } cofs = append(cofs, f) - return netcontext.WithValue(ctx, &callOverrideKey, cofs) + return context.WithValue(ctx, &callOverrideKey, cofs) } -func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) { +func callOverrideFromContext(ctx context.Context) (CallOverrideFunc, context.Context, bool) { cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc) if len(cofs) == 0 { return nil, nil, false @@ -37,7 +43,7 @@ func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netconte // We found a list of overrides; grab the last, and reconstitute a // context that will hide it. f := cofs[len(cofs)-1] - ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1]) + ctx = context.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1]) return f, ctx, true } @@ -45,23 +51,35 @@ type logOverrideFunc func(level int64, format string, args ...interface{}) var logOverrideKey = "holds a logOverrideFunc" -func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context { - return netcontext.WithValue(ctx, &logOverrideKey, f) +func WithLogOverride(ctx context.Context, f logOverrideFunc) context.Context { + return context.WithValue(ctx, &logOverrideKey, f) } var appIDOverrideKey = "holds a string, being the full app ID" -func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context { - return netcontext.WithValue(ctx, &appIDOverrideKey, appID) +func WithAppIDOverride(ctx context.Context, appID string) context.Context { + return context.WithValue(ctx, &appIDOverrideKey, appID) +} + +var apiHostOverrideKey = ctxKey("holds a string, being the alternate API_HOST") + +func withAPIHostOverride(ctx context.Context, apiHost string) context.Context { + return context.WithValue(ctx, apiHostOverrideKey, apiHost) +} + +var apiPortOverrideKey = ctxKey("holds a string, being the alternate API_PORT") + +func withAPIPortOverride(ctx context.Context, apiPort string) context.Context { + return context.WithValue(ctx, apiPortOverrideKey, apiPort) } var namespaceKey = "holds the namespace string" -func withNamespace(ctx netcontext.Context, ns string) netcontext.Context { - return netcontext.WithValue(ctx, &namespaceKey, ns) +func withNamespace(ctx context.Context, ns string) context.Context { + return context.WithValue(ctx, &namespaceKey, ns) } -func NamespaceFromContext(ctx netcontext.Context) string { +func NamespaceFromContext(ctx context.Context) string { // If there's no namespace, return the empty string. ns, _ := ctx.Value(&namespaceKey).(string) return ns @@ -70,14 +88,14 @@ func NamespaceFromContext(ctx netcontext.Context) string { // FullyQualifiedAppID returns the fully-qualified application ID. // This may contain a partition prefix (e.g. "s~" for High Replication apps), // or a domain prefix (e.g. "example.com:"). 
-func FullyQualifiedAppID(ctx netcontext.Context) string { +func FullyQualifiedAppID(ctx context.Context) string { if id, ok := ctx.Value(&appIDOverrideKey).(string); ok { return id } return fullyQualifiedAppID(ctx) } -func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) { +func Logf(ctx context.Context, level int64, format string, args ...interface{}) { if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok { f(level, format, args...) return @@ -90,7 +108,7 @@ func Logf(ctx netcontext.Context, level int64, format string, args ...interface{ } // NamespacedContext wraps a Context to support namespaces. -func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context { +func NamespacedContext(ctx context.Context, namespace string) context.Context { return withNamespace(ctx, namespace) } diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go index 9b4134e4..0f95aa91 100644 --- a/vendor/google.golang.org/appengine/internal/identity.go +++ b/vendor/google.golang.org/appengine/internal/identity.go @@ -5,9 +5,8 @@ package internal import ( + "context" "os" - - netcontext "golang.org/x/net/context" ) var ( @@ -23,7 +22,7 @@ var ( // AppID is the implementation of the wrapper function of the same name in // ../identity.go. See that file for commentary. -func AppID(c netcontext.Context) string { +func AppID(c context.Context) string { return appID(FullyQualifiedAppID(c)) } @@ -35,7 +34,7 @@ func IsStandard() bool { return appengineStandard || IsSecondGen() } -// IsStandard is the implementation of the wrapper function of the same name in +// IsSecondGen is the implementation of the wrapper function of the same name in // ../appengine.go. See that file for commentary. func IsSecondGen() bool { // Second-gen runtimes set $GAE_ENV so we use that to check if we're on a second-gen runtime. diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go index 4e979f45..5ad3548b 100644 --- a/vendor/google.golang.org/appengine/internal/identity_classic.go +++ b/vendor/google.golang.org/appengine/internal/identity_classic.go @@ -2,21 +2,22 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
+//go:build appengine // +build appengine package internal import ( - "appengine" + "context" - netcontext "golang.org/x/net/context" + "appengine" ) func init() { appengineStandard = true } -func DefaultVersionHostname(ctx netcontext.Context) string { +func DefaultVersionHostname(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) @@ -24,12 +25,12 @@ func DefaultVersionHostname(ctx netcontext.Context) string { return appengine.DefaultVersionHostname(c) } -func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() } -func ServerSoftware() string { return appengine.ServerSoftware() } -func InstanceID() string { return appengine.InstanceID() } -func IsDevAppServer() bool { return appengine.IsDevAppServer() } +func Datacenter(_ context.Context) string { return appengine.Datacenter() } +func ServerSoftware() string { return appengine.ServerSoftware() } +func InstanceID() string { return appengine.InstanceID() } +func IsDevAppServer() bool { return appengine.IsDevAppServer() } -func RequestID(ctx netcontext.Context) string { +func RequestID(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) @@ -37,14 +38,14 @@ func RequestID(ctx netcontext.Context) string { return appengine.RequestID(c) } -func ModuleName(ctx netcontext.Context) string { +func ModuleName(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) } return appengine.ModuleName(c) } -func VersionID(ctx netcontext.Context) string { +func VersionID(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) @@ -52,7 +53,7 @@ func VersionID(ctx netcontext.Context) string { return appengine.VersionID(c) } -func fullyQualifiedAppID(ctx netcontext.Context) string { +func fullyQualifiedAppID(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) diff --git a/vendor/google.golang.org/appengine/internal/identity_flex.go b/vendor/google.golang.org/appengine/internal/identity_flex.go index d5e2e7b5..4201b6b5 100644 --- a/vendor/google.golang.org/appengine/internal/identity_flex.go +++ b/vendor/google.golang.org/appengine/internal/identity_flex.go @@ -2,6 +2,7 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build appenginevm // +build appenginevm package internal diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go index 5d806726..18ddda3a 100644 --- a/vendor/google.golang.org/appengine/internal/identity_vm.go +++ b/vendor/google.golang.org/appengine/internal/identity_vm.go @@ -2,17 +2,17 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
+//go:build !appengine // +build !appengine package internal import ( + "context" "log" "net/http" "os" "strings" - - netcontext "golang.org/x/net/context" ) // These functions are implementations of the wrapper functions @@ -24,7 +24,7 @@ const ( hDatacenter = "X-AppEngine-Datacenter" ) -func ctxHeaders(ctx netcontext.Context) http.Header { +func ctxHeaders(ctx context.Context) http.Header { c := fromContext(ctx) if c == nil { return nil @@ -32,15 +32,15 @@ func ctxHeaders(ctx netcontext.Context) http.Header { return c.Request().Header } -func DefaultVersionHostname(ctx netcontext.Context) string { +func DefaultVersionHostname(ctx context.Context) string { return ctxHeaders(ctx).Get(hDefaultVersionHostname) } -func RequestID(ctx netcontext.Context) string { +func RequestID(ctx context.Context) string { return ctxHeaders(ctx).Get(hRequestLogId) } -func Datacenter(ctx netcontext.Context) string { +func Datacenter(ctx context.Context) string { if dc := ctxHeaders(ctx).Get(hDatacenter); dc != "" { return dc } @@ -71,7 +71,7 @@ func ServerSoftware() string { // TODO(dsymonds): Remove the metadata fetches. -func ModuleName(_ netcontext.Context) string { +func ModuleName(_ context.Context) string { if s := os.Getenv("GAE_MODULE_NAME"); s != "" { return s } @@ -81,7 +81,7 @@ func ModuleName(_ netcontext.Context) string { return string(mustGetMetadata("instance/attributes/gae_backend_name")) } -func VersionID(_ netcontext.Context) string { +func VersionID(_ context.Context) string { if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" { return s1 + "." + s2 } @@ -112,7 +112,7 @@ func partitionlessAppID() string { return string(mustGetMetadata("instance/attributes/gae_project")) } -func fullyQualifiedAppID(_ netcontext.Context) string { +func fullyQualifiedAppID(_ context.Context) string { if s := os.Getenv("GAE_APPLICATION"); s != "" { return s } @@ -130,5 +130,5 @@ func fullyQualifiedAppID(_ netcontext.Context) string { } func IsDevAppServer() bool { - return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" + return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" || os.Getenv("GAE_ENV") == "localdev" } diff --git a/vendor/google.golang.org/appengine/internal/main.go b/vendor/google.golang.org/appengine/internal/main.go index 1e765312..afd0ae84 100644 --- a/vendor/google.golang.org/appengine/internal/main.go +++ b/vendor/google.golang.org/appengine/internal/main.go @@ -2,6 +2,7 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build appengine // +build appengine package internal diff --git a/vendor/google.golang.org/appengine/internal/main_vm.go b/vendor/google.golang.org/appengine/internal/main_vm.go index ddb79a33..86a8caf0 100644 --- a/vendor/google.golang.org/appengine/internal/main_vm.go +++ b/vendor/google.golang.org/appengine/internal/main_vm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
+//go:build !appengine // +build !appengine package internal @@ -29,7 +30,7 @@ func Main() { if IsDevAppServer() { host = "127.0.0.1" } - if err := http.ListenAndServe(host+":"+port, http.HandlerFunc(handleHTTP)); err != nil { + if err := http.ListenAndServe(host+":"+port, Middleware(http.DefaultServeMux)); err != nil { log.Fatalf("http.ListenAndServe: %v", err) } } diff --git a/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go b/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go deleted file mode 100644 index 4ec872e4..00000000 --- a/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go +++ /dev/null @@ -1,2822 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google.golang.org/appengine/internal/socket/socket_service.proto - -package socket - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type RemoteSocketServiceError_ErrorCode int32 - -const ( - RemoteSocketServiceError_SYSTEM_ERROR RemoteSocketServiceError_ErrorCode = 1 - RemoteSocketServiceError_GAI_ERROR RemoteSocketServiceError_ErrorCode = 2 - RemoteSocketServiceError_FAILURE RemoteSocketServiceError_ErrorCode = 4 - RemoteSocketServiceError_PERMISSION_DENIED RemoteSocketServiceError_ErrorCode = 5 - RemoteSocketServiceError_INVALID_REQUEST RemoteSocketServiceError_ErrorCode = 6 - RemoteSocketServiceError_SOCKET_CLOSED RemoteSocketServiceError_ErrorCode = 7 -) - -var RemoteSocketServiceError_ErrorCode_name = map[int32]string{ - 1: "SYSTEM_ERROR", - 2: "GAI_ERROR", - 4: "FAILURE", - 5: "PERMISSION_DENIED", - 6: "INVALID_REQUEST", - 7: "SOCKET_CLOSED", -} -var RemoteSocketServiceError_ErrorCode_value = map[string]int32{ - "SYSTEM_ERROR": 1, - "GAI_ERROR": 2, - "FAILURE": 4, - "PERMISSION_DENIED": 5, - "INVALID_REQUEST": 6, - "SOCKET_CLOSED": 7, -} - -func (x RemoteSocketServiceError_ErrorCode) Enum() *RemoteSocketServiceError_ErrorCode { - p := new(RemoteSocketServiceError_ErrorCode) - *p = x - return p -} -func (x RemoteSocketServiceError_ErrorCode) String() string { - return proto.EnumName(RemoteSocketServiceError_ErrorCode_name, int32(x)) -} -func (x *RemoteSocketServiceError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(RemoteSocketServiceError_ErrorCode_value, data, "RemoteSocketServiceError_ErrorCode") - if err != nil { - return err - } - *x = RemoteSocketServiceError_ErrorCode(value) - return nil -} -func (RemoteSocketServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{0, 0} -} - -type RemoteSocketServiceError_SystemError int32 - -const ( - RemoteSocketServiceError_SYS_SUCCESS RemoteSocketServiceError_SystemError = 0 - RemoteSocketServiceError_SYS_EPERM RemoteSocketServiceError_SystemError = 1 - RemoteSocketServiceError_SYS_ENOENT RemoteSocketServiceError_SystemError = 2 - RemoteSocketServiceError_SYS_ESRCH RemoteSocketServiceError_SystemError = 3 - RemoteSocketServiceError_SYS_EINTR RemoteSocketServiceError_SystemError = 4 - 
RemoteSocketServiceError_SYS_EIO RemoteSocketServiceError_SystemError = 5 - RemoteSocketServiceError_SYS_ENXIO RemoteSocketServiceError_SystemError = 6 - RemoteSocketServiceError_SYS_E2BIG RemoteSocketServiceError_SystemError = 7 - RemoteSocketServiceError_SYS_ENOEXEC RemoteSocketServiceError_SystemError = 8 - RemoteSocketServiceError_SYS_EBADF RemoteSocketServiceError_SystemError = 9 - RemoteSocketServiceError_SYS_ECHILD RemoteSocketServiceError_SystemError = 10 - RemoteSocketServiceError_SYS_EAGAIN RemoteSocketServiceError_SystemError = 11 - RemoteSocketServiceError_SYS_EWOULDBLOCK RemoteSocketServiceError_SystemError = 11 - RemoteSocketServiceError_SYS_ENOMEM RemoteSocketServiceError_SystemError = 12 - RemoteSocketServiceError_SYS_EACCES RemoteSocketServiceError_SystemError = 13 - RemoteSocketServiceError_SYS_EFAULT RemoteSocketServiceError_SystemError = 14 - RemoteSocketServiceError_SYS_ENOTBLK RemoteSocketServiceError_SystemError = 15 - RemoteSocketServiceError_SYS_EBUSY RemoteSocketServiceError_SystemError = 16 - RemoteSocketServiceError_SYS_EEXIST RemoteSocketServiceError_SystemError = 17 - RemoteSocketServiceError_SYS_EXDEV RemoteSocketServiceError_SystemError = 18 - RemoteSocketServiceError_SYS_ENODEV RemoteSocketServiceError_SystemError = 19 - RemoteSocketServiceError_SYS_ENOTDIR RemoteSocketServiceError_SystemError = 20 - RemoteSocketServiceError_SYS_EISDIR RemoteSocketServiceError_SystemError = 21 - RemoteSocketServiceError_SYS_EINVAL RemoteSocketServiceError_SystemError = 22 - RemoteSocketServiceError_SYS_ENFILE RemoteSocketServiceError_SystemError = 23 - RemoteSocketServiceError_SYS_EMFILE RemoteSocketServiceError_SystemError = 24 - RemoteSocketServiceError_SYS_ENOTTY RemoteSocketServiceError_SystemError = 25 - RemoteSocketServiceError_SYS_ETXTBSY RemoteSocketServiceError_SystemError = 26 - RemoteSocketServiceError_SYS_EFBIG RemoteSocketServiceError_SystemError = 27 - RemoteSocketServiceError_SYS_ENOSPC RemoteSocketServiceError_SystemError = 28 - RemoteSocketServiceError_SYS_ESPIPE RemoteSocketServiceError_SystemError = 29 - RemoteSocketServiceError_SYS_EROFS RemoteSocketServiceError_SystemError = 30 - RemoteSocketServiceError_SYS_EMLINK RemoteSocketServiceError_SystemError = 31 - RemoteSocketServiceError_SYS_EPIPE RemoteSocketServiceError_SystemError = 32 - RemoteSocketServiceError_SYS_EDOM RemoteSocketServiceError_SystemError = 33 - RemoteSocketServiceError_SYS_ERANGE RemoteSocketServiceError_SystemError = 34 - RemoteSocketServiceError_SYS_EDEADLK RemoteSocketServiceError_SystemError = 35 - RemoteSocketServiceError_SYS_EDEADLOCK RemoteSocketServiceError_SystemError = 35 - RemoteSocketServiceError_SYS_ENAMETOOLONG RemoteSocketServiceError_SystemError = 36 - RemoteSocketServiceError_SYS_ENOLCK RemoteSocketServiceError_SystemError = 37 - RemoteSocketServiceError_SYS_ENOSYS RemoteSocketServiceError_SystemError = 38 - RemoteSocketServiceError_SYS_ENOTEMPTY RemoteSocketServiceError_SystemError = 39 - RemoteSocketServiceError_SYS_ELOOP RemoteSocketServiceError_SystemError = 40 - RemoteSocketServiceError_SYS_ENOMSG RemoteSocketServiceError_SystemError = 42 - RemoteSocketServiceError_SYS_EIDRM RemoteSocketServiceError_SystemError = 43 - RemoteSocketServiceError_SYS_ECHRNG RemoteSocketServiceError_SystemError = 44 - RemoteSocketServiceError_SYS_EL2NSYNC RemoteSocketServiceError_SystemError = 45 - RemoteSocketServiceError_SYS_EL3HLT RemoteSocketServiceError_SystemError = 46 - RemoteSocketServiceError_SYS_EL3RST RemoteSocketServiceError_SystemError = 47 - 
RemoteSocketServiceError_SYS_ELNRNG RemoteSocketServiceError_SystemError = 48 - RemoteSocketServiceError_SYS_EUNATCH RemoteSocketServiceError_SystemError = 49 - RemoteSocketServiceError_SYS_ENOCSI RemoteSocketServiceError_SystemError = 50 - RemoteSocketServiceError_SYS_EL2HLT RemoteSocketServiceError_SystemError = 51 - RemoteSocketServiceError_SYS_EBADE RemoteSocketServiceError_SystemError = 52 - RemoteSocketServiceError_SYS_EBADR RemoteSocketServiceError_SystemError = 53 - RemoteSocketServiceError_SYS_EXFULL RemoteSocketServiceError_SystemError = 54 - RemoteSocketServiceError_SYS_ENOANO RemoteSocketServiceError_SystemError = 55 - RemoteSocketServiceError_SYS_EBADRQC RemoteSocketServiceError_SystemError = 56 - RemoteSocketServiceError_SYS_EBADSLT RemoteSocketServiceError_SystemError = 57 - RemoteSocketServiceError_SYS_EBFONT RemoteSocketServiceError_SystemError = 59 - RemoteSocketServiceError_SYS_ENOSTR RemoteSocketServiceError_SystemError = 60 - RemoteSocketServiceError_SYS_ENODATA RemoteSocketServiceError_SystemError = 61 - RemoteSocketServiceError_SYS_ETIME RemoteSocketServiceError_SystemError = 62 - RemoteSocketServiceError_SYS_ENOSR RemoteSocketServiceError_SystemError = 63 - RemoteSocketServiceError_SYS_ENONET RemoteSocketServiceError_SystemError = 64 - RemoteSocketServiceError_SYS_ENOPKG RemoteSocketServiceError_SystemError = 65 - RemoteSocketServiceError_SYS_EREMOTE RemoteSocketServiceError_SystemError = 66 - RemoteSocketServiceError_SYS_ENOLINK RemoteSocketServiceError_SystemError = 67 - RemoteSocketServiceError_SYS_EADV RemoteSocketServiceError_SystemError = 68 - RemoteSocketServiceError_SYS_ESRMNT RemoteSocketServiceError_SystemError = 69 - RemoteSocketServiceError_SYS_ECOMM RemoteSocketServiceError_SystemError = 70 - RemoteSocketServiceError_SYS_EPROTO RemoteSocketServiceError_SystemError = 71 - RemoteSocketServiceError_SYS_EMULTIHOP RemoteSocketServiceError_SystemError = 72 - RemoteSocketServiceError_SYS_EDOTDOT RemoteSocketServiceError_SystemError = 73 - RemoteSocketServiceError_SYS_EBADMSG RemoteSocketServiceError_SystemError = 74 - RemoteSocketServiceError_SYS_EOVERFLOW RemoteSocketServiceError_SystemError = 75 - RemoteSocketServiceError_SYS_ENOTUNIQ RemoteSocketServiceError_SystemError = 76 - RemoteSocketServiceError_SYS_EBADFD RemoteSocketServiceError_SystemError = 77 - RemoteSocketServiceError_SYS_EREMCHG RemoteSocketServiceError_SystemError = 78 - RemoteSocketServiceError_SYS_ELIBACC RemoteSocketServiceError_SystemError = 79 - RemoteSocketServiceError_SYS_ELIBBAD RemoteSocketServiceError_SystemError = 80 - RemoteSocketServiceError_SYS_ELIBSCN RemoteSocketServiceError_SystemError = 81 - RemoteSocketServiceError_SYS_ELIBMAX RemoteSocketServiceError_SystemError = 82 - RemoteSocketServiceError_SYS_ELIBEXEC RemoteSocketServiceError_SystemError = 83 - RemoteSocketServiceError_SYS_EILSEQ RemoteSocketServiceError_SystemError = 84 - RemoteSocketServiceError_SYS_ERESTART RemoteSocketServiceError_SystemError = 85 - RemoteSocketServiceError_SYS_ESTRPIPE RemoteSocketServiceError_SystemError = 86 - RemoteSocketServiceError_SYS_EUSERS RemoteSocketServiceError_SystemError = 87 - RemoteSocketServiceError_SYS_ENOTSOCK RemoteSocketServiceError_SystemError = 88 - RemoteSocketServiceError_SYS_EDESTADDRREQ RemoteSocketServiceError_SystemError = 89 - RemoteSocketServiceError_SYS_EMSGSIZE RemoteSocketServiceError_SystemError = 90 - RemoteSocketServiceError_SYS_EPROTOTYPE RemoteSocketServiceError_SystemError = 91 - RemoteSocketServiceError_SYS_ENOPROTOOPT RemoteSocketServiceError_SystemError = 
92 - RemoteSocketServiceError_SYS_EPROTONOSUPPORT RemoteSocketServiceError_SystemError = 93 - RemoteSocketServiceError_SYS_ESOCKTNOSUPPORT RemoteSocketServiceError_SystemError = 94 - RemoteSocketServiceError_SYS_EOPNOTSUPP RemoteSocketServiceError_SystemError = 95 - RemoteSocketServiceError_SYS_ENOTSUP RemoteSocketServiceError_SystemError = 95 - RemoteSocketServiceError_SYS_EPFNOSUPPORT RemoteSocketServiceError_SystemError = 96 - RemoteSocketServiceError_SYS_EAFNOSUPPORT RemoteSocketServiceError_SystemError = 97 - RemoteSocketServiceError_SYS_EADDRINUSE RemoteSocketServiceError_SystemError = 98 - RemoteSocketServiceError_SYS_EADDRNOTAVAIL RemoteSocketServiceError_SystemError = 99 - RemoteSocketServiceError_SYS_ENETDOWN RemoteSocketServiceError_SystemError = 100 - RemoteSocketServiceError_SYS_ENETUNREACH RemoteSocketServiceError_SystemError = 101 - RemoteSocketServiceError_SYS_ENETRESET RemoteSocketServiceError_SystemError = 102 - RemoteSocketServiceError_SYS_ECONNABORTED RemoteSocketServiceError_SystemError = 103 - RemoteSocketServiceError_SYS_ECONNRESET RemoteSocketServiceError_SystemError = 104 - RemoteSocketServiceError_SYS_ENOBUFS RemoteSocketServiceError_SystemError = 105 - RemoteSocketServiceError_SYS_EISCONN RemoteSocketServiceError_SystemError = 106 - RemoteSocketServiceError_SYS_ENOTCONN RemoteSocketServiceError_SystemError = 107 - RemoteSocketServiceError_SYS_ESHUTDOWN RemoteSocketServiceError_SystemError = 108 - RemoteSocketServiceError_SYS_ETOOMANYREFS RemoteSocketServiceError_SystemError = 109 - RemoteSocketServiceError_SYS_ETIMEDOUT RemoteSocketServiceError_SystemError = 110 - RemoteSocketServiceError_SYS_ECONNREFUSED RemoteSocketServiceError_SystemError = 111 - RemoteSocketServiceError_SYS_EHOSTDOWN RemoteSocketServiceError_SystemError = 112 - RemoteSocketServiceError_SYS_EHOSTUNREACH RemoteSocketServiceError_SystemError = 113 - RemoteSocketServiceError_SYS_EALREADY RemoteSocketServiceError_SystemError = 114 - RemoteSocketServiceError_SYS_EINPROGRESS RemoteSocketServiceError_SystemError = 115 - RemoteSocketServiceError_SYS_ESTALE RemoteSocketServiceError_SystemError = 116 - RemoteSocketServiceError_SYS_EUCLEAN RemoteSocketServiceError_SystemError = 117 - RemoteSocketServiceError_SYS_ENOTNAM RemoteSocketServiceError_SystemError = 118 - RemoteSocketServiceError_SYS_ENAVAIL RemoteSocketServiceError_SystemError = 119 - RemoteSocketServiceError_SYS_EISNAM RemoteSocketServiceError_SystemError = 120 - RemoteSocketServiceError_SYS_EREMOTEIO RemoteSocketServiceError_SystemError = 121 - RemoteSocketServiceError_SYS_EDQUOT RemoteSocketServiceError_SystemError = 122 - RemoteSocketServiceError_SYS_ENOMEDIUM RemoteSocketServiceError_SystemError = 123 - RemoteSocketServiceError_SYS_EMEDIUMTYPE RemoteSocketServiceError_SystemError = 124 - RemoteSocketServiceError_SYS_ECANCELED RemoteSocketServiceError_SystemError = 125 - RemoteSocketServiceError_SYS_ENOKEY RemoteSocketServiceError_SystemError = 126 - RemoteSocketServiceError_SYS_EKEYEXPIRED RemoteSocketServiceError_SystemError = 127 - RemoteSocketServiceError_SYS_EKEYREVOKED RemoteSocketServiceError_SystemError = 128 - RemoteSocketServiceError_SYS_EKEYREJECTED RemoteSocketServiceError_SystemError = 129 - RemoteSocketServiceError_SYS_EOWNERDEAD RemoteSocketServiceError_SystemError = 130 - RemoteSocketServiceError_SYS_ENOTRECOVERABLE RemoteSocketServiceError_SystemError = 131 - RemoteSocketServiceError_SYS_ERFKILL RemoteSocketServiceError_SystemError = 132 -) - -var RemoteSocketServiceError_SystemError_name = map[int32]string{ - 0: 
"SYS_SUCCESS", - 1: "SYS_EPERM", - 2: "SYS_ENOENT", - 3: "SYS_ESRCH", - 4: "SYS_EINTR", - 5: "SYS_EIO", - 6: "SYS_ENXIO", - 7: "SYS_E2BIG", - 8: "SYS_ENOEXEC", - 9: "SYS_EBADF", - 10: "SYS_ECHILD", - 11: "SYS_EAGAIN", - // Duplicate value: 11: "SYS_EWOULDBLOCK", - 12: "SYS_ENOMEM", - 13: "SYS_EACCES", - 14: "SYS_EFAULT", - 15: "SYS_ENOTBLK", - 16: "SYS_EBUSY", - 17: "SYS_EEXIST", - 18: "SYS_EXDEV", - 19: "SYS_ENODEV", - 20: "SYS_ENOTDIR", - 21: "SYS_EISDIR", - 22: "SYS_EINVAL", - 23: "SYS_ENFILE", - 24: "SYS_EMFILE", - 25: "SYS_ENOTTY", - 26: "SYS_ETXTBSY", - 27: "SYS_EFBIG", - 28: "SYS_ENOSPC", - 29: "SYS_ESPIPE", - 30: "SYS_EROFS", - 31: "SYS_EMLINK", - 32: "SYS_EPIPE", - 33: "SYS_EDOM", - 34: "SYS_ERANGE", - 35: "SYS_EDEADLK", - // Duplicate value: 35: "SYS_EDEADLOCK", - 36: "SYS_ENAMETOOLONG", - 37: "SYS_ENOLCK", - 38: "SYS_ENOSYS", - 39: "SYS_ENOTEMPTY", - 40: "SYS_ELOOP", - 42: "SYS_ENOMSG", - 43: "SYS_EIDRM", - 44: "SYS_ECHRNG", - 45: "SYS_EL2NSYNC", - 46: "SYS_EL3HLT", - 47: "SYS_EL3RST", - 48: "SYS_ELNRNG", - 49: "SYS_EUNATCH", - 50: "SYS_ENOCSI", - 51: "SYS_EL2HLT", - 52: "SYS_EBADE", - 53: "SYS_EBADR", - 54: "SYS_EXFULL", - 55: "SYS_ENOANO", - 56: "SYS_EBADRQC", - 57: "SYS_EBADSLT", - 59: "SYS_EBFONT", - 60: "SYS_ENOSTR", - 61: "SYS_ENODATA", - 62: "SYS_ETIME", - 63: "SYS_ENOSR", - 64: "SYS_ENONET", - 65: "SYS_ENOPKG", - 66: "SYS_EREMOTE", - 67: "SYS_ENOLINK", - 68: "SYS_EADV", - 69: "SYS_ESRMNT", - 70: "SYS_ECOMM", - 71: "SYS_EPROTO", - 72: "SYS_EMULTIHOP", - 73: "SYS_EDOTDOT", - 74: "SYS_EBADMSG", - 75: "SYS_EOVERFLOW", - 76: "SYS_ENOTUNIQ", - 77: "SYS_EBADFD", - 78: "SYS_EREMCHG", - 79: "SYS_ELIBACC", - 80: "SYS_ELIBBAD", - 81: "SYS_ELIBSCN", - 82: "SYS_ELIBMAX", - 83: "SYS_ELIBEXEC", - 84: "SYS_EILSEQ", - 85: "SYS_ERESTART", - 86: "SYS_ESTRPIPE", - 87: "SYS_EUSERS", - 88: "SYS_ENOTSOCK", - 89: "SYS_EDESTADDRREQ", - 90: "SYS_EMSGSIZE", - 91: "SYS_EPROTOTYPE", - 92: "SYS_ENOPROTOOPT", - 93: "SYS_EPROTONOSUPPORT", - 94: "SYS_ESOCKTNOSUPPORT", - 95: "SYS_EOPNOTSUPP", - // Duplicate value: 95: "SYS_ENOTSUP", - 96: "SYS_EPFNOSUPPORT", - 97: "SYS_EAFNOSUPPORT", - 98: "SYS_EADDRINUSE", - 99: "SYS_EADDRNOTAVAIL", - 100: "SYS_ENETDOWN", - 101: "SYS_ENETUNREACH", - 102: "SYS_ENETRESET", - 103: "SYS_ECONNABORTED", - 104: "SYS_ECONNRESET", - 105: "SYS_ENOBUFS", - 106: "SYS_EISCONN", - 107: "SYS_ENOTCONN", - 108: "SYS_ESHUTDOWN", - 109: "SYS_ETOOMANYREFS", - 110: "SYS_ETIMEDOUT", - 111: "SYS_ECONNREFUSED", - 112: "SYS_EHOSTDOWN", - 113: "SYS_EHOSTUNREACH", - 114: "SYS_EALREADY", - 115: "SYS_EINPROGRESS", - 116: "SYS_ESTALE", - 117: "SYS_EUCLEAN", - 118: "SYS_ENOTNAM", - 119: "SYS_ENAVAIL", - 120: "SYS_EISNAM", - 121: "SYS_EREMOTEIO", - 122: "SYS_EDQUOT", - 123: "SYS_ENOMEDIUM", - 124: "SYS_EMEDIUMTYPE", - 125: "SYS_ECANCELED", - 126: "SYS_ENOKEY", - 127: "SYS_EKEYEXPIRED", - 128: "SYS_EKEYREVOKED", - 129: "SYS_EKEYREJECTED", - 130: "SYS_EOWNERDEAD", - 131: "SYS_ENOTRECOVERABLE", - 132: "SYS_ERFKILL", -} -var RemoteSocketServiceError_SystemError_value = map[string]int32{ - "SYS_SUCCESS": 0, - "SYS_EPERM": 1, - "SYS_ENOENT": 2, - "SYS_ESRCH": 3, - "SYS_EINTR": 4, - "SYS_EIO": 5, - "SYS_ENXIO": 6, - "SYS_E2BIG": 7, - "SYS_ENOEXEC": 8, - "SYS_EBADF": 9, - "SYS_ECHILD": 10, - "SYS_EAGAIN": 11, - "SYS_EWOULDBLOCK": 11, - "SYS_ENOMEM": 12, - "SYS_EACCES": 13, - "SYS_EFAULT": 14, - "SYS_ENOTBLK": 15, - "SYS_EBUSY": 16, - "SYS_EEXIST": 17, - "SYS_EXDEV": 18, - "SYS_ENODEV": 19, - "SYS_ENOTDIR": 20, - "SYS_EISDIR": 21, - "SYS_EINVAL": 22, - "SYS_ENFILE": 23, - "SYS_EMFILE": 24, - "SYS_ENOTTY": 25, 
- "SYS_ETXTBSY": 26, - "SYS_EFBIG": 27, - "SYS_ENOSPC": 28, - "SYS_ESPIPE": 29, - "SYS_EROFS": 30, - "SYS_EMLINK": 31, - "SYS_EPIPE": 32, - "SYS_EDOM": 33, - "SYS_ERANGE": 34, - "SYS_EDEADLK": 35, - "SYS_EDEADLOCK": 35, - "SYS_ENAMETOOLONG": 36, - "SYS_ENOLCK": 37, - "SYS_ENOSYS": 38, - "SYS_ENOTEMPTY": 39, - "SYS_ELOOP": 40, - "SYS_ENOMSG": 42, - "SYS_EIDRM": 43, - "SYS_ECHRNG": 44, - "SYS_EL2NSYNC": 45, - "SYS_EL3HLT": 46, - "SYS_EL3RST": 47, - "SYS_ELNRNG": 48, - "SYS_EUNATCH": 49, - "SYS_ENOCSI": 50, - "SYS_EL2HLT": 51, - "SYS_EBADE": 52, - "SYS_EBADR": 53, - "SYS_EXFULL": 54, - "SYS_ENOANO": 55, - "SYS_EBADRQC": 56, - "SYS_EBADSLT": 57, - "SYS_EBFONT": 59, - "SYS_ENOSTR": 60, - "SYS_ENODATA": 61, - "SYS_ETIME": 62, - "SYS_ENOSR": 63, - "SYS_ENONET": 64, - "SYS_ENOPKG": 65, - "SYS_EREMOTE": 66, - "SYS_ENOLINK": 67, - "SYS_EADV": 68, - "SYS_ESRMNT": 69, - "SYS_ECOMM": 70, - "SYS_EPROTO": 71, - "SYS_EMULTIHOP": 72, - "SYS_EDOTDOT": 73, - "SYS_EBADMSG": 74, - "SYS_EOVERFLOW": 75, - "SYS_ENOTUNIQ": 76, - "SYS_EBADFD": 77, - "SYS_EREMCHG": 78, - "SYS_ELIBACC": 79, - "SYS_ELIBBAD": 80, - "SYS_ELIBSCN": 81, - "SYS_ELIBMAX": 82, - "SYS_ELIBEXEC": 83, - "SYS_EILSEQ": 84, - "SYS_ERESTART": 85, - "SYS_ESTRPIPE": 86, - "SYS_EUSERS": 87, - "SYS_ENOTSOCK": 88, - "SYS_EDESTADDRREQ": 89, - "SYS_EMSGSIZE": 90, - "SYS_EPROTOTYPE": 91, - "SYS_ENOPROTOOPT": 92, - "SYS_EPROTONOSUPPORT": 93, - "SYS_ESOCKTNOSUPPORT": 94, - "SYS_EOPNOTSUPP": 95, - "SYS_ENOTSUP": 95, - "SYS_EPFNOSUPPORT": 96, - "SYS_EAFNOSUPPORT": 97, - "SYS_EADDRINUSE": 98, - "SYS_EADDRNOTAVAIL": 99, - "SYS_ENETDOWN": 100, - "SYS_ENETUNREACH": 101, - "SYS_ENETRESET": 102, - "SYS_ECONNABORTED": 103, - "SYS_ECONNRESET": 104, - "SYS_ENOBUFS": 105, - "SYS_EISCONN": 106, - "SYS_ENOTCONN": 107, - "SYS_ESHUTDOWN": 108, - "SYS_ETOOMANYREFS": 109, - "SYS_ETIMEDOUT": 110, - "SYS_ECONNREFUSED": 111, - "SYS_EHOSTDOWN": 112, - "SYS_EHOSTUNREACH": 113, - "SYS_EALREADY": 114, - "SYS_EINPROGRESS": 115, - "SYS_ESTALE": 116, - "SYS_EUCLEAN": 117, - "SYS_ENOTNAM": 118, - "SYS_ENAVAIL": 119, - "SYS_EISNAM": 120, - "SYS_EREMOTEIO": 121, - "SYS_EDQUOT": 122, - "SYS_ENOMEDIUM": 123, - "SYS_EMEDIUMTYPE": 124, - "SYS_ECANCELED": 125, - "SYS_ENOKEY": 126, - "SYS_EKEYEXPIRED": 127, - "SYS_EKEYREVOKED": 128, - "SYS_EKEYREJECTED": 129, - "SYS_EOWNERDEAD": 130, - "SYS_ENOTRECOVERABLE": 131, - "SYS_ERFKILL": 132, -} - -func (x RemoteSocketServiceError_SystemError) Enum() *RemoteSocketServiceError_SystemError { - p := new(RemoteSocketServiceError_SystemError) - *p = x - return p -} -func (x RemoteSocketServiceError_SystemError) String() string { - return proto.EnumName(RemoteSocketServiceError_SystemError_name, int32(x)) -} -func (x *RemoteSocketServiceError_SystemError) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(RemoteSocketServiceError_SystemError_value, data, "RemoteSocketServiceError_SystemError") - if err != nil { - return err - } - *x = RemoteSocketServiceError_SystemError(value) - return nil -} -func (RemoteSocketServiceError_SystemError) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{0, 1} -} - -type CreateSocketRequest_SocketFamily int32 - -const ( - CreateSocketRequest_IPv4 CreateSocketRequest_SocketFamily = 1 - CreateSocketRequest_IPv6 CreateSocketRequest_SocketFamily = 2 -) - -var CreateSocketRequest_SocketFamily_name = map[int32]string{ - 1: "IPv4", - 2: "IPv6", -} -var CreateSocketRequest_SocketFamily_value = map[string]int32{ - "IPv4": 1, - "IPv6": 2, -} - -func (x 
CreateSocketRequest_SocketFamily) Enum() *CreateSocketRequest_SocketFamily { - p := new(CreateSocketRequest_SocketFamily) - *p = x - return p -} -func (x CreateSocketRequest_SocketFamily) String() string { - return proto.EnumName(CreateSocketRequest_SocketFamily_name, int32(x)) -} -func (x *CreateSocketRequest_SocketFamily) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(CreateSocketRequest_SocketFamily_value, data, "CreateSocketRequest_SocketFamily") - if err != nil { - return err - } - *x = CreateSocketRequest_SocketFamily(value) - return nil -} -func (CreateSocketRequest_SocketFamily) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{2, 0} -} - -type CreateSocketRequest_SocketProtocol int32 - -const ( - CreateSocketRequest_TCP CreateSocketRequest_SocketProtocol = 1 - CreateSocketRequest_UDP CreateSocketRequest_SocketProtocol = 2 -) - -var CreateSocketRequest_SocketProtocol_name = map[int32]string{ - 1: "TCP", - 2: "UDP", -} -var CreateSocketRequest_SocketProtocol_value = map[string]int32{ - "TCP": 1, - "UDP": 2, -} - -func (x CreateSocketRequest_SocketProtocol) Enum() *CreateSocketRequest_SocketProtocol { - p := new(CreateSocketRequest_SocketProtocol) - *p = x - return p -} -func (x CreateSocketRequest_SocketProtocol) String() string { - return proto.EnumName(CreateSocketRequest_SocketProtocol_name, int32(x)) -} -func (x *CreateSocketRequest_SocketProtocol) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(CreateSocketRequest_SocketProtocol_value, data, "CreateSocketRequest_SocketProtocol") - if err != nil { - return err - } - *x = CreateSocketRequest_SocketProtocol(value) - return nil -} -func (CreateSocketRequest_SocketProtocol) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{2, 1} -} - -type SocketOption_SocketOptionLevel int32 - -const ( - SocketOption_SOCKET_SOL_IP SocketOption_SocketOptionLevel = 0 - SocketOption_SOCKET_SOL_SOCKET SocketOption_SocketOptionLevel = 1 - SocketOption_SOCKET_SOL_TCP SocketOption_SocketOptionLevel = 6 - SocketOption_SOCKET_SOL_UDP SocketOption_SocketOptionLevel = 17 -) - -var SocketOption_SocketOptionLevel_name = map[int32]string{ - 0: "SOCKET_SOL_IP", - 1: "SOCKET_SOL_SOCKET", - 6: "SOCKET_SOL_TCP", - 17: "SOCKET_SOL_UDP", -} -var SocketOption_SocketOptionLevel_value = map[string]int32{ - "SOCKET_SOL_IP": 0, - "SOCKET_SOL_SOCKET": 1, - "SOCKET_SOL_TCP": 6, - "SOCKET_SOL_UDP": 17, -} - -func (x SocketOption_SocketOptionLevel) Enum() *SocketOption_SocketOptionLevel { - p := new(SocketOption_SocketOptionLevel) - *p = x - return p -} -func (x SocketOption_SocketOptionLevel) String() string { - return proto.EnumName(SocketOption_SocketOptionLevel_name, int32(x)) -} -func (x *SocketOption_SocketOptionLevel) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(SocketOption_SocketOptionLevel_value, data, "SocketOption_SocketOptionLevel") - if err != nil { - return err - } - *x = SocketOption_SocketOptionLevel(value) - return nil -} -func (SocketOption_SocketOptionLevel) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{10, 0} -} - -type SocketOption_SocketOptionName int32 - -const ( - SocketOption_SOCKET_SO_DEBUG SocketOption_SocketOptionName = 1 - SocketOption_SOCKET_SO_REUSEADDR SocketOption_SocketOptionName = 2 - SocketOption_SOCKET_SO_TYPE SocketOption_SocketOptionName = 3 - SocketOption_SOCKET_SO_ERROR SocketOption_SocketOptionName = 4 - 
SocketOption_SOCKET_SO_DONTROUTE SocketOption_SocketOptionName = 5
-	SocketOption_SOCKET_SO_BROADCAST SocketOption_SocketOptionName = 6
-	SocketOption_SOCKET_SO_SNDBUF SocketOption_SocketOptionName = 7
-	SocketOption_SOCKET_SO_RCVBUF SocketOption_SocketOptionName = 8
-	SocketOption_SOCKET_SO_KEEPALIVE SocketOption_SocketOptionName = 9
-	SocketOption_SOCKET_SO_OOBINLINE SocketOption_SocketOptionName = 10
-	SocketOption_SOCKET_SO_LINGER SocketOption_SocketOptionName = 13
-	SocketOption_SOCKET_SO_RCVTIMEO SocketOption_SocketOptionName = 20
-	SocketOption_SOCKET_SO_SNDTIMEO SocketOption_SocketOptionName = 21
-	SocketOption_SOCKET_IP_TOS SocketOption_SocketOptionName = 1
-	SocketOption_SOCKET_IP_TTL SocketOption_SocketOptionName = 2
-	SocketOption_SOCKET_IP_HDRINCL SocketOption_SocketOptionName = 3
-	SocketOption_SOCKET_IP_OPTIONS SocketOption_SocketOptionName = 4
-	SocketOption_SOCKET_TCP_NODELAY SocketOption_SocketOptionName = 1
-	SocketOption_SOCKET_TCP_MAXSEG SocketOption_SocketOptionName = 2
-	SocketOption_SOCKET_TCP_CORK SocketOption_SocketOptionName = 3
-	SocketOption_SOCKET_TCP_KEEPIDLE SocketOption_SocketOptionName = 4
-	SocketOption_SOCKET_TCP_KEEPINTVL SocketOption_SocketOptionName = 5
-	SocketOption_SOCKET_TCP_KEEPCNT SocketOption_SocketOptionName = 6
-	SocketOption_SOCKET_TCP_SYNCNT SocketOption_SocketOptionName = 7
-	SocketOption_SOCKET_TCP_LINGER2 SocketOption_SocketOptionName = 8
-	SocketOption_SOCKET_TCP_DEFER_ACCEPT SocketOption_SocketOptionName = 9
-	SocketOption_SOCKET_TCP_WINDOW_CLAMP SocketOption_SocketOptionName = 10
-	SocketOption_SOCKET_TCP_INFO SocketOption_SocketOptionName = 11
-	SocketOption_SOCKET_TCP_QUICKACK SocketOption_SocketOptionName = 12
-)
-
-var SocketOption_SocketOptionName_name = map[int32]string{
-	1: "SOCKET_SO_DEBUG",
-	2: "SOCKET_SO_REUSEADDR",
-	3: "SOCKET_SO_TYPE",
-	4: "SOCKET_SO_ERROR",
-	5: "SOCKET_SO_DONTROUTE",
-	6: "SOCKET_SO_BROADCAST",
-	7: "SOCKET_SO_SNDBUF",
-	8: "SOCKET_SO_RCVBUF",
-	9: "SOCKET_SO_KEEPALIVE",
-	10: "SOCKET_SO_OOBINLINE",
-	13: "SOCKET_SO_LINGER",
-	20: "SOCKET_SO_RCVTIMEO",
-	21: "SOCKET_SO_SNDTIMEO",
-	// Duplicate value: 1: "SOCKET_IP_TOS",
-	// Duplicate value: 2: "SOCKET_IP_TTL",
-	// Duplicate value: 3: "SOCKET_IP_HDRINCL",
-	// Duplicate value: 4: "SOCKET_IP_OPTIONS",
-	// Duplicate value: 1: "SOCKET_TCP_NODELAY",
-	// Duplicate value: 2: "SOCKET_TCP_MAXSEG",
-	// Duplicate value: 3: "SOCKET_TCP_CORK",
-	// Duplicate value: 4: "SOCKET_TCP_KEEPIDLE",
-	// Duplicate value: 5: "SOCKET_TCP_KEEPINTVL",
-	// Duplicate value: 6: "SOCKET_TCP_KEEPCNT",
-	// Duplicate value: 7: "SOCKET_TCP_SYNCNT",
-	// Duplicate value: 8: "SOCKET_TCP_LINGER2",
-	// Duplicate value: 9: "SOCKET_TCP_DEFER_ACCEPT",
-	// Duplicate value: 10: "SOCKET_TCP_WINDOW_CLAMP",
-	11: "SOCKET_TCP_INFO",
-	12: "SOCKET_TCP_QUICKACK",
-}
-var SocketOption_SocketOptionName_value = map[string]int32{
-	"SOCKET_SO_DEBUG": 1,
-	"SOCKET_SO_REUSEADDR": 2,
-	"SOCKET_SO_TYPE": 3,
-	"SOCKET_SO_ERROR": 4,
-	"SOCKET_SO_DONTROUTE": 5,
-	"SOCKET_SO_BROADCAST": 6,
-	"SOCKET_SO_SNDBUF": 7,
-	"SOCKET_SO_RCVBUF": 8,
-	"SOCKET_SO_KEEPALIVE": 9,
-	"SOCKET_SO_OOBINLINE": 10,
-	"SOCKET_SO_LINGER": 13,
-	"SOCKET_SO_RCVTIMEO": 20,
-	"SOCKET_SO_SNDTIMEO": 21,
-	"SOCKET_IP_TOS": 1,
-	"SOCKET_IP_TTL": 2,
-	"SOCKET_IP_HDRINCL": 3,
-	"SOCKET_IP_OPTIONS": 4,
-	"SOCKET_TCP_NODELAY": 1,
-	"SOCKET_TCP_MAXSEG": 2,
-	"SOCKET_TCP_CORK": 3,
-	"SOCKET_TCP_KEEPIDLE": 4,
-	"SOCKET_TCP_KEEPINTVL": 5,
-	"SOCKET_TCP_KEEPCNT": 6,
-	"SOCKET_TCP_SYNCNT": 7,
-	"SOCKET_TCP_LINGER2": 8,
-	"SOCKET_TCP_DEFER_ACCEPT": 9,
-	"SOCKET_TCP_WINDOW_CLAMP": 10,
-	"SOCKET_TCP_INFO": 11,
-	"SOCKET_TCP_QUICKACK": 12,
-}
-
-func (x SocketOption_SocketOptionName) Enum() *SocketOption_SocketOptionName {
-	p := new(SocketOption_SocketOptionName)
-	*p = x
-	return p
-}
-func (x SocketOption_SocketOptionName) String() string {
-	return proto.EnumName(SocketOption_SocketOptionName_name, int32(x))
-}
-func (x *SocketOption_SocketOptionName) UnmarshalJSON(data []byte) error {
-	value, err := proto.UnmarshalJSONEnum(SocketOption_SocketOptionName_value, data, "SocketOption_SocketOptionName")
-	if err != nil {
-		return err
-	}
-	*x = SocketOption_SocketOptionName(value)
-	return nil
-}
-func (SocketOption_SocketOptionName) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_socket_service_b5f8f233dc327808, []int{10, 1}
-}
-
-type ShutDownRequest_How int32
-
-const (
-	ShutDownRequest_SOCKET_SHUT_RD ShutDownRequest_How = 1
-	ShutDownRequest_SOCKET_SHUT_WR ShutDownRequest_How = 2
-	ShutDownRequest_SOCKET_SHUT_RDWR ShutDownRequest_How = 3
-)
-
-var ShutDownRequest_How_name = map[int32]string{
-	1: "SOCKET_SHUT_RD",
-	2: "SOCKET_SHUT_WR",
-	3: "SOCKET_SHUT_RDWR",
-}
-var ShutDownRequest_How_value = map[string]int32{
-	"SOCKET_SHUT_RD": 1,
-	"SOCKET_SHUT_WR": 2,
-	"SOCKET_SHUT_RDWR": 3,
-}
-
-func (x ShutDownRequest_How) Enum() *ShutDownRequest_How {
-	p := new(ShutDownRequest_How)
-	*p = x
-	return p
-}
-func (x ShutDownRequest_How) String() string {
-	return proto.EnumName(ShutDownRequest_How_name, int32(x))
-}
-func (x *ShutDownRequest_How) UnmarshalJSON(data []byte) error {
-	value, err := proto.UnmarshalJSONEnum(ShutDownRequest_How_value, data, "ShutDownRequest_How")
-	if err != nil {
-		return err
-	}
-	*x = ShutDownRequest_How(value)
-	return nil
-}
-func (ShutDownRequest_How) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_socket_service_b5f8f233dc327808, []int{21, 0}
-}
-
-type ReceiveRequest_Flags int32
-
-const (
-	ReceiveRequest_MSG_OOB ReceiveRequest_Flags = 1
-	ReceiveRequest_MSG_PEEK ReceiveRequest_Flags = 2
-)
-
-var ReceiveRequest_Flags_name = map[int32]string{
-	1: "MSG_OOB",
-	2: "MSG_PEEK",
-}
-var ReceiveRequest_Flags_value = map[string]int32{
-	"MSG_OOB": 1,
-	"MSG_PEEK": 2,
-}
-
-func (x ReceiveRequest_Flags) Enum() *ReceiveRequest_Flags {
-	p := new(ReceiveRequest_Flags)
-	*p = x
-	return p
-}
-func (x ReceiveRequest_Flags) String() string {
-	return proto.EnumName(ReceiveRequest_Flags_name, int32(x))
-}
-func (x *ReceiveRequest_Flags) UnmarshalJSON(data []byte) error {
-	value, err := proto.UnmarshalJSONEnum(ReceiveRequest_Flags_value, data, "ReceiveRequest_Flags")
-	if err != nil {
-		return err
-	}
-	*x = ReceiveRequest_Flags(value)
-	return nil
-}
-func (ReceiveRequest_Flags) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_socket_service_b5f8f233dc327808, []int{27, 0}
-}
-
-type PollEvent_PollEventFlag int32
-
-const (
-	PollEvent_SOCKET_POLLNONE PollEvent_PollEventFlag = 0
-	PollEvent_SOCKET_POLLIN PollEvent_PollEventFlag = 1
-	PollEvent_SOCKET_POLLPRI PollEvent_PollEventFlag = 2
-	PollEvent_SOCKET_POLLOUT PollEvent_PollEventFlag = 4
-	PollEvent_SOCKET_POLLERR PollEvent_PollEventFlag = 8
-	PollEvent_SOCKET_POLLHUP PollEvent_PollEventFlag = 16
-	PollEvent_SOCKET_POLLNVAL PollEvent_PollEventFlag = 32
-	PollEvent_SOCKET_POLLRDNORM PollEvent_PollEventFlag = 64
-	PollEvent_SOCKET_POLLRDBAND PollEvent_PollEventFlag = 128
-	PollEvent_SOCKET_POLLWRNORM PollEvent_PollEventFlag = 256
-	PollEvent_SOCKET_POLLWRBAND PollEvent_PollEventFlag = 512
-	PollEvent_SOCKET_POLLMSG PollEvent_PollEventFlag = 1024
-	PollEvent_SOCKET_POLLREMOVE PollEvent_PollEventFlag = 4096
-	PollEvent_SOCKET_POLLRDHUP PollEvent_PollEventFlag = 8192
-)
-
-var PollEvent_PollEventFlag_name = map[int32]string{
-	0: "SOCKET_POLLNONE",
-	1: "SOCKET_POLLIN",
-	2: "SOCKET_POLLPRI",
-	4: "SOCKET_POLLOUT",
-	8: "SOCKET_POLLERR",
-	16: "SOCKET_POLLHUP",
-	32: "SOCKET_POLLNVAL",
-	64: "SOCKET_POLLRDNORM",
-	128: "SOCKET_POLLRDBAND",
-	256: "SOCKET_POLLWRNORM",
-	512: "SOCKET_POLLWRBAND",
-	1024: "SOCKET_POLLMSG",
-	4096: "SOCKET_POLLREMOVE",
-	8192: "SOCKET_POLLRDHUP",
-}
-var PollEvent_PollEventFlag_value = map[string]int32{
-	"SOCKET_POLLNONE": 0,
-	"SOCKET_POLLIN": 1,
-	"SOCKET_POLLPRI": 2,
-	"SOCKET_POLLOUT": 4,
-	"SOCKET_POLLERR": 8,
-	"SOCKET_POLLHUP": 16,
-	"SOCKET_POLLNVAL": 32,
-	"SOCKET_POLLRDNORM": 64,
-	"SOCKET_POLLRDBAND": 128,
-	"SOCKET_POLLWRNORM": 256,
-	"SOCKET_POLLWRBAND": 512,
-	"SOCKET_POLLMSG": 1024,
-	"SOCKET_POLLREMOVE": 4096,
-	"SOCKET_POLLRDHUP": 8192,
-}
-
-func (x PollEvent_PollEventFlag) Enum() *PollEvent_PollEventFlag {
-	p := new(PollEvent_PollEventFlag)
-	*p = x
-	return p
-}
-func (x PollEvent_PollEventFlag) String() string {
-	return proto.EnumName(PollEvent_PollEventFlag_name, int32(x))
-}
-func (x *PollEvent_PollEventFlag) UnmarshalJSON(data []byte) error {
-	value, err := proto.UnmarshalJSONEnum(PollEvent_PollEventFlag_value, data, "PollEvent_PollEventFlag")
-	if err != nil {
-		return err
-	}
-	*x = PollEvent_PollEventFlag(value)
-	return nil
-}
-func (PollEvent_PollEventFlag) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_socket_service_b5f8f233dc327808, []int{29, 0}
-}
-
-type ResolveReply_ErrorCode int32
-
-const (
-	ResolveReply_SOCKET_EAI_ADDRFAMILY ResolveReply_ErrorCode = 1
-	ResolveReply_SOCKET_EAI_AGAIN ResolveReply_ErrorCode = 2
-	ResolveReply_SOCKET_EAI_BADFLAGS ResolveReply_ErrorCode = 3
-	ResolveReply_SOCKET_EAI_FAIL ResolveReply_ErrorCode = 4
-	ResolveReply_SOCKET_EAI_FAMILY ResolveReply_ErrorCode = 5
-	ResolveReply_SOCKET_EAI_MEMORY ResolveReply_ErrorCode = 6
-	ResolveReply_SOCKET_EAI_NODATA ResolveReply_ErrorCode = 7
-	ResolveReply_SOCKET_EAI_NONAME ResolveReply_ErrorCode = 8
-	ResolveReply_SOCKET_EAI_SERVICE ResolveReply_ErrorCode = 9
-	ResolveReply_SOCKET_EAI_SOCKTYPE ResolveReply_ErrorCode = 10
-	ResolveReply_SOCKET_EAI_SYSTEM ResolveReply_ErrorCode = 11
-	ResolveReply_SOCKET_EAI_BADHINTS ResolveReply_ErrorCode = 12
-	ResolveReply_SOCKET_EAI_PROTOCOL ResolveReply_ErrorCode = 13
-	ResolveReply_SOCKET_EAI_OVERFLOW ResolveReply_ErrorCode = 14
-	ResolveReply_SOCKET_EAI_MAX ResolveReply_ErrorCode = 15
-)
-
-var ResolveReply_ErrorCode_name = map[int32]string{
-	1: "SOCKET_EAI_ADDRFAMILY",
-	2: "SOCKET_EAI_AGAIN",
-	3: "SOCKET_EAI_BADFLAGS",
-	4: "SOCKET_EAI_FAIL",
-	5: "SOCKET_EAI_FAMILY",
-	6: "SOCKET_EAI_MEMORY",
-	7: "SOCKET_EAI_NODATA",
-	8: "SOCKET_EAI_NONAME",
-	9: "SOCKET_EAI_SERVICE",
-	10: "SOCKET_EAI_SOCKTYPE",
-	11: "SOCKET_EAI_SYSTEM",
-	12: "SOCKET_EAI_BADHINTS",
-	13: "SOCKET_EAI_PROTOCOL",
-	14: "SOCKET_EAI_OVERFLOW",
-	15: "SOCKET_EAI_MAX",
-}
-var ResolveReply_ErrorCode_value = map[string]int32{
-	"SOCKET_EAI_ADDRFAMILY": 1,
-	"SOCKET_EAI_AGAIN": 2,
-	"SOCKET_EAI_BADFLAGS": 3,
-	"SOCKET_EAI_FAIL": 4,
-	"SOCKET_EAI_FAMILY": 5,
-	"SOCKET_EAI_MEMORY": 6,
-	"SOCKET_EAI_NODATA": 7,
-	"SOCKET_EAI_NONAME": 8,
-	"SOCKET_EAI_SERVICE": 9,
-	"SOCKET_EAI_SOCKTYPE": 10,
-	"SOCKET_EAI_SYSTEM": 11,
-	"SOCKET_EAI_BADHINTS": 12,
-	"SOCKET_EAI_PROTOCOL": 13,
-	"SOCKET_EAI_OVERFLOW": 14,
-	"SOCKET_EAI_MAX": 15,
-}
-
-func (x ResolveReply_ErrorCode) Enum() *ResolveReply_ErrorCode {
-	p := new(ResolveReply_ErrorCode)
-	*p = x
-	return p
-}
-func (x ResolveReply_ErrorCode) String() string {
-	return proto.EnumName(ResolveReply_ErrorCode_name, int32(x))
-}
-func (x *ResolveReply_ErrorCode) UnmarshalJSON(data []byte) error {
-	value, err := proto.UnmarshalJSONEnum(ResolveReply_ErrorCode_value, data, "ResolveReply_ErrorCode")
-	if err != nil {
-		return err
-	}
-	*x = ResolveReply_ErrorCode(value)
-	return nil
-}
-func (ResolveReply_ErrorCode) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_socket_service_b5f8f233dc327808, []int{33, 0}
-}
-
-type RemoteSocketServiceError struct {
-	SystemError *int32 `protobuf:"varint,1,opt,name=system_error,json=systemError,def=0" json:"system_error,omitempty"`
-	ErrorDetail *string `protobuf:"bytes,2,opt,name=error_detail,json=errorDetail" json:"error_detail,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
-
-func (m *RemoteSocketServiceError) Reset() { *m = RemoteSocketServiceError{} }
-func (m *RemoteSocketServiceError) String() string { return proto.CompactTextString(m) }
-func (*RemoteSocketServiceError) ProtoMessage() {}
-func (*RemoteSocketServiceError) Descriptor() ([]byte, []int) {
-	return fileDescriptor_socket_service_b5f8f233dc327808, []int{0}
-}
-func (m *RemoteSocketServiceError) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_RemoteSocketServiceError.Unmarshal(m, b)
-}
-func (m *RemoteSocketServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_RemoteSocketServiceError.Marshal(b, m, deterministic)
-}
-func (dst *RemoteSocketServiceError) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_RemoteSocketServiceError.Merge(dst, src)
-}
-func (m *RemoteSocketServiceError) XXX_Size() int {
-	return xxx_messageInfo_RemoteSocketServiceError.Size(m)
-}
-func (m *RemoteSocketServiceError) XXX_DiscardUnknown() {
-	xxx_messageInfo_RemoteSocketServiceError.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RemoteSocketServiceError proto.InternalMessageInfo
-
-const Default_RemoteSocketServiceError_SystemError int32 = 0
-
-func (m *RemoteSocketServiceError) GetSystemError() int32 {
-	if m != nil && m.SystemError != nil {
-		return *m.SystemError
-	}
-	return Default_RemoteSocketServiceError_SystemError
-}
-
-func (m *RemoteSocketServiceError) GetErrorDetail() string {
-	if m != nil && m.ErrorDetail != nil {
-		return *m.ErrorDetail
-	}
-	return ""
-}
-
-type AddressPort struct {
-	Port *int32 `protobuf:"varint,1,req,name=port" json:"port,omitempty"`
-	PackedAddress []byte `protobuf:"bytes,2,opt,name=packed_address,json=packedAddress" json:"packed_address,omitempty"`
-	HostnameHint *string `protobuf:"bytes,3,opt,name=hostname_hint,json=hostnameHint" json:"hostname_hint,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized []byte `json:"-"`
-	XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AddressPort) Reset() { *m = AddressPort{} }
-func (m *AddressPort) String() string { return proto.CompactTextString(m) }
-func (*AddressPort) ProtoMessage() {}
-func (*AddressPort) Descriptor() ([]byte, []int) {
-	return fileDescriptor_socket_service_b5f8f233dc327808, []int{1}
-}
-func (m *AddressPort) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_AddressPort.Unmarshal(m, b)
-}
-func (m *AddressPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_AddressPort.Marshal(b, m, deterministic)
-}
-func (dst *AddressPort) XXX_Merge(src proto.Message) { - xxx_messageInfo_AddressPort.Merge(dst, src) -} -func (m *AddressPort) XXX_Size() int { - return xxx_messageInfo_AddressPort.Size(m) -} -func (m *AddressPort) XXX_DiscardUnknown() { - xxx_messageInfo_AddressPort.DiscardUnknown(m) -} - -var xxx_messageInfo_AddressPort proto.InternalMessageInfo - -func (m *AddressPort) GetPort() int32 { - if m != nil && m.Port != nil { - return *m.Port - } - return 0 -} - -func (m *AddressPort) GetPackedAddress() []byte { - if m != nil { - return m.PackedAddress - } - return nil -} - -func (m *AddressPort) GetHostnameHint() string { - if m != nil && m.HostnameHint != nil { - return *m.HostnameHint - } - return "" -} - -type CreateSocketRequest struct { - Family *CreateSocketRequest_SocketFamily `protobuf:"varint,1,req,name=family,enum=appengine.CreateSocketRequest_SocketFamily" json:"family,omitempty"` - Protocol *CreateSocketRequest_SocketProtocol `protobuf:"varint,2,req,name=protocol,enum=appengine.CreateSocketRequest_SocketProtocol" json:"protocol,omitempty"` - SocketOptions []*SocketOption `protobuf:"bytes,3,rep,name=socket_options,json=socketOptions" json:"socket_options,omitempty"` - ProxyExternalIp *AddressPort `protobuf:"bytes,4,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` - ListenBacklog *int32 `protobuf:"varint,5,opt,name=listen_backlog,json=listenBacklog,def=0" json:"listen_backlog,omitempty"` - RemoteIp *AddressPort `protobuf:"bytes,6,opt,name=remote_ip,json=remoteIp" json:"remote_ip,omitempty"` - AppId *string `protobuf:"bytes,9,opt,name=app_id,json=appId" json:"app_id,omitempty"` - ProjectId *int64 `protobuf:"varint,10,opt,name=project_id,json=projectId" json:"project_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CreateSocketRequest) Reset() { *m = CreateSocketRequest{} } -func (m *CreateSocketRequest) String() string { return proto.CompactTextString(m) } -func (*CreateSocketRequest) ProtoMessage() {} -func (*CreateSocketRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{2} -} -func (m *CreateSocketRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CreateSocketRequest.Unmarshal(m, b) -} -func (m *CreateSocketRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CreateSocketRequest.Marshal(b, m, deterministic) -} -func (dst *CreateSocketRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateSocketRequest.Merge(dst, src) -} -func (m *CreateSocketRequest) XXX_Size() int { - return xxx_messageInfo_CreateSocketRequest.Size(m) -} -func (m *CreateSocketRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CreateSocketRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_CreateSocketRequest proto.InternalMessageInfo - -const Default_CreateSocketRequest_ListenBacklog int32 = 0 - -func (m *CreateSocketRequest) GetFamily() CreateSocketRequest_SocketFamily { - if m != nil && m.Family != nil { - return *m.Family - } - return CreateSocketRequest_IPv4 -} - -func (m *CreateSocketRequest) GetProtocol() CreateSocketRequest_SocketProtocol { - if m != nil && m.Protocol != nil { - return *m.Protocol - } - return CreateSocketRequest_TCP -} - -func (m *CreateSocketRequest) GetSocketOptions() []*SocketOption { - if m != nil { - return m.SocketOptions - } - return nil -} - -func (m *CreateSocketRequest) GetProxyExternalIp() *AddressPort { - if m != nil { - return 
m.ProxyExternalIp - } - return nil -} - -func (m *CreateSocketRequest) GetListenBacklog() int32 { - if m != nil && m.ListenBacklog != nil { - return *m.ListenBacklog - } - return Default_CreateSocketRequest_ListenBacklog -} - -func (m *CreateSocketRequest) GetRemoteIp() *AddressPort { - if m != nil { - return m.RemoteIp - } - return nil -} - -func (m *CreateSocketRequest) GetAppId() string { - if m != nil && m.AppId != nil { - return *m.AppId - } - return "" -} - -func (m *CreateSocketRequest) GetProjectId() int64 { - if m != nil && m.ProjectId != nil { - return *m.ProjectId - } - return 0 -} - -type CreateSocketReply struct { - SocketDescriptor *string `protobuf:"bytes,1,opt,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - ServerAddress *AddressPort `protobuf:"bytes,3,opt,name=server_address,json=serverAddress" json:"server_address,omitempty"` - ProxyExternalIp *AddressPort `protobuf:"bytes,4,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CreateSocketReply) Reset() { *m = CreateSocketReply{} } -func (m *CreateSocketReply) String() string { return proto.CompactTextString(m) } -func (*CreateSocketReply) ProtoMessage() {} -func (*CreateSocketReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{3} -} - -var extRange_CreateSocketReply = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*CreateSocketReply) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_CreateSocketReply -} -func (m *CreateSocketReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CreateSocketReply.Unmarshal(m, b) -} -func (m *CreateSocketReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CreateSocketReply.Marshal(b, m, deterministic) -} -func (dst *CreateSocketReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateSocketReply.Merge(dst, src) -} -func (m *CreateSocketReply) XXX_Size() int { - return xxx_messageInfo_CreateSocketReply.Size(m) -} -func (m *CreateSocketReply) XXX_DiscardUnknown() { - xxx_messageInfo_CreateSocketReply.DiscardUnknown(m) -} - -var xxx_messageInfo_CreateSocketReply proto.InternalMessageInfo - -func (m *CreateSocketReply) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *CreateSocketReply) GetServerAddress() *AddressPort { - if m != nil { - return m.ServerAddress - } - return nil -} - -func (m *CreateSocketReply) GetProxyExternalIp() *AddressPort { - if m != nil { - return m.ProxyExternalIp - } - return nil -} - -type BindRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - ProxyExternalIp *AddressPort `protobuf:"bytes,2,req,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BindRequest) Reset() { *m = BindRequest{} } -func (m *BindRequest) String() string { return proto.CompactTextString(m) } -func (*BindRequest) ProtoMessage() {} -func (*BindRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{4} -} -func (m *BindRequest) XXX_Unmarshal(b 
[]byte) error { - return xxx_messageInfo_BindRequest.Unmarshal(m, b) -} -func (m *BindRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BindRequest.Marshal(b, m, deterministic) -} -func (dst *BindRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_BindRequest.Merge(dst, src) -} -func (m *BindRequest) XXX_Size() int { - return xxx_messageInfo_BindRequest.Size(m) -} -func (m *BindRequest) XXX_DiscardUnknown() { - xxx_messageInfo_BindRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_BindRequest proto.InternalMessageInfo - -func (m *BindRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *BindRequest) GetProxyExternalIp() *AddressPort { - if m != nil { - return m.ProxyExternalIp - } - return nil -} - -type BindReply struct { - ProxyExternalIp *AddressPort `protobuf:"bytes,1,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BindReply) Reset() { *m = BindReply{} } -func (m *BindReply) String() string { return proto.CompactTextString(m) } -func (*BindReply) ProtoMessage() {} -func (*BindReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{5} -} -func (m *BindReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BindReply.Unmarshal(m, b) -} -func (m *BindReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BindReply.Marshal(b, m, deterministic) -} -func (dst *BindReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_BindReply.Merge(dst, src) -} -func (m *BindReply) XXX_Size() int { - return xxx_messageInfo_BindReply.Size(m) -} -func (m *BindReply) XXX_DiscardUnknown() { - xxx_messageInfo_BindReply.DiscardUnknown(m) -} - -var xxx_messageInfo_BindReply proto.InternalMessageInfo - -func (m *BindReply) GetProxyExternalIp() *AddressPort { - if m != nil { - return m.ProxyExternalIp - } - return nil -} - -type GetSocketNameRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetSocketNameRequest) Reset() { *m = GetSocketNameRequest{} } -func (m *GetSocketNameRequest) String() string { return proto.CompactTextString(m) } -func (*GetSocketNameRequest) ProtoMessage() {} -func (*GetSocketNameRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{6} -} -func (m *GetSocketNameRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetSocketNameRequest.Unmarshal(m, b) -} -func (m *GetSocketNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetSocketNameRequest.Marshal(b, m, deterministic) -} -func (dst *GetSocketNameRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetSocketNameRequest.Merge(dst, src) -} -func (m *GetSocketNameRequest) XXX_Size() int { - return xxx_messageInfo_GetSocketNameRequest.Size(m) -} -func (m *GetSocketNameRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetSocketNameRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetSocketNameRequest proto.InternalMessageInfo - -func (m *GetSocketNameRequest) GetSocketDescriptor() string { - if m != nil && 
m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -type GetSocketNameReply struct { - ProxyExternalIp *AddressPort `protobuf:"bytes,2,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetSocketNameReply) Reset() { *m = GetSocketNameReply{} } -func (m *GetSocketNameReply) String() string { return proto.CompactTextString(m) } -func (*GetSocketNameReply) ProtoMessage() {} -func (*GetSocketNameReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{7} -} -func (m *GetSocketNameReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetSocketNameReply.Unmarshal(m, b) -} -func (m *GetSocketNameReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetSocketNameReply.Marshal(b, m, deterministic) -} -func (dst *GetSocketNameReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetSocketNameReply.Merge(dst, src) -} -func (m *GetSocketNameReply) XXX_Size() int { - return xxx_messageInfo_GetSocketNameReply.Size(m) -} -func (m *GetSocketNameReply) XXX_DiscardUnknown() { - xxx_messageInfo_GetSocketNameReply.DiscardUnknown(m) -} - -var xxx_messageInfo_GetSocketNameReply proto.InternalMessageInfo - -func (m *GetSocketNameReply) GetProxyExternalIp() *AddressPort { - if m != nil { - return m.ProxyExternalIp - } - return nil -} - -type GetPeerNameRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetPeerNameRequest) Reset() { *m = GetPeerNameRequest{} } -func (m *GetPeerNameRequest) String() string { return proto.CompactTextString(m) } -func (*GetPeerNameRequest) ProtoMessage() {} -func (*GetPeerNameRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{8} -} -func (m *GetPeerNameRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetPeerNameRequest.Unmarshal(m, b) -} -func (m *GetPeerNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetPeerNameRequest.Marshal(b, m, deterministic) -} -func (dst *GetPeerNameRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetPeerNameRequest.Merge(dst, src) -} -func (m *GetPeerNameRequest) XXX_Size() int { - return xxx_messageInfo_GetPeerNameRequest.Size(m) -} -func (m *GetPeerNameRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetPeerNameRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetPeerNameRequest proto.InternalMessageInfo - -func (m *GetPeerNameRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -type GetPeerNameReply struct { - PeerIp *AddressPort `protobuf:"bytes,2,opt,name=peer_ip,json=peerIp" json:"peer_ip,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetPeerNameReply) Reset() { *m = GetPeerNameReply{} } -func (m *GetPeerNameReply) String() string { return proto.CompactTextString(m) } -func (*GetPeerNameReply) ProtoMessage() {} -func (*GetPeerNameReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{9} -} -func (m 
*GetPeerNameReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetPeerNameReply.Unmarshal(m, b) -} -func (m *GetPeerNameReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetPeerNameReply.Marshal(b, m, deterministic) -} -func (dst *GetPeerNameReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetPeerNameReply.Merge(dst, src) -} -func (m *GetPeerNameReply) XXX_Size() int { - return xxx_messageInfo_GetPeerNameReply.Size(m) -} -func (m *GetPeerNameReply) XXX_DiscardUnknown() { - xxx_messageInfo_GetPeerNameReply.DiscardUnknown(m) -} - -var xxx_messageInfo_GetPeerNameReply proto.InternalMessageInfo - -func (m *GetPeerNameReply) GetPeerIp() *AddressPort { - if m != nil { - return m.PeerIp - } - return nil -} - -type SocketOption struct { - Level *SocketOption_SocketOptionLevel `protobuf:"varint,1,req,name=level,enum=appengine.SocketOption_SocketOptionLevel" json:"level,omitempty"` - Option *SocketOption_SocketOptionName `protobuf:"varint,2,req,name=option,enum=appengine.SocketOption_SocketOptionName" json:"option,omitempty"` - Value []byte `protobuf:"bytes,3,req,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SocketOption) Reset() { *m = SocketOption{} } -func (m *SocketOption) String() string { return proto.CompactTextString(m) } -func (*SocketOption) ProtoMessage() {} -func (*SocketOption) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{10} -} -func (m *SocketOption) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SocketOption.Unmarshal(m, b) -} -func (m *SocketOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SocketOption.Marshal(b, m, deterministic) -} -func (dst *SocketOption) XXX_Merge(src proto.Message) { - xxx_messageInfo_SocketOption.Merge(dst, src) -} -func (m *SocketOption) XXX_Size() int { - return xxx_messageInfo_SocketOption.Size(m) -} -func (m *SocketOption) XXX_DiscardUnknown() { - xxx_messageInfo_SocketOption.DiscardUnknown(m) -} - -var xxx_messageInfo_SocketOption proto.InternalMessageInfo - -func (m *SocketOption) GetLevel() SocketOption_SocketOptionLevel { - if m != nil && m.Level != nil { - return *m.Level - } - return SocketOption_SOCKET_SOL_IP -} - -func (m *SocketOption) GetOption() SocketOption_SocketOptionName { - if m != nil && m.Option != nil { - return *m.Option - } - return SocketOption_SOCKET_SO_DEBUG -} - -func (m *SocketOption) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -type SetSocketOptionsRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SetSocketOptionsRequest) Reset() { *m = SetSocketOptionsRequest{} } -func (m *SetSocketOptionsRequest) String() string { return proto.CompactTextString(m) } -func (*SetSocketOptionsRequest) ProtoMessage() {} -func (*SetSocketOptionsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{11} -} -func (m *SetSocketOptionsRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetSocketOptionsRequest.Unmarshal(m, b) -} -func (m *SetSocketOptionsRequest) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetSocketOptionsRequest.Marshal(b, m, deterministic) -} -func (dst *SetSocketOptionsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetSocketOptionsRequest.Merge(dst, src) -} -func (m *SetSocketOptionsRequest) XXX_Size() int { - return xxx_messageInfo_SetSocketOptionsRequest.Size(m) -} -func (m *SetSocketOptionsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SetSocketOptionsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SetSocketOptionsRequest proto.InternalMessageInfo - -func (m *SetSocketOptionsRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *SetSocketOptionsRequest) GetOptions() []*SocketOption { - if m != nil { - return m.Options - } - return nil -} - -type SetSocketOptionsReply struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SetSocketOptionsReply) Reset() { *m = SetSocketOptionsReply{} } -func (m *SetSocketOptionsReply) String() string { return proto.CompactTextString(m) } -func (*SetSocketOptionsReply) ProtoMessage() {} -func (*SetSocketOptionsReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{12} -} -func (m *SetSocketOptionsReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetSocketOptionsReply.Unmarshal(m, b) -} -func (m *SetSocketOptionsReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetSocketOptionsReply.Marshal(b, m, deterministic) -} -func (dst *SetSocketOptionsReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetSocketOptionsReply.Merge(dst, src) -} -func (m *SetSocketOptionsReply) XXX_Size() int { - return xxx_messageInfo_SetSocketOptionsReply.Size(m) -} -func (m *SetSocketOptionsReply) XXX_DiscardUnknown() { - xxx_messageInfo_SetSocketOptionsReply.DiscardUnknown(m) -} - -var xxx_messageInfo_SetSocketOptionsReply proto.InternalMessageInfo - -type GetSocketOptionsRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetSocketOptionsRequest) Reset() { *m = GetSocketOptionsRequest{} } -func (m *GetSocketOptionsRequest) String() string { return proto.CompactTextString(m) } -func (*GetSocketOptionsRequest) ProtoMessage() {} -func (*GetSocketOptionsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{13} -} -func (m *GetSocketOptionsRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetSocketOptionsRequest.Unmarshal(m, b) -} -func (m *GetSocketOptionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetSocketOptionsRequest.Marshal(b, m, deterministic) -} -func (dst *GetSocketOptionsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetSocketOptionsRequest.Merge(dst, src) -} -func (m *GetSocketOptionsRequest) XXX_Size() int { - return xxx_messageInfo_GetSocketOptionsRequest.Size(m) -} -func (m *GetSocketOptionsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetSocketOptionsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetSocketOptionsRequest 
proto.InternalMessageInfo - -func (m *GetSocketOptionsRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *GetSocketOptionsRequest) GetOptions() []*SocketOption { - if m != nil { - return m.Options - } - return nil -} - -type GetSocketOptionsReply struct { - Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetSocketOptionsReply) Reset() { *m = GetSocketOptionsReply{} } -func (m *GetSocketOptionsReply) String() string { return proto.CompactTextString(m) } -func (*GetSocketOptionsReply) ProtoMessage() {} -func (*GetSocketOptionsReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{14} -} -func (m *GetSocketOptionsReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetSocketOptionsReply.Unmarshal(m, b) -} -func (m *GetSocketOptionsReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetSocketOptionsReply.Marshal(b, m, deterministic) -} -func (dst *GetSocketOptionsReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetSocketOptionsReply.Merge(dst, src) -} -func (m *GetSocketOptionsReply) XXX_Size() int { - return xxx_messageInfo_GetSocketOptionsReply.Size(m) -} -func (m *GetSocketOptionsReply) XXX_DiscardUnknown() { - xxx_messageInfo_GetSocketOptionsReply.DiscardUnknown(m) -} - -var xxx_messageInfo_GetSocketOptionsReply proto.InternalMessageInfo - -func (m *GetSocketOptionsReply) GetOptions() []*SocketOption { - if m != nil { - return m.Options - } - return nil -} - -type ConnectRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - RemoteIp *AddressPort `protobuf:"bytes,2,req,name=remote_ip,json=remoteIp" json:"remote_ip,omitempty"` - TimeoutSeconds *float64 `protobuf:"fixed64,3,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ConnectRequest) Reset() { *m = ConnectRequest{} } -func (m *ConnectRequest) String() string { return proto.CompactTextString(m) } -func (*ConnectRequest) ProtoMessage() {} -func (*ConnectRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{15} -} -func (m *ConnectRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ConnectRequest.Unmarshal(m, b) -} -func (m *ConnectRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ConnectRequest.Marshal(b, m, deterministic) -} -func (dst *ConnectRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConnectRequest.Merge(dst, src) -} -func (m *ConnectRequest) XXX_Size() int { - return xxx_messageInfo_ConnectRequest.Size(m) -} -func (m *ConnectRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ConnectRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ConnectRequest proto.InternalMessageInfo - -const Default_ConnectRequest_TimeoutSeconds float64 = -1 - -func (m *ConnectRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *ConnectRequest) GetRemoteIp() *AddressPort { - if m != nil { - return m.RemoteIp - } - return nil -} - -func (m 
*ConnectRequest) GetTimeoutSeconds() float64 { - if m != nil && m.TimeoutSeconds != nil { - return *m.TimeoutSeconds - } - return Default_ConnectRequest_TimeoutSeconds -} - -type ConnectReply struct { - ProxyExternalIp *AddressPort `protobuf:"bytes,1,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ConnectReply) Reset() { *m = ConnectReply{} } -func (m *ConnectReply) String() string { return proto.CompactTextString(m) } -func (*ConnectReply) ProtoMessage() {} -func (*ConnectReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{16} -} - -var extRange_ConnectReply = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*ConnectReply) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_ConnectReply -} -func (m *ConnectReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ConnectReply.Unmarshal(m, b) -} -func (m *ConnectReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ConnectReply.Marshal(b, m, deterministic) -} -func (dst *ConnectReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConnectReply.Merge(dst, src) -} -func (m *ConnectReply) XXX_Size() int { - return xxx_messageInfo_ConnectReply.Size(m) -} -func (m *ConnectReply) XXX_DiscardUnknown() { - xxx_messageInfo_ConnectReply.DiscardUnknown(m) -} - -var xxx_messageInfo_ConnectReply proto.InternalMessageInfo - -func (m *ConnectReply) GetProxyExternalIp() *AddressPort { - if m != nil { - return m.ProxyExternalIp - } - return nil -} - -type ListenRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - Backlog *int32 `protobuf:"varint,2,req,name=backlog" json:"backlog,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListenRequest) Reset() { *m = ListenRequest{} } -func (m *ListenRequest) String() string { return proto.CompactTextString(m) } -func (*ListenRequest) ProtoMessage() {} -func (*ListenRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{17} -} -func (m *ListenRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListenRequest.Unmarshal(m, b) -} -func (m *ListenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListenRequest.Marshal(b, m, deterministic) -} -func (dst *ListenRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListenRequest.Merge(dst, src) -} -func (m *ListenRequest) XXX_Size() int { - return xxx_messageInfo_ListenRequest.Size(m) -} -func (m *ListenRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListenRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ListenRequest proto.InternalMessageInfo - -func (m *ListenRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *ListenRequest) GetBacklog() int32 { - if m != nil && m.Backlog != nil { - return *m.Backlog - } - return 0 -} - -type ListenReply struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListenReply) Reset() { *m = ListenReply{} } -func (m *ListenReply) String() string { 
return proto.CompactTextString(m) } -func (*ListenReply) ProtoMessage() {} -func (*ListenReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{18} -} -func (m *ListenReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListenReply.Unmarshal(m, b) -} -func (m *ListenReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListenReply.Marshal(b, m, deterministic) -} -func (dst *ListenReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListenReply.Merge(dst, src) -} -func (m *ListenReply) XXX_Size() int { - return xxx_messageInfo_ListenReply.Size(m) -} -func (m *ListenReply) XXX_DiscardUnknown() { - xxx_messageInfo_ListenReply.DiscardUnknown(m) -} - -var xxx_messageInfo_ListenReply proto.InternalMessageInfo - -type AcceptRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - TimeoutSeconds *float64 `protobuf:"fixed64,2,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AcceptRequest) Reset() { *m = AcceptRequest{} } -func (m *AcceptRequest) String() string { return proto.CompactTextString(m) } -func (*AcceptRequest) ProtoMessage() {} -func (*AcceptRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{19} -} -func (m *AcceptRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AcceptRequest.Unmarshal(m, b) -} -func (m *AcceptRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AcceptRequest.Marshal(b, m, deterministic) -} -func (dst *AcceptRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AcceptRequest.Merge(dst, src) -} -func (m *AcceptRequest) XXX_Size() int { - return xxx_messageInfo_AcceptRequest.Size(m) -} -func (m *AcceptRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AcceptRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_AcceptRequest proto.InternalMessageInfo - -const Default_AcceptRequest_TimeoutSeconds float64 = -1 - -func (m *AcceptRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *AcceptRequest) GetTimeoutSeconds() float64 { - if m != nil && m.TimeoutSeconds != nil { - return *m.TimeoutSeconds - } - return Default_AcceptRequest_TimeoutSeconds -} - -type AcceptReply struct { - NewSocketDescriptor []byte `protobuf:"bytes,2,opt,name=new_socket_descriptor,json=newSocketDescriptor" json:"new_socket_descriptor,omitempty"` - RemoteAddress *AddressPort `protobuf:"bytes,3,opt,name=remote_address,json=remoteAddress" json:"remote_address,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AcceptReply) Reset() { *m = AcceptReply{} } -func (m *AcceptReply) String() string { return proto.CompactTextString(m) } -func (*AcceptReply) ProtoMessage() {} -func (*AcceptReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{20} -} -func (m *AcceptReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AcceptReply.Unmarshal(m, b) -} -func (m *AcceptReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AcceptReply.Marshal(b, m, deterministic) -} -func (dst *AcceptReply) 
XXX_Merge(src proto.Message) { - xxx_messageInfo_AcceptReply.Merge(dst, src) -} -func (m *AcceptReply) XXX_Size() int { - return xxx_messageInfo_AcceptReply.Size(m) -} -func (m *AcceptReply) XXX_DiscardUnknown() { - xxx_messageInfo_AcceptReply.DiscardUnknown(m) -} - -var xxx_messageInfo_AcceptReply proto.InternalMessageInfo - -func (m *AcceptReply) GetNewSocketDescriptor() []byte { - if m != nil { - return m.NewSocketDescriptor - } - return nil -} - -func (m *AcceptReply) GetRemoteAddress() *AddressPort { - if m != nil { - return m.RemoteAddress - } - return nil -} - -type ShutDownRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - How *ShutDownRequest_How `protobuf:"varint,2,req,name=how,enum=appengine.ShutDownRequest_How" json:"how,omitempty"` - SendOffset *int64 `protobuf:"varint,3,req,name=send_offset,json=sendOffset" json:"send_offset,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ShutDownRequest) Reset() { *m = ShutDownRequest{} } -func (m *ShutDownRequest) String() string { return proto.CompactTextString(m) } -func (*ShutDownRequest) ProtoMessage() {} -func (*ShutDownRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{21} -} -func (m *ShutDownRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ShutDownRequest.Unmarshal(m, b) -} -func (m *ShutDownRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ShutDownRequest.Marshal(b, m, deterministic) -} -func (dst *ShutDownRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ShutDownRequest.Merge(dst, src) -} -func (m *ShutDownRequest) XXX_Size() int { - return xxx_messageInfo_ShutDownRequest.Size(m) -} -func (m *ShutDownRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ShutDownRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ShutDownRequest proto.InternalMessageInfo - -func (m *ShutDownRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *ShutDownRequest) GetHow() ShutDownRequest_How { - if m != nil && m.How != nil { - return *m.How - } - return ShutDownRequest_SOCKET_SHUT_RD -} - -func (m *ShutDownRequest) GetSendOffset() int64 { - if m != nil && m.SendOffset != nil { - return *m.SendOffset - } - return 0 -} - -type ShutDownReply struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ShutDownReply) Reset() { *m = ShutDownReply{} } -func (m *ShutDownReply) String() string { return proto.CompactTextString(m) } -func (*ShutDownReply) ProtoMessage() {} -func (*ShutDownReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{22} -} -func (m *ShutDownReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ShutDownReply.Unmarshal(m, b) -} -func (m *ShutDownReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ShutDownReply.Marshal(b, m, deterministic) -} -func (dst *ShutDownReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_ShutDownReply.Merge(dst, src) -} -func (m *ShutDownReply) XXX_Size() int { - return xxx_messageInfo_ShutDownReply.Size(m) -} -func (m *ShutDownReply) XXX_DiscardUnknown() { - xxx_messageInfo_ShutDownReply.DiscardUnknown(m) -} - -var xxx_messageInfo_ShutDownReply 
proto.InternalMessageInfo - -type CloseRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - SendOffset *int64 `protobuf:"varint,2,opt,name=send_offset,json=sendOffset,def=-1" json:"send_offset,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CloseRequest) Reset() { *m = CloseRequest{} } -func (m *CloseRequest) String() string { return proto.CompactTextString(m) } -func (*CloseRequest) ProtoMessage() {} -func (*CloseRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{23} -} -func (m *CloseRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CloseRequest.Unmarshal(m, b) -} -func (m *CloseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CloseRequest.Marshal(b, m, deterministic) -} -func (dst *CloseRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CloseRequest.Merge(dst, src) -} -func (m *CloseRequest) XXX_Size() int { - return xxx_messageInfo_CloseRequest.Size(m) -} -func (m *CloseRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CloseRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_CloseRequest proto.InternalMessageInfo - -const Default_CloseRequest_SendOffset int64 = -1 - -func (m *CloseRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *CloseRequest) GetSendOffset() int64 { - if m != nil && m.SendOffset != nil { - return *m.SendOffset - } - return Default_CloseRequest_SendOffset -} - -type CloseReply struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CloseReply) Reset() { *m = CloseReply{} } -func (m *CloseReply) String() string { return proto.CompactTextString(m) } -func (*CloseReply) ProtoMessage() {} -func (*CloseReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{24} -} -func (m *CloseReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CloseReply.Unmarshal(m, b) -} -func (m *CloseReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CloseReply.Marshal(b, m, deterministic) -} -func (dst *CloseReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_CloseReply.Merge(dst, src) -} -func (m *CloseReply) XXX_Size() int { - return xxx_messageInfo_CloseReply.Size(m) -} -func (m *CloseReply) XXX_DiscardUnknown() { - xxx_messageInfo_CloseReply.DiscardUnknown(m) -} - -var xxx_messageInfo_CloseReply proto.InternalMessageInfo - -type SendRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - Data []byte `protobuf:"bytes,2,req,name=data" json:"data,omitempty"` - StreamOffset *int64 `protobuf:"varint,3,req,name=stream_offset,json=streamOffset" json:"stream_offset,omitempty"` - Flags *int32 `protobuf:"varint,4,opt,name=flags,def=0" json:"flags,omitempty"` - SendTo *AddressPort `protobuf:"bytes,5,opt,name=send_to,json=sendTo" json:"send_to,omitempty"` - TimeoutSeconds *float64 `protobuf:"fixed64,6,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SendRequest) Reset() { *m 
= SendRequest{} } -func (m *SendRequest) String() string { return proto.CompactTextString(m) } -func (*SendRequest) ProtoMessage() {} -func (*SendRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{25} -} -func (m *SendRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SendRequest.Unmarshal(m, b) -} -func (m *SendRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SendRequest.Marshal(b, m, deterministic) -} -func (dst *SendRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SendRequest.Merge(dst, src) -} -func (m *SendRequest) XXX_Size() int { - return xxx_messageInfo_SendRequest.Size(m) -} -func (m *SendRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SendRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SendRequest proto.InternalMessageInfo - -const Default_SendRequest_Flags int32 = 0 -const Default_SendRequest_TimeoutSeconds float64 = -1 - -func (m *SendRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *SendRequest) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -func (m *SendRequest) GetStreamOffset() int64 { - if m != nil && m.StreamOffset != nil { - return *m.StreamOffset - } - return 0 -} - -func (m *SendRequest) GetFlags() int32 { - if m != nil && m.Flags != nil { - return *m.Flags - } - return Default_SendRequest_Flags -} - -func (m *SendRequest) GetSendTo() *AddressPort { - if m != nil { - return m.SendTo - } - return nil -} - -func (m *SendRequest) GetTimeoutSeconds() float64 { - if m != nil && m.TimeoutSeconds != nil { - return *m.TimeoutSeconds - } - return Default_SendRequest_TimeoutSeconds -} - -type SendReply struct { - DataSent *int32 `protobuf:"varint,1,opt,name=data_sent,json=dataSent" json:"data_sent,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SendReply) Reset() { *m = SendReply{} } -func (m *SendReply) String() string { return proto.CompactTextString(m) } -func (*SendReply) ProtoMessage() {} -func (*SendReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{26} -} -func (m *SendReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SendReply.Unmarshal(m, b) -} -func (m *SendReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SendReply.Marshal(b, m, deterministic) -} -func (dst *SendReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_SendReply.Merge(dst, src) -} -func (m *SendReply) XXX_Size() int { - return xxx_messageInfo_SendReply.Size(m) -} -func (m *SendReply) XXX_DiscardUnknown() { - xxx_messageInfo_SendReply.DiscardUnknown(m) -} - -var xxx_messageInfo_SendReply proto.InternalMessageInfo - -func (m *SendReply) GetDataSent() int32 { - if m != nil && m.DataSent != nil { - return *m.DataSent - } - return 0 -} - -type ReceiveRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - DataSize *int32 `protobuf:"varint,2,req,name=data_size,json=dataSize" json:"data_size,omitempty"` - Flags *int32 `protobuf:"varint,3,opt,name=flags,def=0" json:"flags,omitempty"` - TimeoutSeconds *float64 `protobuf:"fixed64,5,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized 
[]byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ReceiveRequest) Reset() { *m = ReceiveRequest{} } -func (m *ReceiveRequest) String() string { return proto.CompactTextString(m) } -func (*ReceiveRequest) ProtoMessage() {} -func (*ReceiveRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{27} -} -func (m *ReceiveRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReceiveRequest.Unmarshal(m, b) -} -func (m *ReceiveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReceiveRequest.Marshal(b, m, deterministic) -} -func (dst *ReceiveRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReceiveRequest.Merge(dst, src) -} -func (m *ReceiveRequest) XXX_Size() int { - return xxx_messageInfo_ReceiveRequest.Size(m) -} -func (m *ReceiveRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ReceiveRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ReceiveRequest proto.InternalMessageInfo - -const Default_ReceiveRequest_Flags int32 = 0 -const Default_ReceiveRequest_TimeoutSeconds float64 = -1 - -func (m *ReceiveRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *ReceiveRequest) GetDataSize() int32 { - if m != nil && m.DataSize != nil { - return *m.DataSize - } - return 0 -} - -func (m *ReceiveRequest) GetFlags() int32 { - if m != nil && m.Flags != nil { - return *m.Flags - } - return Default_ReceiveRequest_Flags -} - -func (m *ReceiveRequest) GetTimeoutSeconds() float64 { - if m != nil && m.TimeoutSeconds != nil { - return *m.TimeoutSeconds - } - return Default_ReceiveRequest_TimeoutSeconds -} - -type ReceiveReply struct { - StreamOffset *int64 `protobuf:"varint,2,opt,name=stream_offset,json=streamOffset" json:"stream_offset,omitempty"` - Data []byte `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"` - ReceivedFrom *AddressPort `protobuf:"bytes,4,opt,name=received_from,json=receivedFrom" json:"received_from,omitempty"` - BufferSize *int32 `protobuf:"varint,5,opt,name=buffer_size,json=bufferSize" json:"buffer_size,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ReceiveReply) Reset() { *m = ReceiveReply{} } -func (m *ReceiveReply) String() string { return proto.CompactTextString(m) } -func (*ReceiveReply) ProtoMessage() {} -func (*ReceiveReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{28} -} -func (m *ReceiveReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReceiveReply.Unmarshal(m, b) -} -func (m *ReceiveReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReceiveReply.Marshal(b, m, deterministic) -} -func (dst *ReceiveReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReceiveReply.Merge(dst, src) -} -func (m *ReceiveReply) XXX_Size() int { - return xxx_messageInfo_ReceiveReply.Size(m) -} -func (m *ReceiveReply) XXX_DiscardUnknown() { - xxx_messageInfo_ReceiveReply.DiscardUnknown(m) -} - -var xxx_messageInfo_ReceiveReply proto.InternalMessageInfo - -func (m *ReceiveReply) GetStreamOffset() int64 { - if m != nil && m.StreamOffset != nil { - return *m.StreamOffset - } - return 0 -} - -func (m *ReceiveReply) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -func (m *ReceiveReply) GetReceivedFrom() *AddressPort { - if m != nil { - return m.ReceivedFrom - } - return nil 
-} - -func (m *ReceiveReply) GetBufferSize() int32 { - if m != nil && m.BufferSize != nil { - return *m.BufferSize - } - return 0 -} - -type PollEvent struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - RequestedEvents *int32 `protobuf:"varint,2,req,name=requested_events,json=requestedEvents" json:"requested_events,omitempty"` - ObservedEvents *int32 `protobuf:"varint,3,req,name=observed_events,json=observedEvents" json:"observed_events,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PollEvent) Reset() { *m = PollEvent{} } -func (m *PollEvent) String() string { return proto.CompactTextString(m) } -func (*PollEvent) ProtoMessage() {} -func (*PollEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{29} -} -func (m *PollEvent) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PollEvent.Unmarshal(m, b) -} -func (m *PollEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PollEvent.Marshal(b, m, deterministic) -} -func (dst *PollEvent) XXX_Merge(src proto.Message) { - xxx_messageInfo_PollEvent.Merge(dst, src) -} -func (m *PollEvent) XXX_Size() int { - return xxx_messageInfo_PollEvent.Size(m) -} -func (m *PollEvent) XXX_DiscardUnknown() { - xxx_messageInfo_PollEvent.DiscardUnknown(m) -} - -var xxx_messageInfo_PollEvent proto.InternalMessageInfo - -func (m *PollEvent) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *PollEvent) GetRequestedEvents() int32 { - if m != nil && m.RequestedEvents != nil { - return *m.RequestedEvents - } - return 0 -} - -func (m *PollEvent) GetObservedEvents() int32 { - if m != nil && m.ObservedEvents != nil { - return *m.ObservedEvents - } - return 0 -} - -type PollRequest struct { - Events []*PollEvent `protobuf:"bytes,1,rep,name=events" json:"events,omitempty"` - TimeoutSeconds *float64 `protobuf:"fixed64,2,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PollRequest) Reset() { *m = PollRequest{} } -func (m *PollRequest) String() string { return proto.CompactTextString(m) } -func (*PollRequest) ProtoMessage() {} -func (*PollRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{30} -} -func (m *PollRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PollRequest.Unmarshal(m, b) -} -func (m *PollRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PollRequest.Marshal(b, m, deterministic) -} -func (dst *PollRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_PollRequest.Merge(dst, src) -} -func (m *PollRequest) XXX_Size() int { - return xxx_messageInfo_PollRequest.Size(m) -} -func (m *PollRequest) XXX_DiscardUnknown() { - xxx_messageInfo_PollRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_PollRequest proto.InternalMessageInfo - -const Default_PollRequest_TimeoutSeconds float64 = -1 - -func (m *PollRequest) GetEvents() []*PollEvent { - if m != nil { - return m.Events - } - return nil -} - -func (m *PollRequest) GetTimeoutSeconds() float64 { - if m != nil && m.TimeoutSeconds != nil { - return *m.TimeoutSeconds - } - return 
Default_PollRequest_TimeoutSeconds -} - -type PollReply struct { - Events []*PollEvent `protobuf:"bytes,2,rep,name=events" json:"events,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PollReply) Reset() { *m = PollReply{} } -func (m *PollReply) String() string { return proto.CompactTextString(m) } -func (*PollReply) ProtoMessage() {} -func (*PollReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{31} -} -func (m *PollReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PollReply.Unmarshal(m, b) -} -func (m *PollReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PollReply.Marshal(b, m, deterministic) -} -func (dst *PollReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_PollReply.Merge(dst, src) -} -func (m *PollReply) XXX_Size() int { - return xxx_messageInfo_PollReply.Size(m) -} -func (m *PollReply) XXX_DiscardUnknown() { - xxx_messageInfo_PollReply.DiscardUnknown(m) -} - -var xxx_messageInfo_PollReply proto.InternalMessageInfo - -func (m *PollReply) GetEvents() []*PollEvent { - if m != nil { - return m.Events - } - return nil -} - -type ResolveRequest struct { - Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` - AddressFamilies []CreateSocketRequest_SocketFamily `protobuf:"varint,2,rep,name=address_families,json=addressFamilies,enum=appengine.CreateSocketRequest_SocketFamily" json:"address_families,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResolveRequest) Reset() { *m = ResolveRequest{} } -func (m *ResolveRequest) String() string { return proto.CompactTextString(m) } -func (*ResolveRequest) ProtoMessage() {} -func (*ResolveRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{32} -} -func (m *ResolveRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ResolveRequest.Unmarshal(m, b) -} -func (m *ResolveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ResolveRequest.Marshal(b, m, deterministic) -} -func (dst *ResolveRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResolveRequest.Merge(dst, src) -} -func (m *ResolveRequest) XXX_Size() int { - return xxx_messageInfo_ResolveRequest.Size(m) -} -func (m *ResolveRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ResolveRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ResolveRequest proto.InternalMessageInfo - -func (m *ResolveRequest) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *ResolveRequest) GetAddressFamilies() []CreateSocketRequest_SocketFamily { - if m != nil { - return m.AddressFamilies - } - return nil -} - -type ResolveReply struct { - PackedAddress [][]byte `protobuf:"bytes,2,rep,name=packed_address,json=packedAddress" json:"packed_address,omitempty"` - CanonicalName *string `protobuf:"bytes,3,opt,name=canonical_name,json=canonicalName" json:"canonical_name,omitempty"` - Aliases []string `protobuf:"bytes,4,rep,name=aliases" json:"aliases,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResolveReply) Reset() { *m = ResolveReply{} } -func (m *ResolveReply) String() string { return proto.CompactTextString(m) } -func (*ResolveReply) ProtoMessage() {} -func 
(*ResolveReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{33} -} -func (m *ResolveReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ResolveReply.Unmarshal(m, b) -} -func (m *ResolveReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ResolveReply.Marshal(b, m, deterministic) -} -func (dst *ResolveReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResolveReply.Merge(dst, src) -} -func (m *ResolveReply) XXX_Size() int { - return xxx_messageInfo_ResolveReply.Size(m) -} -func (m *ResolveReply) XXX_DiscardUnknown() { - xxx_messageInfo_ResolveReply.DiscardUnknown(m) -} - -var xxx_messageInfo_ResolveReply proto.InternalMessageInfo - -func (m *ResolveReply) GetPackedAddress() [][]byte { - if m != nil { - return m.PackedAddress - } - return nil -} - -func (m *ResolveReply) GetCanonicalName() string { - if m != nil && m.CanonicalName != nil { - return *m.CanonicalName - } - return "" -} - -func (m *ResolveReply) GetAliases() []string { - if m != nil { - return m.Aliases - } - return nil -} - -func init() { - proto.RegisterType((*RemoteSocketServiceError)(nil), "appengine.RemoteSocketServiceError") - proto.RegisterType((*AddressPort)(nil), "appengine.AddressPort") - proto.RegisterType((*CreateSocketRequest)(nil), "appengine.CreateSocketRequest") - proto.RegisterType((*CreateSocketReply)(nil), "appengine.CreateSocketReply") - proto.RegisterType((*BindRequest)(nil), "appengine.BindRequest") - proto.RegisterType((*BindReply)(nil), "appengine.BindReply") - proto.RegisterType((*GetSocketNameRequest)(nil), "appengine.GetSocketNameRequest") - proto.RegisterType((*GetSocketNameReply)(nil), "appengine.GetSocketNameReply") - proto.RegisterType((*GetPeerNameRequest)(nil), "appengine.GetPeerNameRequest") - proto.RegisterType((*GetPeerNameReply)(nil), "appengine.GetPeerNameReply") - proto.RegisterType((*SocketOption)(nil), "appengine.SocketOption") - proto.RegisterType((*SetSocketOptionsRequest)(nil), "appengine.SetSocketOptionsRequest") - proto.RegisterType((*SetSocketOptionsReply)(nil), "appengine.SetSocketOptionsReply") - proto.RegisterType((*GetSocketOptionsRequest)(nil), "appengine.GetSocketOptionsRequest") - proto.RegisterType((*GetSocketOptionsReply)(nil), "appengine.GetSocketOptionsReply") - proto.RegisterType((*ConnectRequest)(nil), "appengine.ConnectRequest") - proto.RegisterType((*ConnectReply)(nil), "appengine.ConnectReply") - proto.RegisterType((*ListenRequest)(nil), "appengine.ListenRequest") - proto.RegisterType((*ListenReply)(nil), "appengine.ListenReply") - proto.RegisterType((*AcceptRequest)(nil), "appengine.AcceptRequest") - proto.RegisterType((*AcceptReply)(nil), "appengine.AcceptReply") - proto.RegisterType((*ShutDownRequest)(nil), "appengine.ShutDownRequest") - proto.RegisterType((*ShutDownReply)(nil), "appengine.ShutDownReply") - proto.RegisterType((*CloseRequest)(nil), "appengine.CloseRequest") - proto.RegisterType((*CloseReply)(nil), "appengine.CloseReply") - proto.RegisterType((*SendRequest)(nil), "appengine.SendRequest") - proto.RegisterType((*SendReply)(nil), "appengine.SendReply") - proto.RegisterType((*ReceiveRequest)(nil), "appengine.ReceiveRequest") - proto.RegisterType((*ReceiveReply)(nil), "appengine.ReceiveReply") - proto.RegisterType((*PollEvent)(nil), "appengine.PollEvent") - proto.RegisterType((*PollRequest)(nil), "appengine.PollRequest") - proto.RegisterType((*PollReply)(nil), "appengine.PollReply") - proto.RegisterType((*ResolveRequest)(nil), 
"appengine.ResolveRequest") - proto.RegisterType((*ResolveReply)(nil), "appengine.ResolveReply") -} - -func init() { - proto.RegisterFile("google.golang.org/appengine/internal/socket/socket_service.proto", fileDescriptor_socket_service_b5f8f233dc327808) -} - -var fileDescriptor_socket_service_b5f8f233dc327808 = []byte{ - // 3088 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0x5f, 0x77, 0xe3, 0xc6, - 0x75, 0x37, 0x48, 0xfd, 0xe3, 0x90, 0x94, 0xee, 0x62, 0xa5, 0x5d, 0x25, 0x6e, 0x12, 0x05, 0x8e, - 0x1b, 0x25, 0x8e, 0x77, 0x6d, 0x39, 0x4d, 0x9b, 0xa4, 0x49, 0x16, 0x04, 0x86, 0x24, 0x4c, 0x00, - 0x03, 0xcd, 0x0c, 0x25, 0xd1, 0x6d, 0x8a, 0xd0, 0x22, 0xa4, 0x65, 0x4c, 0x11, 0x0c, 0xc9, 0xdd, - 0xf5, 0xba, 0x69, 0xaa, 0xfe, 0x39, 0xfd, 0x12, 0x7d, 0xe8, 0x73, 0x3f, 0x43, 0x4f, 0x4f, 0x5f, - 0xfa, 0xec, 0xc7, 0x7e, 0x84, 0x9e, 0xbe, 0xb4, 0x9f, 0xa1, 0x67, 0x06, 0xe0, 0x60, 0xc8, 0xd5, - 0xae, 0x77, 0x75, 0x72, 0x4e, 0x9e, 0xa4, 0xfb, 0xbb, 0x77, 0xee, 0xff, 0x99, 0xb9, 0x03, 0xa2, - 0x47, 0x97, 0x69, 0x7a, 0x39, 0x4a, 0x1e, 0x5c, 0xa6, 0xa3, 0xfe, 0xf8, 0xf2, 0x41, 0x3a, 0xbd, - 0x7c, 0xd8, 0x9f, 0x4c, 0x92, 0xf1, 0xe5, 0x70, 0x9c, 0x3c, 0x1c, 0x8e, 0xe7, 0xc9, 0x74, 0xdc, - 0x1f, 0x3d, 0x9c, 0xa5, 0xe7, 0x9f, 0x25, 0xf3, 0xfc, 0x4f, 0x3c, 0x4b, 0xa6, 0x4f, 0x87, 0xe7, - 0xc9, 0x83, 0xc9, 0x34, 0x9d, 0xa7, 0x66, 0x45, 0xc9, 0x5b, 0xff, 0xbc, 0x8b, 0xf6, 0x69, 0x72, - 0x95, 0xce, 0x13, 0x26, 0x25, 0x59, 0x26, 0x88, 0xa7, 0xd3, 0x74, 0x6a, 0x7e, 0x07, 0xd5, 0x66, - 0xcf, 0x67, 0xf3, 0xe4, 0x2a, 0x4e, 0x04, 0xbd, 0x6f, 0x1c, 0x18, 0x87, 0xeb, 0x3f, 0x31, 0x3e, - 0xa0, 0xd5, 0x0c, 0xce, 0xa4, 0xbe, 0x8d, 0x6a, 0x92, 0x1d, 0x0f, 0x92, 0x79, 0x7f, 0x38, 0xda, - 0x2f, 0x1d, 0x18, 0x87, 0x15, 0x5a, 0x95, 0x98, 0x2b, 0x21, 0xeb, 0x73, 0x54, 0x91, 0xb2, 0x4e, - 0x3a, 0x48, 0x4c, 0x40, 0x35, 0xd6, 0x63, 0x1c, 0x07, 0x31, 0xa6, 0x94, 0x50, 0x30, 0xcc, 0x3a, - 0xaa, 0xb4, 0x6c, 0x2f, 0x27, 0x4b, 0x66, 0x15, 0x6d, 0x36, 0x6d, 0xcf, 0xef, 0x52, 0x0c, 0x6b, - 0xe6, 0x1e, 0xba, 0x13, 0x61, 0x1a, 0x78, 0x8c, 0x79, 0x24, 0x8c, 0x5d, 0x1c, 0x7a, 0xd8, 0x85, - 0x75, 0xf3, 0x2e, 0xda, 0xf1, 0xc2, 0x13, 0xdb, 0xf7, 0xdc, 0x98, 0xe2, 0xe3, 0x2e, 0x66, 0x1c, - 0x36, 0xcc, 0x3b, 0xa8, 0xce, 0x88, 0xd3, 0xc1, 0x3c, 0x76, 0x7c, 0xc2, 0xb0, 0x0b, 0x9b, 0xd6, - 0xbf, 0x99, 0xa8, 0xca, 0x34, 0x67, 0x77, 0x50, 0x95, 0xf5, 0x58, 0xcc, 0xba, 0x8e, 0x83, 0x19, - 0x83, 0xb7, 0x84, 0x6d, 0x01, 0x60, 0x61, 0x04, 0x0c, 0x73, 0x1b, 0x21, 0x49, 0x86, 0x04, 0x87, - 0x1c, 0x4a, 0x8a, 0xcd, 0xa8, 0xd3, 0x86, 0xb2, 0x22, 0xbd, 0x90, 0x53, 0x58, 0x13, 0x9e, 0x66, - 0x24, 0x81, 0x75, 0xc5, 0x0b, 0xcf, 0x3c, 0x02, 0x1b, 0x8a, 0x3c, 0x6a, 0x78, 0x2d, 0xd8, 0x5c, - 0x18, 0x16, 0x8a, 0xcf, 0xb0, 0x03, 0x5b, 0x8a, 0xdf, 0xb0, 0xdd, 0x26, 0x54, 0x94, 0x61, 0xa7, - 0xed, 0xf9, 0x2e, 0x20, 0x45, 0xdb, 0x2d, 0xdb, 0x0b, 0xa1, 0x2a, 0x02, 0x96, 0xf4, 0x29, 0xe9, - 0xfa, 0x6e, 0xc3, 0x27, 0x4e, 0x07, 0xaa, 0x9a, 0xb7, 0x01, 0x0e, 0xa0, 0x56, 0x2c, 0x12, 0xd1, - 0x41, 0x5d, 0xd1, 0x4d, 0xbb, 0xeb, 0x73, 0xd8, 0xd6, 0x9c, 0xe0, 0x0d, 0xbf, 0x03, 0x3b, 0x85, - 0x13, 0x5d, 0xd6, 0x03, 0x50, 0xf2, 0xf8, 0xcc, 0x63, 0x1c, 0xee, 0x28, 0xf6, 0x99, 0x8b, 0x4f, - 0xc0, 0xd4, 0xcc, 0x09, 0xfa, 0xae, 0xae, 0xce, 0xf5, 0x28, 0xec, 0x2a, 0x01, 0x8f, 0x09, 0x7a, - 0xaf, 0xa0, 0x45, 0xa9, 0xe0, 0x5e, 0xa1, 0xa0, 0xe9, 0xf9, 0x18, 0xee, 0x2b, 0x3a, 0x90, 0xf4, - 0xbe, 0x66, 0x80, 0xf3, 0x1e, 0x7c, 0x4d, 0x19, 0xe0, 0x67, 0xbc, 0xc1, 0x7a, 0xf0, 0x75, 0xe5, - 0x50, 0x53, 0x24, 0xf5, 0x6d, 0x4d, 0x9e, 
0x45, 0x0e, 0xfc, 0x91, 0xa2, 0x59, 0xe4, 0x45, 0x18, - 0xbe, 0xa1, 0xc4, 0x29, 0x69, 0x32, 0xf8, 0x66, 0x61, 0xce, 0xf7, 0xc2, 0x0e, 0x7c, 0xab, 0xa8, - 0xbd, 0x90, 0x3e, 0x30, 0x6b, 0x68, 0x4b, 0x92, 0x2e, 0x09, 0xe0, 0xdb, 0x4a, 0x98, 0xda, 0x61, - 0x0b, 0x83, 0xa5, 0x7c, 0x71, 0xb1, 0xed, 0xfa, 0x1d, 0x78, 0x47, 0x76, 0x9b, 0x02, 0x44, 0x3d, - 0xde, 0x31, 0x77, 0x11, 0x64, 0xfe, 0xd8, 0x01, 0xe6, 0x84, 0xf8, 0x24, 0x6c, 0xc1, 0x77, 0x34, - 0x2f, 0x7d, 0xa7, 0x03, 0xef, 0xea, 0x5e, 0xf7, 0x18, 0xfc, 0xb1, 0x52, 0x14, 0x12, 0x8e, 0x83, - 0x88, 0xf7, 0xe0, 0xbb, 0xca, 0x33, 0x9f, 0x90, 0x08, 0x0e, 0xf5, 0x3a, 0xb3, 0x16, 0x7c, 0xbf, - 0x68, 0x43, 0x97, 0x06, 0xf0, 0x9e, 0xd6, 0x3b, 0x34, 0x6c, 0xc1, 0x0f, 0xf2, 0x1d, 0x16, 0x63, - 0xff, 0x28, 0x64, 0xbd, 0xd0, 0x81, 0xf7, 0x95, 0x84, 0xff, 0x51, 0xdb, 0xe7, 0xf0, 0x40, 0xa3, - 0x29, 0xe3, 0xf0, 0xb0, 0xa0, 0x43, 0xa1, 0xe1, 0x03, 0x15, 0x6c, 0x37, 0xb4, 0xb9, 0xd3, 0x86, - 0x0f, 0x35, 0x0f, 0x1c, 0xe6, 0xc1, 0x51, 0xb1, 0xe0, 0x48, 0x28, 0xfc, 0x48, 0xef, 0x66, 0x0c, - 0x3f, 0xd4, 0x49, 0x0a, 0x7f, 0xa2, 0xa4, 0xcf, 0x9a, 0x5d, 0xdf, 0x87, 0x1f, 0x69, 0xda, 0xec, - 0x90, 0xc0, 0x9f, 0x2a, 0x73, 0x42, 0xfc, 0xd8, 0x81, 0x3f, 0xd3, 0x01, 0xe6, 0x73, 0xf8, 0xb1, - 0x5a, 0xd1, 0x68, 0x92, 0x90, 0xc3, 0x4f, 0xf5, 0x1c, 0x72, 0x0a, 0x7f, 0xae, 0xb5, 0xa2, 0x6b, - 0x73, 0x1b, 0x7e, 0xa6, 0x3c, 0xe0, 0x5e, 0x80, 0xe1, 0xe7, 0xc5, 0xe6, 0x24, 0x8c, 0xc2, 0x2f, - 0xb4, 0xe5, 0x21, 0xe6, 0xf0, 0x48, 0xa3, 0xa3, 0x4e, 0x0b, 0x6c, 0xa5, 0x8e, 0xe2, 0x80, 0x70, - 0x0c, 0x0d, 0x4d, 0xbf, 0xec, 0x1d, 0x47, 0x35, 0x8b, 0xed, 0x9e, 0x80, 0x5b, 0x34, 0x1e, 0x0d, - 0x42, 0x0e, 0x58, 0x99, 0x73, 0x48, 0x10, 0x40, 0x53, 0xb1, 0x23, 0x4a, 0x38, 0x81, 0x96, 0xaa, - 0x78, 0xd0, 0xf5, 0xb9, 0xd7, 0x26, 0x11, 0xb4, 0x8b, 0xf6, 0x22, 0xdc, 0x25, 0x1c, 0x3c, 0x3d, - 0x05, 0xa2, 0xe8, 0x1f, 0xab, 0x45, 0xe4, 0x04, 0xd3, 0xa6, 0x4f, 0x4e, 0xa1, 0xa3, 0x0a, 0x1d, - 0x12, 0xde, 0x0d, 0xbd, 0x63, 0xf0, 0x8b, 0x3c, 0xd9, 0x6e, 0xd3, 0x85, 0x40, 0x0f, 0xc4, 0x69, - 0xb7, 0x20, 0x54, 0x80, 0xef, 0x35, 0x6c, 0xc7, 0x01, 0xa2, 0x03, 0x0d, 0xdb, 0x85, 0x48, 0x07, - 0x98, 0x13, 0xc2, 0xb1, 0x0e, 0x04, 0xf6, 0x19, 0xd0, 0xa2, 0xbf, 0xbc, 0x86, 0x3c, 0xcc, 0x58, - 0xb1, 0xd1, 0x7d, 0x86, 0x8f, 0x81, 0x2b, 0x09, 0x8a, 0x19, 0xb7, 0x29, 0x87, 0xae, 0x42, 0x18, - 0xa7, 0x72, 0xbb, 0x9d, 0xa8, 0x35, 0x5d, 0x86, 0x29, 0x83, 0x53, 0x3d, 0x18, 0x71, 0x8a, 0xc3, - 0x99, 0xda, 0x4e, 0xae, 0xd0, 0xe2, 0xba, 0x94, 0xe2, 0x63, 0xe8, 0x29, 0xb9, 0x80, 0xb5, 0x98, - 0xf7, 0x09, 0x86, 0x4f, 0x4c, 0x13, 0x6d, 0x17, 0xe9, 0xe5, 0xbd, 0x08, 0xc3, 0x5f, 0xa8, 0xf3, - 0x32, 0x24, 0x12, 0x25, 0x11, 0x87, 0xbf, 0x34, 0xef, 0xa3, 0xbb, 0x85, 0x60, 0x48, 0x58, 0x37, - 0x8a, 0x08, 0xe5, 0xf0, 0x4b, 0xc5, 0x10, 0x86, 0x79, 0xc1, 0xf8, 0x2b, 0xa5, 0x9a, 0x44, 0xc2, - 0xad, 0x6e, 0x14, 0x41, 0xac, 0x1f, 0x7b, 0xac, 0x2b, 0x80, 0x85, 0x9f, 0x51, 0xb3, 0x58, 0xfa, - 0x2b, 0x85, 0xda, 0x1a, 0xda, 0x57, 0x0a, 0x45, 0x3c, 0x5e, 0xd8, 0x65, 0x18, 0x3e, 0x15, 0x77, - 0x9c, 0xc2, 0x42, 0xc2, 0xed, 0x13, 0xdb, 0xf3, 0xe1, 0xbc, 0x48, 0x08, 0xe6, 0x2e, 0x39, 0x0d, - 0x61, 0x50, 0x04, 0x85, 0x79, 0x37, 0xa4, 0xd8, 0x76, 0xda, 0x90, 0x14, 0xc7, 0x07, 0xe6, 0x14, - 0x33, 0xcc, 0xe1, 0x42, 0x99, 0x76, 0x48, 0x18, 0xda, 0x0d, 0x42, 0x39, 0x76, 0xe1, 0x52, 0x99, - 0x16, 0x68, 0x26, 0xf9, 0x58, 0x8b, 0xa5, 0xd1, 0x6d, 0x32, 0x18, 0x2a, 0xc0, 0x63, 0x42, 0x0c, - 0x7e, 0xad, 0x97, 0x45, 0x22, 0x9f, 0x29, 0x83, 0xac, 0xdd, 0xcd, 0x1c, 0x1b, 0x29, 0x83, 0x9c, - 0x90, 0xc0, 0x0e, 0x7b, 0x14, 0x37, 0x19, 0x5c, 0x29, 0x41, 0xb1, 
0x07, 0x5d, 0xd2, 0xe5, 0x30, - 0x5e, 0xf2, 0x8c, 0xe2, 0x66, 0x57, 0xdc, 0xd2, 0xa9, 0x12, 0x6c, 0x13, 0x96, 0x69, 0x9c, 0x28, - 0x41, 0x01, 0x2d, 0x62, 0xfd, 0x8d, 0x72, 0xc6, 0xf6, 0x29, 0xb6, 0xdd, 0x1e, 0x4c, 0x55, 0x4a, - 0xbc, 0x30, 0xa2, 0xa4, 0x45, 0xc5, 0xa5, 0x3e, 0x2b, 0xb6, 0x23, 0xb7, 0x7d, 0x0c, 0xf3, 0xe2, - 0x38, 0x73, 0x7c, 0x6c, 0x87, 0xf0, 0x44, 0x2f, 0x61, 0x68, 0x07, 0xf0, 0xb4, 0x00, 0xb2, 0xe4, - 0x3f, 0xd3, 0xae, 0x32, 0x21, 0xf0, 0xb9, 0x72, 0x31, 0x3b, 0x11, 0x3c, 0x02, 0xcf, 0x95, 0x88, - 0x7b, 0xdc, 0x25, 0x1c, 0xbe, 0xd0, 0xce, 0xf1, 0x00, 0xbb, 0x5e, 0x37, 0x80, 0xbf, 0x56, 0xde, - 0x65, 0x80, 0x6c, 0xcd, 0xdf, 0x2a, 0x39, 0xc7, 0x0e, 0x1d, 0xec, 0x63, 0x17, 0xfe, 0x46, 0x3b, - 0x7f, 0x3a, 0xb8, 0x07, 0xbf, 0x53, 0xeb, 0x3a, 0xb8, 0x87, 0xcf, 0x22, 0x8f, 0x62, 0x17, 0xfe, - 0xd6, 0xdc, 0x2d, 0x40, 0x8a, 0x4f, 0x48, 0x07, 0xbb, 0x70, 0x6d, 0x98, 0x7b, 0x79, 0xa2, 0x24, - 0xfa, 0x31, 0x76, 0x44, 0xad, 0xff, 0xce, 0x30, 0xef, 0x2e, 0x1a, 0xf7, 0x34, 0xc4, 0x54, 0x5c, - 0x51, 0xf0, 0xf7, 0x86, 0xb9, 0x9f, 0xb7, 0x79, 0x48, 0x38, 0xc5, 0x8e, 0x38, 0x48, 0xec, 0x86, - 0x8f, 0xe1, 0x1f, 0x0c, 0x13, 0x16, 0xe7, 0x44, 0xb3, 0xe3, 0xf9, 0x3e, 0xfc, 0xa3, 0xf1, 0xf5, - 0x12, 0x18, 0xd6, 0x15, 0xaa, 0xda, 0x83, 0xc1, 0x34, 0x99, 0xcd, 0xa2, 0x74, 0x3a, 0x37, 0x4d, - 0xb4, 0x36, 0x49, 0xa7, 0xf3, 0x7d, 0xe3, 0xa0, 0x74, 0xb8, 0x4e, 0xe5, 0xff, 0xe6, 0xbb, 0x68, - 0x7b, 0xd2, 0x3f, 0xff, 0x2c, 0x19, 0xc4, 0xfd, 0x4c, 0x52, 0xce, 0x7f, 0x35, 0x5a, 0xcf, 0xd0, - 0x7c, 0xb9, 0xf9, 0x0e, 0xaa, 0x3f, 0x4e, 0x67, 0xf3, 0x71, 0xff, 0x2a, 0x89, 0x1f, 0x0f, 0xc7, - 0xf3, 0xfd, 0xb2, 0x9c, 0x12, 0x6b, 0x0b, 0xb0, 0x3d, 0x1c, 0xcf, 0xad, 0x7f, 0x5a, 0x43, 0x77, - 0x9d, 0x69, 0xd2, 0x5f, 0x0c, 0xa3, 0x34, 0xf9, 0xcd, 0x93, 0x64, 0x36, 0x37, 0x1d, 0xb4, 0x71, - 0xd1, 0xbf, 0x1a, 0x8e, 0x9e, 0x4b, 0xcb, 0xdb, 0x47, 0xef, 0x3d, 0x50, 0x03, 0xec, 0x83, 0x1b, - 0xe4, 0x1f, 0x64, 0x54, 0x53, 0x2e, 0xa1, 0xf9, 0x52, 0xd3, 0x43, 0x5b, 0x72, 0xfa, 0x3d, 0x4f, - 0xc5, 0x88, 0x2a, 0xd4, 0xbc, 0xff, 0x5a, 0x6a, 0xa2, 0x7c, 0x11, 0x55, 0xcb, 0xcd, 0x9f, 0xa3, - 0xed, 0x7c, 0xae, 0x4e, 0x27, 0xf3, 0x61, 0x3a, 0x9e, 0xed, 0x97, 0x0f, 0xca, 0x87, 0xd5, 0xa3, - 0xfb, 0x9a, 0xc2, 0x6c, 0x31, 0x91, 0x7c, 0x5a, 0x9f, 0x69, 0xd4, 0xcc, 0x6c, 0xa0, 0x3b, 0x93, - 0x69, 0xfa, 0xf9, 0xf3, 0x38, 0xf9, 0x3c, 0x9b, 0xd6, 0xe3, 0xe1, 0x64, 0x7f, 0xed, 0xc0, 0x38, - 0xac, 0x1e, 0xdd, 0xd3, 0x54, 0x68, 0xa9, 0xa7, 0x3b, 0x72, 0x01, 0xce, 0xe5, 0xbd, 0x89, 0x79, - 0x88, 0xb6, 0x47, 0xc3, 0xd9, 0x3c, 0x19, 0xc7, 0x9f, 0xf6, 0xcf, 0x3f, 0x1b, 0xa5, 0x97, 0xfb, - 0xeb, 0x8b, 0xe9, 0xbc, 0x9e, 0x31, 0x1a, 0x19, 0x6e, 0x7e, 0x84, 0x2a, 0x53, 0x39, 0xe1, 0x0b, - 0x2b, 0x1b, 0xaf, 0xb4, 0xb2, 0x95, 0x09, 0x7a, 0x13, 0x73, 0x0f, 0x6d, 0xf4, 0x27, 0x93, 0x78, - 0x38, 0xd8, 0xaf, 0xc8, 0x42, 0xad, 0xf7, 0x27, 0x13, 0x6f, 0x60, 0x7e, 0x03, 0xa1, 0xc9, 0x34, - 0xfd, 0x75, 0x72, 0x3e, 0x17, 0x2c, 0x74, 0x60, 0x1c, 0x96, 0x69, 0x25, 0x47, 0xbc, 0x81, 0x65, - 0xa1, 0x9a, 0x9e, 0x7b, 0x73, 0x0b, 0xad, 0x79, 0xd1, 0xd3, 0x1f, 0x82, 0x91, 0xff, 0xf7, 0x23, - 0x28, 0x59, 0x16, 0xda, 0x5e, 0x4e, 0xac, 0xb9, 0x89, 0xca, 0xdc, 0x89, 0xc0, 0x10, 0xff, 0x74, - 0xdd, 0x08, 0x4a, 0xd6, 0x97, 0x06, 0xba, 0xb3, 0x5c, 0x91, 0xc9, 0xe8, 0xb9, 0xf9, 0x1e, 0xba, - 0x93, 0xa7, 0x7d, 0x90, 0xcc, 0xce, 0xa7, 0xc3, 0xc9, 0x3c, 0x7f, 0x93, 0x54, 0x28, 0x64, 0x0c, - 0x57, 0xe1, 0xe6, 0xcf, 0xd0, 0xb6, 0x78, 0xf4, 0x24, 0x53, 0xd5, 0x97, 0xe5, 0x57, 0x86, 0x5e, - 0xcf, 0xa4, 0x17, 0xfd, 0xfa, 0x7b, 0x28, 0xd1, 0xf7, 0x2b, 0x5b, 0xff, 0xb3, 0x09, 0xd7, 
0xd7, - 0xd7, 0xd7, 0x25, 0xeb, 0x77, 0xa8, 0xda, 0x18, 0x8e, 0x07, 0x8b, 0x86, 0x7e, 0x49, 0x24, 0xa5, - 0x1b, 0x23, 0xb9, 0xd1, 0x15, 0xd1, 0xc1, 0xaf, 0xef, 0x8a, 0x45, 0x50, 0x25, 0xb3, 0x2f, 0xf2, - 0x78, 0xa3, 0x42, 0xe3, 0x8d, 0x62, 0xb3, 0x1c, 0xb4, 0xdb, 0x4a, 0xe6, 0x59, 0x75, 0xc2, 0xfe, - 0x55, 0x72, 0x9b, 0xc8, 0xac, 0x33, 0x64, 0xae, 0x28, 0x79, 0xa9, 0x7b, 0xa5, 0x37, 0x73, 0xcf, - 0x96, 0x9a, 0xa3, 0x24, 0x99, 0xde, 0xda, 0x39, 0x07, 0xc1, 0x92, 0x0a, 0xe1, 0xda, 0x43, 0xb4, - 0x39, 0x49, 0x92, 0xe9, 0x57, 0x3b, 0xb4, 0x21, 0xc4, 0xbc, 0x89, 0xf5, 0xe5, 0xe6, 0x62, 0x47, - 0x64, 0x7b, 0xdf, 0xfc, 0x05, 0x5a, 0x1f, 0x25, 0x4f, 0x93, 0x51, 0x7e, 0x92, 0x7d, 0xef, 0x25, - 0x27, 0xc6, 0x12, 0xe1, 0x8b, 0x05, 0x34, 0x5b, 0x67, 0x3e, 0x42, 0x1b, 0xd9, 0xa1, 0x93, 0x1f, - 0x62, 0x87, 0xaf, 0xa3, 0x41, 0x46, 0x90, 0xaf, 0x33, 0x77, 0xd1, 0xfa, 0xd3, 0xfe, 0xe8, 0x49, - 0xb2, 0x5f, 0x3e, 0x28, 0x1d, 0xd6, 0x68, 0x46, 0x58, 0x09, 0xba, 0xf3, 0x82, 0x4d, 0xed, 0x41, - 0xcd, 0x88, 0x1f, 0x7b, 0x11, 0xbc, 0x25, 0x67, 0x95, 0x02, 0xca, 0xfe, 0x05, 0x43, 0xce, 0x16, - 0x05, 0x2c, 0xb6, 0xf3, 0xc6, 0x0a, 0x26, 0x76, 0xf6, 0x1d, 0xeb, 0xdf, 0xd7, 0x11, 0xac, 0x7a, - 0x26, 0x6f, 0xbb, 0x85, 0x60, 0xec, 0xe2, 0x46, 0xb7, 0x05, 0x86, 0x1c, 0xc9, 0x14, 0x48, 0xc5, - 0x94, 0x28, 0xc6, 0x23, 0x28, 0x2d, 0xa9, 0x8d, 0xe5, 0x95, 0x5a, 0x5e, 0xd6, 0x90, 0x7d, 0x47, - 0x58, 0x5b, 0xd6, 0xe0, 0x92, 0x90, 0x53, 0xd2, 0xe5, 0x18, 0xd6, 0x97, 0x19, 0x0d, 0x4a, 0x6c, - 0xd7, 0xb1, 0xe5, 0x07, 0x04, 0x31, 0x74, 0x28, 0x06, 0x0b, 0xdd, 0x46, 0xb7, 0x09, 0x9b, 0xcb, - 0x28, 0x75, 0x4e, 0x04, 0xba, 0xb5, 0xac, 0xa4, 0x83, 0x71, 0x64, 0xfb, 0xde, 0x09, 0x86, 0xca, - 0x32, 0x83, 0x90, 0x86, 0x17, 0xfa, 0x5e, 0x88, 0x01, 0x2d, 0xeb, 0xf1, 0xbd, 0xb0, 0x85, 0x29, - 0xd4, 0xcd, 0x7b, 0xc8, 0x5c, 0xd2, 0x2e, 0x86, 0x25, 0x02, 0xbb, 0xcb, 0x38, 0x0b, 0xdd, 0x0c, - 0xdf, 0xd3, 0x6a, 0xe2, 0x45, 0x31, 0x27, 0x0c, 0x8c, 0x15, 0x88, 0xfb, 0x50, 0xd2, 0xca, 0xe4, - 0x45, 0x71, 0x5b, 0x8c, 0x9a, 0x8e, 0x0f, 0xe5, 0x65, 0x98, 0x44, 0xdc, 0x23, 0x21, 0x83, 0x35, - 0xcd, 0x16, 0x77, 0xa2, 0x58, 0x3c, 0xef, 0x7d, 0xbb, 0x07, 0x86, 0x26, 0x2e, 0xf0, 0xc0, 0x3e, - 0x63, 0xb8, 0x05, 0x25, 0x2d, 0xdb, 0x02, 0x76, 0x08, 0xed, 0x40, 0x59, 0x0b, 0x5b, 0x80, 0x22, - 0x21, 0x9e, 0xeb, 0x63, 0x58, 0x33, 0xf7, 0xd1, 0xee, 0x2a, 0x23, 0xe4, 0x27, 0x3e, 0xac, 0xaf, - 0x98, 0x15, 0x1c, 0x27, 0x14, 0x65, 0x58, 0x36, 0x2b, 0x9e, 0xb0, 0x21, 0x87, 0xcd, 0x15, 0xf1, - 0x2c, 0x81, 0x47, 0xb0, 0x65, 0xbe, 0x8d, 0xee, 0x6b, 0xb8, 0x8b, 0x9b, 0x98, 0xc6, 0xb6, 0xe3, - 0xe0, 0x88, 0x43, 0x65, 0x85, 0x79, 0xea, 0x85, 0x2e, 0x39, 0x8d, 0x1d, 0xdf, 0x0e, 0x22, 0x40, - 0x2b, 0x81, 0x78, 0x61, 0x93, 0x40, 0x75, 0x25, 0x90, 0xe3, 0xae, 0xe7, 0x74, 0x6c, 0xa7, 0x03, - 0x35, 0x39, 0x11, 0x3d, 0x47, 0xf7, 0xd9, 0xe2, 0xc8, 0xca, 0xaf, 0xf3, 0x5b, 0x1d, 0xea, 0x1f, - 0xa2, 0xcd, 0xc5, 0xec, 0x50, 0x7a, 0xf5, 0xec, 0xb0, 0x90, 0xb3, 0xee, 0xa3, 0xbd, 0x17, 0x4d, - 0x4f, 0x46, 0xcf, 0x85, 0x4f, 0xad, 0x3f, 0x90, 0x4f, 0x1f, 0xa3, 0xbd, 0xd6, 0x4d, 0x3e, 0xdd, - 0x46, 0xd7, 0xbf, 0x18, 0x68, 0xdb, 0x49, 0xc7, 0xe3, 0xe4, 0x7c, 0x7e, 0x2b, 0xf7, 0x97, 0xe6, - 0x9c, 0x57, 0xdf, 0x8f, 0xc5, 0x9c, 0xf3, 0x1e, 0xda, 0x99, 0x0f, 0xaf, 0x92, 0xf4, 0xc9, 0x3c, - 0x9e, 0x25, 0xe7, 0xe9, 0x78, 0x90, 0xcd, 0x09, 0xc6, 0x4f, 0x4a, 0xef, 0x7f, 0x48, 0xb7, 0x73, - 0x16, 0xcb, 0x38, 0xd6, 0x2f, 0x51, 0x4d, 0x39, 0xf8, 0x7b, 0xba, 0x48, 0xf5, 0x21, 0xe1, 0x04, - 0xd5, 0x7d, 0x39, 0xb9, 0xdd, 0x2a, 0xfc, 0x7d, 0xb4, 0xb9, 0x98, 0x04, 0x4b, 0x72, 0x3e, 0x5f, - 0x90, 0x56, 0x1d, 
0x55, 0x17, 0x7a, 0x45, 0xbb, 0x0c, 0x51, 0xdd, 0x3e, 0x3f, 0x4f, 0x26, 0xb7, - 0xcb, 0xf2, 0x0d, 0x09, 0x2b, 0xbd, 0x34, 0x61, 0xd7, 0x06, 0xaa, 0x2e, 0x6c, 0x89, 0x84, 0x1d, - 0xa1, 0xbd, 0x71, 0xf2, 0x2c, 0x7e, 0xd1, 0x5a, 0xf6, 0x66, 0xb8, 0x3b, 0x4e, 0x9e, 0xb1, 0x1b, - 0x06, 0xb9, 0xbc, 0xac, 0xaf, 0x39, 0xc8, 0x65, 0xd2, 0x39, 0x64, 0xfd, 0x97, 0x81, 0x76, 0xd8, - 0xe3, 0x27, 0x73, 0x37, 0x7d, 0x76, 0xbb, 0xbc, 0x7e, 0x80, 0xca, 0x8f, 0xd3, 0x67, 0xf9, 0x6d, - 0xfb, 0x4d, 0xbd, 0x8b, 0x97, 0xb5, 0x3e, 0x68, 0xa7, 0xcf, 0xa8, 0x10, 0x35, 0xbf, 0x85, 0xaa, - 0xb3, 0x64, 0x3c, 0x88, 0xd3, 0x8b, 0x8b, 0x59, 0x32, 0x97, 0xd7, 0x6c, 0x99, 0x22, 0x01, 0x11, - 0x89, 0x58, 0x0e, 0x2a, 0xb7, 0xd3, 0x67, 0xfa, 0x45, 0xd6, 0xee, 0xf2, 0x98, 0xba, 0xcb, 0xf7, - 0xa8, 0xc0, 0x4e, 0xc5, 0x85, 0xa7, 0xdd, 0x1b, 0x99, 0xdc, 0x29, 0x85, 0xb2, 0xb5, 0x83, 0xea, - 0x85, 0x07, 0xa2, 0xae, 0xbf, 0x42, 0x35, 0x67, 0x94, 0xce, 0x6e, 0x35, 0xed, 0x98, 0xef, 0x2c, - 0xfb, 0x2c, 0xea, 0x51, 0x96, 0x25, 0xd5, 0xfd, 0xae, 0x21, 0x94, 0x5b, 0x10, 0xf6, 0xfe, 0xcf, - 0x40, 0x55, 0x96, 0xdc, 0x72, 0xa8, 0xbd, 0x87, 0xd6, 0x06, 0xfd, 0x79, 0x5f, 0xa6, 0xb5, 0xd6, - 0x28, 0x6d, 0x19, 0x54, 0xd2, 0xe2, 0x9d, 0x38, 0x9b, 0x4f, 0x93, 0xfe, 0xd5, 0x72, 0xf6, 0x6a, - 0x19, 0x98, 0xf9, 0x61, 0xde, 0x47, 0xeb, 0x17, 0xa3, 0xfe, 0xe5, 0x4c, 0x0e, 0xe4, 0xf2, 0xc9, - 0x93, 0xd1, 0x62, 0x3e, 0x93, 0x51, 0xcc, 0x53, 0xf9, 0x1a, 0x7a, 0xc5, 0x7c, 0x26, 0xc4, 0x78, - 0x7a, 0x53, 0x37, 0x6f, 0xbc, 0xb4, 0x9b, 0x0f, 0x51, 0x25, 0x8b, 0x57, 0xb4, 0xf2, 0xdb, 0xa8, - 0x22, 0x1c, 0x8e, 0x67, 0xc9, 0x78, 0x9e, 0xfd, 0x30, 0x42, 0xb7, 0x04, 0xc0, 0x92, 0xf1, 0xdc, - 0xfa, 0x4f, 0x03, 0x6d, 0xd3, 0xe4, 0x3c, 0x19, 0x3e, 0xbd, 0x5d, 0x35, 0x94, 0xf2, 0xe1, 0x17, - 0x49, 0xbe, 0x9b, 0x33, 0xe5, 0xc3, 0x2f, 0x92, 0x22, 0xfa, 0xf2, 0x4a, 0xf4, 0x37, 0x04, 0xb3, - 0xfe, 0xd2, 0x60, 0x2c, 0xb4, 0xde, 0x94, 0xab, 0xaa, 0x68, 0x33, 0x60, 0x2d, 0x31, 0xa8, 0x80, - 0x61, 0xd6, 0xd0, 0x96, 0x20, 0x22, 0x8c, 0x3b, 0x50, 0xb2, 0xfe, 0xd5, 0x40, 0x35, 0x15, 0x86, - 0x08, 0xfa, 0x85, 0xea, 0xc8, 0x3e, 0x59, 0xa9, 0xce, 0xa2, 0xb4, 0xc2, 0x3d, 0xbd, 0xb4, 0x3f, - 0x45, 0xf5, 0x69, 0xa6, 0x6c, 0x10, 0x5f, 0x4c, 0xd3, 0xab, 0xaf, 0x78, 0x4e, 0xd5, 0x16, 0xc2, - 0xcd, 0x69, 0x7a, 0x25, 0xf6, 0xd4, 0xa7, 0x4f, 0x2e, 0x2e, 0x92, 0x69, 0x96, 0x13, 0xf9, 0xd6, - 0xa5, 0x28, 0x83, 0x44, 0x56, 0xac, 0x2f, 0xcb, 0xa8, 0x12, 0xa5, 0xa3, 0x11, 0x7e, 0x9a, 0x8c, - 0xdf, 0x30, 0xdb, 0xdf, 0x43, 0x30, 0xcd, 0xaa, 0x94, 0x0c, 0xe2, 0x44, 0xac, 0x9f, 0xe5, 0x49, - 0xdf, 0x51, 0xb8, 0x54, 0x3b, 0x33, 0xbf, 0x8b, 0x76, 0xd2, 0x4f, 0xe5, 0x4b, 0x51, 0x49, 0x96, - 0xa5, 0xe4, 0xf6, 0x02, 0xce, 0x04, 0xad, 0xff, 0x28, 0xa1, 0xba, 0x72, 0x47, 0x24, 0x5a, 0x9b, - 0x35, 0x22, 0xe2, 0xfb, 0x21, 0x09, 0x31, 0xbc, 0xa5, 0x4d, 0x6e, 0x02, 0xf4, 0xc2, 0xa5, 0x13, - 0x40, 0x40, 0x11, 0xf5, 0x96, 0x46, 0x5e, 0x81, 0x91, 0x2e, 0x87, 0xb5, 0x15, 0x0c, 0x53, 0x0a, - 0x5b, 0x2b, 0x58, 0xbb, 0x1b, 0x01, 0xac, 0xda, 0x3d, 0xb1, 0x7d, 0x38, 0xd0, 0x26, 0x2c, 0x01, - 0x52, 0x37, 0x24, 0x34, 0x80, 0x47, 0xe6, 0xbd, 0x15, 0xb8, 0x61, 0x87, 0xf2, 0x1b, 0xd3, 0x32, - 0x7e, 0x4a, 0xa5, 0xf8, 0x75, 0xe9, 0x05, 0x3c, 0x93, 0x5f, 0x93, 0x1f, 0x9f, 0x0a, 0x3c, 0x60, - 0x2d, 0xb8, 0xde, 0x5a, 0x55, 0x8e, 0x03, 0x72, 0x82, 0xe1, 0xfa, 0x40, 0x7e, 0xc0, 0xd2, 0x8d, - 0x0a, 0xb7, 0xaf, 0x1f, 0x59, 0x8f, 0x51, 0x55, 0x24, 0x70, 0xb1, 0x7f, 0x7e, 0x80, 0x36, 0xf2, - 0x84, 0x1b, 0x72, 0x9e, 0xd8, 0xd5, 0xda, 0x46, 0x25, 0x9a, 0xe6, 0x32, 0x6f, 0x76, 0x4b, 0xfd, - 0x38, 0xeb, 0x9c, 0xac, 0xc5, 0x0b, 0x3b, 
0xa5, 0xaf, 0xb6, 0x63, 0xfd, 0x56, 0xec, 0xf3, 0x59, - 0x3a, 0x2a, 0xf6, 0xb9, 0x89, 0xd6, 0xc6, 0xfd, 0xab, 0x24, 0x6f, 0x36, 0xf9, 0xbf, 0x79, 0x82, - 0x20, 0xbf, 0xbb, 0x62, 0xf9, 0x31, 0x6a, 0x98, 0x64, 0xda, 0xdf, 0xf0, 0x4b, 0xd6, 0x4e, 0xae, - 0xa4, 0x99, 0xeb, 0xb0, 0xfe, 0xbb, 0x2c, 0xf6, 0x67, 0x6e, 0x5e, 0x38, 0x7f, 0xd3, 0xc7, 0xb8, - 0xf2, 0x8b, 0x1f, 0xe3, 0xde, 0x45, 0xdb, 0xe7, 0xfd, 0x71, 0x3a, 0x1e, 0x9e, 0xf7, 0x47, 0xb1, - 0xf4, 0x36, 0xfb, 0x1a, 0x57, 0x57, 0xa8, 0x7c, 0x96, 0xed, 0xa3, 0xcd, 0xfe, 0x68, 0xd8, 0x9f, - 0x25, 0xe2, 0xa0, 0x2d, 0x1f, 0x56, 0xe8, 0x82, 0xb4, 0xfe, 0xb7, 0xa4, 0xff, 0xa0, 0xfb, 0x35, - 0xb4, 0x97, 0x17, 0x10, 0xdb, 0x5e, 0x2c, 0x5e, 0x69, 0x4d, 0x3b, 0xf0, 0x7c, 0xf1, 0x80, 0x28, - 0xae, 0x2e, 0xc9, 0x92, 0xbf, 0x65, 0x96, 0xb4, 0x09, 0x5b, 0xa0, 0x0d, 0xdb, 0x6d, 0xfa, 0x76, - 0x8b, 0x2d, 0x3d, 0xe3, 0x04, 0xa3, 0x69, 0x7b, 0x7e, 0xf6, 0x0b, 0xf0, 0x12, 0x28, 0x55, 0xaf, - 0xaf, 0xc0, 0x01, 0x0e, 0x08, 0xed, 0x2d, 0xbd, 0x1d, 0x04, 0x9c, 0xff, 0x1c, 0xb4, 0xf9, 0x02, - 0x1c, 0xda, 0x01, 0x86, 0x2d, 0xed, 0x49, 0x21, 0x60, 0x86, 0xe9, 0x89, 0xe7, 0x2c, 0xbf, 0xe1, - 0x24, 0x4e, 0x9c, 0x8e, 0x7c, 0x68, 0xa2, 0x15, 0x3d, 0xd9, 0xef, 0xd8, 0x4b, 0x6f, 0x86, 0x3c, - 0xa2, 0xb6, 0x17, 0x72, 0x06, 0xb5, 0x15, 0x86, 0xfc, 0xdd, 0xc1, 0x21, 0x3e, 0xd4, 0x57, 0x18, - 0xea, 0x37, 0x9d, 0x6d, 0x6d, 0x0f, 0xcb, 0xb8, 0xec, 0x33, 0xd8, 0x69, 0x6c, 0x7d, 0xb2, 0x91, - 0x9d, 0x5a, 0xff, 0x1f, 0x00, 0x00, 0xff, 0xff, 0x31, 0x03, 0x4e, 0xbd, 0xfd, 0x1f, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/appengine/internal/socket/socket_service.proto b/vendor/google.golang.org/appengine/internal/socket/socket_service.proto deleted file mode 100644 index 2fcc7953..00000000 --- a/vendor/google.golang.org/appengine/internal/socket/socket_service.proto +++ /dev/null @@ -1,460 +0,0 @@ -syntax = "proto2"; -option go_package = "socket"; - -package appengine; - -message RemoteSocketServiceError { - enum ErrorCode { - SYSTEM_ERROR = 1; - GAI_ERROR = 2; - FAILURE = 4; - PERMISSION_DENIED = 5; - INVALID_REQUEST = 6; - SOCKET_CLOSED = 7; - } - - enum SystemError { - option allow_alias = true; - - SYS_SUCCESS = 0; - SYS_EPERM = 1; - SYS_ENOENT = 2; - SYS_ESRCH = 3; - SYS_EINTR = 4; - SYS_EIO = 5; - SYS_ENXIO = 6; - SYS_E2BIG = 7; - SYS_ENOEXEC = 8; - SYS_EBADF = 9; - SYS_ECHILD = 10; - SYS_EAGAIN = 11; - SYS_EWOULDBLOCK = 11; - SYS_ENOMEM = 12; - SYS_EACCES = 13; - SYS_EFAULT = 14; - SYS_ENOTBLK = 15; - SYS_EBUSY = 16; - SYS_EEXIST = 17; - SYS_EXDEV = 18; - SYS_ENODEV = 19; - SYS_ENOTDIR = 20; - SYS_EISDIR = 21; - SYS_EINVAL = 22; - SYS_ENFILE = 23; - SYS_EMFILE = 24; - SYS_ENOTTY = 25; - SYS_ETXTBSY = 26; - SYS_EFBIG = 27; - SYS_ENOSPC = 28; - SYS_ESPIPE = 29; - SYS_EROFS = 30; - SYS_EMLINK = 31; - SYS_EPIPE = 32; - SYS_EDOM = 33; - SYS_ERANGE = 34; - SYS_EDEADLK = 35; - SYS_EDEADLOCK = 35; - SYS_ENAMETOOLONG = 36; - SYS_ENOLCK = 37; - SYS_ENOSYS = 38; - SYS_ENOTEMPTY = 39; - SYS_ELOOP = 40; - SYS_ENOMSG = 42; - SYS_EIDRM = 43; - SYS_ECHRNG = 44; - SYS_EL2NSYNC = 45; - SYS_EL3HLT = 46; - SYS_EL3RST = 47; - SYS_ELNRNG = 48; - SYS_EUNATCH = 49; - SYS_ENOCSI = 50; - SYS_EL2HLT = 51; - SYS_EBADE = 52; - SYS_EBADR = 53; - SYS_EXFULL = 54; - SYS_ENOANO = 55; - SYS_EBADRQC = 56; - SYS_EBADSLT = 57; - SYS_EBFONT = 59; - SYS_ENOSTR = 60; - SYS_ENODATA = 61; - SYS_ETIME = 62; - SYS_ENOSR = 63; - SYS_ENONET = 64; - SYS_ENOPKG = 65; - SYS_EREMOTE = 66; - SYS_ENOLINK = 67; - SYS_EADV = 68; - SYS_ESRMNT = 69; - SYS_ECOMM = 70; - SYS_EPROTO = 71; - SYS_EMULTIHOP = 72; - SYS_EDOTDOT 
= 73; - SYS_EBADMSG = 74; - SYS_EOVERFLOW = 75; - SYS_ENOTUNIQ = 76; - SYS_EBADFD = 77; - SYS_EREMCHG = 78; - SYS_ELIBACC = 79; - SYS_ELIBBAD = 80; - SYS_ELIBSCN = 81; - SYS_ELIBMAX = 82; - SYS_ELIBEXEC = 83; - SYS_EILSEQ = 84; - SYS_ERESTART = 85; - SYS_ESTRPIPE = 86; - SYS_EUSERS = 87; - SYS_ENOTSOCK = 88; - SYS_EDESTADDRREQ = 89; - SYS_EMSGSIZE = 90; - SYS_EPROTOTYPE = 91; - SYS_ENOPROTOOPT = 92; - SYS_EPROTONOSUPPORT = 93; - SYS_ESOCKTNOSUPPORT = 94; - SYS_EOPNOTSUPP = 95; - SYS_ENOTSUP = 95; - SYS_EPFNOSUPPORT = 96; - SYS_EAFNOSUPPORT = 97; - SYS_EADDRINUSE = 98; - SYS_EADDRNOTAVAIL = 99; - SYS_ENETDOWN = 100; - SYS_ENETUNREACH = 101; - SYS_ENETRESET = 102; - SYS_ECONNABORTED = 103; - SYS_ECONNRESET = 104; - SYS_ENOBUFS = 105; - SYS_EISCONN = 106; - SYS_ENOTCONN = 107; - SYS_ESHUTDOWN = 108; - SYS_ETOOMANYREFS = 109; - SYS_ETIMEDOUT = 110; - SYS_ECONNREFUSED = 111; - SYS_EHOSTDOWN = 112; - SYS_EHOSTUNREACH = 113; - SYS_EALREADY = 114; - SYS_EINPROGRESS = 115; - SYS_ESTALE = 116; - SYS_EUCLEAN = 117; - SYS_ENOTNAM = 118; - SYS_ENAVAIL = 119; - SYS_EISNAM = 120; - SYS_EREMOTEIO = 121; - SYS_EDQUOT = 122; - SYS_ENOMEDIUM = 123; - SYS_EMEDIUMTYPE = 124; - SYS_ECANCELED = 125; - SYS_ENOKEY = 126; - SYS_EKEYEXPIRED = 127; - SYS_EKEYREVOKED = 128; - SYS_EKEYREJECTED = 129; - SYS_EOWNERDEAD = 130; - SYS_ENOTRECOVERABLE = 131; - SYS_ERFKILL = 132; - } - - optional int32 system_error = 1 [default=0]; - optional string error_detail = 2; -} - -message AddressPort { - required int32 port = 1; - optional bytes packed_address = 2; - - optional string hostname_hint = 3; -} - - - -message CreateSocketRequest { - enum SocketFamily { - IPv4 = 1; - IPv6 = 2; - } - - enum SocketProtocol { - TCP = 1; - UDP = 2; - } - - required SocketFamily family = 1; - required SocketProtocol protocol = 2; - - repeated SocketOption socket_options = 3; - - optional AddressPort proxy_external_ip = 4; - - optional int32 listen_backlog = 5 [default=0]; - - optional AddressPort remote_ip = 6; - - optional string app_id = 9; - - optional int64 project_id = 10; -} - -message CreateSocketReply { - optional string socket_descriptor = 1; - - optional AddressPort server_address = 3; - - optional AddressPort proxy_external_ip = 4; - - extensions 1000 to max; -} - - - -message BindRequest { - required string socket_descriptor = 1; - required AddressPort proxy_external_ip = 2; -} - -message BindReply { - optional AddressPort proxy_external_ip = 1; -} - - - -message GetSocketNameRequest { - required string socket_descriptor = 1; -} - -message GetSocketNameReply { - optional AddressPort proxy_external_ip = 2; -} - - - -message GetPeerNameRequest { - required string socket_descriptor = 1; -} - -message GetPeerNameReply { - optional AddressPort peer_ip = 2; -} - - -message SocketOption { - - enum SocketOptionLevel { - SOCKET_SOL_IP = 0; - SOCKET_SOL_SOCKET = 1; - SOCKET_SOL_TCP = 6; - SOCKET_SOL_UDP = 17; - } - - enum SocketOptionName { - option allow_alias = true; - - SOCKET_SO_DEBUG = 1; - SOCKET_SO_REUSEADDR = 2; - SOCKET_SO_TYPE = 3; - SOCKET_SO_ERROR = 4; - SOCKET_SO_DONTROUTE = 5; - SOCKET_SO_BROADCAST = 6; - SOCKET_SO_SNDBUF = 7; - SOCKET_SO_RCVBUF = 8; - SOCKET_SO_KEEPALIVE = 9; - SOCKET_SO_OOBINLINE = 10; - SOCKET_SO_LINGER = 13; - SOCKET_SO_RCVTIMEO = 20; - SOCKET_SO_SNDTIMEO = 21; - - SOCKET_IP_TOS = 1; - SOCKET_IP_TTL = 2; - SOCKET_IP_HDRINCL = 3; - SOCKET_IP_OPTIONS = 4; - - SOCKET_TCP_NODELAY = 1; - SOCKET_TCP_MAXSEG = 2; - SOCKET_TCP_CORK = 3; - SOCKET_TCP_KEEPIDLE = 4; - SOCKET_TCP_KEEPINTVL = 5; - SOCKET_TCP_KEEPCNT = 6; 
- SOCKET_TCP_SYNCNT = 7; - SOCKET_TCP_LINGER2 = 8; - SOCKET_TCP_DEFER_ACCEPT = 9; - SOCKET_TCP_WINDOW_CLAMP = 10; - SOCKET_TCP_INFO = 11; - SOCKET_TCP_QUICKACK = 12; - } - - required SocketOptionLevel level = 1; - required SocketOptionName option = 2; - required bytes value = 3; -} - - -message SetSocketOptionsRequest { - required string socket_descriptor = 1; - repeated SocketOption options = 2; -} - -message SetSocketOptionsReply { -} - -message GetSocketOptionsRequest { - required string socket_descriptor = 1; - repeated SocketOption options = 2; -} - -message GetSocketOptionsReply { - repeated SocketOption options = 2; -} - - -message ConnectRequest { - required string socket_descriptor = 1; - required AddressPort remote_ip = 2; - optional double timeout_seconds = 3 [default=-1]; -} - -message ConnectReply { - optional AddressPort proxy_external_ip = 1; - - extensions 1000 to max; -} - - -message ListenRequest { - required string socket_descriptor = 1; - required int32 backlog = 2; -} - -message ListenReply { -} - - -message AcceptRequest { - required string socket_descriptor = 1; - optional double timeout_seconds = 2 [default=-1]; -} - -message AcceptReply { - optional bytes new_socket_descriptor = 2; - optional AddressPort remote_address = 3; -} - - - -message ShutDownRequest { - enum How { - SOCKET_SHUT_RD = 1; - SOCKET_SHUT_WR = 2; - SOCKET_SHUT_RDWR = 3; - } - required string socket_descriptor = 1; - required How how = 2; - required int64 send_offset = 3; -} - -message ShutDownReply { -} - - - -message CloseRequest { - required string socket_descriptor = 1; - optional int64 send_offset = 2 [default=-1]; -} - -message CloseReply { -} - - - -message SendRequest { - required string socket_descriptor = 1; - required bytes data = 2 [ctype=CORD]; - required int64 stream_offset = 3; - optional int32 flags = 4 [default=0]; - optional AddressPort send_to = 5; - optional double timeout_seconds = 6 [default=-1]; -} - -message SendReply { - optional int32 data_sent = 1; -} - - -message ReceiveRequest { - enum Flags { - MSG_OOB = 1; - MSG_PEEK = 2; - } - required string socket_descriptor = 1; - required int32 data_size = 2; - optional int32 flags = 3 [default=0]; - optional double timeout_seconds = 5 [default=-1]; -} - -message ReceiveReply { - optional int64 stream_offset = 2; - optional bytes data = 3 [ctype=CORD]; - optional AddressPort received_from = 4; - optional int32 buffer_size = 5; -} - - - -message PollEvent { - - enum PollEventFlag { - SOCKET_POLLNONE = 0; - SOCKET_POLLIN = 1; - SOCKET_POLLPRI = 2; - SOCKET_POLLOUT = 4; - SOCKET_POLLERR = 8; - SOCKET_POLLHUP = 16; - SOCKET_POLLNVAL = 32; - SOCKET_POLLRDNORM = 64; - SOCKET_POLLRDBAND = 128; - SOCKET_POLLWRNORM = 256; - SOCKET_POLLWRBAND = 512; - SOCKET_POLLMSG = 1024; - SOCKET_POLLREMOVE = 4096; - SOCKET_POLLRDHUP = 8192; - }; - - required string socket_descriptor = 1; - required int32 requested_events = 2; - required int32 observed_events = 3; -} - -message PollRequest { - repeated PollEvent events = 1; - optional double timeout_seconds = 2 [default=-1]; -} - -message PollReply { - repeated PollEvent events = 2; -} - -message ResolveRequest { - required string name = 1; - repeated CreateSocketRequest.SocketFamily address_families = 2; -} - -message ResolveReply { - enum ErrorCode { - SOCKET_EAI_ADDRFAMILY = 1; - SOCKET_EAI_AGAIN = 2; - SOCKET_EAI_BADFLAGS = 3; - SOCKET_EAI_FAIL = 4; - SOCKET_EAI_FAMILY = 5; - SOCKET_EAI_MEMORY = 6; - SOCKET_EAI_NODATA = 7; - SOCKET_EAI_NONAME = 8; - SOCKET_EAI_SERVICE = 9; - SOCKET_EAI_SOCKTYPE = 
10; - SOCKET_EAI_SYSTEM = 11; - SOCKET_EAI_BADHINTS = 12; - SOCKET_EAI_PROTOCOL = 13; - SOCKET_EAI_OVERFLOW = 14; - SOCKET_EAI_MAX = 15; - }; - - repeated bytes packed_address = 2; - optional string canonical_name = 3; - repeated string aliases = 4; -} diff --git a/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/google.golang.org/appengine/internal/transaction.go index 9006ae65..2ae8ab9f 100644 --- a/vendor/google.golang.org/appengine/internal/transaction.go +++ b/vendor/google.golang.org/appengine/internal/transaction.go @@ -7,11 +7,11 @@ package internal // This file implements hooks for applying datastore transactions. import ( + "context" "errors" "reflect" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" basepb "google.golang.org/appengine/internal/base" pb "google.golang.org/appengine/internal/datastore" @@ -38,13 +38,13 @@ func applyTransaction(pb proto.Message, t *pb.Transaction) { var transactionKey = "used for *Transaction" -func transactionFromContext(ctx netcontext.Context) *transaction { +func transactionFromContext(ctx context.Context) *transaction { t, _ := ctx.Value(&transactionKey).(*transaction) return t } -func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context { - return netcontext.WithValue(ctx, &transactionKey, t) +func withTransaction(ctx context.Context, t *transaction) context.Context { + return context.WithValue(ctx, &transactionKey, t) } type transaction struct { @@ -54,7 +54,7 @@ type transaction struct { var ErrConcurrentTransaction = errors.New("internal: concurrent transaction") -func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) { +func RunTransactionOnce(c context.Context, f func(context.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) { if transactionFromContext(c) != nil { return nil, errors.New("nested transactions are not supported") } diff --git a/vendor/google.golang.org/appengine/namespace.go b/vendor/google.golang.org/appengine/namespace.go index 21860ca0..6f169be4 100644 --- a/vendor/google.golang.org/appengine/namespace.go +++ b/vendor/google.golang.org/appengine/namespace.go @@ -5,11 +5,10 @@ package appengine import ( + "context" "fmt" "regexp" - "golang.org/x/net/context" - "google.golang.org/appengine/internal" ) diff --git a/vendor/google.golang.org/appengine/socket/doc.go b/vendor/google.golang.org/appengine/socket/doc.go deleted file mode 100644 index 3de46df8..00000000 --- a/vendor/google.golang.org/appengine/socket/doc.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2012 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// Package socket provides outbound network sockets. -// -// This package is only required in the classic App Engine environment. -// Applications running only in App Engine "flexible environment" should -// use the standard library's net package. -package socket diff --git a/vendor/google.golang.org/appengine/socket/socket_classic.go b/vendor/google.golang.org/appengine/socket/socket_classic.go deleted file mode 100644 index 0ad50e2d..00000000 --- a/vendor/google.golang.org/appengine/socket/socket_classic.go +++ /dev/null @@ -1,290 +0,0 @@ -// Copyright 2012 Google Inc. All rights reserved. 
-// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build appengine - -package socket - -import ( - "fmt" - "io" - "net" - "strconv" - "time" - - "github.com/golang/protobuf/proto" - "golang.org/x/net/context" - "google.golang.org/appengine/internal" - - pb "google.golang.org/appengine/internal/socket" -) - -// Dial connects to the address addr on the network protocol. -// The address format is host:port, where host may be a hostname or an IP address. -// Known protocols are "tcp" and "udp". -// The returned connection satisfies net.Conn, and is valid while ctx is valid; -// if the connection is to be used after ctx becomes invalid, invoke SetContext -// with the new context. -func Dial(ctx context.Context, protocol, addr string) (*Conn, error) { - return DialTimeout(ctx, protocol, addr, 0) -} - -var ipFamilies = []pb.CreateSocketRequest_SocketFamily{ - pb.CreateSocketRequest_IPv4, - pb.CreateSocketRequest_IPv6, -} - -// DialTimeout is like Dial but takes a timeout. -// The timeout includes name resolution, if required. -func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) { - dialCtx := ctx // Used for dialing and name resolution, but not stored in the *Conn. - if timeout > 0 { - var cancel context.CancelFunc - dialCtx, cancel = context.WithTimeout(ctx, timeout) - defer cancel() - } - - host, portStr, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - port, err := strconv.Atoi(portStr) - if err != nil { - return nil, fmt.Errorf("socket: bad port %q: %v", portStr, err) - } - - var prot pb.CreateSocketRequest_SocketProtocol - switch protocol { - case "tcp": - prot = pb.CreateSocketRequest_TCP - case "udp": - prot = pb.CreateSocketRequest_UDP - default: - return nil, fmt.Errorf("socket: unknown protocol %q", protocol) - } - - packedAddrs, resolved, err := resolve(dialCtx, ipFamilies, host) - if err != nil { - return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err) - } - if len(packedAddrs) == 0 { - return nil, fmt.Errorf("no addresses for %q", host) - } - - packedAddr := packedAddrs[0] // use first address - fam := pb.CreateSocketRequest_IPv4 - if len(packedAddr) == net.IPv6len { - fam = pb.CreateSocketRequest_IPv6 - } - - req := &pb.CreateSocketRequest{ - Family: fam.Enum(), - Protocol: prot.Enum(), - RemoteIp: &pb.AddressPort{ - Port: proto.Int32(int32(port)), - PackedAddress: packedAddr, - }, - } - if resolved { - req.RemoteIp.HostnameHint = &host - } - res := &pb.CreateSocketReply{} - if err := internal.Call(dialCtx, "remote_socket", "CreateSocket", req, res); err != nil { - return nil, err - } - - return &Conn{ - ctx: ctx, - desc: res.GetSocketDescriptor(), - prot: prot, - local: res.ProxyExternalIp, - remote: req.RemoteIp, - }, nil -} - -// LookupIP returns the given host's IP addresses. -func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) { - packedAddrs, _, err := resolve(ctx, ipFamilies, host) - if err != nil { - return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err) - } - addrs = make([]net.IP, len(packedAddrs)) - for i, pa := range packedAddrs { - addrs[i] = net.IP(pa) - } - return addrs, nil -} - -func resolve(ctx context.Context, fams []pb.CreateSocketRequest_SocketFamily, host string) ([][]byte, bool, error) { - // Check if it's an IP address. 
- if ip := net.ParseIP(host); ip != nil { - if ip := ip.To4(); ip != nil { - return [][]byte{ip}, false, nil - } - return [][]byte{ip}, false, nil - } - - req := &pb.ResolveRequest{ - Name: &host, - AddressFamilies: fams, - } - res := &pb.ResolveReply{} - if err := internal.Call(ctx, "remote_socket", "Resolve", req, res); err != nil { - // XXX: need to map to pb.ResolveReply_ErrorCode? - return nil, false, err - } - return res.PackedAddress, true, nil -} - -// withDeadline is like context.WithDeadline, except it ignores the zero deadline. -func withDeadline(parent context.Context, deadline time.Time) (context.Context, context.CancelFunc) { - if deadline.IsZero() { - return parent, func() {} - } - return context.WithDeadline(parent, deadline) -} - -// Conn represents a socket connection. -// It implements net.Conn. -type Conn struct { - ctx context.Context - desc string - offset int64 - - prot pb.CreateSocketRequest_SocketProtocol - local, remote *pb.AddressPort - - readDeadline, writeDeadline time.Time // optional -} - -// SetContext sets the context that is used by this Conn. -// It is usually used only when using a Conn that was created in a different context, -// such as when a connection is created during a warmup request but used while -// servicing a user request. -func (cn *Conn) SetContext(ctx context.Context) { - cn.ctx = ctx -} - -func (cn *Conn) Read(b []byte) (n int, err error) { - const maxRead = 1 << 20 - if len(b) > maxRead { - b = b[:maxRead] - } - - req := &pb.ReceiveRequest{ - SocketDescriptor: &cn.desc, - DataSize: proto.Int32(int32(len(b))), - } - res := &pb.ReceiveReply{} - if !cn.readDeadline.IsZero() { - req.TimeoutSeconds = proto.Float64(cn.readDeadline.Sub(time.Now()).Seconds()) - } - ctx, cancel := withDeadline(cn.ctx, cn.readDeadline) - defer cancel() - if err := internal.Call(ctx, "remote_socket", "Receive", req, res); err != nil { - return 0, err - } - if len(res.Data) == 0 { - return 0, io.EOF - } - if len(res.Data) > len(b) { - return 0, fmt.Errorf("socket: internal error: read too much data: %d > %d", len(res.Data), len(b)) - } - return copy(b, res.Data), nil -} - -func (cn *Conn) Write(b []byte) (n int, err error) { - const lim = 1 << 20 // max per chunk - - for n < len(b) { - chunk := b[n:] - if len(chunk) > lim { - chunk = chunk[:lim] - } - - req := &pb.SendRequest{ - SocketDescriptor: &cn.desc, - Data: chunk, - StreamOffset: &cn.offset, - } - res := &pb.SendReply{} - if !cn.writeDeadline.IsZero() { - req.TimeoutSeconds = proto.Float64(cn.writeDeadline.Sub(time.Now()).Seconds()) - } - ctx, cancel := withDeadline(cn.ctx, cn.writeDeadline) - defer cancel() - if err = internal.Call(ctx, "remote_socket", "Send", req, res); err != nil { - // assume zero bytes were sent in this RPC - break - } - n += int(res.GetDataSent()) - cn.offset += int64(res.GetDataSent()) - } - - return -} - -func (cn *Conn) Close() error { - req := &pb.CloseRequest{ - SocketDescriptor: &cn.desc, - } - res := &pb.CloseReply{} - if err := internal.Call(cn.ctx, "remote_socket", "Close", req, res); err != nil { - return err - } - cn.desc = "CLOSED" - return nil -} - -func addr(prot pb.CreateSocketRequest_SocketProtocol, ap *pb.AddressPort) net.Addr { - if ap == nil { - return nil - } - switch prot { - case pb.CreateSocketRequest_TCP: - return &net.TCPAddr{ - IP: net.IP(ap.PackedAddress), - Port: int(*ap.Port), - } - case pb.CreateSocketRequest_UDP: - return &net.UDPAddr{ - IP: net.IP(ap.PackedAddress), - Port: int(*ap.Port), - } - } - panic("unknown protocol " + prot.String()) -} - -func 
(cn *Conn) LocalAddr() net.Addr { return addr(cn.prot, cn.local) } -func (cn *Conn) RemoteAddr() net.Addr { return addr(cn.prot, cn.remote) } - -func (cn *Conn) SetDeadline(t time.Time) error { - cn.readDeadline = t - cn.writeDeadline = t - return nil -} - -func (cn *Conn) SetReadDeadline(t time.Time) error { - cn.readDeadline = t - return nil -} - -func (cn *Conn) SetWriteDeadline(t time.Time) error { - cn.writeDeadline = t - return nil -} - -// KeepAlive signals that the connection is still in use. -// It may be called to prevent the socket being closed due to inactivity. -func (cn *Conn) KeepAlive() error { - req := &pb.GetSocketNameRequest{ - SocketDescriptor: &cn.desc, - } - res := &pb.GetSocketNameReply{} - return internal.Call(cn.ctx, "remote_socket", "GetSocketName", req, res) -} - -func init() { - internal.RegisterErrorCodeMap("remote_socket", pb.RemoteSocketServiceError_ErrorCode_name) -} diff --git a/vendor/google.golang.org/appengine/socket/socket_vm.go b/vendor/google.golang.org/appengine/socket/socket_vm.go deleted file mode 100644 index c804169a..00000000 --- a/vendor/google.golang.org/appengine/socket/socket_vm.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build !appengine - -package socket - -import ( - "net" - "time" - - "golang.org/x/net/context" -) - -// Dial connects to the address addr on the network protocol. -// The address format is host:port, where host may be a hostname or an IP address. -// Known protocols are "tcp" and "udp". -// The returned connection satisfies net.Conn, and is valid while ctx is valid; -// if the connection is to be used after ctx becomes invalid, invoke SetContext -// with the new context. -func Dial(ctx context.Context, protocol, addr string) (*Conn, error) { - conn, err := net.Dial(protocol, addr) - if err != nil { - return nil, err - } - return &Conn{conn}, nil -} - -// DialTimeout is like Dial but takes a timeout. -// The timeout includes name resolution, if required. -func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) { - conn, err := net.DialTimeout(protocol, addr, timeout) - if err != nil { - return nil, err - } - return &Conn{conn}, nil -} - -// LookupIP returns the given host's IP addresses. -func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) { - return net.LookupIP(host) -} - -// Conn represents a socket connection. -// It implements net.Conn. -type Conn struct { - net.Conn -} - -// SetContext sets the context that is used by this Conn. -// It is usually used only when using a Conn that was created in a different context, -// such as when a connection is created during a warmup request but used while -// servicing a user request. -func (cn *Conn) SetContext(ctx context.Context) { - // This function is not required in App Engine "flexible environment". -} - -// KeepAlive signals that the connection is still in use. -// It may be called to prevent the socket being closed due to inactivity. -func (cn *Conn) KeepAlive() error { - // This function is not required in App Engine "flexible environment". 
- return nil -} diff --git a/vendor/google.golang.org/appengine/timeout.go b/vendor/google.golang.org/appengine/timeout.go index 05642a99..fcf3ad0a 100644 --- a/vendor/google.golang.org/appengine/timeout.go +++ b/vendor/google.golang.org/appengine/timeout.go @@ -4,7 +4,7 @@ package appengine -import "golang.org/x/net/context" +import "context" // IsTimeoutError reports whether err is a timeout error. func IsTimeoutError(err error) bool { diff --git a/vendor/google.golang.org/appengine/travis_install.sh b/vendor/google.golang.org/appengine/travis_install.sh deleted file mode 100644 index 785b62f4..00000000 --- a/vendor/google.golang.org/appengine/travis_install.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -set -e - -if [[ $GO111MODULE == "on" ]]; then - go get . -else - go get -u -v $(go list -f '{{join .Imports "\n"}}{{"\n"}}{{join .TestImports "\n"}}' ./... | sort | uniq | grep -v appengine) -fi - -if [[ $GOAPP == "true" ]]; then - mkdir /tmp/sdk - curl -o /tmp/sdk.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" - unzip -q /tmp/sdk.zip -d /tmp/sdk - # NOTE: Set the following env vars in the test script: - # export PATH="$PATH:/tmp/sdk/go_appengine" - # export APPENGINE_DEV_APPSERVER=/tmp/sdk/go_appengine/dev_appserver.py -fi - diff --git a/vendor/google.golang.org/appengine/travis_test.sh b/vendor/google.golang.org/appengine/travis_test.sh deleted file mode 100644 index d4390f04..00000000 --- a/vendor/google.golang.org/appengine/travis_test.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -set -e - -go version -go test -v google.golang.org/appengine/... -go test -v -race google.golang.org/appengine/... -if [[ $GOAPP == "true" ]]; then - export PATH="$PATH:/tmp/sdk/go_appengine" - export APPENGINE_DEV_APPSERVER=/tmp/sdk/go_appengine/dev_appserver.py - goapp version - goapp test -v google.golang.org/appengine/... -fi diff --git a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go index 6ffe1e6d..6c0d7241 100644 --- a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go +++ b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go @@ -7,6 +7,7 @@ package urlfetch // import "google.golang.org/appengine/urlfetch" import ( + "context" "errors" "fmt" "io" @@ -18,7 +19,6 @@ import ( "time" "github.com/golang/protobuf/proto" - "golang.org/x/net/context" "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/urlfetch" @@ -44,11 +44,10 @@ type Transport struct { var _ http.RoundTripper = (*Transport)(nil) // Client returns an *http.Client using a default urlfetch Transport. This -// client will have the default deadline of 5 seconds, and will check the -// validity of SSL certificates. +// client will check the validity of SSL certificates. // -// Any deadline of the provided context will be used for requests through this client; -// if the client does not have a deadline then a 5 second default is used. +// Any deadline of the provided context will be used for requests through this client. +// If the client does not have a deadline, then an App Engine default of 60 seconds is used.
func Client(ctx context.Context) *http.Client { return &http.Client{ Transport: &Transport{ diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index 0e6ae69a..ab0fbb79 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -1,8 +1,8 @@ # gRPC-Go -[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://pkg.go.dev/badge/google.golang.org/grpc)][API] [![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) +[![codecov](https://codecov.io/gh/grpc/grpc-go/graph/badge.svg)](https://codecov.io/gh/grpc/grpc-go) The [Go][] implementation of [gRPC][]: A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the @@ -14,21 +14,14 @@ RPC framework that puts mobile and HTTP/2 first. For more information see the ## Installation -With [Go module][] support (Go 1.11+), simply add the following import +Simply add the following import to your code, and then `go [build|run|test]` +will automatically fetch the necessary dependencies: + ```go import "google.golang.org/grpc" ``` -to your code, and then `go [build|run|test]` will automatically fetch the -necessary dependencies. - -Otherwise, to install the `grpc-go` package, run the following command: - -```console -$ go get -u google.golang.org/grpc -``` - > **Note:** If you are trying to access `grpc-go` from **China**, see the > [FAQ](#FAQ) below. @@ -56,15 +49,6 @@ To build Go code, there are several options: - Set up a VPN and access google.golang.org through that. -- Without Go module support: `git clone` the repo manually: - - ```sh - git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc - ``` - - You will need to do the same for all of grpc's dependencies in `golang.org`, - e.g. `golang.org/x/net`. - - With Go module support: it is possible to use the `replace` feature of `go mod` to create aliases for golang.org packages. In your project's directory: @@ -76,33 +60,13 @@ To build Go code, there are several options: ``` Again, this will need to be done for all transitive dependencies hosted on - golang.org as well. For details, refer to [golang/go issue #28652](https://github.com/golang/go/issues/28652). + golang.org as well. For details, refer to [golang/go issue + #28652](https://github.com/golang/go/issues/28652). ### Compiling error, undefined: grpc.SupportPackageIsVersion -#### If you are using Go modules: - -Ensure your gRPC-Go version is `require`d at the appropriate version in -the same module containing the generated `.pb.go` files. For example, -`SupportPackageIsVersion6` needs `v1.27.0`, so in your `go.mod` file: - -```go -module - -require ( - google.golang.org/grpc v1.27.0 -) -``` - -#### If you are *not* using Go modules: - -Update the `proto` package, gRPC package, and rebuild the `.proto` files: - -```sh -go get -u github.com/golang/protobuf/{proto,protoc-gen-go} -go get -u google.golang.org/grpc -protoc --go_out=plugins=grpc:. *.proto -``` +Please update to the latest version of gRPC-Go using +`go get google.golang.org/grpc`. ### How to turn on logging @@ -121,9 +85,11 @@ possible reasons, including: 1. mis-configured transport credentials, connection failed on handshaking 1. bytes disrupted, possibly by a proxy in between 1. server shutdown - 1. 
Keepalive parameters caused connection shutdown, for example if you have configured - your server to terminate connections regularly to [trigger DNS lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). - If this is the case, you may want to increase your [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), + 1. Keepalive parameters caused connection shutdown, for example if you have + configured your server to terminate connections regularly to [trigger DNS + lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). + If this is the case, you may want to increase your + [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), to allow longer RPC calls to finish. It can be tricky to debug this because the error happens on the client side but diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go index 3efca459..52d530d7 100644 --- a/vendor/google.golang.org/grpc/attributes/attributes.go +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -34,26 +34,26 @@ import ( // key/value pairs. Keys must be hashable, and users should define their own // types for keys. Values should not be modified after they are added to an // Attributes or if they were received from one. If values implement 'Equal(o -// interface{}) bool', it will be called by (*Attributes).Equal to determine -// whether two values with the same key should be considered equal. +// any) bool', it will be called by (*Attributes).Equal to determine whether +// two values with the same key should be considered equal. type Attributes struct { - m map[interface{}]interface{} + m map[any]any } // New returns a new Attributes containing the key/value pair. -func New(key, value interface{}) *Attributes { - return &Attributes{m: map[interface{}]interface{}{key: value}} +func New(key, value any) *Attributes { + return &Attributes{m: map[any]any{key: value}} } // WithValue returns a new Attributes containing the previous keys and values // and the new key/value pair. If the same key appears multiple times, the // last value overwrites all previous values for that key. To remove an // existing key, use a nil value. value should not be modified later. -func (a *Attributes) WithValue(key, value interface{}) *Attributes { +func (a *Attributes) WithValue(key, value any) *Attributes { if a == nil { return New(key, value) } - n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+1)} + n := &Attributes{m: make(map[any]any, len(a.m)+1)} for k, v := range a.m { n.m[k] = v } @@ -63,20 +63,19 @@ func (a *Attributes) WithValue(key, value interface{}) *Attributes { // Value returns the value associated with these attributes for key, or nil if // no value is associated with key. The returned value should not be modified. -func (a *Attributes) Value(key interface{}) interface{} { +func (a *Attributes) Value(key any) any { if a == nil { return nil } return a.m[key] } -// Equal returns whether a and o are equivalent. If 'Equal(o interface{}) -// bool' is implemented for a value in the attributes, it is called to -// determine if the value matches the one stored in the other attributes. If -// Equal is not implemented, standard equality is used to determine if the two -// values are equal. Note that some types (e.g. maps) aren't comparable by -// default, so they must be wrapped in a struct, or in an alias type, with Equal -// defined. 
+// Equal returns whether a and o are equivalent. If 'Equal(o any) bool' is +// implemented for a value in the attributes, it is called to determine if the +// value matches the one stored in the other attributes. If Equal is not +// implemented, standard equality is used to determine if the two values are +// equal. Note that some types (e.g. maps) aren't comparable by default, so +// they must be wrapped in a struct, or in an alias type, with Equal defined. func (a *Attributes) Equal(o *Attributes) bool { if a == nil && o == nil { return true @@ -93,7 +92,7 @@ func (a *Attributes) Equal(o *Attributes) bool { // o missing element of a return false } - if eq, ok := v.(interface{ Equal(o interface{}) bool }); ok { + if eq, ok := v.(interface{ Equal(o any) bool }); ok { if !eq.Equal(ov) { return false } @@ -112,19 +111,31 @@ func (a *Attributes) String() string { sb.WriteString("{") first := true for k, v := range a.m { - var key, val string - if str, ok := k.(interface{ String() string }); ok { - key = str.String() - } - if str, ok := v.(interface{ String() string }); ok { - val = str.String() - } if !first { sb.WriteString(", ") } - sb.WriteString(fmt.Sprintf("%q: %q, ", key, val)) + sb.WriteString(fmt.Sprintf("%q: %q ", str(k), str(v))) first = false } sb.WriteString("}") return sb.String() } + +func str(x any) (s string) { + if v, ok := x.(fmt.Stringer); ok { + return fmt.Sprint(v) + } else if v, ok := x.(string); ok { + return v + } + return fmt.Sprintf("<%p>", x) +} + +// MarshalJSON helps implement the json.Marshaler interface, thereby rendering +// the Attributes correctly when printing (via pretty.JSON) structs containing +// Attributes as fields. +// +// It is impossible to unmarshal attributes from a JSON representation and this +// method is meant only for debugging purposes. +func (a *Attributes) MarshalJSON() ([]byte, error) { + return []byte(a.String()), nil +} diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index 8f00523c..d79560a2 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -30,6 +30,7 @@ import ( "google.golang.org/grpc/channelz" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" @@ -39,6 +40,8 @@ import ( var ( // m is a map from name to balancer builder. m = make(map[string]Builder) + + logger = grpclog.Component("balancer") ) // Register registers the balancer builder to the balancer map. b.Name @@ -51,6 +54,12 @@ var ( // an init() function), and is not thread-safe. If multiple Balancers are // registered with the same name, the one registered last will take effect. func Register(b Builder) { + if strings.ToLower(b.Name()) != b.Name() { + // TODO: Skip the use of strings.ToLower() to index the map after v1.59 + // is released to switch to case sensitive balancer registry. Also, + // remove this warning and update the docstrings for Register and Get. + logger.Warningf("Balancer registered with name %q. grpc-go will be switching to case sensitive balancer registries soon", b.Name()) + } m[strings.ToLower(b.Name())] = b } @@ -70,6 +79,12 @@ func init() { // Note that the compare is done in a case-insensitive fashion. // If no builder is registered with the name, nil will be returned.
func Get(name string) Builder { + if strings.ToLower(name) != name { + // TODO: Skip the use of strings.ToLower() to index the map after v1.59 + // is released to switch to case sensitive balancer registry. Also, + // remove this warning and update the docstrings for Register and Get. + logger.Warningf("Balancer retrieved for name %q. grpc-go will be switching to case sensitive balancer registries soon", name) + } if b, ok := m[strings.ToLower(name)]; ok { return b } @@ -105,8 +120,8 @@ type SubConn interface { // // This will trigger a state transition for the SubConn. // - // Deprecated: This method is now part of the ClientConn interface and will - // eventually be removed from here. + // Deprecated: this method will be removed. Create new SubConns for new + // addresses instead. UpdateAddresses([]resolver.Address) // Connect starts the connecting for this SubConn. Connect() @@ -115,6 +130,13 @@ type SubConn interface { // creates a new one and returns it. Returns a close function which must // be called when the Producer is no longer needed. GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) + // Shutdown shuts down the SubConn gracefully. Any started RPCs will be + // allowed to complete. No future calls should be made on the SubConn. + // One final state update will be delivered to the StateListener (or + // UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to + // indicate the shutdown operation. This may be delivered before + // in-progress RPCs are complete and the actual connection is closed. + Shutdown() } // NewSubConnOptions contains options to create new SubConn. @@ -129,6 +151,11 @@ type NewSubConnOptions struct { // HealthCheckEnabled indicates whether health check service should be // enabled on this SubConn HealthCheckEnabled bool + // StateListener is called when the state of the subconn changes. If nil, + // Balancer.UpdateSubConnState will be called instead. Will never be + // invoked until after Connect() is called on the SubConn created with + // these options. + StateListener func(SubConnState) } // State contains the balancer's state relevant to the gRPC ClientConn. @@ -150,16 +177,24 @@ type ClientConn interface { // NewSubConn is called by balancer to create a new SubConn. // It doesn't block and wait for the connections to be established. // Behaviors of the SubConn can be controlled by options. + // + // Deprecated: please be aware that in a future version, SubConns will only + // support one address per SubConn. NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error) // RemoveSubConn removes the SubConn from ClientConn. // The SubConn will be shutdown. + // + // Deprecated: use SubConn.Shutdown instead. RemoveSubConn(SubConn) // UpdateAddresses updates the addresses used in the passed in SubConn. // gRPC checks if the currently connected address is still in the new list. // If so, the connection will be kept. Else, the connection will be // gracefully closed, and a new connection will be created. // - // This will trigger a state transition for the SubConn. + // This may trigger a state transition for the SubConn. + // + // Deprecated: this method will be removed. Create new SubConns for new + // addresses instead. UpdateAddresses(SubConn, []resolver.Address) // UpdateState notifies gRPC that the balancer's internal state has @@ -250,7 +285,7 @@ type DoneInfo struct { // trailing metadata. // // The only supported type now is *orca_v3.LoadReport. 
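A sketch of the migration path the deprecation notices above describe: SubConns are created with a StateListener (instead of relying on Balancer.UpdateSubConnState) and retired with Shutdown (instead of ClientConn.RemoveSubConn). replaceSubConn is a hypothetical helper, not part of grpc-go:

package example

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/resolver"
)

// replaceSubConn creates a SubConn for addrs and shuts down the old one.
func replaceSubConn(cc balancer.ClientConn, old balancer.SubConn, addrs []resolver.Address) (balancer.SubConn, error) {
	var sc balancer.SubConn
	opts := balancer.NewSubConnOptions{
		// Capture sc so the listener knows which SubConn is reporting; per
		// the docs above, the listener is only invoked after Connect().
		StateListener: func(scs balancer.SubConnState) {
			_ = sc // e.g. record scs.ConnectivityState for sc, rebuild picker
		},
	}
	sc, err := cc.NewSubConn(addrs, opts)
	if err != nil {
		return nil, err
	}
	sc.Connect()
	if old != nil {
		// One final state update with ConnectivityState Shutdown will be
		// delivered to old's listener.
		old.Shutdown()
	}
	return sc, nil
}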
- ServerLoad interface{} + ServerLoad any } var ( @@ -343,9 +378,13 @@ type Balancer interface { ResolverError(error) // UpdateSubConnState is called by gRPC when the state of a SubConn // changes. + // + // Deprecated: Use NewSubConnOptions.StateListener when creating the + // SubConn instead. UpdateSubConnState(SubConn, SubConnState) - // Close closes the balancer. The balancer is not required to call - // ClientConn.RemoveSubConn for its existing SubConns. + // Close closes the balancer. The balancer is not currently required to + // call SubConn.Shutdown for its existing SubConns; however, this will be + // required in a future release, so it is recommended. Close() } @@ -390,15 +429,14 @@ var ErrBadResolverState = errors.New("bad resolver state") type ProducerBuilder interface { // Build creates a Producer. The first parameter is always a // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the - // associated SubConn), but is declared as interface{} to avoid a - // dependency cycle. Should also return a close function that will be - // called when all references to the Producer have been given up. - Build(grpcClientConnInterface interface{}) (p Producer, close func()) + // associated SubConn), but is declared as `any` to avoid a dependency + // cycle. Should also return a close function that will be called when all + // references to the Producer have been given up. + Build(grpcClientConnInterface any) (p Producer, close func()) } // A Producer is a type shared among potentially many consumers. It is // associated with a SubConn, and an implementation will typically contain // other methods to provide additional functionality, e.g. configuration or // subscription registration. -type Producer interface { -} +type Producer any diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index 3929c26d..a7f1eeec 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -105,7 +105,12 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { addrsSet.Set(a, nil) if _, ok := b.subConns.Get(a); !ok { // a is a new address (not existing in b.subConns). - sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck}) + var sc balancer.SubConn + opts := balancer.NewSubConnOptions{ + HealthCheckEnabled: b.config.HealthCheck, + StateListener: func(scs balancer.SubConnState) { b.updateSubConnState(sc, scs) }, + } + sc, err := b.cc.NewSubConn([]resolver.Address{a}, opts) if err != nil { logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) continue @@ -121,10 +126,10 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { sc := sci.(balancer.SubConn) // a was removed by resolver. if _, ok := addrsSet.Get(a); !ok { - b.cc.RemoveSubConn(sc) + sc.Shutdown() b.subConns.Delete(a) // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. - // The entry will be deleted in UpdateSubConnState. + // The entry will be deleted in updateSubConnState. } } // If resolver state contains no addresses, return an error so ClientConn @@ -177,7 +182,12 @@ func (b *baseBalancer) regeneratePicker() { b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) } +// UpdateSubConnState is a nop because a StateListener is always set in NewSubConn. 
func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + logger.Errorf("base.baseBalancer: UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) +} + +func (b *baseBalancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { s := state.ConnectivityState if logger.V(2) { logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) @@ -204,8 +214,8 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su case connectivity.Idle: sc.Connect() case connectivity.Shutdown: - // When an address was removed by resolver, b called RemoveSubConn but - // kept the sc's state in scStates. Remove state for this sc here. + // When an address was removed by resolver, b called Shutdown but kept + // the sc's state in scStates. Remove state for this sc here. delete(b.scStates, sc) case connectivity.TransientFailure: // Save error to be reported via picker. @@ -226,7 +236,7 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su } // Close is a nop because base balancer doesn't have internal state to clean up, -// and it doesn't need to call RemoveSubConn for the SubConns. +// and it doesn't need to call Shutdown for the SubConns. func (b *baseBalancer) Close() { } diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_wrapper.go similarity index 52% rename from vendor/google.golang.org/grpc/balancer_conn_wrappers.go rename to vendor/google.golang.org/grpc/balancer_wrapper.go index 04b9ad41..b5e30cff 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -32,21 +32,13 @@ import ( "google.golang.org/grpc/resolver" ) -type ccbMode int - -const ( - ccbModeActive = iota - ccbModeIdle - ccbModeClosed - ccbModeExitingIdle -) - // ccBalancerWrapper sits between the ClientConn and the Balancer. // // ccBalancerWrapper implements methods corresponding to the ones on the // balancer.Balancer interface. The ClientConn is free to call these methods // concurrently and the ccBalancerWrapper ensures that calls from the ClientConn -// to the Balancer happen synchronously and in order. +// to the Balancer happen in order by performing them in the serializer, without +// any mutexes held. // // ccBalancerWrapper also implements the balancer.ClientConn interface and is // passed to the Balancer implementations. It invokes unexported methods on the @@ -57,99 +49,75 @@ const ( type ccBalancerWrapper struct { // The following fields are initialized when the wrapper is created and are // read-only afterwards, and therefore can be accessed without a mutex. - cc *ClientConn - opts balancer.BuildOptions + cc *ClientConn + opts balancer.BuildOptions + serializer *grpcsync.CallbackSerializer + serializerCancel context.CancelFunc - // Outgoing (gRPC --> balancer) calls are guaranteed to execute in a - // mutually exclusive manner as they are scheduled in the serializer. Fields - // accessed *only* in these serializer callbacks, can therefore be accessed - // without a mutex. - balancer *gracefulswitch.Balancer + // The following fields are only accessed within the serializer or during + // initialization. curBalancerName string + balancer *gracefulswitch.Balancer - // mu guards access to the below fields. Access to the serializer and its - // cancel function needs to be mutex protected because they are overwritten - // when the wrapper exits idle mode. 
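The base balancer above handles all SubConn bookkeeping and delegates only picking, so a custom policy can be just a PickerBuilder. A minimal sketch; the policy name "example_random" is invented, everything else is the real base-package API:

package example

import (
	"math/rand"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
)

// randomPickerBuilder builds a picker that chooses among READY SubConns at
// random each time it is rebuilt by regeneratePicker.
type randomPickerBuilder struct{}

func (randomPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
	if len(info.ReadySCs) == 0 {
		return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
	}
	scs := make([]balancer.SubConn, 0, len(info.ReadySCs))
	for sc := range info.ReadySCs {
		scs = append(scs, sc)
	}
	return &randomPicker{scs: scs}
}

type randomPicker struct{ scs []balancer.SubConn }

func (p *randomPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	return balancer.PickResult{SubConn: p.scs[rand.Intn(len(p.scs))]}, nil
}

func init() {
	// Register under an all-lowercase name: as the warning in Register above
	// notes, registries are due to become case sensitive.
	balancer.Register(base.NewBalancerBuilder("example_random", randomPickerBuilder{}, base.Config{HealthCheck: true}))
}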
- mu sync.Mutex - serializer *grpcsync.CallbackSerializer // To serialize all outoing calls. - serializerCancel context.CancelFunc // To close the seralizer at close/enterIdle time. - mode ccbMode // Tracks the current mode of the wrapper. + // The following field is protected by mu. Caller must take cc.mu before + // taking mu. + mu sync.Mutex + closed bool } -// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer -// is not created until the switchTo() method is invoked. -func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper { - ctx, cancel := context.WithCancel(context.Background()) +// newCCBalancerWrapper creates a new balancer wrapper in idle state. The +// underlying balancer is not created until the switchTo() method is invoked. +func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper { + ctx, cancel := context.WithCancel(cc.ctx) ccb := &ccBalancerWrapper{ - cc: cc, - opts: bopts, + cc: cc, + opts: balancer.BuildOptions{ + DialCreds: cc.dopts.copts.TransportCredentials, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + Authority: cc.authority, + CustomUserAgent: cc.dopts.copts.UserAgent, + ChannelzParentID: cc.channelzID, + Target: cc.parsedTarget, + }, serializer: grpcsync.NewCallbackSerializer(ctx), serializerCancel: cancel, } - ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts) + ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts) return ccb } // updateClientConnState is invoked by grpc to push a ClientConnState update to -// the underlying balancer. +// the underlying balancer. This is always executed from the serializer, so +// it is safe to call into the balancer here. func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { - ccb.mu.Lock() - errCh := make(chan error, 1) - // Here and everywhere else where Schedule() is called, it is done with the - // lock held. But the lock guards only the scheduling part. The actual - // callback is called asynchronously without the lock being held. - ok := ccb.serializer.Schedule(func(_ context.Context) { - // If the addresses specified in the update contain addresses of type - // "grpclb" and the selected LB policy is not "grpclb", these addresses - // will be filtered out and ccs will be modified with the updated - // address list. - if ccb.curBalancerName != grpclbName { - var addrs []resolver.Address - for _, addr := range ccs.ResolverState.Addresses { - if addr.Type == resolver.GRPCLB { - continue - } - addrs = append(addrs, addr) - } - ccs.ResolverState.Addresses = addrs + errCh := make(chan error) + ok := ccb.serializer.Schedule(func(ctx context.Context) { + defer close(errCh) + if ctx.Err() != nil || ccb.balancer == nil { + return + } + err := ccb.balancer.UpdateClientConnState(*ccs) + if logger.V(2) && err != nil { + logger.Infof("error from balancer.UpdateClientConnState: %v", err) } - errCh <- ccb.balancer.UpdateClientConnState(*ccs) + errCh <- err }) if !ok { - // If we are unable to schedule a function with the serializer, it - // indicates that it has been closed. A serializer is only closed when - // the wrapper is closed or is in idle. - ccb.mu.Unlock() - return fmt.Errorf("grpc: cannot send state update to a closed or idle balancer") - } - ccb.mu.Unlock() - - // We get here only if the above call to Schedule succeeds, in which case it - // is guaranteed that the scheduled function will run. Therefore it is safe - // to block on this channel. 
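The new updateClientConnState above relies on a small channel idiom worth calling out: 'defer close(errCh)' inside the scheduled callback guarantees the blocked caller always wakes up, receiving a nil error from the closed channel when the callback bails out early. A distilled sketch; schedule and work are stand-ins for the serializer and the balancer call:

package example

import "context"

// scheduleAndWait runs work on the scheduler and blocks until it finishes,
// is skipped, or was never scheduled.
func scheduleAndWait(schedule func(func(context.Context)) bool, work func() error) error {
	errCh := make(chan error)
	ok := schedule(func(ctx context.Context) {
		defer close(errCh) // unblocks the caller even on the early return
		if ctx.Err() != nil {
			return // canceled: caller receives nil from the closed channel
		}
		errCh <- work()
	})
	if !ok {
		// Nothing was scheduled; treat as a no-op, as the wrapper does.
		return nil
	}
	return <-errCh
}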
- err := <-errCh - if logger.V(2) && err != nil { - logger.Infof("error from balancer.UpdateClientConnState: %v", err) + return nil } - return err -} - -// updateSubConnState is invoked by grpc to push a subConn state update to the -// underlying balancer. -func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) { - ccb.mu.Lock() - ccb.serializer.Schedule(func(_ context.Context) { - ccb.balancer.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) - }) - ccb.mu.Unlock() + return <-errCh } +// resolverError is invoked by grpc to push a resolver error to the underlying +// balancer. The call to the balancer is executed from the serializer. func (ccb *ccBalancerWrapper) resolverError(err error) { - ccb.mu.Lock() - ccb.serializer.Schedule(func(_ context.Context) { + ccb.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccb.balancer == nil { + return + } ccb.balancer.ResolverError(err) }) - ccb.mu.Unlock() } // switchTo is invoked by grpc to instruct the balancer wrapper to switch to the @@ -163,8 +131,10 @@ func (ccb *ccBalancerWrapper) resolverError(err error) { // the ccBalancerWrapper keeps track of the current LB policy name, and skips // the graceful balancer switching process if the name does not change. func (ccb *ccBalancerWrapper) switchTo(name string) { - ccb.mu.Lock() - ccb.serializer.Schedule(func(_ context.Context) { + ccb.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccb.balancer == nil { + return + } // TODO: Other languages use case-sensitive balancer registries. We should // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. if strings.EqualFold(ccb.curBalancerName, name) { @@ -172,7 +142,6 @@ func (ccb *ccBalancerWrapper) switchTo(name string) { } ccb.buildLoadBalancingPolicy(name) }) - ccb.mu.Unlock() } // buildLoadBalancingPolicy performs the following: @@ -199,151 +168,69 @@ func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) { ccb.curBalancerName = builder.Name() } +// close initiates async shutdown of the wrapper. cc.mu must be held when +// calling this function. To determine the wrapper has finished shutting down, +// the channel should block on ccb.serializer.Done() without cc.mu held. func (ccb *ccBalancerWrapper) close() { - channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing") - ccb.closeBalancer(ccbModeClosed) -} - -// enterIdleMode is invoked by grpc when the channel enters idle mode upon -// expiry of idle_timeout. This call blocks until the balancer is closed. -func (ccb *ccBalancerWrapper) enterIdleMode() { - channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: entering idle mode") - ccb.closeBalancer(ccbModeIdle) -} - -// closeBalancer is invoked when the channel is being closed or when it enters -// idle mode upon expiry of idle_timeout. -func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) { ccb.mu.Lock() - if ccb.mode == ccbModeClosed || ccb.mode == ccbModeIdle { - ccb.mu.Unlock() - return - } - - ccb.mode = m - done := ccb.serializer.Done - b := ccb.balancer - ok := ccb.serializer.Schedule(func(_ context.Context) { - // Close the serializer to ensure that no more calls from gRPC are sent - // to the balancer. - ccb.serializerCancel() - // Empty the current balancer name because we don't have a balancer - // anymore and also so that we act on the next call to switchTo by - // creating a new balancer specified by the new resolver. 
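All of the wrapper methods above funnel work through the serializer instead of holding a mutex across balancer calls. The real type lives in the internal grpcsync package and is not importable; the sketch below is a simplified stand-in that shows the pattern, not the actual implementation (in particular, it may drop queued callbacks once the context is canceled):

package example

import "context"

// callbackSerializer runs scheduled callbacks one at a time, in order, on a
// single goroutine.
type callbackSerializer struct {
	ctx       context.Context
	callbacks chan func(context.Context)
	done      chan struct{}
}

func newCallbackSerializer(ctx context.Context) *callbackSerializer {
	s := &callbackSerializer{
		ctx:       ctx,
		callbacks: make(chan func(context.Context), 128),
		done:      make(chan struct{}),
	}
	go func() {
		defer close(s.done)
		for {
			select {
			case cb := <-s.callbacks:
				cb(ctx) // callbacks check ctx.Err() themselves, as above
			case <-ctx.Done():
				return
			}
		}
	}()
	return s
}

// Schedule reports false once the context has been canceled, mirroring the
// real API, where a false return means the wrapper has been closed.
func (s *callbackSerializer) Schedule(cb func(context.Context)) bool {
	select {
	case <-s.ctx.Done():
		return false
	case s.callbacks <- cb:
		return true
	}
}

// Done is closed once the serializer has stopped running callbacks.
func (s *callbackSerializer) Done() <-chan struct{} { return s.done }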
- ccb.curBalancerName = "" - }) - if !ok { - ccb.mu.Unlock() - return - } + ccb.closed = true ccb.mu.Unlock() - - // Give enqueued callbacks a chance to finish. - <-done - // Spawn a goroutine to close the balancer (since it may block trying to - // cleanup all allocated resources) and return early. - go b.Close() -} - -// exitIdleMode is invoked by grpc when the channel exits idle mode either -// because of an RPC or because of an invocation of the Connect() API. This -// recreates the balancer that was closed previously when entering idle mode. -// -// If the channel is not in idle mode, we know for a fact that we are here as a -// result of the user calling the Connect() method on the ClientConn. In this -// case, we can simply forward the call to the underlying balancer, instructing -// it to reconnect to the backends. -func (ccb *ccBalancerWrapper) exitIdleMode() { - ccb.mu.Lock() - if ccb.mode == ccbModeClosed { - // Request to exit idle is a no-op when wrapper is already closed. - ccb.mu.Unlock() - return - } - - if ccb.mode == ccbModeIdle { - // Recreate the serializer which was closed when we entered idle. - ctx, cancel := context.WithCancel(context.Background()) - ccb.serializer = grpcsync.NewCallbackSerializer(ctx) - ccb.serializerCancel = cancel - } - - // The ClientConn guarantees that mutual exclusion between close() and - // exitIdleMode(), and since we just created a new serializer, we can be - // sure that the below function will be scheduled. - done := make(chan struct{}) - ccb.serializer.Schedule(func(_ context.Context) { - defer close(done) - - ccb.mu.Lock() - defer ccb.mu.Unlock() - - if ccb.mode != ccbModeIdle { - ccb.balancer.ExitIdle() + channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing") + ccb.serializer.Schedule(func(context.Context) { + if ccb.balancer == nil { return } - - // Gracefulswitch balancer does not support a switchTo operation after - // being closed. Hence we need to create a new one here. - ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts) - ccb.mode = ccbModeActive - channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: exiting idle mode") - + ccb.balancer.Close() + ccb.balancer = nil }) - ccb.mu.Unlock() - - <-done + ccb.serializerCancel() } -func (ccb *ccBalancerWrapper) isIdleOrClosed() bool { - ccb.mu.Lock() - defer ccb.mu.Unlock() - return ccb.mode == ccbModeIdle || ccb.mode == ccbModeClosed +// exitIdle invokes the balancer's exitIdle method in the serializer. 
+func (ccb *ccBalancerWrapper) exitIdle() {
+	ccb.serializer.Schedule(func(ctx context.Context) {
+		if ctx.Err() != nil || ccb.balancer == nil {
+			return
+		}
+		ccb.balancer.ExitIdle()
+	})
 }
 
 func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
-	if ccb.isIdleOrClosed() {
-		return nil, fmt.Errorf("grpc: cannot create SubConn when balancer is closed or idle")
+	ccb.cc.mu.Lock()
+	defer ccb.cc.mu.Unlock()
+
+	ccb.mu.Lock()
+	if ccb.closed {
+		ccb.mu.Unlock()
+		return nil, fmt.Errorf("balancer is being closed; no new SubConns allowed")
 	}
+	ccb.mu.Unlock()
 
 	if len(addrs) == 0 {
 		return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
 	}
-	ac, err := ccb.cc.newAddrConn(addrs, opts)
+	ac, err := ccb.cc.newAddrConnLocked(addrs, opts)
 	if err != nil {
 		channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err)
 		return nil, err
 	}
-	acbw := &acBalancerWrapper{ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer)}
+	acbw := &acBalancerWrapper{
+		ccb:           ccb,
+		ac:            ac,
+		producers:     make(map[balancer.ProducerBuilder]*refCountedProducer),
+		stateListener: opts.StateListener,
+	}
 	ac.acbw = acbw
 	return acbw, nil
 }
 
 func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
-	if ccb.isIdleOrClosed() {
-		// It it safe to ignore this call when the balancer is closed or in idle
-		// because the ClientConn takes care of closing the connections.
-		//
-		// Not returning early from here when the balancer is closed or in idle
-		// leads to a deadlock though, because of the following sequence of
-		// calls when holding cc.mu:
-		// cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close -->
-		// ccb.RemoveAddrConn --> cc.removeAddrConn
-		return
-	}
-
-	acbw, ok := sc.(*acBalancerWrapper)
-	if !ok {
-		return
-	}
-	ccb.cc.removeAddrConn(acbw.ac, errConnDrain)
+	// The graceful switch balancer will never call this.
+	logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly", sc)
 }
 
 func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
-	if ccb.isIdleOrClosed() {
-		return
-	}
-
 	acbw, ok := sc.(*acBalancerWrapper)
 	if !ok {
 		return
@@ -352,25 +239,39 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol
 }
 
 func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
-	if ccb.isIdleOrClosed() {
+	ccb.cc.mu.Lock()
+	defer ccb.cc.mu.Unlock()
+
+	ccb.mu.Lock()
+	if ccb.closed {
+		ccb.mu.Unlock()
 		return
 	}
-
+	ccb.mu.Unlock()
 	// Update picker before updating state. Even though the ordering here does
 	// not matter, it can lead to multiple calls of Pick in the common start-up
 	// case where we wait for ready and then perform an RPC. If the picker is
 	// updated later, we could call the "connecting" picker when the state is
 	// updated, and then call the "ready" picker after the picker gets updated.
-	ccb.cc.blockingpicker.updatePicker(s.Picker)
+
+	// Note that there is no need to check if the balancer wrapper was closed,
+	// as we know the graceful switch LB policy will not call cc if it has been
+	// closed.
+	ccb.cc.pickerWrapper.updatePicker(s.Picker)
 	ccb.cc.csMgr.updateState(s.ConnectivityState)
 }
 
 func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) {
-	if ccb.isIdleOrClosed() {
+	ccb.cc.mu.RLock()
+	defer ccb.cc.mu.RUnlock()
+
+	ccb.mu.Lock()
+	if ccb.closed {
+		ccb.mu.Unlock()
 		return
 	}
-
-	ccb.cc.resolveNow(o)
+	ccb.mu.Unlock()
+	ccb.cc.resolveNowLocked(o)
 }
 
 func (ccb *ccBalancerWrapper) Target() string {
@@ -380,12 +281,28 @@ func (ccb *ccBalancerWrapper) Target() string {
 // acBalancerWrapper is a wrapper on top of ac for balancers.
 // It implements balancer.SubConn interface.
 type acBalancerWrapper struct {
-	ac *addrConn // read-only
+	ac            *addrConn          // read-only
+	ccb           *ccBalancerWrapper // read-only
+	stateListener func(balancer.SubConnState)
 
 	mu        sync.Mutex
 	producers map[balancer.ProducerBuilder]*refCountedProducer
 }
 
+// updateState is invoked by grpc to push a subConn state update to the
+// underlying balancer.
+func (acbw *acBalancerWrapper) updateState(s connectivity.State, err error) {
+	acbw.ccb.serializer.Schedule(func(ctx context.Context) {
+		if ctx.Err() != nil || acbw.ccb.balancer == nil {
+			return
+		}
+		// Even though it is optional for balancers, gracefulswitch ensures
+		// opts.StateListener is set, so this cannot ever be nil.
+		// TODO: delete this comment when UpdateSubConnState is removed.
+		acbw.stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err})
+	})
+}
+
 func (acbw *acBalancerWrapper) String() string {
 	return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int())
 }
@@ -398,6 +315,10 @@ func (acbw *acBalancerWrapper) Connect() {
 	go acbw.ac.connect()
 }
 
+func (acbw *acBalancerWrapper) Shutdown() {
+	acbw.ccb.cc.removeAddrConn(acbw.ac, errConnDrain)
+}
+
 // NewStream begins a streaming RPC on the addrConn. If the addrConn is not
 // ready, blocks until it is or ctx expires. Returns an error when the context
 // expires or the addrConn is shut down.
@@ -411,7 +332,7 @@ func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc,
 
 // Invoke performs a unary RPC. If the addrConn is not ready, returns
 // errSubConnNotReady.
-func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error {
+func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error {
 	cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...)
 	if err != nil {
 		return err
diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
index ec2c2fa1..e9e97d45 100644
--- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
+++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
@@ -18,7 +18,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// 	protoc-gen-go v1.30.0
+// 	protoc-gen-go v1.31.0
 // 	protoc        v4.22.0
 // source: grpc/binlog/v1/binarylog.proto
 
@@ -430,7 +430,7 @@ type ClientHeader struct {
 	MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"`
 	// A single process may be used to run multiple virtual
 	// servers with different identities.
-	// The authority is the name of such a server identitiy.
+	// The authority is the name of such a server identity.
 	// It is typically a portion of the URI in the form of
 	// <host> or <host>:<port> .
Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"` diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go index e6a1dc5d..788c89c1 100644 --- a/vendor/google.golang.org/grpc/call.go +++ b/vendor/google.golang.org/grpc/call.go @@ -26,12 +26,7 @@ import ( // received. This is typically called by generated code. // // All errors returned by Invoke are compatible with the status package. -func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { - if err := cc.idlenessMgr.onCallBegin(); err != nil { - return err - } - defer cc.idlenessMgr.onCallEnd() - +func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply any, opts ...CallOption) error { // allow interceptor to see all applicable call options, which means those // configured as defaults from dial option as well as per-call options opts = combine(cc.dopts.callOptions, opts) @@ -61,13 +56,13 @@ func combine(o1 []CallOption, o2 []CallOption) []CallOption { // received. This is typically called by generated code. // // DEPRECATED: Use ClientConn.Invoke instead. -func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error { +func Invoke(ctx context.Context, method string, args, reply any, cc *ClientConn, opts ...CallOption) error { return cc.Invoke(ctx, method, args, reply, opts...) } var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false} -func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { +func invoke(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error { cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...) if err != nil { return err diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 95a7459b..f6e815e6 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -33,10 +33,11 @@ import ( "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/idle" + "google.golang.org/grpc/internal/pretty" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" @@ -45,16 +46,14 @@ import ( "google.golang.org/grpc/status" _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. - _ "google.golang.org/grpc/internal/resolver/dns" // To register dns resolver. _ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver. _ "google.golang.org/grpc/internal/resolver/unix" // To register unix resolver. + _ "google.golang.org/grpc/resolver/dns" // To register dns resolver. ) const ( // minimum time to give a connection to complete minConnectTimeout = 20 * time.Second - // must match grpclbName in grpclb/grpclb.go - grpclbName = "grpclb" ) var ( @@ -118,48 +117,20 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires }, nil } -// DialContext creates a client connection to the given target. 
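For callers, the interface{} to any change above is source-compatible; generated stubs invoke unary RPCs the way the following sketch does. The method name and message types are placeholders:

package example

import (
	"context"

	"google.golang.org/grpc"
)

// callUnary shows ClientConn.Invoke after the any migration. in and out must
// be messages the configured codec understands (protobuf by default).
func callUnary(ctx context.Context, cc *grpc.ClientConn, in, out any) error {
	return cc.Invoke(ctx, "/example.Service/Method", in, out)
}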
By default, it's -// a non-blocking dial (the function won't wait for connections to be -// established, and connecting happens in the background). To make it a blocking -// dial, use WithBlock() dial option. -// -// In the non-blocking case, the ctx does not act against the connection. It -// only controls the setup steps. -// -// In the blocking case, ctx can be used to cancel or expire the pending -// connection. Once this function returns, the cancellation and expiration of -// ctx will be noop. Users should call ClientConn.Close to terminate all the -// pending operations after this function returns. -// -// The target name syntax is defined in -// https://github.com/grpc/grpc/blob/master/doc/naming.md. -// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. -func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { +// newClient returns a new client in idle mode. +func newClient(target string, opts ...DialOption) (conn *ClientConn, err error) { cc := &ClientConn{ target: target, - csMgr: &connectivityStateManager{}, conns: make(map[*addrConn]struct{}), dopts: defaultDialOptions(), czData: new(channelzData), } - // We start the channel off in idle mode, but kick it out of idle at the end - // of this method, instead of waiting for the first RPC. Other gRPC - // implementations do wait for the first RPC to kick the channel out of - // idle. But doing so would be a major behavior change for our users who are - // used to seeing the channel active after Dial. - // - // Taking this approach of kicking it out of idle at the end of this method - // allows us to share the code between channel creation and exiting idle - // mode. This will also make it easy for us to switch to starting the - // channel off in idle, if at all we ever get to do that. - cc.idlenessState = ccIdlenessStateIdle - cc.retryThrottler.Store((*retryThrottler)(nil)) cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) cc.ctx, cc.cancel = context.WithCancel(context.Background()) - cc.exitIdleCond = sync.NewCond(&cc.mu) + // Apply dial options. disableGlobalOpts := false for _, opt := range opts { if _, ok := opt.(*disableGlobalDialOptions); ok { @@ -177,19 +148,9 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * for _, opt := range opts { opt.apply(&cc.dopts) } - chainUnaryClientInterceptors(cc) chainStreamClientInterceptors(cc) - defer func() { - if err != nil { - cc.Close() - } - }() - - // Register ClientConn with channelz. - cc.channelzRegistration(target) - if err := cc.validateTransportCredentials(); err != nil { return nil, err } @@ -203,10 +164,80 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } cc.mkp = cc.dopts.copts.KeepaliveParams - if cc.dopts.copts.UserAgent != "" { - cc.dopts.copts.UserAgent += " " + grpcUA - } else { - cc.dopts.copts.UserAgent = grpcUA + // Register ClientConn with channelz. + cc.channelzRegistration(target) + + // TODO: Ideally it should be impossible to error from this function after + // channelz registration. This will require removing some channelz logs + // from the following functions that can error. Errors can be returned to + // the user, and successful logs can be emitted here, after the checks have + // passed and channelz is subsequently registered. + + // Determine the resolver to use. 
+ if err := cc.parseTargetAndFindResolver(); err != nil { + channelz.RemoveEntry(cc.channelzID) + return nil, err + } + if err = cc.determineAuthority(); err != nil { + channelz.RemoveEntry(cc.channelzID) + return nil, err + } + + cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelzID) + cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers) + + cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc. + cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout) + return cc, nil +} + +// DialContext creates a client connection to the given target. By default, it's +// a non-blocking dial (the function won't wait for connections to be +// established, and connecting happens in the background). To make it a blocking +// dial, use WithBlock() dial option. +// +// In the non-blocking case, the ctx does not act against the connection. It +// only controls the setup steps. +// +// In the blocking case, ctx can be used to cancel or expire the pending +// connection. Once this function returns, the cancellation and expiration of +// ctx will be noop. Users should call ClientConn.Close to terminate all the +// pending operations after this function returns. +// +// The target name syntax is defined in +// https://github.com/grpc/grpc/blob/master/doc/naming.md. +// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. +func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { + cc, err := newClient(target, opts...) + if err != nil { + return nil, err + } + + // We start the channel off in idle mode, but kick it out of idle now, + // instead of waiting for the first RPC. Other gRPC implementations do wait + // for the first RPC to kick the channel out of idle. But doing so would be + // a major behavior change for our users who are used to seeing the channel + // active after Dial. + // + // Taking this approach of kicking it out of idle at the end of this method + // allows us to share the code between channel creation and exiting idle + // mode. This will also make it easy for us to switch to starting the + // channel off in idle, i.e. by making newClient exported. + + defer func() { + if err != nil { + cc.Close() + } + }() + + // This creates the name resolver, load balancer, etc. + if err := cc.idlenessMgr.ExitIdleMode(); err != nil { + return nil, err + } + + // Return now for non-blocking dials. + if !cc.dopts.block { + return cc, nil } if cc.dopts.timeout > 0 { @@ -229,49 +260,6 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() - if cc.dopts.bs == nil { - cc.dopts.bs = backoff.DefaultExponential - } - - // Determine the resolver to use. - if err := cc.parseTargetAndFindResolver(); err != nil { - return nil, err - } - if err = cc.determineAuthority(); err != nil { - return nil, err - } - - if cc.dopts.scChan != nil { - // Blocking wait for the initial service config. - select { - case sc, ok := <-cc.dopts.scChan: - if ok { - cc.sc = &sc - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) - } - case <-ctx.Done(): - return nil, ctx.Err() - } - } - if cc.dopts.scChan != nil { - go cc.scWatcher() - } - - // This creates the name resolver, load balancer, blocking picker etc. - if err := cc.exitIdleMode(); err != nil { - return nil, err - } - - // Configure idleness support with configured idle timeout or default idle - // timeout duration. 
Idleness can be explicitly disabled by the user, by - // setting the dial option to 0. - cc.idlenessMgr = newIdlenessManager(cc, cc.dopts.idleTimeout) - - // Return early for non-blocking dials. - if !cc.dopts.block { - return cc, nil - } - // A blocking dial blocks until the clientConn is ready. for { s := cc.GetState() @@ -316,117 +304,82 @@ func (cc *ClientConn) addTraceEvent(msg string) { channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) } +type idler ClientConn + +func (i *idler) EnterIdleMode() { + (*ClientConn)(i).enterIdleMode() +} + +func (i *idler) ExitIdleMode() error { + return (*ClientConn)(i).exitIdleMode() +} + // exitIdleMode moves the channel out of idle mode by recreating the name -// resolver and load balancer. -func (cc *ClientConn) exitIdleMode() error { +// resolver and load balancer. This should never be called directly; use +// cc.idlenessMgr.ExitIdleMode instead. +func (cc *ClientConn) exitIdleMode() (err error) { cc.mu.Lock() if cc.conns == nil { cc.mu.Unlock() return errConnClosing } - if cc.idlenessState != ccIdlenessStateIdle { - cc.mu.Unlock() - logger.Info("ClientConn asked to exit idle mode when not in idle mode") - return nil - } - - defer func() { - // When Close() and exitIdleMode() race against each other, one of the - // following two can happen: - // - Close() wins the race and runs first. exitIdleMode() runs after, and - // sees that the ClientConn is already closed and hence returns early. - // - exitIdleMode() wins the race and runs first and recreates the balancer - // and releases the lock before recreating the resolver. If Close() runs - // in this window, it will wait for exitIdleMode to complete. - // - // We achieve this synchronization using the below condition variable. - cc.mu.Lock() - cc.idlenessState = ccIdlenessStateActive - cc.exitIdleCond.Signal() - cc.mu.Unlock() - }() - - cc.idlenessState = ccIdlenessStateExitingIdle - exitedIdle := false - if cc.blockingpicker == nil { - cc.blockingpicker = newPickerWrapper() - } else { - cc.blockingpicker.exitIdleMode() - exitedIdle = true - } - - var credsClone credentials.TransportCredentials - if creds := cc.dopts.copts.TransportCredentials; creds != nil { - credsClone = creds.Clone() - } - if cc.balancerWrapper == nil { - cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ - DialCreds: credsClone, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - Authority: cc.authority, - CustomUserAgent: cc.dopts.copts.UserAgent, - ChannelzParentID: cc.channelzID, - Target: cc.parsedTarget, - }) - } else { - cc.balancerWrapper.exitIdleMode() - } - cc.firstResolveEvent = grpcsync.NewEvent() cc.mu.Unlock() // This needs to be called without cc.mu because this builds a new resolver - // which might update state or report error inline which needs to be handled - // by cc.updateResolverState() which also grabs cc.mu. - if err := cc.initResolverWrapper(credsClone); err != nil { + // which might update state or report error inline, which would then need to + // acquire cc.mu. + if err := cc.resolverWrapper.start(); err != nil { return err } - if exitedIdle { - cc.addTraceEvent("exiting idle mode") - } + cc.addTraceEvent("exiting idle mode") return nil } +// initIdleStateLocked initializes common state to how it should be while idle. 
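Putting the new idle-mode plumbing together from a user's perspective: a non-blocking dial, an explicit Connect to leave idle, and a readiness wait equivalent to the blocking loop above. WithIdleTimeout is the experimental dial option assumed here, and the target is a placeholder:

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Dial is non-blocking; the channel exits idle on the first RPC or on
	// an explicit Connect().
	cc, err := grpc.Dial("dns:///greeter.example.com:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithIdleTimeout(30*time.Minute), // re-enter idle after 30m without RPCs
	)
	if err != nil {
		log.Fatalf("grpc.Dial: %v", err)
	}
	defer cc.Close()

	cc.Connect() // kick the channel out of idle without issuing an RPC

	// Wait for readiness the way the blocking dial loop above does.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	for s := cc.GetState(); s != connectivity.Ready; s = cc.GetState() {
		if !cc.WaitForStateChange(ctx, s) {
			log.Fatalf("channel never became ready: %v", ctx.Err())
		}
	}
}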
+func (cc *ClientConn) initIdleStateLocked() { + cc.resolverWrapper = newCCResolverWrapper(cc) + cc.balancerWrapper = newCCBalancerWrapper(cc) + cc.firstResolveEvent = grpcsync.NewEvent() + // cc.conns == nil is a proxy for the ClientConn being closed. So, instead + // of setting it to nil here, we recreate the map. This also means that we + // don't have to do this when exiting idle mode. + cc.conns = make(map[*addrConn]struct{}) +} + // enterIdleMode puts the channel in idle mode, and as part of it shuts down the -// name resolver, load balancer and any subchannels. -func (cc *ClientConn) enterIdleMode() error { +// name resolver, load balancer, and any subchannels. This should never be +// called directly; use cc.idlenessMgr.EnterIdleMode instead. +func (cc *ClientConn) enterIdleMode() { cc.mu.Lock() + if cc.conns == nil { cc.mu.Unlock() - return ErrClientConnClosing - } - if cc.idlenessState != ccIdlenessStateActive { - logger.Error("ClientConn asked to enter idle mode when not active") - return nil + return } - // cc.conns == nil is a proxy for the ClientConn being closed. So, instead - // of setting it to nil here, we recreate the map. This also means that we - // don't have to do this when exiting idle mode. conns := cc.conns - cc.conns = make(map[*addrConn]struct{}) - // TODO: Currently, we close the resolver wrapper upon entering idle mode - // and create a new one upon exiting idle mode. This means that the - // `cc.resolverWrapper` field would be overwritten everytime we exit idle - // mode. While this means that we need to hold `cc.mu` when accessing - // `cc.resolverWrapper`, it makes the code simpler in the wrapper. We should - // try to do the same for the balancer and picker wrappers too. - cc.resolverWrapper.close() - cc.blockingpicker.enterIdleMode() - cc.balancerWrapper.enterIdleMode() + rWrapper := cc.resolverWrapper + rWrapper.close() + cc.pickerWrapper.reset() + bWrapper := cc.balancerWrapper + bWrapper.close() cc.csMgr.updateState(connectivity.Idle) - cc.idlenessState = ccIdlenessStateIdle + cc.addTraceEvent("entering idle mode") + + cc.initIdleStateLocked() + cc.mu.Unlock() - go func() { - cc.addTraceEvent("entering idle mode") - for ac := range conns { - ac.tearDown(errConnIdling) - } - }() - return nil + // Block until the name resolver and LB policy are closed. + <-rWrapper.serializer.Done() + <-bWrapper.serializer.Done() + + // Close all subchannels after the LB policy is closed. + for ac := range conns { + ac.tearDown(errConnIdling) + } } // validateTransportCredentials performs a series of checks on the configured @@ -474,7 +427,6 @@ func (cc *ClientConn) validateTransportCredentials() error { func (cc *ClientConn) channelzRegistration(target string) { cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) cc.addTraceEvent("created") - cc.csMgr.channelzID = cc.channelzID } // chainUnaryClientInterceptors chains all unary client interceptors into one. @@ -491,7 +443,7 @@ func chainUnaryClientInterceptors(cc *ClientConn) { } else if len(interceptors) == 1 { chainedInt = interceptors[0] } else { - chainedInt = func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { + chainedInt = func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...) 
} } @@ -503,7 +455,7 @@ func getChainUnaryInvoker(interceptors []UnaryClientInterceptor, curr int, final if curr == len(interceptors)-1 { return finalInvoker } - return func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { + return func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error { return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...) } } @@ -539,13 +491,27 @@ func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStr } } +// newConnectivityStateManager creates an connectivityStateManager with +// the specified id. +func newConnectivityStateManager(ctx context.Context, id *channelz.Identifier) *connectivityStateManager { + return &connectivityStateManager{ + channelzID: id, + pubSub: grpcsync.NewPubSub(ctx), + } +} + // connectivityStateManager keeps the connectivity.State of ClientConn. // This struct will eventually be exported so the balancers can access it. +// +// TODO: If possible, get rid of the `connectivityStateManager` type, and +// provide this functionality using the `PubSub`, to avoid keeping track of +// the connectivity state at two places. type connectivityStateManager struct { mu sync.Mutex state connectivity.State notifyChan chan struct{} channelzID *channelz.Identifier + pubSub *grpcsync.PubSub } // updateState updates the connectivity.State of ClientConn. @@ -561,6 +527,8 @@ func (csm *connectivityStateManager) updateState(state connectivity.State) { return } csm.state = state + csm.pubSub.Publish(state) + channelz.Infof(logger, csm.channelzID, "Channel Connectivity change to %v", state) if csm.notifyChan != nil { // There are other goroutines waiting on this channel. @@ -590,7 +558,7 @@ func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} { type ClientConnInterface interface { // Invoke performs a unary RPC and returns after the response is received // into reply. - Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error + Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error // NewStream begins a streaming RPC. NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) } @@ -621,53 +589,35 @@ type ClientConn struct { dopts dialOptions // Default and user specified dial options. channelzID *channelz.Identifier // Channelz identifier for the channel. resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). - balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. - idlenessMgr idlenessManager + idlenessMgr *idle.Manager // The following provide their own synchronization, and therefore don't // require cc.mu to be held to access them. csMgr *connectivityStateManager - blockingpicker *pickerWrapper + pickerWrapper *pickerWrapper safeConfigSelector iresolver.SafeConfigSelector czData *channelzData retryThrottler atomic.Value // Updated from service config. - // firstResolveEvent is used to track whether the name resolver sent us at - // least one update. RPCs block on this event. - firstResolveEvent *grpcsync.Event - // mu protects the following fields. // TODO: split mu so the same mutex isn't used for everything. mu sync.RWMutex - resolverWrapper *ccResolverWrapper // Initialized in Dial; cleared in Close. 
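The chaining helpers above compose interceptors that now use the any signatures. A sketch of one such interceptor and how it would be installed:

package example

import (
	"context"
	"log"

	"google.golang.org/grpc"
)

// logUnary is a client interceptor using the post-migration signature (req
// and reply are any rather than interface{}). Interceptors like this are
// composed by chainUnaryClientInterceptors above.
func logUnary(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	err := invoker(ctx, method, req, reply, cc, opts...)
	log.Printf("rpc %s err=%v", method, err)
	return err
}

// Installed at dial time:
//   grpc.Dial(target, grpc.WithChainUnaryInterceptor(logUnary), ...)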
+ resolverWrapper *ccResolverWrapper // Always recreated whenever entering idle to simplify Close. + balancerWrapper *ccBalancerWrapper // Always recreated whenever entering idle to simplify Close. sc *ServiceConfig // Latest service config received from the resolver. conns map[*addrConn]struct{} // Set to nil on close. mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. - idlenessState ccIdlenessState // Tracks idleness state of the channel. - exitIdleCond *sync.Cond // Signalled when channel exits idle. + // firstResolveEvent is used to track whether the name resolver sent us at + // least one update. RPCs block on this event. May be accessed without mu + // if we know we cannot be asked to enter idle mode while accessing it (e.g. + // when the idle manager has already been closed, or if we are already + // entering idle mode). + firstResolveEvent *grpcsync.Event lceMu sync.Mutex // protects lastConnectionError lastConnectionError error } -// ccIdlenessState tracks the idleness state of the channel. -// -// Channels start off in `active` and move to `idle` after a period of -// inactivity. When moving back to `active` upon an incoming RPC, they -// transition through `exiting_idle`. This state is useful for synchronization -// with Close(). -// -// This state tracking is mostly for self-protection. The idlenessManager is -// expected to keep track of the state as well, and is expected not to call into -// the ClientConn unnecessarily. -type ccIdlenessState int8 - -const ( - ccIdlenessStateActive ccIdlenessState = iota - ccIdlenessStateIdle - ccIdlenessStateExitingIdle -) - // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. A true value is returned in former case and false in latter. // @@ -707,29 +657,15 @@ func (cc *ClientConn) GetState() connectivity.State { // Notice: This API is EXPERIMENTAL and may be changed or removed in a later // release. func (cc *ClientConn) Connect() { - cc.exitIdleMode() + if err := cc.idlenessMgr.ExitIdleMode(); err != nil { + cc.addTraceEvent(err.Error()) + return + } // If the ClientConn was not in idle mode, we need to call ExitIdle on the // LB policy so that connections can be created. - cc.balancerWrapper.exitIdleMode() -} - -func (cc *ClientConn) scWatcher() { - for { - select { - case sc, ok := <-cc.dopts.scChan: - if !ok { - return - } - cc.mu.Lock() - // TODO: load balance policy runtime change is ignored. - // We may revisit this decision in the future. 
- cc.sc = &sc - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) - cc.mu.Unlock() - case <-cc.ctx.Done(): - return - } - } + cc.mu.Lock() + cc.balancerWrapper.exitIdle() + cc.mu.Unlock() } // waitForResolvedAddrs blocks until the resolver has provided addresses or the @@ -759,6 +695,16 @@ func init() { panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err)) } emptyServiceConfig = cfg.Config.(*ServiceConfig) + + internal.SubscribeToConnectivityStateChanges = func(cc *ClientConn, s grpcsync.Subscriber) func() { + return cc.csMgr.pubSub.Subscribe(s) + } + internal.EnterIdleModeForTesting = func(cc *ClientConn) { + cc.idlenessMgr.EnterIdleModeForTesting() + } + internal.ExitIdleModeForTesting = func(cc *ClientConn) error { + return cc.idlenessMgr.ExitIdleMode() + } } func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { @@ -773,9 +719,8 @@ func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { } } -func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { +func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) error { defer cc.firstResolveEvent.Fire() - cc.mu.Lock() // Check if the ClientConn is already closed. Some fields (e.g. // balancerWrapper) are set to nil when closing the ClientConn, and could // cause nil pointer panic if we don't have this check. @@ -821,7 +766,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { if cc.sc == nil { // Apply the failing LB only if we haven't received valid service config // from the name resolver in the past. - cc.applyFailingLB(s.ServiceConfig) + cc.applyFailingLBLocked(s.ServiceConfig) cc.mu.Unlock() return ret } @@ -843,15 +788,13 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { return ret } -// applyFailingLB is akin to configuring an LB policy on the channel which +// applyFailingLBLocked is akin to configuring an LB policy on the channel which // always fails RPCs. Here, an actual LB policy is not configured, but an always // erroring picker is configured, which returns errors with information about // what was invalid in the received service config. A config selector with no // service config is configured, and the connectivity state of the channel is // set to TransientFailure. -// -// Caller must hold cc.mu. -func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) { +func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) { var err error if sc.Err != nil { err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err) @@ -859,22 +802,36 @@ func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) { err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config) } cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) - cc.blockingpicker.updatePicker(base.NewErrPicker(err)) + cc.pickerWrapper.updatePicker(base.NewErrPicker(err)) cc.csMgr.updateState(connectivity.TransientFailure) } -func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { - cc.balancerWrapper.updateSubConnState(sc, s, err) +// Makes a copy of the input addresses slice and clears out the balancer +// attributes field. Addresses are passed during subconn creation and address +// update operations. 
In both cases, we will clear the balancer attributes by +// calling this function, and therefore we will be able to use the Equal method +// provided by the resolver.Address type for comparison. +func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Address { + out := make([]resolver.Address, len(in)) + for i := range in { + out[i] = in[i] + out[i].BalancerAttributes = nil + } + return out } -// newAddrConn creates an addrConn for addrs and adds it to cc.conns. +// newAddrConnLocked creates an addrConn for addrs and adds it to cc.conns. // // Caller needs to make sure len(addrs) > 0. -func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { +func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { + if cc.conns == nil { + return nil, ErrClientConnClosing + } + ac := &addrConn{ state: connectivity.Idle, cc: cc, - addrs: addrs, + addrs: copyAddressesWithoutBalancerAttributes(addrs), scopts: opts, dopts: cc.dopts, czData: new(channelzData), @@ -882,12 +839,6 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub stateChan: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) - // Track ac in cc. This needs to be done before any getTransport(...) is called. - cc.mu.Lock() - defer cc.mu.Unlock() - if cc.conns == nil { - return nil, ErrClientConnClosing - } var err error ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "") @@ -903,6 +854,7 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub }, }) + // Track ac in cc. This needs to be done before any getTransport(...) is called. cc.conns[ac] = struct{}{} return ac, nil } @@ -995,8 +947,9 @@ func equalAddresses(a, b []resolver.Address) bool { // connections or connection attempts. func (ac *addrConn) updateAddrs(addrs []resolver.Address) { ac.mu.Lock() - channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) + channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", pretty.ToJSON(ac.curAddr), pretty.ToJSON(addrs)) + addrs = copyAddressesWithoutBalancerAttributes(addrs) if equalAddresses(ac.addrs, addrs) { ac.mu.Unlock() return @@ -1031,8 +984,8 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) { ac.cancel() ac.ctx, ac.cancel = context.WithCancel(ac.cc.ctx) - // We have to defer here because GracefulClose => Close => onClose, which - // requires locking ac.mu. + // We have to defer here because GracefulClose => onClose, which requires + // locking ac.mu. if ac.transport != nil { defer ac.transport.GracefulClose() ac.transport = nil @@ -1108,7 +1061,7 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { } func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) { - return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ + return cc.pickerWrapper.pick(ctx, failfast, balancer.PickInfo{ Ctx: ctx, FullMethodName: method, }) @@ -1137,35 +1090,25 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel } var newBalancerName string - if cc.sc != nil && cc.sc.lbConfig != nil { + if cc.sc == nil || (cc.sc.lbConfig == nil && cc.sc.LB == nil) { + // No service config or no LB policy specified in config. 
+ newBalancerName = PickFirstBalancerName + } else if cc.sc.lbConfig != nil { newBalancerName = cc.sc.lbConfig.name - } else { - var isGRPCLB bool - for _, a := range addrs { - if a.Type == resolver.GRPCLB { - isGRPCLB = true - break - } - } - if isGRPCLB { - newBalancerName = grpclbName - } else if cc.sc != nil && cc.sc.LB != nil { - newBalancerName = *cc.sc.LB - } else { - newBalancerName = PickFirstBalancerName - } + } else { // cc.sc.LB != nil + newBalancerName = *cc.sc.LB } cc.balancerWrapper.switchTo(newBalancerName) } func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { cc.mu.RLock() - r := cc.resolverWrapper + cc.resolverWrapper.resolveNow(o) cc.mu.RUnlock() - if r == nil { - return - } - go r.resolveNow(o) +} + +func (cc *ClientConn) resolveNowLocked(o resolver.ResolveNowOptions) { + cc.resolverWrapper.resolveNow(o) } // ResetConnectBackoff wakes up all subchannels in transient failure and causes @@ -1192,7 +1135,14 @@ func (cc *ClientConn) ResetConnectBackoff() { // Close tears down the ClientConn and all underlying connections. func (cc *ClientConn) Close() error { - defer cc.cancel() + defer func() { + cc.cancel() + <-cc.csMgr.pubSub.Done() + }() + + // Prevent calls to enter/exit idle immediately, and ensure we are not + // currently entering/exiting idle mode. + cc.idlenessMgr.Close() cc.mu.Lock() if cc.conns == nil { @@ -1200,34 +1150,22 @@ func (cc *ClientConn) Close() error { return ErrClientConnClosing } - for cc.idlenessState == ccIdlenessStateExitingIdle { - cc.exitIdleCond.Wait() - } - conns := cc.conns cc.conns = nil cc.csMgr.updateState(connectivity.Shutdown) - pWrapper := cc.blockingpicker - rWrapper := cc.resolverWrapper - bWrapper := cc.balancerWrapper - idlenessMgr := cc.idlenessMgr + // We can safely unlock and continue to access all fields now as + // cc.conns==nil, preventing any further operations on cc. cc.mu.Unlock() + cc.resolverWrapper.close() // The order of closing matters here since the balancer wrapper assumes the // picker is closed before it is closed. - if pWrapper != nil { - pWrapper.close() - } - if bWrapper != nil { - bWrapper.close() - } - if rWrapper != nil { - rWrapper.close() - } - if idlenessMgr != nil { - idlenessMgr.close() - } + cc.pickerWrapper.close() + cc.balancerWrapper.close() + + <-cc.resolverWrapper.serializer.Done() + <-cc.balancerWrapper.serializer.Done() for ac := range conns { ac.tearDown(ErrClientConnClosing) @@ -1248,7 +1186,7 @@ type addrConn struct { cc *ClientConn dopts dialOptions - acbw balancer.SubConn + acbw *acBalancerWrapper scopts balancer.NewSubConnOptions // transport is set when there's a viable transport (note: ac state may not be READY as LB channel @@ -1286,7 +1224,7 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) } else { channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v, last error: %s", s, lastErr) } - ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr) + ac.acbw.updateState(s, lastErr) } // adjustParams updates parameters used to create transports upon @@ -1336,12 +1274,14 @@ func (ac *addrConn) resetTransport() { if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil { ac.cc.resolveNow(resolver.ResolveNowOptions{}) - // After exhausting all addresses, the addrConn enters - // TRANSIENT_FAILURE. + ac.mu.Lock() if acCtx.Err() != nil { + // addrConn was torn down. + ac.mu.Unlock() return } - ac.mu.Lock() + // After exhausting all addresses, the addrConn enters + // TRANSIENT_FAILURE. 
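Why copyAddressesWithoutBalancerAttributes (shown earlier) matters: resolver.Address.Equal compares BalancerAttributes too, so a balancer annotation would make otherwise-identical addresses compare unequal. A sketch; weightKey is invented for the example:

package example

import (
	"google.golang.org/grpc/attributes"
	"google.golang.org/grpc/resolver"
)

type weightKey struct{}

// sameEndpoint demonstrates the comparison before and after clearing the
// balancer attributes, which is exactly what the ClientConn does above.
func sameEndpoint() (naive, cleared bool) {
	a := resolver.Address{Addr: "10.0.0.1:443"}
	b := resolver.Address{Addr: "10.0.0.1:443"}
	b.BalancerAttributes = attributes.New(weightKey{}, 5)

	naive = a.Equal(b) // false: BalancerAttributes differ

	b.BalancerAttributes = nil // what copyAddressesWithoutBalancerAttributes does
	cleared = a.Equal(b)       // true: same endpoint

	return naive, cleared
}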
ac.updateConnectivityState(connectivity.TransientFailure, err) // Backoff. @@ -1537,7 +1477,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { // Set up the health check helper functions. currentTr := ac.transport - newStream := func(method string) (interface{}, error) { + newStream := func(method string) (any, error) { ac.mu.Lock() if ac.transport != currentTr { ac.mu.Unlock() @@ -1625,16 +1565,7 @@ func (ac *addrConn) tearDown(err error) { ac.updateConnectivityState(connectivity.Shutdown, nil) ac.cancel() ac.curAddr = resolver.Address{} - if err == errConnDrain && curTr != nil { - // GracefulClose(...) may be executed multiple times when - // i) receiving multiple GoAway frames from the server; or - // ii) there are concurrent name resolver/Balancer triggered - // address removal and GoAway. - // We have to unlock and re-lock here because GracefulClose => Close => onClose, which requires locking ac.mu. - ac.mu.Unlock() - curTr.GracefulClose() - ac.mu.Lock() - } + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ Desc: "Subchannel deleted", Severity: channelz.CtInfo, @@ -1648,6 +1579,29 @@ func (ac *addrConn) tearDown(err error) { // being deleted right away. channelz.RemoveEntry(ac.channelzID) ac.mu.Unlock() + + // We have to release the lock before the call to GracefulClose/Close here + // because both of them call onClose(), which requires locking ac.mu. + if curTr != nil { + if err == errConnDrain { + // Close the transport gracefully when the subConn is being shutdown. + // + // GracefulClose() may be executed multiple times if: + // - multiple GoAway frames are received from the server + // - there are concurrent name resolver or balancer triggered + // address removal and GoAway + curTr.GracefulClose() + } else { + // Hard close the transport when the channel is entering idle or is + // being shutdown. In the case where the channel is being shutdown, + // closing of transports is also taken care of by cancelation of cc.ctx. + // But in the case where the channel is entering idle, we need to + // explicitly close the transports here. Instead of distinguishing + // between these two cases, it is simpler to close the transport + // unconditionally here. + curTr.Close(err) + } + } } func (ac *addrConn) getState() connectivity.State { @@ -1774,7 +1728,7 @@ func (cc *ClientConn) parseTargetAndFindResolver() error { if err != nil { channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err) } else { - channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) + channelz.Infof(logger, cc.channelzID, "parsed dial target is: %#v", parsedTarget) rb = cc.getResolver(parsedTarget.URL.Scheme) if rb != nil { cc.parsedTarget = parsedTarget @@ -1807,19 +1761,70 @@ func (cc *ClientConn) parseTargetAndFindResolver() error { } // parseTarget uses RFC 3986 semantics to parse the given target into a -// resolver.Target struct containing scheme, authority and url. Query -// params are stripped from the endpoint. +// resolver.Target struct containing url. Query params are stripped from the +// endpoint. 
 func parseTarget(target string) (resolver.Target, error) {
 	u, err := url.Parse(target)
 	if err != nil {
 		return resolver.Target{}, err
 	}
-	return resolver.Target{
-		Scheme:    u.Scheme,
-		Authority: u.Host,
-		URL:       *u,
-	}, nil
+	return resolver.Target{URL: *u}, nil
+}
+
+func encodeAuthority(authority string) string {
+	const upperhex = "0123456789ABCDEF"
+
+	// shouldEscape reports whether the given character must be
+	// percent-encoded. The characters valid in an authority are listed in:
+	// https://datatracker.ietf.org/doc/html/rfc3986#section-3.2
+	shouldEscape := func(c byte) bool {
+		// Alphanumeric characters are always allowed.
+		if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
+			return false
+		}
+		switch c {
+		case '-', '_', '.', '~': // Unreserved characters
+			return false
+		case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // Subdelim characters
+			return false
+		case ':', '[', ']', '@': // Authority related delimiters
+			return false
+		}
+		// Everything else must be escaped.
+		return true
+	}
+
+	hexCount := 0
+	for i := 0; i < len(authority); i++ {
+		c := authority[i]
+		if shouldEscape(c) {
+			hexCount++
+		}
+	}
+
+	if hexCount == 0 {
+		return authority
+	}
+
+	required := len(authority) + 2*hexCount
+	t := make([]byte, required)
+
+	j := 0
+	// This logic is a barebones version of escape in the Go net/url library.
+	for i := 0; i < len(authority); i++ {
+		switch c := authority[i]; {
+		case shouldEscape(c):
+			t[j] = '%'
+			t[j+1] = upperhex[c>>4]
+			t[j+2] = upperhex[c&15]
+			j += 3
+		default:
+			t[j] = authority[i]
+			j++
+		}
+	}
+	return string(t)
 }
 
 // Determine channel authority. The order of precedence is as follows:
@@ -1855,54 +1860,17 @@ func (cc *ClientConn) determineAuthority() error {
 	}
 
 	endpoint := cc.parsedTarget.Endpoint()
-	target := cc.target
-	switch {
-	case authorityFromDialOption != "":
+	if authorityFromDialOption != "" {
 		cc.authority = authorityFromDialOption
-	case authorityFromCreds != "":
+	} else if authorityFromCreds != "" {
 		cc.authority = authorityFromCreds
-	case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"):
-		// TODO: remove when the unix resolver implements optional interface to
-		// return channel authority.
-		cc.authority = "localhost"
-	case strings.HasPrefix(endpoint, ":"):
+	} else if auth, ok := cc.resolverBuilder.(resolver.AuthorityOverrider); ok {
+		cc.authority = auth.OverrideAuthority(cc.parsedTarget)
+	} else if strings.HasPrefix(endpoint, ":") {
 		cc.authority = "localhost" + endpoint
-	default:
-		// TODO: Define an optional interface on the resolver builder to return
-		// the channel authority given the user's dial target. For resolvers
-		// which don't implement this interface, we will use the endpoint from
-		// "scheme://authority/endpoint" as the default authority.
-		cc.authority = endpoint
+	} else {
+		cc.authority = encodeAuthority(endpoint)
	}
 	channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority)
 	return nil
 }
-
-// initResolverWrapper creates a ccResolverWrapper, which builds the name
-// resolver. This method grabs the lock to assign the newly built resolver
-// wrapper to the cc.resolverWrapper field.
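As an aside on the two helpers above: parseTarget now defers entirely to url.Parse, and encodeAuthority percent-encodes whatever the endpoint contains that RFC 3986 disallows in an authority. A minimal standalone sketch of the url.Parse semantics involved (the targets are illustrative):

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// A gRPC target such as "dns:///example.com:443" parses into scheme
	// "dns", an empty host, and path "/example.com:443"; the endpoint is
	// the path with its leading "/" trimmed.
	for _, target := range []string{
		"dns:///example.com:443",         // hypothetical endpoint
		"unix:///tmp/grpc.sock",          // unix socket target
		"passthrough:///localhost:50051", // bypasses name resolution
	} {
		u, err := url.Parse(target)
		if err != nil {
			panic(err)
		}
		fmt.Printf("scheme=%q host=%q path=%q\n", u.Scheme, u.Host, u.Path)
	}
}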
-func (cc *ClientConn) initResolverWrapper(creds credentials.TransportCredentials) error { - rw, err := newCCResolverWrapper(cc, ccResolverWrapperOpts{ - target: cc.parsedTarget, - builder: cc.resolverBuilder, - bOpts: resolver.BuildOptions{ - DisableServiceConfig: cc.dopts.disableServiceConfig, - DialCreds: creds, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - }, - channelzID: cc.channelzID, - }) - if err != nil { - return fmt.Errorf("failed to build resolver: %v", err) - } - // Resolver implementations may report state update or error inline when - // built (or right after), and this is handled in cc.updateResolverState. - // Also, an error from the resolver might lead to a re-resolution request - // from the balancer, which is handled in resolveNow() where - // `cc.resolverWrapper` is accessed. Hence, we need to hold the lock here. - cc.mu.Lock() - cc.resolverWrapper = rw - cc.mu.Unlock() - return nil -} diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go index 12977654..411e3dfd 100644 --- a/vendor/google.golang.org/grpc/codec.go +++ b/vendor/google.golang.org/grpc/codec.go @@ -27,8 +27,8 @@ import ( // omits the name/string, which vary between the two and are not needed for // anything besides the registry in the encoding package. type baseCodec interface { - Marshal(v interface{}) ([]byte, error) - Unmarshal(data []byte, v interface{}) error + Marshal(v any) ([]byte, error) + Unmarshal(data []byte, v any) error } var _ baseCodec = Codec(nil) @@ -41,9 +41,9 @@ var _ baseCodec = encoding.Codec(nil) // Deprecated: use encoding.Codec instead. type Codec interface { // Marshal returns the wire format of v. - Marshal(v interface{}) ([]byte, error) + Marshal(v any) ([]byte, error) // Unmarshal parses the wire format into v. - Unmarshal(data []byte, v interface{}) error + Unmarshal(data []byte, v any) error // String returns the name of the Codec implementation. This is unused by // gRPC. String() string diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go index 11b10618..08476ad1 100644 --- a/vendor/google.golang.org/grpc/codes/codes.go +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -25,7 +25,13 @@ import ( "strconv" ) -// A Code is an unsigned 32-bit error code as defined in the gRPC spec. +// A Code is a status code defined according to the [gRPC documentation]. +// +// Only the codes defined as consts in this package are valid codes. Do not use +// other code values. Behavior of other codes is implementation-specific and +// interoperability between implementations is not guaranteed. +// +// [gRPC documentation]: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md type Code uint32 const ( diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 877b7cd2..5dafd34e 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -44,10 +44,25 @@ func (t TLSInfo) AuthType() string { return "tls" } +// cipherSuiteLookup returns the string version of a TLS cipher suite ID. +func cipherSuiteLookup(cipherSuiteID uint16) string { + for _, s := range tls.CipherSuites() { + if s.ID == cipherSuiteID { + return s.Name + } + } + for _, s := range tls.InsecureCipherSuites() { + if s.ID == cipherSuiteID { + return s.Name + } + } + return fmt.Sprintf("unknown ID: %v", cipherSuiteID) +} + // GetSecurityValue returns security info requested by channelz. 
func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue { v := &TLSChannelzSecurityValue{ - StandardName: cipherSuiteLookup[t.State.CipherSuite], + StandardName: cipherSuiteLookup(t.State.CipherSuite), } // Currently there's no way to get LocalCertificate info from tls package. if len(t.State.PeerCertificates) > 0 { @@ -138,10 +153,39 @@ func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { return nil } +// The following cipher suites are forbidden for use with HTTP/2 by +// https://datatracker.ietf.org/doc/html/rfc7540#appendix-A +var tls12ForbiddenCipherSuites = map[uint16]struct{}{ + tls.TLS_RSA_WITH_AES_128_CBC_SHA: {}, + tls.TLS_RSA_WITH_AES_256_CBC_SHA: {}, + tls.TLS_RSA_WITH_AES_128_GCM_SHA256: {}, + tls.TLS_RSA_WITH_AES_256_GCM_SHA384: {}, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: {}, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: {}, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: {}, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: {}, +} + // NewTLS uses c to construct a TransportCredentials based on TLS. func NewTLS(c *tls.Config) TransportCredentials { tc := &tlsCreds{credinternal.CloneTLSConfig(c)} tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos) + // If the user did not configure a MinVersion and did not configure a + // MaxVersion < 1.2, use MinVersion=1.2, which is required by + // https://datatracker.ietf.org/doc/html/rfc7540#section-9.2 + if tc.config.MinVersion == 0 && (tc.config.MaxVersion == 0 || tc.config.MaxVersion >= tls.VersionTLS12) { + tc.config.MinVersion = tls.VersionTLS12 + } + // If the user did not configure CipherSuites, use all "secure" cipher + // suites reported by the TLS package, but remove some explicitly forbidden + // by https://datatracker.ietf.org/doc/html/rfc7540#appendix-A + if tc.config.CipherSuites == nil { + for _, cs := range tls.CipherSuites() { + if _, ok := tls12ForbiddenCipherSuites[cs.ID]; !ok { + tc.config.CipherSuites = append(tc.config.CipherSuites, cs.ID) + } + } + } return tc } @@ -205,32 +249,3 @@ type TLSChannelzSecurityValue struct { LocalCertificate []byte RemoteCertificate []byte } - -var cipherSuiteLookup = map[uint16]string{ - tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA", - tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", - tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA", - tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA", - tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256", - tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384", - tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA", - tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", - tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV", - 
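The net effect of the NewTLS changes above can be reproduced outside gRPC. A minimal sketch of the same defaulting logic (the printed count depends on the Go version):

package main

import (
	"crypto/tls"
	"fmt"
)

func main() {
	// HTTP/2 requires TLS 1.2 or newer (RFC 7540, section 9.2).
	cfg := &tls.Config{MinVersion: tls.VersionTLS12}

	// Keep the TLS package's "secure" suites, minus the ones RFC 7540
	// Appendix A forbids, mirroring tls12ForbiddenCipherSuites above.
	forbidden := map[uint16]bool{
		tls.TLS_RSA_WITH_AES_128_CBC_SHA:         true,
		tls.TLS_RSA_WITH_AES_256_CBC_SHA:         true,
		tls.TLS_RSA_WITH_AES_128_GCM_SHA256:      true,
		tls.TLS_RSA_WITH_AES_256_GCM_SHA384:      true,
		tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: true,
		tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: true,
		tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA:   true,
		tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:   true,
	}
	for _, cs := range tls.CipherSuites() {
		if !forbidden[cs.ID] {
			cfg.CipherSuites = append(cfg.CipherSuites, cs.ID)
		}
	}
	fmt.Printf("%d cipher suites kept for HTTP/2\n", len(cfg.CipherSuites))
}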
tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", - tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", - tls.TLS_AES_128_GCM_SHA256: "TLS_AES_128_GCM_SHA256", - tls.TLS_AES_256_GCM_SHA384: "TLS_AES_256_GCM_SHA384", - tls.TLS_CHACHA20_POLY1305_SHA256: "TLS_CHACHA20_POLY1305_SHA256", -} diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 15a3d510..ba242618 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -46,6 +46,7 @@ func init() { internal.WithBinaryLogger = withBinaryLogger internal.JoinDialOptions = newJoinDialOption internal.DisableGlobalDialOptions = newDisableGlobalDialOptions + internal.WithRecvBufferPool = withRecvBufferPool } // dialOptions configure a Dial call. dialOptions are set by the DialOption @@ -63,7 +64,6 @@ type dialOptions struct { block bool returnLastError bool timeout time.Duration - scChan <-chan ServiceConfig authority string binaryLogger binarylog.Logger copts transport.ConnectOptions @@ -78,6 +78,7 @@ type dialOptions struct { defaultServiceConfigRawJSON *string resolvers []resolver.Builder idleTimeout time.Duration + recvBufferPool SharedBufferPool } // DialOption configures how we set up the connection. @@ -138,6 +139,20 @@ func newJoinDialOption(opts ...DialOption) DialOption { return &joinDialOption{opts: opts} } +// WithSharedWriteBuffer allows reusing per-connection transport write buffer. +// If this option is set to true every connection will release the buffer after +// flushing the data on the wire. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithSharedWriteBuffer(val bool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.SharedWriteBuffer = val + }) +} + // WithWriteBufferSize determines how much data can be batched before doing a // write on the wire. The corresponding memory allocation for this buffer will // be twice the size to keep syscalls low. The default value for this buffer is @@ -235,19 +250,6 @@ func WithDecompressor(dc Decompressor) DialOption { }) } -// WithServiceConfig returns a DialOption which has a channel to read the -// service configuration. -// -// Deprecated: service config should be received through name resolver or via -// WithDefaultServiceConfig, as specified at -// https://github.com/grpc/grpc/blob/master/doc/service_config.md. Will be -// removed in a future 1.x release. -func WithServiceConfig(c <-chan ServiceConfig) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.scChan = c - }) -} - // WithConnectParams configures the ClientConn to use the provided ConnectParams // for creating and maintaining connections to servers. // @@ -398,6 +400,17 @@ func WithTimeout(d time.Duration) DialOption { // connections. If FailOnNonTempDialError() is set to true, and an error is // returned by f, gRPC checks the error's Temporary() method to decide if it // should try to reconnect to the network address. +// +// Note: All supported releases of Go (as of December 2023) override the OS +// defaults for TCP keepalive time and interval to 15s. 
To enable TCP keepalive +// with OS defaults for keepalive time and interval, use a net.Dialer that sets +// the KeepAlive field to a negative value, and sets the SO_KEEPALIVE socket +// option to true from the Control field. For a concrete example of how to do +// this, see internal.NetDialerWithTCPKeepalive(). +// +// For more information, please see [issue 23459] in the Go github repo. +// +// [issue 23459]: https://github.com/golang/go/issues/23459 func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.Dialer = f @@ -472,7 +485,7 @@ func FailOnNonTempDialError(f bool) DialOption { // the RPCs. func WithUserAgent(s string) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.copts.UserAgent = s + o.copts.UserAgent = s + " " + grpcUA }) } @@ -622,12 +635,16 @@ func withHealthCheckFunc(f internal.HealthChecker) DialOption { func defaultDialOptions() dialOptions { return dialOptions{ - healthCheckFunc: internal.HealthCheckFunc, copts: transport.ConnectOptions{ - WriteBufferSize: defaultWriteBufSize, ReadBufferSize: defaultReadBufSize, + WriteBufferSize: defaultWriteBufSize, UseProxy: true, + UserAgent: grpcUA, }, + bs: internalbackoff.DefaultExponential, + healthCheckFunc: internal.HealthCheckFunc, + idleTimeout: 30 * time.Minute, + recvBufferPool: nopBufferPool{}, } } @@ -664,8 +681,8 @@ func WithResolvers(rs ...resolver.Builder) DialOption { // channel will exit idle mode when the Connect() method is called or when an // RPC is initiated. // -// By default this feature is disabled, which can also be explicitly configured -// by passing zero to this function. +// A default timeout of 30 minutes will be used if this dial option is not set +// at dial time and idleness can be disabled by passing a timeout of zero. // // # Experimental // @@ -676,3 +693,26 @@ func WithIdleTimeout(d time.Duration) DialOption { o.idleTimeout = d }) } + +// WithRecvBufferPool returns a DialOption that configures the ClientConn +// to use the provided shared buffer pool for parsing incoming messages. Depending +// on the application's workload, this could result in reduced memory allocation. +// +// If you are unsure about how to implement a memory pool but want to utilize one, +// begin with grpc.NewSharedBufferPool. +// +// Note: The shared buffer pool feature will not be active if any of the following +// options are used: WithStatsHandler, EnableTracing, or binary logging. In such +// cases, the shared buffer pool will be ignored. +// +// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in +// v1.60.0 or later. +func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption { + return withRecvBufferPool(bufferPool) +} + +func withRecvBufferPool(bufferPool SharedBufferPool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.recvBufferPool = bufferPool + }) +} diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 07a58613..5ebf88d7 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -38,6 +38,10 @@ const Identity = "identity" // Compressor is used for compressing and decompressing when sending or // receiving messages. +// +// If a Compressor implements `DecompressedSize(compressedBytes []byte) int`, +// gRPC will invoke it to determine the size of the buffer allocated for the +// result of decompression. 
A return value of -1 indicates unknown size. type Compressor interface { // Compress writes the data written to wc to w after compressing it. If an // error occurs while initializing the compressor, that error is returned @@ -51,15 +55,6 @@ type Compressor interface { // coding header. The result must be static; the result cannot change // between calls. Name() string - // If a Compressor implements - // DecompressedSize(compressedBytes []byte) int, gRPC will call it - // to determine the size of the buffer allocated for the result of decompression. - // Return -1 to indicate unknown size. - // - // Experimental - // - // Notice: This API is EXPERIMENTAL and may be changed or removed in a - // later release. } var registeredCompressor = make(map[string]Compressor) @@ -90,9 +85,9 @@ func GetCompressor(name string) Compressor { // methods can be called from concurrent goroutines. type Codec interface { // Marshal returns the wire format of v. - Marshal(v interface{}) ([]byte, error) + Marshal(v any) ([]byte, error) // Unmarshal parses the wire format into v. - Unmarshal(data []byte, v interface{}) error + Unmarshal(data []byte, v any) error // Name returns the name of the Codec implementation. The returned string // will be used as part of content type in transmission. The result must be // static; the result cannot change between calls. diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go index 3009b35a..0ee3d3ba 100644 --- a/vendor/google.golang.org/grpc/encoding/proto/proto.go +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -37,7 +37,7 @@ func init() { // codec is a Codec implementation with protobuf. It is the default codec for gRPC. type codec struct{} -func (codec) Marshal(v interface{}) ([]byte, error) { +func (codec) Marshal(v any) ([]byte, error) { vv, ok := v.(proto.Message) if !ok { return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) @@ -45,7 +45,7 @@ func (codec) Marshal(v interface{}) ([]byte, error) { return proto.Marshal(vv) } -func (codec) Unmarshal(data []byte, v interface{}) error { +func (codec) Unmarshal(data []byte, v any) error { vv, ok := v.(proto.Message) if !ok { return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go index 8358dd6e..ac73c9ce 100644 --- a/vendor/google.golang.org/grpc/grpclog/component.go +++ b/vendor/google.golang.org/grpc/grpclog/component.go @@ -31,71 +31,71 @@ type componentData struct { var cache = map[string]*componentData{} -func (c *componentData) InfoDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) InfoDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.InfoDepth(depth+1, args...) } -func (c *componentData) WarningDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) WarningDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.WarningDepth(depth+1, args...) } -func (c *componentData) ErrorDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) ErrorDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) 
grpclog.ErrorDepth(depth+1, args...) } -func (c *componentData) FatalDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) FatalDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.FatalDepth(depth+1, args...) } -func (c *componentData) Info(args ...interface{}) { +func (c *componentData) Info(args ...any) { c.InfoDepth(1, args...) } -func (c *componentData) Warning(args ...interface{}) { +func (c *componentData) Warning(args ...any) { c.WarningDepth(1, args...) } -func (c *componentData) Error(args ...interface{}) { +func (c *componentData) Error(args ...any) { c.ErrorDepth(1, args...) } -func (c *componentData) Fatal(args ...interface{}) { +func (c *componentData) Fatal(args ...any) { c.FatalDepth(1, args...) } -func (c *componentData) Infof(format string, args ...interface{}) { +func (c *componentData) Infof(format string, args ...any) { c.InfoDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Warningf(format string, args ...interface{}) { +func (c *componentData) Warningf(format string, args ...any) { c.WarningDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Errorf(format string, args ...interface{}) { +func (c *componentData) Errorf(format string, args ...any) { c.ErrorDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Fatalf(format string, args ...interface{}) { +func (c *componentData) Fatalf(format string, args ...any) { c.FatalDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Infoln(args ...interface{}) { +func (c *componentData) Infoln(args ...any) { c.InfoDepth(1, args...) } -func (c *componentData) Warningln(args ...interface{}) { +func (c *componentData) Warningln(args ...any) { c.WarningDepth(1, args...) } -func (c *componentData) Errorln(args ...interface{}) { +func (c *componentData) Errorln(args ...any) { c.ErrorDepth(1, args...) } -func (c *componentData) Fatalln(args ...interface{}) { +func (c *componentData) Fatalln(args ...any) { c.FatalDepth(1, args...) } diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go index c8bb2be3..16928c9c 100644 --- a/vendor/google.golang.org/grpc/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -42,53 +42,53 @@ func V(l int) bool { } // Info logs to the INFO log. -func Info(args ...interface{}) { +func Info(args ...any) { grpclog.Logger.Info(args...) } // Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. -func Infof(format string, args ...interface{}) { +func Infof(format string, args ...any) { grpclog.Logger.Infof(format, args...) } // Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. -func Infoln(args ...interface{}) { +func Infoln(args ...any) { grpclog.Logger.Infoln(args...) } // Warning logs to the WARNING log. -func Warning(args ...interface{}) { +func Warning(args ...any) { grpclog.Logger.Warning(args...) } // Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. -func Warningf(format string, args ...interface{}) { +func Warningf(format string, args ...any) { grpclog.Logger.Warningf(format, args...) } // Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. -func Warningln(args ...interface{}) { +func Warningln(args ...any) { grpclog.Logger.Warningln(args...) } // Error logs to the ERROR log. 
-func Error(args ...interface{}) {
+func Error(args ...any) {
 	grpclog.Logger.Error(args...)
 }
 
 // Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf.
-func Errorf(format string, args ...interface{}) {
+func Errorf(format string, args ...any) {
 	grpclog.Logger.Errorf(format, args...)
 }
 
 // Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println.
-func Errorln(args ...interface{}) {
+func Errorln(args ...any) {
 	grpclog.Logger.Errorln(args...)
 }
 
 // Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print.
 // It calls os.Exit() with exit code 1.
-func Fatal(args ...interface{}) {
+func Fatal(args ...any) {
 	grpclog.Logger.Fatal(args...)
 	// Make sure fatal logs will exit.
 	os.Exit(1)
@@ -96,7 +96,7 @@ func Fatal(args ...interface{}) {
 
 // Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf.
 // It calls os.Exit() with exit code 1.
-func Fatalf(format string, args ...interface{}) {
+func Fatalf(format string, args ...any) {
 	grpclog.Logger.Fatalf(format, args...)
 	// Make sure fatal logs will exit.
 	os.Exit(1)
@@ -104,7 +104,7 @@ func Fatalf(format string, args ...interface{}) {
 
 // Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println.
 // It calls os.Exit() with exit code 1.
-func Fatalln(args ...interface{}) {
+func Fatalln(args ...any) {
 	grpclog.Logger.Fatalln(args...)
 	// Make sure fatal logs will exit.
 	os.Exit(1)
@@ -113,20 +113,20 @@ func Fatalln(args ...interface{}) {
 
 // Print prints to the logger. Arguments are handled in the manner of fmt.Print.
 //
 // Deprecated: use Info.
-func Print(args ...interface{}) {
+func Print(args ...any) {
 	grpclog.Logger.Info(args...)
 }
 
 // Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
 //
 // Deprecated: use Infof.
-func Printf(format string, args ...interface{}) {
+func Printf(format string, args ...any) {
 	grpclog.Logger.Infof(format, args...)
 }
 
 // Println prints to the logger. Arguments are handled in the manner of fmt.Println.
 //
 // Deprecated: use Infoln.
-func Println(args ...interface{}) {
+func Println(args ...any) {
 	grpclog.Logger.Infoln(args...)
 }
diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go
index ef06a482..b1674d82 100644
--- a/vendor/google.golang.org/grpc/grpclog/logger.go
+++ b/vendor/google.golang.org/grpc/grpclog/logger.go
@@ -24,12 +24,12 @@ import "google.golang.org/grpc/internal/grpclog"
 //
 // Deprecated: use LoggerV2.
 type Logger interface {
-	Fatal(args ...interface{})
-	Fatalf(format string, args ...interface{})
-	Fatalln(args ...interface{})
-	Print(args ...interface{})
-	Printf(format string, args ...interface{})
-	Println(args ...interface{})
+	Fatal(args ...any)
+	Fatalf(format string, args ...any)
+	Fatalln(args ...any)
+	Print(args ...any)
+	Printf(format string, args ...any)
+	Println(args ...any)
 }
 
 // SetLogger sets the logger that is used in grpc. Call only from
@@ -45,39 +45,39 @@ type loggerWrapper struct {
 	Logger
 }
 
-func (g *loggerWrapper) Info(args ...interface{}) {
+func (g *loggerWrapper) Info(args ...any) {
 	g.Logger.Print(args...)
 }
 
-func (g *loggerWrapper) Infoln(args ...interface{}) {
+func (g *loggerWrapper) Infoln(args ...any) {
 	g.Logger.Println(args...)
 }
 
-func (g *loggerWrapper) Infof(format string, args ...interface{}) {
+func (g *loggerWrapper) Infof(format string, args ...any) {
 	g.Logger.Printf(format, args...)
} -func (g *loggerWrapper) Warning(args ...interface{}) { +func (g *loggerWrapper) Warning(args ...any) { g.Logger.Print(args...) } -func (g *loggerWrapper) Warningln(args ...interface{}) { +func (g *loggerWrapper) Warningln(args ...any) { g.Logger.Println(args...) } -func (g *loggerWrapper) Warningf(format string, args ...interface{}) { +func (g *loggerWrapper) Warningf(format string, args ...any) { g.Logger.Printf(format, args...) } -func (g *loggerWrapper) Error(args ...interface{}) { +func (g *loggerWrapper) Error(args ...any) { g.Logger.Print(args...) } -func (g *loggerWrapper) Errorln(args ...interface{}) { +func (g *loggerWrapper) Errorln(args ...any) { g.Logger.Println(args...) } -func (g *loggerWrapper) Errorf(format string, args ...interface{}) { +func (g *loggerWrapper) Errorf(format string, args ...any) { g.Logger.Printf(format, args...) } diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go index 5de66e40..ecfd36d7 100644 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -33,35 +33,35 @@ import ( // LoggerV2 does underlying logging work for grpclog. type LoggerV2 interface { // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. - Info(args ...interface{}) + Info(args ...any) // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. - Infoln(args ...interface{}) + Infoln(args ...any) // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. - Infof(format string, args ...interface{}) + Infof(format string, args ...any) // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. - Warning(args ...interface{}) + Warning(args ...any) // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. - Warningln(args ...interface{}) + Warningln(args ...any) // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. - Warningf(format string, args ...interface{}) + Warningf(format string, args ...any) // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. - Error(args ...interface{}) + Error(args ...any) // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...interface{}) + Errorln(args ...any) // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - Errorf(format string, args ...interface{}) + Errorf(format string, args ...any) // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...interface{}) + Fatal(args ...any) // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...interface{}) + Fatalln(args ...any) // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalf(format string, args ...interface{}) + Fatalf(format string, args ...any) // V reports whether verbosity level l is at least the requested verbose level. 
V(l int) bool } @@ -182,53 +182,53 @@ func (g *loggerT) output(severity int, s string) { g.m[severity].Output(2, string(b)) } -func (g *loggerT) Info(args ...interface{}) { +func (g *loggerT) Info(args ...any) { g.output(infoLog, fmt.Sprint(args...)) } -func (g *loggerT) Infoln(args ...interface{}) { +func (g *loggerT) Infoln(args ...any) { g.output(infoLog, fmt.Sprintln(args...)) } -func (g *loggerT) Infof(format string, args ...interface{}) { +func (g *loggerT) Infof(format string, args ...any) { g.output(infoLog, fmt.Sprintf(format, args...)) } -func (g *loggerT) Warning(args ...interface{}) { +func (g *loggerT) Warning(args ...any) { g.output(warningLog, fmt.Sprint(args...)) } -func (g *loggerT) Warningln(args ...interface{}) { +func (g *loggerT) Warningln(args ...any) { g.output(warningLog, fmt.Sprintln(args...)) } -func (g *loggerT) Warningf(format string, args ...interface{}) { +func (g *loggerT) Warningf(format string, args ...any) { g.output(warningLog, fmt.Sprintf(format, args...)) } -func (g *loggerT) Error(args ...interface{}) { +func (g *loggerT) Error(args ...any) { g.output(errorLog, fmt.Sprint(args...)) } -func (g *loggerT) Errorln(args ...interface{}) { +func (g *loggerT) Errorln(args ...any) { g.output(errorLog, fmt.Sprintln(args...)) } -func (g *loggerT) Errorf(format string, args ...interface{}) { +func (g *loggerT) Errorf(format string, args ...any) { g.output(errorLog, fmt.Sprintf(format, args...)) } -func (g *loggerT) Fatal(args ...interface{}) { +func (g *loggerT) Fatal(args ...any) { g.output(fatalLog, fmt.Sprint(args...)) os.Exit(1) } -func (g *loggerT) Fatalln(args ...interface{}) { +func (g *loggerT) Fatalln(args ...any) { g.output(fatalLog, fmt.Sprintln(args...)) os.Exit(1) } -func (g *loggerT) Fatalf(format string, args ...interface{}) { +func (g *loggerT) Fatalf(format string, args ...any) { g.output(fatalLog, fmt.Sprintf(format, args...)) os.Exit(1) } @@ -248,11 +248,11 @@ func (g *loggerT) V(l int) bool { type DepthLoggerV2 interface { LoggerV2 // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. - InfoDepth(depth int, args ...interface{}) + InfoDepth(depth int, args ...any) // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. - WarningDepth(depth int, args ...interface{}) + WarningDepth(depth int, args ...any) // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. - ErrorDepth(depth int, args ...interface{}) + ErrorDepth(depth int, args ...any) // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. - FatalDepth(depth int, args ...interface{}) + FatalDepth(depth int, args ...any) } diff --git a/vendor/google.golang.org/grpc/idle.go b/vendor/google.golang.org/grpc/idle.go deleted file mode 100644 index dc3dc72f..00000000 --- a/vendor/google.golang.org/grpc/idle.go +++ /dev/null @@ -1,287 +0,0 @@ -/* - * - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "fmt" - "math" - "sync" - "sync/atomic" - "time" -) - -// For overriding in unit tests. -var timeAfterFunc = func(d time.Duration, f func()) *time.Timer { - return time.AfterFunc(d, f) -} - -// idlenessEnforcer is the functionality provided by grpc.ClientConn to enter -// and exit from idle mode. -type idlenessEnforcer interface { - exitIdleMode() error - enterIdleMode() error -} - -// idlenessManager defines the functionality required to track RPC activity on a -// channel. -type idlenessManager interface { - onCallBegin() error - onCallEnd() - close() -} - -type noopIdlenessManager struct{} - -func (noopIdlenessManager) onCallBegin() error { return nil } -func (noopIdlenessManager) onCallEnd() {} -func (noopIdlenessManager) close() {} - -// idlenessManagerImpl implements the idlenessManager interface. It uses atomic -// operations to synchronize access to shared state and a mutex to guarantee -// mutual exclusion in a critical section. -type idlenessManagerImpl struct { - // State accessed atomically. - lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed. - activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there. - activeSinceLastTimerCheck int32 // Boolean; True if there was an RPC since the last timer callback. - closed int32 // Boolean; True when the manager is closed. - - // Can be accessed without atomics or mutex since these are set at creation - // time and read-only after that. - enforcer idlenessEnforcer // Functionality provided by grpc.ClientConn. - timeout int64 // Idle timeout duration nanos stored as an int64. - - // idleMu is used to guarantee mutual exclusion in two scenarios: - // - Opposing intentions: - // - a: Idle timeout has fired and handleIdleTimeout() is trying to put - // the channel in idle mode because the channel has been inactive. - // - b: At the same time an RPC is made on the channel, and onCallBegin() - // is trying to prevent the channel from going idle. - // - Competing intentions: - // - The channel is in idle mode and there are multiple RPCs starting at - // the same time, all trying to move the channel out of idle. Only one - // of them should succeed in doing so, while the other RPCs should - // piggyback on the first one and be successfully handled. - idleMu sync.RWMutex - actuallyIdle bool - timer *time.Timer -} - -// newIdlenessManager creates a new idleness manager implementation for the -// given idle timeout. -func newIdlenessManager(enforcer idlenessEnforcer, idleTimeout time.Duration) idlenessManager { - if idleTimeout == 0 { - return noopIdlenessManager{} - } - - i := &idlenessManagerImpl{ - enforcer: enforcer, - timeout: int64(idleTimeout), - } - i.timer = timeAfterFunc(idleTimeout, i.handleIdleTimeout) - return i -} - -// resetIdleTimer resets the idle timer to the given duration. This method -// should only be called from the timer callback. -func (i *idlenessManagerImpl) resetIdleTimer(d time.Duration) { - i.idleMu.Lock() - defer i.idleMu.Unlock() - - if i.timer == nil { - // Only close sets timer to nil. We are done. - return - } - - // It is safe to ignore the return value from Reset() because this method is - // only ever called from the timer callback, which means the timer has - // already fired. 
- i.timer.Reset(d) -} - -// handleIdleTimeout is the timer callback that is invoked upon expiry of the -// configured idle timeout. The channel is considered inactive if there are no -// ongoing calls and no RPC activity since the last time the timer fired. -func (i *idlenessManagerImpl) handleIdleTimeout() { - if i.isClosed() { - return - } - - if atomic.LoadInt32(&i.activeCallsCount) > 0 { - i.resetIdleTimer(time.Duration(i.timeout)) - return - } - - // There has been activity on the channel since we last got here. Reset the - // timer and return. - if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 { - // Set the timer to fire after a duration of idle timeout, calculated - // from the time the most recent RPC completed. - atomic.StoreInt32(&i.activeSinceLastTimerCheck, 0) - i.resetIdleTimer(time.Duration(atomic.LoadInt64(&i.lastCallEndTime) + i.timeout - time.Now().UnixNano())) - return - } - - // This CAS operation is extremely likely to succeed given that there has - // been no activity since the last time we were here. Setting the - // activeCallsCount to -math.MaxInt32 indicates to onCallBegin() that the - // channel is either in idle mode or is trying to get there. - if !atomic.CompareAndSwapInt32(&i.activeCallsCount, 0, -math.MaxInt32) { - // This CAS operation can fail if an RPC started after we checked for - // activity at the top of this method, or one was ongoing from before - // the last time we were here. In both case, reset the timer and return. - i.resetIdleTimer(time.Duration(i.timeout)) - return - } - - // Now that we've set the active calls count to -math.MaxInt32, it's time to - // actually move to idle mode. - if i.tryEnterIdleMode() { - // Successfully entered idle mode. No timer needed until we exit idle. - return - } - - // Failed to enter idle mode due to a concurrent RPC that kept the channel - // active, or because of an error from the channel. Undo the attempt to - // enter idle, and reset the timer to try again later. - atomic.AddInt32(&i.activeCallsCount, math.MaxInt32) - i.resetIdleTimer(time.Duration(i.timeout)) -} - -// tryEnterIdleMode instructs the channel to enter idle mode. But before -// that, it performs a last minute check to ensure that no new RPC has come in, -// making the channel active. -// -// Return value indicates whether or not the channel moved to idle mode. -// -// Holds idleMu which ensures mutual exclusion with exitIdleMode. -func (i *idlenessManagerImpl) tryEnterIdleMode() bool { - i.idleMu.Lock() - defer i.idleMu.Unlock() - - if atomic.LoadInt32(&i.activeCallsCount) != -math.MaxInt32 { - // We raced and lost to a new RPC. Very rare, but stop entering idle. - return false - } - if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 { - // An very short RPC could have come in (and also finished) after we - // checked for calls count and activity in handleIdleTimeout(), but - // before the CAS operation. So, we need to check for activity again. - return false - } - - // No new RPCs have come in since we last set the active calls count value - // -math.MaxInt32 in the timer callback. And since we have the lock, it is - // safe to enter idle mode now. - if err := i.enforcer.enterIdleMode(); err != nil { - logger.Errorf("Failed to enter idle mode: %v", err) - return false - } - - // Successfully entered idle mode. - i.actuallyIdle = true - return true -} - -// onCallBegin is invoked at the start of every RPC. 
-func (i *idlenessManagerImpl) onCallBegin() error { - if i.isClosed() { - return nil - } - - if atomic.AddInt32(&i.activeCallsCount, 1) > 0 { - // Channel is not idle now. Set the activity bit and allow the call. - atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1) - return nil - } - - // Channel is either in idle mode or is in the process of moving to idle - // mode. Attempt to exit idle mode to allow this RPC. - if err := i.exitIdleMode(); err != nil { - // Undo the increment to calls count, and return an error causing the - // RPC to fail. - atomic.AddInt32(&i.activeCallsCount, -1) - return err - } - - atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1) - return nil -} - -// exitIdleMode instructs the channel to exit idle mode. -// -// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode. -func (i *idlenessManagerImpl) exitIdleMode() error { - i.idleMu.Lock() - defer i.idleMu.Unlock() - - if !i.actuallyIdle { - // This can happen in two scenarios: - // - handleIdleTimeout() set the calls count to -math.MaxInt32 and called - // tryEnterIdleMode(). But before the latter could grab the lock, an RPC - // came in and onCallBegin() noticed that the calls count is negative. - // - Channel is in idle mode, and multiple new RPCs come in at the same - // time, all of them notice a negative calls count in onCallBegin and get - // here. The first one to get the lock would got the channel to exit idle. - // - // Either way, nothing to do here. - return nil - } - - if err := i.enforcer.exitIdleMode(); err != nil { - return fmt.Errorf("channel failed to exit idle mode: %v", err) - } - - // Undo the idle entry process. This also respects any new RPC attempts. - atomic.AddInt32(&i.activeCallsCount, math.MaxInt32) - i.actuallyIdle = false - - // Start a new timer to fire after the configured idle timeout. - i.timer = timeAfterFunc(time.Duration(i.timeout), i.handleIdleTimeout) - return nil -} - -// onCallEnd is invoked at the end of every RPC. -func (i *idlenessManagerImpl) onCallEnd() { - if i.isClosed() { - return - } - - // Record the time at which the most recent call finished. - atomic.StoreInt64(&i.lastCallEndTime, time.Now().UnixNano()) - - // Decrement the active calls count. This count can temporarily go negative - // when the timer callback is in the process of moving the channel to idle - // mode, but one or more RPCs come in and complete before the timer callback - // can get done with the process of moving to idle mode. - atomic.AddInt32(&i.activeCallsCount, -1) -} - -func (i *idlenessManagerImpl) isClosed() bool { - return atomic.LoadInt32(&i.closed) == 1 -} - -func (i *idlenessManagerImpl) close() { - atomic.StoreInt32(&i.closed, 1) - - i.idleMu.Lock() - i.timer.Stop() - i.timer = nil - i.idleMu.Unlock() -} diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go index bb96ef57..877d78fc 100644 --- a/vendor/google.golang.org/grpc/interceptor.go +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -23,7 +23,7 @@ import ( ) // UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. -type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error +type UnaryInvoker func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error // UnaryClientInterceptor intercepts the execution of a unary RPC on the client. 
// Unary interceptors can be specified as a DialOption, using @@ -40,7 +40,7 @@ type UnaryInvoker func(ctx context.Context, method string, req, reply interface{ // defaults from the ClientConn as well as per-call options. // // The returned error must be compatible with the status package. -type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error +type UnaryClientInterceptor func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error // Streamer is called by StreamClientInterceptor to create a ClientStream. type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) @@ -66,7 +66,7 @@ type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *Cli // server side. All per-rpc information may be mutated by the interceptor. type UnaryServerInfo struct { // Server is the service implementation the user provides. This is read-only. - Server interface{} + Server any // FullMethod is the full RPC method string, i.e., /package.service/method. FullMethod string } @@ -78,13 +78,13 @@ type UnaryServerInfo struct { // status package, or be one of the context errors. Otherwise, gRPC will use // codes.Unknown as the status code and err.Error() as the status message of the // RPC. -type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) +type UnaryHandler func(ctx context.Context, req any) (any, error) // UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info // contains all the information of this RPC the interceptor can operate on. And handler is the wrapper // of the service method implementation. It is the responsibility of the interceptor to invoke handler // to complete the RPC. -type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error) +type UnaryServerInterceptor func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (resp any, err error) // StreamServerInfo consists of various information about a streaming RPC on // server side. All per-rpc information may be mutated by the interceptor. @@ -101,4 +101,4 @@ type StreamServerInfo struct { // info contains all the information of this RPC the interceptor can operate on. And handler is the // service method implementation. It is the responsibility of the interceptor to invoke handler to // complete the RPC. -type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error +type StreamServerInterceptor func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go index 5fc0ee3d..fed1c011 100644 --- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go +++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -23,6 +23,8 @@ package backoff import ( + "context" + "errors" "time" grpcbackoff "google.golang.org/grpc/backoff" @@ -71,3 +73,37 @@ func (bc Exponential) Backoff(retries int) time.Duration { } return time.Duration(backoff) } + +// ErrResetBackoff is the error to be returned by the function executed by RunF, +// to instruct the latter to reset its backoff state. 
+var ErrResetBackoff = errors.New("reset backoff state") + +// RunF provides a convenient way to run a function f repeatedly until the +// context expires or f returns a non-nil error that is not ErrResetBackoff. +// When f returns ErrResetBackoff, RunF continues to run f, but resets its +// backoff state before doing so. backoff accepts an integer representing the +// number of retries, and returns the amount of time to backoff. +func RunF(ctx context.Context, f func() error, backoff func(int) time.Duration) { + attempt := 0 + timer := time.NewTimer(0) + for ctx.Err() == nil { + select { + case <-timer.C: + case <-ctx.Done(): + timer.Stop() + return + } + + err := f() + if errors.Is(err, ErrResetBackoff) { + timer.Reset(0) + attempt = 0 + continue + } + if err != nil { + return + } + timer.Reset(backoff(attempt)) + attempt++ + } +} diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go index 08666f62..3c594e6e 100644 --- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -200,8 +200,8 @@ func (gsb *Balancer) ExitIdle() { } } -// UpdateSubConnState forwards the update to the appropriate child. -func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { +// updateSubConnState forwards the update to the appropriate child. +func (gsb *Balancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState, cb func(balancer.SubConnState)) { gsb.currentMu.Lock() defer gsb.currentMu.Unlock() gsb.mu.Lock() @@ -214,13 +214,26 @@ func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubC } else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] { balToUpdate = gsb.balancerPending } - gsb.mu.Unlock() if balToUpdate == nil { // SubConn belonged to a stale lb policy that has not yet fully closed, // or the balancer was already closed. + gsb.mu.Unlock() return } - balToUpdate.UpdateSubConnState(sc, state) + if state.ConnectivityState == connectivity.Shutdown { + delete(balToUpdate.subconns, sc) + } + gsb.mu.Unlock() + if cb != nil { + cb(state) + } else { + balToUpdate.UpdateSubConnState(sc, state) + } +} + +// UpdateSubConnState forwards the update to the appropriate child. +func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + gsb.updateSubConnState(sc, state, nil) } // Close closes any active child balancers. @@ -242,7 +255,7 @@ func (gsb *Balancer) Close() { // // It implements the balancer.ClientConn interface and is passed down in that // capacity to the wrapped balancer. It maintains a set of subConns created by -// the wrapped balancer and calls from the latter to create/update/remove +// the wrapped balancer and calls from the latter to create/update/shutdown // SubConns update this set before being forwarded to the parent ClientConn. // State updates from the wrapped balancer can result in invocation of the // graceful switch logic. 
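Returning to the backoff helper added above: RunF lives in an internal package, so the retry-with-reset pattern it implements is sketched here standalone. The names runWithBackoff and errReset are illustrative, not gRPC API:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

var errReset = errors.New("reset backoff state")

// runWithBackoff reruns f until the context ends or f returns a non-nil
// error other than errReset; errReset restarts the backoff sequence, just
// as ErrResetBackoff does for RunF.
func runWithBackoff(ctx context.Context, f func() error, backoff func(int) time.Duration) {
	attempt := 0
	timer := time.NewTimer(0)
	defer timer.Stop()
	for {
		select {
		case <-timer.C:
		case <-ctx.Done():
			return
		}
		switch err := f(); {
		case errors.Is(err, errReset):
			attempt = 0
			timer.Reset(0)
		case err != nil:
			return
		default:
			timer.Reset(backoff(attempt))
			attempt++
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 55*time.Millisecond)
	defer cancel()
	runWithBackoff(ctx,
		func() error { fmt.Println("attempt"); return nil },
		func(retries int) time.Duration { return 10 * time.Millisecond })
}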
@@ -254,21 +267,10 @@ type balancerWrapper struct {
 	subconns map[balancer.SubConn]bool // subconns created by this balancer
 }
 
-func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
-	if state.ConnectivityState == connectivity.Shutdown {
-		bw.gsb.mu.Lock()
-		delete(bw.subconns, sc)
-		bw.gsb.mu.Unlock()
-	}
-	// There is no need to protect this read with a mutex, as the write to the
-	// Balancer field happens in SwitchTo, which completes before this can be
-	// called.
-	bw.Balancer.UpdateSubConnState(sc, state)
-}
-
-// Close closes the underlying LB policy and removes the subconns it created. bw
-// must not be referenced via balancerCurrent or balancerPending in gsb when
-// called. gsb.mu must not be held. Does not panic with a nil receiver.
+// Close closes the underlying LB policy and shuts down the subconns it
+// created. bw must not be referenced via balancerCurrent or balancerPending in
+// gsb when called. gsb.mu must not be held. Does not panic with a nil
+// receiver.
 func (bw *balancerWrapper) Close() {
 	// before Close is called.
 	if bw == nil {
@@ -281,7 +283,7 @@ func (bw *balancerWrapper) Close() {
 	bw.Balancer.Close()
 	bw.gsb.mu.Lock()
 	for sc := range bw.subconns {
-		bw.gsb.cc.RemoveSubConn(sc)
+		sc.Shutdown()
 	}
 	bw.gsb.mu.Unlock()
 }
@@ -335,13 +337,16 @@ func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.Ne
 	}
 	bw.gsb.mu.Unlock()
 
+	var sc balancer.SubConn
+	oldListener := opts.StateListener
+	opts.StateListener = func(state balancer.SubConnState) { bw.gsb.updateSubConnState(sc, state, oldListener) }
 	sc, err := bw.gsb.cc.NewSubConn(addrs, opts)
 	if err != nil {
 		return nil, err
 	}
 	bw.gsb.mu.Lock()
 	if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call
-		bw.gsb.cc.RemoveSubConn(sc)
+		sc.Shutdown()
 		bw.gsb.mu.Unlock()
 		return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw)
 	}
@@ -360,13 +365,9 @@ func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) {
 }
 
 func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) {
-	bw.gsb.mu.Lock()
-	if !bw.gsb.balancerCurrentOrPending(bw) {
-		bw.gsb.mu.Unlock()
-		return
-	}
-	bw.gsb.mu.Unlock()
-	bw.gsb.cc.RemoveSubConn(sc)
+	// Note: existing third party balancers may call this, so it must remain
+	// until RemoveSubConn is fully removed.
+	sc.Shutdown()
 }
 
 func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
diff --git a/vendor/google.golang.org/grpc/internal/balancerload/load.go b/vendor/google.golang.org/grpc/internal/balancerload/load.go
index 3a905d96..94a08d68 100644
--- a/vendor/google.golang.org/grpc/internal/balancerload/load.go
+++ b/vendor/google.golang.org/grpc/internal/balancerload/load.go
@@ -25,7 +25,7 @@ import (
 // Parser converts loads from metadata into a concrete type.
 type Parser interface {
 	// Parse parses loads from metadata.
-	Parse(md metadata.MD) interface{}
+	Parse(md metadata.MD) any
 }
 
 var parser Parser
@@ -38,7 +38,7 @@ func SetParser(lr Parser) {
 }
 
 // Parse calls parser.Parse().
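The StateListener wiring in gracefulswitch's NewSubConn above is the pattern that replaces the deprecated UpdateSubConnState callback: wrap any listener already present in the options and chain to it. A hedged sketch of the same idea in a hypothetical balancer helper (the logging and the function name are not gRPC API):

package example

import (
	"log"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/resolver"
)

// newLoggedSubConn creates a SubConn whose state updates arrive through
// opts.StateListener. An existing listener, if any, still runs after the
// added logging, mirroring the oldListener handling above.
func newLoggedSubConn(cc balancer.ClientConn, addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
	oldListener := opts.StateListener
	opts.StateListener = func(state balancer.SubConnState) {
		log.Printf("subconn state -> %v", state.ConnectivityState)
		if oldListener != nil {
			oldListener(state)
		}
	}
	return cc.NewSubConn(addrs, opts)
}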
-func Parse(md metadata.MD) interface{} { +func Parse(md metadata.MD) any { if parser == nil { return nil } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index 6c3f6322..0f31274a 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -230,7 +230,7 @@ type ClientMessage struct { OnClientSide bool // Message can be a proto.Message or []byte. Other messages formats are not // supported. - Message interface{} + Message any } func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry { @@ -270,7 +270,7 @@ type ServerMessage struct { OnClientSide bool // Message can be a proto.Message or []byte. Other messages formats are not // supported. - Message interface{} + Message any } func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry { diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go index 81c2f5fd..11f91668 100644 --- a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go +++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go @@ -18,7 +18,10 @@ // Package buffer provides an implementation of an unbounded buffer. package buffer -import "sync" +import ( + "errors" + "sync" +) // Unbounded is an implementation of an unbounded buffer which does not use // extra goroutines. This is typically used for passing updates from one entity @@ -28,49 +31,50 @@ import "sync" // the underlying mutex used for synchronization. // // Unbounded supports values of any type to be stored in it by using a channel -// of `interface{}`. This means that a call to Put() incurs an extra memory -// allocation, and also that users need a type assertion while reading. For -// performance critical code paths, using Unbounded is strongly discouraged and -// defining a new type specific implementation of this buffer is preferred. See +// of `any`. This means that a call to Put() incurs an extra memory allocation, +// and also that users need a type assertion while reading. For performance +// critical code paths, using Unbounded is strongly discouraged and defining a +// new type specific implementation of this buffer is preferred. See // internal/transport/transport.go for an example of this. type Unbounded struct { - c chan interface{} + c chan any closed bool + closing bool mu sync.Mutex - backlog []interface{} + backlog []any } // NewUnbounded returns a new instance of Unbounded. func NewUnbounded() *Unbounded { - return &Unbounded{c: make(chan interface{}, 1)} + return &Unbounded{c: make(chan any, 1)} } +var errBufferClosed = errors.New("Put called on closed buffer.Unbounded") + // Put adds t to the unbounded buffer. -func (b *Unbounded) Put(t interface{}) { +func (b *Unbounded) Put(t any) error { b.mu.Lock() defer b.mu.Unlock() - if b.closed { - return + if b.closing { + return errBufferClosed } if len(b.backlog) == 0 { select { case b.c <- t: - return + return nil default: } } b.backlog = append(b.backlog, t) + return nil } -// Load sends the earliest buffered data, if any, onto the read channel -// returned by Get(). Users are expected to call this every time they read a +// Load sends the earliest buffered data, if any, onto the read channel returned +// by Get(). Users are expected to call this every time they successfully read a // value from the read channel. 
func (b *Unbounded) Load() { b.mu.Lock() defer b.mu.Unlock() - if b.closed { - return - } if len(b.backlog) > 0 { select { case b.c <- b.backlog[0]: @@ -78,6 +82,8 @@ func (b *Unbounded) Load() { b.backlog = b.backlog[1:] default: } + } else if b.closing && !b.closed { + close(b.c) } } @@ -88,18 +94,23 @@ func (b *Unbounded) Load() { // send the next buffered value onto the channel if there is any. // // If the unbounded buffer is closed, the read channel returned by this method -// is closed. -func (b *Unbounded) Get() <-chan interface{} { +// is closed after all data is drained. +func (b *Unbounded) Get() <-chan any { return b.c } -// Close closes the unbounded buffer. +// Close closes the unbounded buffer. No subsequent data may be Put(), and the +// channel returned from Get() will be closed after all the data is read and +// Load() is called for the final time. func (b *Unbounded) Close() { b.mu.Lock() defer b.mu.Unlock() - if b.closed { + if b.closing { return } - b.closed = true - close(b.c) + b.closing = true + if len(b.backlog) == 0 { + b.closed = true + close(b.c) + } } diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index 777cbcd7..fc094f34 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -24,15 +24,14 @@ package channelz import ( - "context" "errors" - "fmt" "sort" "sync" "sync/atomic" "time" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" ) const ( @@ -40,8 +39,11 @@ const ( ) var ( - db dbWrapper - idGen idGenerator + // IDGen is the global channelz entity ID generator. It should not be used + // outside this package except by tests. + IDGen IDGenerator + + db dbWrapper // EntryPerPage defines the number of channelz entries to be shown on a web page. EntryPerPage = int64(50) curState int32 @@ -52,14 +54,20 @@ var ( func TurnOn() { if !IsOn() { db.set(newChannelMap()) - idGen.reset() + IDGen.Reset() atomic.StoreInt32(&curState, 1) } } +func init() { + internal.ChannelzTurnOffForTesting = func() { + atomic.StoreInt32(&curState, 0) + } +} + // IsOn returns whether channelz data collection is on. func IsOn() bool { - return atomic.CompareAndSwapInt32(&curState, 1, 1) + return atomic.LoadInt32(&curState) == 1 } // SetMaxTraceEntry sets maximum number of trace entry per entity (i.e. channel/subchannel). @@ -97,43 +105,6 @@ func (d *dbWrapper) get() *channelMap { return d.DB } -// NewChannelzStorageForTesting initializes channelz data storage and id -// generator for testing purposes. -// -// Returns a cleanup function to be invoked by the test, which waits for up to -// 10s for all channelz state to be reset by the grpc goroutines when those -// entities get closed. This cleanup function helps with ensuring that tests -// don't mess up each other. 
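With the revised Unbounded semantics above, Close() no longer closes the channel immediately: the channel closes only after the backlog has been drained via Load(), and Put() now reports an error instead of silently dropping data once Close() has run. A usage sketch, assuming compilation inside the grpc module (the buffer package is internal); process is a placeholder:

package demo

import "google.golang.org/grpc/internal/buffer"

// consume drains b until it is closed and fully emptied.
func consume(b *buffer.Unbounded, process func(any)) {
	// Get()'s channel now closes only after Close() has been called
	// and the backlog has been drained, so a plain range loop observes
	// every buffered value exactly once.
	for v := range b.Get() {
		b.Load() // required after every successful read
		process(v)
	}
}

// produce reports whether v was accepted; Put fails once Close has run.
func produce(b *buffer.Unbounded, v any) bool {
	return b.Put(v) == nil
}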
-func NewChannelzStorageForTesting() (cleanup func() error) { - db.set(newChannelMap()) - idGen.reset() - - return func() error { - cm := db.get() - if cm == nil { - return nil - } - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - ticker := time.NewTicker(10 * time.Millisecond) - defer ticker.Stop() - for { - cm.mu.RLock() - topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets := len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets) - cm.mu.RUnlock() - - if err := ctx.Err(); err != nil { - return fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets) - } - if topLevelChannels == 0 && servers == 0 && channels == 0 && subChannels == 0 && listenSockets == 0 && normalSockets == 0 { - return nil - } - <-ticker.C - } - } -} - // GetTopChannels returns a slice of top channel's ChannelMetric, along with a // boolean indicating whether there's more top channels to be queried for. // @@ -193,7 +164,7 @@ func GetServer(id int64) *ServerMetric { // // If channelz is not turned ON, the channelz database is not mutated. func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier { - id := idGen.genID() + id := IDGen.genID() var parent int64 isTopChannel := true if pid != nil { @@ -229,7 +200,7 @@ func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, er if pid == nil { return nil, errors.New("a SubChannel's parent id cannot be nil") } - id := idGen.genID() + id := IDGen.genID() if !IsOn() { return newIdentifer(RefSubChannel, id, pid), nil } @@ -251,7 +222,7 @@ func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, er // // If channelz is not turned ON, the channelz database is not mutated. func RegisterServer(s Server, ref string) *Identifier { - id := idGen.genID() + id := IDGen.genID() if !IsOn() { return newIdentifer(RefServer, id, nil) } @@ -277,7 +248,7 @@ func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, e if pid == nil { return nil, errors.New("a ListenSocket's parent id cannot be 0") } - id := idGen.genID() + id := IDGen.genID() if !IsOn() { return newIdentifer(RefListenSocket, id, pid), nil } @@ -297,7 +268,7 @@ func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, e if pid == nil { return nil, errors.New("a NormalSocket's parent id cannot be 0") } - id := idGen.genID() + id := IDGen.genID() if !IsOn() { return newIdentifer(RefNormalSocket, id, pid), nil } @@ -776,14 +747,17 @@ func (c *channelMap) GetServer(id int64) *ServerMetric { return sm } -type idGenerator struct { +// IDGenerator is an incrementing atomic that tracks IDs for channelz entities. +type IDGenerator struct { id int64 } -func (i *idGenerator) reset() { +// Reset resets the generated ID back to zero. Should only be used at +// initialization or by tests sensitive to the ID number. 
+func (i *IDGenerator) Reset() { atomic.StoreInt64(&i.id, 0) } -func (i *idGenerator) genID() int64 { +func (i *IDGenerator) genID() int64 { return atomic.AddInt64(&i.id, 1) } diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go index 8e13a3d2..f89e6f77 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/logging.go +++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -31,7 +31,7 @@ func withParens(id *Identifier) string { } // Info logs and adds a trace event if channelz is on. -func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { +func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprint(args...), Severity: CtInfo, @@ -39,7 +39,7 @@ func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { } // Infof logs and adds a trace event if channelz is on. -func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { +func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprintf(format, args...), Severity: CtInfo, @@ -47,7 +47,7 @@ func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...inter } // Warning logs and adds a trace event if channelz is on. -func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { +func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprint(args...), Severity: CtWarning, @@ -55,7 +55,7 @@ func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { } // Warningf logs and adds a trace event if channelz is on. -func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { +func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprintf(format, args...), Severity: CtWarning, @@ -63,7 +63,7 @@ func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...in } // Error logs and adds a trace event if channelz is on. -func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { +func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprint(args...), Severity: CtError, @@ -71,7 +71,7 @@ func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { } // Errorf logs and adds a trace event if channelz is on. 
-func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { +func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprintf(format, args...), Severity: CtError, diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go index 7b2f350e..1d4020f5 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -628,6 +628,7 @@ type tracedChannel interface { type channelTrace struct { cm *channelMap + clearCalled bool createdTime time.Time eventCount int64 mu sync.Mutex @@ -656,6 +657,10 @@ func (c *channelTrace) append(e *TraceEvent) { } func (c *channelTrace) clear() { + if c.clearCalled { + return + } + c.clearCalled = true c.mu.Lock() for _, e := range c.events { if e.RefID != 0 { diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go index 8d194e44..98288c3f 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go @@ -23,7 +23,7 @@ import ( ) // GetSocketOption gets the socket option info of the conn. -func GetSocketOption(socket interface{}) *SocketOptionData { +func GetSocketOption(socket any) *SocketOptionData { c, ok := socket.(syscall.Conn) if !ok { return nil diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go index 837ddc40..b5568b22 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go @@ -22,6 +22,6 @@ package channelz // GetSocketOption gets the socket option info of the conn. -func GetSocketOption(c interface{}) *SocketOptionData { +func GetSocketOption(c any) *SocketOptionData { return nil } diff --git a/vendor/google.golang.org/grpc/internal/credentials/credentials.go b/vendor/google.golang.org/grpc/internal/credentials/credentials.go index 32c9b590..9deee7f6 100644 --- a/vendor/google.golang.org/grpc/internal/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/internal/credentials/credentials.go @@ -25,12 +25,12 @@ import ( type requestInfoKey struct{} // NewRequestInfoContext creates a context with ri. -func NewRequestInfoContext(ctx context.Context, ri interface{}) context.Context { +func NewRequestInfoContext(ctx context.Context, ri any) context.Context { return context.WithValue(ctx, requestInfoKey{}, ri) } // RequestInfoFromContext extracts the RequestInfo from ctx. -func RequestInfoFromContext(ctx context.Context) interface{} { +func RequestInfoFromContext(ctx context.Context) any { return ctx.Value(requestInfoKey{}) } @@ -39,11 +39,11 @@ func RequestInfoFromContext(ctx context.Context) interface{} { type clientHandshakeInfoKey struct{} // ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx. -func ClientHandshakeInfoFromContext(ctx context.Context) interface{} { +func ClientHandshakeInfoFromContext(ctx context.Context) any { return ctx.Value(clientHandshakeInfoKey{}) } // NewClientHandshakeInfoContext creates a context with chi. 
-func NewClientHandshakeInfoContext(ctx context.Context, chi interface{}) context.Context { +func NewClientHandshakeInfoContext(ctx context.Context, chi any) context.Context { return context.WithValue(ctx, clientHandshakeInfoKey{}, chi) } diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 80fd5c7d..685a3cb4 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -36,10 +36,13 @@ var ( // "GRPC_RING_HASH_CAP". This does not override the default bounds // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) - // PickFirstLBConfig is set if we should support configuration of the - // pick_first LB policy, which can be enabled by setting the environment - // variable "GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG" to "true". - PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", false) + // LeastRequestLB is set if we should support the least_request_experimental + // LB policy, which can be enabled by setting the environment variable + // "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true". + LeastRequestLB = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST", false) + // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS + // handshakes that can be performed. + ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go index 02b4b6a1..29f234ac 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -50,46 +50,7 @@ var ( // // When both bootstrap FileName and FileContent are set, FileName is used. XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv) - // XDSRingHash indicates whether ring hash support is enabled, which can be - // disabled by setting the environment variable - // "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false". - XDSRingHash = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH", true) - // XDSClientSideSecurity is used to control processing of security - // configuration on the client-side. - // - // Note that there is no env var protection for the server-side because we - // have a brand new API on the server-side and users explicitly need to use - // the new API to get security integration on the server. - XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true) - // XDSAggregateAndDNS indicates whether processing of aggregated cluster and - // DNS cluster is enabled, which can be disabled by setting the environment - // variable "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" - // to "false". - XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true) - - // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled, - // which can be disabled by setting the environment variable - // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false". 
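The body of uint64FromEnv is not part of this hunk, but the ALTSMaxConcurrentHandshakes declaration above implies its contract: fall back to the default when the variable is unset or unparsable, then clamp to the given bounds. A plausible shape, offered purely as an assumption:

package envconfig // assumed; the real implementation may differ

import (
	"os"
	"strconv"
)

func uint64FromEnv(envVar string, def, min, max uint64) uint64 {
	v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64)
	if err != nil {
		return def // unset or unparsable
	}
	if v < min {
		v = min
	}
	if v > max {
		v = max
	}
	return v
}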
- XDSRBAC = boolFromEnv("GRPC_XDS_EXPERIMENTAL_RBAC", true) - // XDSOutlierDetection indicates whether outlier detection support is - // enabled, which can be disabled by setting the environment variable - // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false". - XDSOutlierDetection = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION", true) - // XDSFederation indicates whether federation support is enabled, which can - // be enabled by setting the environment variable - // "GRPC_EXPERIMENTAL_XDS_FEDERATION" to "true". - XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", true) - - // XDSRLS indicates whether processing of Cluster Specifier plugins and - // support for the RLS CLuster Specifier is enabled, which can be disabled by - // setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to - // "false". - XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", true) // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI") - // XDSCustomLBPolicy indicates whether Custom LB Policies are enabled, which - // can be disabled by setting the environment variable - // "GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG" to "false". - XDSCustomLBPolicy = boolFromEnv("GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG", true) ) diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go new file mode 100644 index 00000000..7f7044e1 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/experimental.go @@ -0,0 +1,28 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +var ( + // WithRecvBufferPool is implemented by the grpc package and returns a dial + // option to configure a shared buffer pool for a grpc.ClientConn. + WithRecvBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption + + // RecvBufferPool is implemented by the grpc package and returns a server + // option to configure a shared buffer pool for a grpc.Server. + RecvBufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption +) diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go index b68e26a3..bfc45102 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go @@ -30,7 +30,7 @@ var Logger LoggerV2 var DepthLogger DepthLoggerV2 // InfoDepth logs to the INFO log at the specified depth. -func InfoDepth(depth int, args ...interface{}) { +func InfoDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.InfoDepth(depth, args...) } else { @@ -39,7 +39,7 @@ func InfoDepth(depth int, args ...interface{}) { } // WarningDepth logs to the WARNING log at the specified depth. 
-func WarningDepth(depth int, args ...interface{}) { +func WarningDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.WarningDepth(depth, args...) } else { @@ -48,7 +48,7 @@ func WarningDepth(depth int, args ...interface{}) { } // ErrorDepth logs to the ERROR log at the specified depth. -func ErrorDepth(depth int, args ...interface{}) { +func ErrorDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.ErrorDepth(depth, args...) } else { @@ -57,7 +57,7 @@ func ErrorDepth(depth int, args ...interface{}) { } // FatalDepth logs to the FATAL log at the specified depth. -func FatalDepth(depth int, args ...interface{}) { +func FatalDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.FatalDepth(depth, args...) } else { @@ -71,35 +71,35 @@ func FatalDepth(depth int, args ...interface{}) { // is defined here to avoid a circular dependency. type LoggerV2 interface { // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. - Info(args ...interface{}) + Info(args ...any) // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. - Infoln(args ...interface{}) + Infoln(args ...any) // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. - Infof(format string, args ...interface{}) + Infof(format string, args ...any) // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. - Warning(args ...interface{}) + Warning(args ...any) // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. - Warningln(args ...interface{}) + Warningln(args ...any) // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. - Warningf(format string, args ...interface{}) + Warningf(format string, args ...any) // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. - Error(args ...interface{}) + Error(args ...any) // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...interface{}) + Errorln(args ...any) // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - Errorf(format string, args ...interface{}) + Errorf(format string, args ...any) // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...interface{}) + Fatal(args ...any) // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...interface{}) + Fatalln(args ...any) // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalf(format string, args ...interface{}) + Fatalf(format string, args ...any) // V reports whether verbosity level l is at least the requested verbose level. V(l int) bool } @@ -116,11 +116,11 @@ type LoggerV2 interface { // later release. type DepthLoggerV2 interface { // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. - InfoDepth(depth int, args ...interface{}) + InfoDepth(depth int, args ...any) // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. 
- WarningDepth(depth int, args ...interface{}) + WarningDepth(depth int, args ...any) // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. - ErrorDepth(depth int, args ...interface{}) + ErrorDepth(depth int, args ...any) // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. - FatalDepth(depth int, args ...interface{}) + FatalDepth(depth int, args ...any) } diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go index 02224b42..faa998de 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go @@ -31,7 +31,7 @@ type PrefixLogger struct { } // Infof does info logging. -func (pl *PrefixLogger) Infof(format string, args ...interface{}) { +func (pl *PrefixLogger) Infof(format string, args ...any) { if pl != nil { // Handle nil, so the tests can pass in a nil logger. format = pl.prefix + format @@ -42,7 +42,7 @@ func (pl *PrefixLogger) Infof(format string, args ...interface{}) { } // Warningf does warning logging. -func (pl *PrefixLogger) Warningf(format string, args ...interface{}) { +func (pl *PrefixLogger) Warningf(format string, args ...any) { if pl != nil { format = pl.prefix + format pl.logger.WarningDepth(1, fmt.Sprintf(format, args...)) @@ -52,7 +52,7 @@ func (pl *PrefixLogger) Warningf(format string, args ...interface{}) { } // Errorf does error logging. -func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { +func (pl *PrefixLogger) Errorf(format string, args ...any) { if pl != nil { format = pl.prefix + format pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...)) @@ -62,7 +62,7 @@ func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { } // Debugf does info logging at verbose level 2. -func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { +func (pl *PrefixLogger) Debugf(format string, args ...any) { // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe // rewrite PrefixLogger a little to ensure that we don't use the global // `Logger` here, and instead use the `logger` field. diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go index d08e3e90..aa97273e 100644 --- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go @@ -80,6 +80,13 @@ func Uint32() uint32 { return r.Uint32() } +// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source. +func ExpFloat64() float64 { + mu.Lock() + defer mu.Unlock() + return r.ExpFloat64() +} + // Shuffle implements rand.Shuffle on the grpcrand global source. var Shuffle = func(n int, f func(int, int)) { mu.Lock() diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go index 37b8d411..f7f40a16 100644 --- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -20,7 +20,6 @@ package grpcsync import ( "context" - "sync" "google.golang.org/grpc/internal/buffer" ) @@ -32,14 +31,12 @@ import ( // // This type is safe for concurrent access. 
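The ExpFloat64 addition above follows grpcrand's existing pattern: one process-wide rand source with every method serialized by a mutex. Reduced to a standalone form:

package main

import (
	"fmt"
	"math/rand"
	"sync"
	"time"
)

var (
	mu sync.Mutex
	r  = rand.New(rand.NewSource(time.Now().UnixNano()))
)

// ExpFloat64 mirrors the grpcrand helper: lock, delegate, unlock.
func ExpFloat64() float64 {
	mu.Lock()
	defer mu.Unlock()
	return r.ExpFloat64()
}

func main() { fmt.Println(ExpFloat64()) }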
type CallbackSerializer struct { - // Done is closed once the serializer is shut down completely, i.e all + // done is closed once the serializer is shut down completely, i.e all // scheduled callbacks are executed and the serializer has deallocated all // its resources. - Done chan struct{} + done chan struct{} callbacks *buffer.Unbounded - closedMu sync.Mutex - closed bool } // NewCallbackSerializer returns a new CallbackSerializer instance. The provided @@ -48,12 +45,12 @@ type CallbackSerializer struct { // callbacks will be added once this context is canceled, and any pending un-run // callbacks will be executed before the serializer is shut down. func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { - t := &CallbackSerializer{ - Done: make(chan struct{}), + cs := &CallbackSerializer{ + done: make(chan struct{}), callbacks: buffer.NewUnbounded(), } - go t.run(ctx) - return t + go cs.run(ctx) + return cs } // Schedule adds a callback to be scheduled after existing callbacks are run. @@ -64,56 +61,40 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { // Return value indicates if the callback was successfully added to the list of // callbacks to be executed by the serializer. It is not possible to add // callbacks once the context passed to NewCallbackSerializer is cancelled. -func (t *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { - t.closedMu.Lock() - defer t.closedMu.Unlock() - - if t.closed { - return false - } - t.callbacks.Put(f) - return true +func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { + return cs.callbacks.Put(f) == nil } -func (t *CallbackSerializer) run(ctx context.Context) { - var backlog []func(context.Context) +func (cs *CallbackSerializer) run(ctx context.Context) { + defer close(cs.done) - defer close(t.Done) + // TODO: when Go 1.21 is the oldest supported version, this loop and Close + // can be replaced with: + // + // context.AfterFunc(ctx, cs.callbacks.Close) for ctx.Err() == nil { select { case <-ctx.Done(): // Do nothing here. Next iteration of the for loop will not happen, // since ctx.Err() would be non-nil. - case callback, ok := <-t.callbacks.Get(): - if !ok { - return - } - t.callbacks.Load() - callback.(func(ctx context.Context))(ctx) + case cb := <-cs.callbacks.Get(): + cs.callbacks.Load() + cb.(func(context.Context))(ctx) } } - // Fetch pending callbacks if any, and execute them before returning from - // this method and closing t.Done. - t.closedMu.Lock() - t.closed = true - backlog = t.fetchPendingCallbacks() - t.callbacks.Close() - t.closedMu.Unlock() - for _, b := range backlog { - b(ctx) + // Close the buffer to prevent new callbacks from being added. + cs.callbacks.Close() + + // Run all pending callbacks. + for cb := range cs.callbacks.Get() { + cs.callbacks.Load() + cb.(func(context.Context))(ctx) } } -func (t *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) { - var backlog []func(context.Context) - for { - select { - case b := <-t.callbacks.Get(): - backlog = append(backlog, b.(func(context.Context))) - t.callbacks.Load() - default: - return backlog - } - } +// Done returns a channel that is closed after the context passed to +// NewCallbackSerializer is canceled and all callbacks have been executed. 
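Putting the CallbackSerializer changes together: Schedule now reports acceptance via the buffer's Put error, and Done (completed just below) replaces the exported field. A caller's view, assuming compilation inside the grpc module since grpcsync is internal:

package demo

import (
	"context"
	"fmt"

	"google.golang.org/grpc/internal/grpcsync"
)

func example() {
	ctx, cancel := context.WithCancel(context.Background())
	cs := grpcsync.NewCallbackSerializer(ctx)

	// Callbacks run one at a time, in scheduling order, on a single
	// goroutine owned by the serializer.
	cs.Schedule(func(context.Context) { fmt.Println("first") })
	cs.Schedule(func(context.Context) { fmt.Println("second") })

	cancel()    // stop accepting new callbacks; pending ones still run
	<-cs.Done() // closed once the final pending callback has returned
}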
+func (cs *CallbackSerializer) Done() <-chan struct{} { + return cs.done } diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go new file mode 100644 index 00000000..aef8cec1 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go @@ -0,0 +1,121 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcsync + +import ( + "context" + "sync" +) + +// Subscriber represents an entity that is subscribed to messages published on +// a PubSub. It wraps the callback to be invoked by the PubSub when a new +// message is published. +type Subscriber interface { + // OnMessage is invoked when a new message is published. Implementations + // must not block in this method. + OnMessage(msg any) +} + +// PubSub is a simple one-to-many publish-subscribe system that supports +// messages of arbitrary type. It guarantees that messages are delivered in +// the same order in which they were published. +// +// Publisher invokes the Publish() method to publish new messages, while +// subscribers interested in receiving these messages register a callback +// via the Subscribe() method. +// +// Once a PubSub is stopped, no more messages can be published, but any pending +// published messages will be delivered to the subscribers. Done may be used +// to determine when all published messages have been delivered. +type PubSub struct { + cs *CallbackSerializer + + // Access to the below fields are guarded by this mutex. + mu sync.Mutex + msg any + subscribers map[Subscriber]bool +} + +// NewPubSub returns a new PubSub instance. Users should cancel the +// provided context to shutdown the PubSub. +func NewPubSub(ctx context.Context) *PubSub { + return &PubSub{ + cs: NewCallbackSerializer(ctx), + subscribers: map[Subscriber]bool{}, + } +} + +// Subscribe registers the provided Subscriber to the PubSub. +// +// If the PubSub contains a previously published message, the Subscriber's +// OnMessage() callback will be invoked asynchronously with the existing +// message to begin with, and subsequently for every newly published message. +// +// The caller is responsible for invoking the returned cancel function to +// unsubscribe itself from the PubSub. +func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) { + ps.mu.Lock() + defer ps.mu.Unlock() + + ps.subscribers[sub] = true + + if ps.msg != nil { + msg := ps.msg + ps.cs.Schedule(func(context.Context) { + ps.mu.Lock() + defer ps.mu.Unlock() + if !ps.subscribers[sub] { + return + } + sub.OnMessage(msg) + }) + } + + return func() { + ps.mu.Lock() + defer ps.mu.Unlock() + delete(ps.subscribers, sub) + } +} + +// Publish publishes the provided message to the PubSub, and invokes +// callbacks registered by subscribers asynchronously. 
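A small sketch of the new PubSub in use. Note the replay behavior: a subscriber that arrives after a Publish is asynchronously handed the latest message first. Assumes compilation inside the grpc module; watcher is a stand-in subscriber:

package demo

import (
	"context"
	"fmt"

	"google.golang.org/grpc/internal/grpcsync"
)

type watcher struct{}

// OnMessage must not block; it runs on the PubSub's internal serializer.
func (watcher) OnMessage(msg any) { fmt.Println("got:", msg) }

func example() {
	ctx, cancel := context.WithCancel(context.Background())
	ps := grpcsync.NewPubSub(ctx)

	ps.Publish("READY") // retained as the latest message

	unsub := ps.Subscribe(watcher{}) // replayed "READY" asynchronously
	defer unsub()

	ps.Publish("IDLE") // delivered in order to all current subscribers

	cancel()    // stop the PubSub; pending deliveries still run
	<-ps.Done() // all queued OnMessage calls have completed
}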
+func (ps *PubSub) Publish(msg any) { + ps.mu.Lock() + defer ps.mu.Unlock() + + ps.msg = msg + for sub := range ps.subscribers { + s := sub + ps.cs.Schedule(func(context.Context) { + ps.mu.Lock() + defer ps.mu.Unlock() + if !ps.subscribers[s] { + return + } + s.OnMessage(msg) + }) + } +} + +// Done returns a channel that is closed after the context passed to NewPubSub +// is canceled and all updates have been sent to subscribers. +func (ps *PubSub) Done() <-chan struct{} { + return ps.cs.Done() +} diff --git a/vendor/google.golang.org/grpc/internal/idle/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go new file mode 100644 index 00000000..fe49cb74 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/idle/idle.go @@ -0,0 +1,278 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package idle contains a component for managing idleness (entering and exiting) +// based on RPC activity. +package idle + +import ( + "fmt" + "math" + "sync" + "sync/atomic" + "time" +) + +// For overriding in unit tests. +var timeAfterFunc = func(d time.Duration, f func()) *time.Timer { + return time.AfterFunc(d, f) +} + +// Enforcer is the functionality provided by grpc.ClientConn to enter +// and exit from idle mode. +type Enforcer interface { + ExitIdleMode() error + EnterIdleMode() +} + +// Manager implements idleness detection and calls the configured Enforcer to +// enter/exit idle mode when appropriate. Must be created by NewManager. +type Manager struct { + // State accessed atomically. + lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed. + activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there. + activeSinceLastTimerCheck int32 // Boolean; True if there was an RPC since the last timer callback. + closed int32 // Boolean; True when the manager is closed. + + // Can be accessed without atomics or mutex since these are set at creation + // time and read-only after that. + enforcer Enforcer // Functionality provided by grpc.ClientConn. + timeout time.Duration + + // idleMu is used to guarantee mutual exclusion in two scenarios: + // - Opposing intentions: + // - a: Idle timeout has fired and handleIdleTimeout() is trying to put + // the channel in idle mode because the channel has been inactive. + // - b: At the same time an RPC is made on the channel, and OnCallBegin() + // is trying to prevent the channel from going idle. + // - Competing intentions: + // - The channel is in idle mode and there are multiple RPCs starting at + // the same time, all trying to move the channel out of idle. Only one + // of them should succeed in doing so, while the other RPCs should + // piggyback on the first one and be successfully handled. + idleMu sync.RWMutex + actuallyIdle bool + timer *time.Timer +} + +// NewManager creates a new idleness manager implementation for the +// given idle timeout. It begins in idle mode. 
+func NewManager(enforcer Enforcer, timeout time.Duration) *Manager { + return &Manager{ + enforcer: enforcer, + timeout: timeout, + actuallyIdle: true, + activeCallsCount: -math.MaxInt32, + } +} + +// resetIdleTimerLocked resets the idle timer to the given duration. Called +// when exiting idle mode or when the timer fires and we need to reset it. +func (m *Manager) resetIdleTimerLocked(d time.Duration) { + if m.isClosed() || m.timeout == 0 || m.actuallyIdle { + return + } + + // It is safe to ignore the return value from Reset() because this method is + // only ever called from the timer callback or when exiting idle mode. + if m.timer != nil { + m.timer.Stop() + } + m.timer = timeAfterFunc(d, m.handleIdleTimeout) +} + +func (m *Manager) resetIdleTimer(d time.Duration) { + m.idleMu.Lock() + defer m.idleMu.Unlock() + m.resetIdleTimerLocked(d) +} + +// handleIdleTimeout is the timer callback that is invoked upon expiry of the +// configured idle timeout. The channel is considered inactive if there are no +// ongoing calls and no RPC activity since the last time the timer fired. +func (m *Manager) handleIdleTimeout() { + if m.isClosed() { + return + } + + if atomic.LoadInt32(&m.activeCallsCount) > 0 { + m.resetIdleTimer(m.timeout) + return + } + + // There has been activity on the channel since we last got here. Reset the + // timer and return. + if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { + // Set the timer to fire after a duration of idle timeout, calculated + // from the time the most recent RPC completed. + atomic.StoreInt32(&m.activeSinceLastTimerCheck, 0) + m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime)-time.Now().UnixNano()) + m.timeout) + return + } + + // Now that we've checked that there has been no activity, attempt to enter + // idle mode, which is very likely to succeed. + if m.tryEnterIdleMode() { + // Successfully entered idle mode. No timer needed until we exit idle. + return + } + + // Failed to enter idle mode due to a concurrent RPC that kept the channel + // active, or because of an error from the channel. Undo the attempt to + // enter idle, and reset the timer to try again later. + m.resetIdleTimer(m.timeout) +} + +// tryEnterIdleMode instructs the channel to enter idle mode. But before +// that, it performs a last minute check to ensure that no new RPC has come in, +// making the channel active. +// +// Return value indicates whether or not the channel moved to idle mode. +// +// Holds idleMu which ensures mutual exclusion with exitIdleMode. +func (m *Manager) tryEnterIdleMode() bool { + // Setting the activeCallsCount to -math.MaxInt32 indicates to OnCallBegin() + // that the channel is either in idle mode or is trying to get there. + if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) { + // This CAS operation can fail if an RPC started after we checked for + // activity in the timer handler, or one was ongoing from before the + // last time the timer fired, or if a test is attempting to enter idle + // mode without checking. In all cases, abort going into idle mode. + return false + } + // N.B. if we fail to enter idle mode after this, we must re-add + // math.MaxInt32 to m.activeCallsCount. + + m.idleMu.Lock() + defer m.idleMu.Unlock() + + if atomic.LoadInt32(&m.activeCallsCount) != -math.MaxInt32 { + // We raced and lost to a new RPC. Very rare, but stop entering idle. 
+ atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) + return false + } + if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { + // A very short RPC could have come in (and also finished) after we + // checked for calls count and activity in handleIdleTimeout(), but + // before the CAS operation. So, we need to check for activity again. + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) + return false + } + + // No new RPCs have come in since we set the active calls count value to + // -math.MaxInt32. And since we have the lock, it is safe to enter idle mode + // unconditionally now. + m.enforcer.EnterIdleMode() + m.actuallyIdle = true + return true +} + +func (m *Manager) EnterIdleModeForTesting() { + m.tryEnterIdleMode() +} + +// OnCallBegin is invoked at the start of every RPC. +func (m *Manager) OnCallBegin() error { + if m.isClosed() { + return nil + } + + if atomic.AddInt32(&m.activeCallsCount, 1) > 0 { + // Channel is not idle now. Set the activity bit and allow the call. + atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1) + return nil + } + + // Channel is either in idle mode or is in the process of moving to idle + // mode. Attempt to exit idle mode to allow this RPC. + if err := m.ExitIdleMode(); err != nil { + // Undo the increment to calls count, and return an error causing the + // RPC to fail. + atomic.AddInt32(&m.activeCallsCount, -1) + return err + } + + atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1) + return nil +} + +// ExitIdleMode instructs m to call the enforcer's ExitIdleMode and update m's +// internal state. +func (m *Manager) ExitIdleMode() error { + // Holds idleMu which ensures mutual exclusion with tryEnterIdleMode. + m.idleMu.Lock() + defer m.idleMu.Unlock() + + if m.isClosed() || !m.actuallyIdle { + // This can happen in three scenarios: + // - handleIdleTimeout() set the calls count to -math.MaxInt32 and called + // tryEnterIdleMode(). But before the latter could grab the lock, an RPC + // came in and OnCallBegin() noticed that the calls count is negative. + // - Channel is in idle mode, and multiple new RPCs come in at the same + // time, all of them notice a negative calls count in OnCallBegin and get + // here. The first one to get the lock would got the channel to exit idle. + // - Channel is not in idle mode, and the user calls Connect which calls + // m.ExitIdleMode. + // + // In any case, there is nothing to do here. + return nil + } + + if err := m.enforcer.ExitIdleMode(); err != nil { + return fmt.Errorf("failed to exit idle mode: %w", err) + } + + // Undo the idle entry process. This also respects any new RPC attempts. + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) + m.actuallyIdle = false + + // Start a new timer to fire after the configured idle timeout. + m.resetIdleTimerLocked(m.timeout) + return nil +} + +// OnCallEnd is invoked at the end of every RPC. +func (m *Manager) OnCallEnd() { + if m.isClosed() { + return + } + + // Record the time at which the most recent call finished. + atomic.StoreInt64(&m.lastCallEndTime, time.Now().UnixNano()) + + // Decrement the active calls count. This count can temporarily go negative + // when the timer callback is in the process of moving the channel to idle + // mode, but one or more RPCs come in and complete before the timer callback + // can get done with the process of moving to idle mode. 
+ atomic.AddInt32(&m.activeCallsCount, -1) +} + +func (m *Manager) isClosed() bool { + return atomic.LoadInt32(&m.closed) == 1 +} + +func (m *Manager) Close() { + atomic.StoreInt32(&m.closed, 1) + + m.idleMu.Lock() + if m.timer != nil { + m.timer.Stop() + m.timer = nil + } + m.idleMu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 42ff39c8..6c7ea6a5 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -30,7 +30,7 @@ import ( var ( // WithHealthCheckFunc is set by dialoptions.go - WithHealthCheckFunc interface{} // func (HealthChecker) DialOption + WithHealthCheckFunc any // func (HealthChecker) DialOption // HealthCheckFunc is used to provide client-side LB channel health checking HealthCheckFunc HealthChecker // BalancerUnregister is exported by package balancer to unregister a balancer. @@ -38,8 +38,12 @@ var ( // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by // default, but tests may wish to set it lower for convenience. KeepaliveMinPingTime = 10 * time.Second + // KeepaliveMinServerPingTime is the minimum ping interval for servers. + // This must be 1s by default, but tests may wish to set it lower for + // convenience. + KeepaliveMinServerPingTime = time.Second // ParseServiceConfig parses a JSON representation of the service config. - ParseServiceConfig interface{} // func(string) *serviceconfig.ParseResult + ParseServiceConfig any // func(string) *serviceconfig.ParseResult // EqualServiceConfigForTesting is for testing service config generation and // parsing. Both a and b should be returned by ParseServiceConfig. // This function compares the config without rawJSON stripped, in case the @@ -49,33 +53,33 @@ var ( // given name. This is set by package certprovider for use from xDS // bootstrap code while parsing certificate provider configs in the // bootstrap file. - GetCertificateProviderBuilder interface{} // func(string) certprovider.Builder + GetCertificateProviderBuilder any // func(string) certprovider.Builder // GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo // stored in the passed in attributes. This is set by // credentials/xds/xds.go. - GetXDSHandshakeInfoForTesting interface{} // func (*attributes.Attributes) *xds.HandshakeInfo + GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *unsafe.Pointer // GetServerCredentials returns the transport credentials configured on a // gRPC server. An xDS-enabled server needs to know what type of credentials // is configured on the underlying gRPC server. This is set by server.go. - GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials + GetServerCredentials any // func (*grpc.Server) credentials.TransportCredentials // CanonicalString returns the canonical string of the code defined here: // https://github.com/grpc/grpc/blob/master/doc/statuscodes.md. // // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. - CanonicalString interface{} // func (codes.Code) string - // DrainServerTransports initiates a graceful close of existing connections - // on a gRPC server accepted on the provided listener address. An - // xDS-enabled server invokes this method on a grpc.Server when a particular - // listener moves to "not-serving" mode. 
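The idle manager's trick above is worth spelling out: activeCallsCount encodes both the number of active RPCs and an "idle, or entering idle" flag by biasing the counter with -math.MaxInt32, so OnCallBegin can detect idleness with the same atomic add that registers the call. The arithmetic in isolation:

package main

import (
	"fmt"
	"math"
	"sync/atomic"
)

func main() {
	var calls int32 // 0 active calls, channel not idle

	// Timer path: claim idleness only if the count is exactly zero.
	entered := atomic.CompareAndSwapInt32(&calls, 0, -math.MaxInt32)
	fmt.Println("entered idle:", entered) // true

	// RPC path: one Add both registers the call and exposes the
	// sentinel; a non-positive result means "idle, must exit first".
	n := atomic.AddInt32(&calls, 1)
	fmt.Println("must exit idle:", n <= 0) // true

	// Exiting idle removes the bias while preserving the call above.
	atomic.AddInt32(&calls, math.MaxInt32)
	fmt.Println("active calls:", atomic.LoadInt32(&calls)) // 1
}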
- DrainServerTransports interface{} // func(*grpc.Server, string) + CanonicalString any // func (codes.Code) string + // IsRegisteredMethod returns whether the passed in method is registered as + // a method on the server. + IsRegisteredMethod any // func(*grpc.Server, string) bool + // ServerFromContext returns the server from the context. + ServerFromContext any // func(context.Context) *grpc.Server // AddGlobalServerOptions adds an array of ServerOption that will be // effective globally for newly created servers. The priority will be: 1. // user-provided; 2. this method; 3. default values. // // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. - AddGlobalServerOptions interface{} // func(opt ...ServerOption) + AddGlobalServerOptions any // func(opt ...ServerOption) // ClearGlobalServerOptions clears the array of extra ServerOption. This // method is useful in testing and benchmarking. // @@ -88,14 +92,14 @@ var ( // // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. - AddGlobalDialOptions interface{} // func(opt ...DialOption) + AddGlobalDialOptions any // func(opt ...DialOption) // DisableGlobalDialOptions returns a DialOption that prevents the // ClientConn from applying the global DialOptions (set via // AddGlobalDialOptions). // // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. - DisableGlobalDialOptions interface{} // func() grpc.DialOption + DisableGlobalDialOptions any // func() grpc.DialOption // ClearGlobalDialOptions clears the array of extra DialOption. This // method is useful in testing and benchmarking. // @@ -104,23 +108,26 @@ var ( ClearGlobalDialOptions func() // JoinDialOptions combines the dial options passed as arguments into a // single dial option. - JoinDialOptions interface{} // func(...grpc.DialOption) grpc.DialOption + JoinDialOptions any // func(...grpc.DialOption) grpc.DialOption // JoinServerOptions combines the server options passed as arguments into a // single server option. - JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption + JoinServerOptions any // func(...grpc.ServerOption) grpc.ServerOption // WithBinaryLogger returns a DialOption that specifies the binary logger // for a ClientConn. // // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. - WithBinaryLogger interface{} // func(binarylog.Logger) grpc.DialOption + WithBinaryLogger any // func(binarylog.Logger) grpc.DialOption // BinaryLogger returns a ServerOption that can set the binary logger for a // server. // // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. - BinaryLogger interface{} // func(binarylog.Logger) grpc.ServerOption + BinaryLogger any // func(binarylog.Logger) grpc.ServerOption + + // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a provided grpc.ClientConn + SubscribeToConnectivityStateChanges any // func(*grpc.ClientConn, grpcsync.Subscriber) // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using // the provided xds bootstrap config instead of the global configuration from @@ -131,7 +138,7 @@ var ( // // This function should ONLY be used for testing and may not work with some // other features, including the CSDS service. 
- NewXDSResolverWithConfigForTesting interface{} // func([]byte) (resolver.Builder, error) + NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error) // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster // Specifier Plugin for testing purposes, regardless of the XDSRLS environment @@ -163,7 +170,32 @@ var ( UnregisterRBACHTTPFilterForTesting func() // ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY. - ORCAAllowAnyMinReportingInterval interface{} // func(so *orca.ServiceOptions) + ORCAAllowAnyMinReportingInterval any // func(so *orca.ServiceOptions) + + // GRPCResolverSchemeExtraMetadata determines when gRPC will add extra + // metadata to RPCs. + GRPCResolverSchemeExtraMetadata string = "xds" + + // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode. + EnterIdleModeForTesting any // func(*grpc.ClientConn) + + // ExitIdleModeForTesting gets the ClientConn to exit IDLE mode. + ExitIdleModeForTesting any // func(*grpc.ClientConn) error + + ChannelzTurnOffForTesting func() + + // TriggerXDSResourceNameNotFoundForTesting triggers the resource-not-found + // error for a given resource type and name. This is usually triggered when + // the associated watch timer fires. For testing purposes, having this + // function makes events more predictable than relying on timer events. + TriggerXDSResourceNameNotFoundForTesting any // func(func(xdsresource.Type, string), string, string) error + + // TriggerXDSResourceNotFoundClient invokes the testing xDS Client singleton + // to invoke resource not found for a resource type name and resource name. + TriggerXDSResourceNameNotFoundClient any // func(string, string) error + + // FromOutgoingContextRaw returns the un-merged, intermediary contents of metadata.rawMD. + FromOutgoingContextRaw any // func(context.Context) (metadata.MD, [][]string, bool) ) // HealthChecker defines the signature of the client-side LB channel health checking function. @@ -174,7 +206,7 @@ var ( // // The health checking protocol is defined at: // https://github.com/grpc/grpc/blob/master/doc/health-checking.md -type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), serviceName string) error +type HealthChecker func(ctx context.Context, newStream func(string) (any, error), setConnectivityState func(connectivity.State, error), serviceName string) error const ( // CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode. diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go index c82e608e..900bfb71 100644 --- a/vendor/google.golang.org/grpc/internal/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go @@ -35,7 +35,7 @@ const mdKey = mdKeyType("grpc.internal.address.metadata") type mdValue metadata.MD -func (m mdValue) Equal(o interface{}) bool { +func (m mdValue) Equal(o any) bool { om, ok := o.(mdValue) if !ok { return false diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go index 0177af4b..70331913 100644 --- a/vendor/google.golang.org/grpc/internal/pretty/pretty.go +++ b/vendor/google.golang.org/grpc/internal/pretty/pretty.go @@ -35,7 +35,7 @@ const jsonIndent = " " // ToJSON marshals the input into a json string. // // If marshal fails, it falls back to fmt.Sprintf("%+v"). 
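The internal.go variables migrated above all follow one pattern: a package-level var typed any holds a concrete function installed elsewhere (usually in an init), breaking what would otherwise be an import cycle; callers recover the signature with a type assertion. In miniature, with invented names:

package main

import "fmt"

// Registry side: typed as `any`, with the intended signature recorded
// in a comment, exactly as in internal.go above.
var canonicalString any // func(code int) string

// Provider side, normally an init() in the implementing package.
func init() {
	canonicalString = func(code int) string {
		return fmt.Sprintf("CODE_%d", code)
	}
}

// Caller side: a type assertion restores the concrete signature.
func main() {
	f := canonicalString.(func(int) string)
	fmt.Println(f(0)) // CODE_0
}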
-func ToJSON(e interface{}) string { +func ToJSON(e any) string { switch ee := e.(type) { case protov1.Message: mm := jsonpb.Marshaler{Indent: jsonIndent} diff --git a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go index c7a18a94..f0603871 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go +++ b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go @@ -92,7 +92,7 @@ type ClientStream interface { // calling RecvMsg on the same stream at the same time, but it is not safe // to call SendMsg on the same stream in different goroutines. It is also // not safe to call CloseSend concurrently with SendMsg. - SendMsg(m interface{}) error + SendMsg(m any) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the stream completes successfully. On // any other error, the stream is aborted and the error contains the RPC @@ -101,7 +101,7 @@ type ClientStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // ClientInterceptor is an interceptor for gRPC client streams. diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 09a667f3..b66dcb21 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -23,7 +23,6 @@ package dns import ( "context" "encoding/json" - "errors" "fmt" "net" "os" @@ -37,6 +36,7 @@ import ( "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/resolver/dns/internal" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) @@ -47,53 +47,37 @@ var EnableSRVLookups = false var logger = grpclog.Component("dns") -// Globals to stub out in tests. TODO: Perhaps these two can be combined into a -// single variable for testing the resolver? -var ( - newTimer = time.NewTimer - newTimerDNSResRate = time.NewTimer -) - func init() { resolver.Register(NewBuilder()) + internal.TimeAfterFunc = time.After + internal.NewNetResolver = newNetResolver + internal.AddressDialer = addressDialer } const ( defaultPort = "443" defaultDNSSvrPort = "53" golang = "GO" - // txtPrefix is the prefix string to be prepended to the host name for txt record lookup. + // txtPrefix is the prefix string to be prepended to the host name for txt + // record lookup. txtPrefix = "_grpc_config." // In DNS, service config is encoded in a TXT record via the mechanism // described in RFC-1464 using the attribute name grpc_config. txtAttribute = "grpc_config=" ) -var ( - errMissingAddr = errors.New("dns resolver: missing address") - - // Addresses ending with a colon that is supposed to be the separator - // between host and port is not allowed. E.g. 
"::" is a valid address as - // it is an IPv6 address (host only) and "[::]:" is invalid as it ends with - // a colon as the host and port separator - errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon") -) - -var ( - defaultResolver netResolver = net.DefaultResolver - // To prevent excessive re-resolution, we enforce a rate limit on DNS - // resolution requests. - minDNSResRate = 30 * time.Second -) - -var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) { - return func(ctx context.Context, network, address string) (net.Conn, error) { +var addressDialer = func(address string) func(context.Context, string, string) (net.Conn, error) { + return func(ctx context.Context, network, _ string) (net.Conn, error) { var dialer net.Dialer - return dialer.DialContext(ctx, network, authority) + return dialer.DialContext(ctx, network, address) } } -var customAuthorityResolver = func(authority string) (netResolver, error) { +var newNetResolver = func(authority string) (internal.NetResolver, error) { + if authority == "" { + return net.DefaultResolver, nil + } + host, port, err := parseTarget(authority, defaultDNSSvrPort) if err != nil { return nil, err @@ -103,7 +87,7 @@ var customAuthorityResolver = func(authority string) (netResolver, error) { return &net.Resolver{ PreferGo: true, - Dial: customAuthorityDialler(authorityWithPort), + Dial: internal.AddressDialer(authorityWithPort), }, nil } @@ -114,7 +98,8 @@ func NewBuilder() resolver.Builder { type dnsBuilder struct{} -// Build creates and starts a DNS resolver that watches the name resolution of the target. +// Build creates and starts a DNS resolver that watches the name resolution of +// the target. func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { host, port, err := parseTarget(target.Endpoint(), defaultPort) if err != nil { @@ -140,13 +125,9 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts disableServiceConfig: opts.DisableServiceConfig, } - if target.URL.Host == "" { - d.resolver = defaultResolver - } else { - d.resolver, err = customAuthorityResolver(target.URL.Host) - if err != nil { - return nil, err - } + d.resolver, err = internal.NewNetResolver(target.URL.Host) + if err != nil { + return nil, err } d.wg.Add(1) @@ -159,12 +140,6 @@ func (b *dnsBuilder) Scheme() string { return "dns" } -type netResolver interface { - LookupHost(ctx context.Context, host string) (addrs []string, err error) - LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) - LookupTXT(ctx context.Context, name string) (txts []string, err error) -} - // deadResolver is a resolver that does nothing. type deadResolver struct{} @@ -176,23 +151,26 @@ func (deadResolver) Close() {} type dnsResolver struct { host string port string - resolver netResolver + resolver internal.NetResolver ctx context.Context cancel context.CancelFunc cc resolver.ClientConn - // rn channel is used by ResolveNow() to force an immediate resolution of the target. + // rn channel is used by ResolveNow() to force an immediate resolution of the + // target. rn chan struct{} - // wg is used to enforce Close() to return after the watcher() goroutine has finished. - // Otherwise, data race will be possible. [Race Example] in dns_resolver_test we - // replace the real lookup functions with mocked ones to facilitate testing. 
- // If Close() doesn't wait for watcher() goroutine finishes, race detector sometimes - // will warns lookup (READ the lookup function pointers) inside watcher() goroutine - // has data race with replaceNetFunc (WRITE the lookup function pointers). + // wg is used to enforce Close() to return after the watcher() goroutine has + // finished. Otherwise, a data race would be possible. [Race Example] in + // dns_resolver_test we replace the real lookup functions with mocked ones to + // facilitate testing. If Close() doesn't wait for the watcher() goroutine to + // finish, the race detector sometimes warns that lookup (READ the lookup + // function pointers) inside the watcher() goroutine has a data race with + // replaceNetFunc (WRITE the lookup function pointers). wg sync.WaitGroup disableServiceConfig bool } -// ResolveNow invoke an immediate resolution of the target that this dnsResolver watches. +// ResolveNow invokes an immediate resolution of the target that this +// dnsResolver watches. func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) { select { case d.rn <- struct{}{}: @@ -218,28 +196,27 @@ func (d *dnsResolver) watcher() { err = d.cc.UpdateState(*state) } - var timer *time.Timer + var waitTime time.Duration if err == nil { - // Success resolving, wait for the next ResolveNow. However, also wait 30 seconds at the very least - // to prevent constantly re-resolving. + // Success resolving, wait for the next ResolveNow. However, also wait 30 + // seconds at the very least to prevent constantly re-resolving. backoffIndex = 1 - timer = newTimerDNSResRate(minDNSResRate) + waitTime = internal.MinResolutionRate select { case <-d.ctx.Done(): - timer.Stop() return case <-d.rn: } } else { - // Poll on an error found in DNS Resolver or an error received from ClientConn. - timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex)) + // Poll on an error found in DNS Resolver or an error received from + // ClientConn. + waitTime = backoff.DefaultExponential.Backoff(backoffIndex) backoffIndex++ } select { case <-d.ctx.Done(): - timer.Stop() return - case <-timer.C: + case <-internal.TimeAfterFunc(waitTime): } } } @@ -278,7 +255,8 @@ func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) { } func handleDNSError(err error, lookupType string) error { - if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { + dnsErr, ok := err.(*net.DNSError) + if ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { // Timeouts and temporary errors should be communicated to gRPC to // attempt another DNS query (with backoff). Other errors should be // suppressed (they may represent the absence of a TXT record). @@ -307,10 +285,12 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { res += s } - // TXT record must have "grpc_config=" attribute in order to be used as service config. + // TXT record must have "grpc_config=" attribute in order to be used as + // service config. if !strings.HasPrefix(res, txtAttribute) { logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute) - // This is not an error; it is the equivalent of not having a service config. + // This is not an error; it is the equivalent of not having a service + // config. return nil } sc := canaryingSC(strings.TrimPrefix(res, txtAttribute)) @@ -352,9 +332,10 @@ func (d *dnsResolver) lookup() (*resolver.State, error) { return &state, nil } -// formatIP returns ok = false if addr is not a valid textual representation of an IP address.
-// If addr is an IPv4 address, return the addr and ok = true. -// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. +// formatIP returns ok = false if addr is not a valid textual representation of +// an IP address. If addr is an IPv4 address, return the addr and ok = true. +// If addr is an IPv6 address, return the addr enclosed in square brackets and +// ok = true. func formatIP(addr string) (addrIP string, ok bool) { ip := net.ParseIP(addr) if ip == nil { @@ -366,10 +347,10 @@ func formatIP(addr string) (addrIP string, ok bool) { return "[" + addr + "]", true } -// parseTarget takes the user input target string and default port, returns formatted host and port info. -// If target doesn't specify a port, set the port to be the defaultPort. -// If target is in IPv6 format and host-name is enclosed in square brackets, brackets -// are stripped when setting the host. +// parseTarget takes the user input target string and default port, returns +// formatted host and port info. If target doesn't specify a port, set the port +// to be the defaultPort. If target is in IPv6 format and host-name is enclosed +// in square brackets, brackets are stripped when setting the host. // examples: // target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443" // target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80" @@ -377,7 +358,7 @@ func formatIP(addr string) (addrIP string, ok bool) { // target: ":80" defaultPort: "443" returns host: "localhost", port: "80" func parseTarget(target, defaultPort string) (host, port string, err error) { if target == "" { - return "", "", errMissingAddr + return "", "", internal.ErrMissingAddr } if ip := net.ParseIP(target); ip != nil { // target is an IPv4 or IPv6(without brackets) address @@ -385,12 +366,14 @@ func parseTarget(target, defaultPort string) (host, port string, err error) { } if host, port, err = net.SplitHostPort(target); err == nil { if port == "" { - // If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error. - return "", "", errEndsWithColon + // If the port field is empty (target ends with colon), e.g. "[::1]:", + // this is an error. + return "", "", internal.ErrEndsWithColon } // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port if host == "" { - // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. + // Keep consistent with net.Dial(): If the host is empty, as in ":80", + // the local system is assumed. host = "localhost" } return host, port, nil diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go new file mode 100644 index 00000000..c7fc557d --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +// Package internal contains functionality internal to the dns resolver package. +package internal + +import ( + "context" + "errors" + "net" + "time" +) + +// NetResolver groups the methods on net.Resolver that are used by the DNS +// resolver implementation. This allows the default net.Resolver instance to be +// overridden from tests. +type NetResolver interface { + LookupHost(ctx context.Context, host string) (addrs []string, err error) + LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) + LookupTXT(ctx context.Context, name string) (txts []string, err error) +} + +var ( + // ErrMissingAddr is the error returned when building a DNS resolver when + // the provided target name is empty. + ErrMissingAddr = errors.New("dns resolver: missing address") + + // ErrEndsWithColon is the error returned when building a DNS resolver when + // the provided target name ends with a colon that is supposed to be the + // separator between host and port. E.g. "::" is a valid address as it is + // an IPv6 address (host only) and "[::]:" is invalid as it ends with a + // colon as the host and port separator. + ErrEndsWithColon = errors.New("dns resolver: missing port after port-separator colon") +) + +// The following vars are overridden from tests. +var ( + // MinResolutionRate is the minimum interval at which re-resolutions are + // allowed. This helps to prevent excessive re-resolution. + MinResolutionRate = 30 * time.Second + + // TimeAfterFunc is used by the DNS resolver to wait for the given duration + // to elapse. In non-test code, this is implemented by time.After. In test + // code, this can be used to control the amount of time the resolver is + // blocked waiting for the duration to elapse. + TimeAfterFunc func(time.Duration) <-chan time.Time + + // NewNetResolver returns the net.Resolver instance for the given target. + NewNetResolver func(string) (NetResolver, error) + + // AddressDialer is the dialer used to dial the DNS server. It accepts the + // Host portion of the URL corresponding to the user's dial target and + // returns a dial function. + AddressDialer func(address string) func(context.Context, string, string) (net.Conn, error) +) diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go index 16091168..27cd81af 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go +++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go @@ -61,6 +61,10 @@ func (b *builder) Scheme() string { return b.scheme } +func (b *builder) OverrideAuthority(resolver.Target) string { + return "localhost" +} + type nopResolver struct { } diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index b0ead4f5..03ef2fed 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -43,13 +43,41 @@ type Status struct { s *spb.Status } +// NewWithProto returns a new status including details from statusProto. This +// is meant to be used by the gRPC library only. +func NewWithProto(code codes.Code, message string, statusProto []string) *Status { + if len(statusProto) != 1 { + // No grpc-status-details-bin header, or multiple; just ignore.
+ return &Status{s: &spb.Status{Code: int32(code), Message: message}} + } + st := &spb.Status{} + if err := proto.Unmarshal([]byte(statusProto[0]), st); err != nil { + // Probably not a google.rpc.Status proto; do not provide details. + return &Status{s: &spb.Status{Code: int32(code), Message: message}} + } + if st.Code == int32(code) { + // The codes match between the grpc-status header and the + // grpc-status-details-bin header; use the full details proto. + return &Status{s: st} + } + return &Status{ + s: &spb.Status{ + Code: int32(codes.Internal), + Message: fmt.Sprintf( + "grpc-status-details-bin mismatch: grpc-status=%v, grpc-message=%q, grpc-status-details-bin=%+v", + code, message, st, + ), + }, + } +} + // New returns a Status representing c and msg. func New(c codes.Code, msg string) *Status { return &Status{s: &spb.Status{Code: int32(c), Message: msg}} } // Newf returns New(c, fmt.Sprintf(format, a...)). -func Newf(c codes.Code, format string, a ...interface{}) *Status { +func Newf(c codes.Code, format string, a ...any) *Status { return New(c, fmt.Sprintf(format, a...)) } @@ -64,7 +92,7 @@ func Err(c codes.Code, msg string) error { } // Errorf returns Error(c, fmt.Sprintf(format, a...)). -func Errorf(c codes.Code, format string, a ...interface{}) error { +func Errorf(c codes.Code, format string, a ...any) error { return Err(c, fmt.Sprintf(format, a...)) } @@ -120,11 +148,11 @@ func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { // Details returns a slice of details messages attached to the status. // If a detail cannot be decoded, the error is returned in place of the detail. -func (s *Status) Details() []interface{} { +func (s *Status) Details() []any { if s == nil || s.s == nil { return nil } - details := make([]interface{}, 0, len(s.s.Details)) + details := make([]any, 0, len(s.s.Details)) for _, any := range s.s.Details { detail := &ptypes.DynamicAny{} if err := ptypes.UnmarshalAny(any, detail); err != nil { diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go new file mode 100644 index 00000000..4f347edd --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go @@ -0,0 +1,29 @@ +//go:build !unix && !windows + +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +import ( + "net" +) + +// NetDialerWithTCPKeepalive returns a vanilla net.Dialer on non-unix platforms. +func NetDialerWithTCPKeepalive() *net.Dialer { + return &net.Dialer{} +} diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go new file mode 100644 index 00000000..078137b7 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go @@ -0,0 +1,54 @@ +//go:build unix + +/* + * Copyright 2023 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +import ( + "net" + "syscall" + "time" + + "golang.org/x/sys/unix" +) + +// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on +// the underlying connection with OS default values for keepalive parameters. +// +// TODO: Once https://github.com/golang/go/issues/62254 lands, and the +// appropriate Go version becomes less than our least supported Go version, we +// should look into using the new API to make things more straightforward. +func NetDialerWithTCPKeepalive() *net.Dialer { + return &net.Dialer{ + // Setting a negative value here prevents the Go stdlib from overriding + // the values of TCP keepalive time and interval. It also prevents the + // Go stdlib from enabling TCP keepalives by default. + KeepAlive: time.Duration(-1), + // This method is called after the underlying network socket is created, + // but before dialing the socket (or calling its connect() method). The + // combination of unconditionally enabling TCP keepalives here, and + // disabling the overriding of TCP keepalive parameters by setting the + // KeepAlive field to a negative value above, results in OS defaults for + // the TCP keepalive interval and time parameters. + Control: func(_, _ string, c syscall.RawConn) error { + return c.Control(func(fd uintptr) { + unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1) + }) + }, + } +} diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go new file mode 100644 index 00000000..fd7d43a8 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go @@ -0,0 +1,54 @@ +//go:build windows + +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +import ( + "net" + "syscall" + "time" + + "golang.org/x/sys/windows" +) + +// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on +// the underlying connection with OS default values for keepalive parameters. +// +// TODO: Once https://github.com/golang/go/issues/62254 lands, and the +// appropriate Go version becomes less than our least supported Go version, we +// should look into using the new API to make things more straightforward.
+func NetDialerWithTCPKeepalive() *net.Dialer { + return &net.Dialer{ + // Setting a negative value here prevents the Go stdlib from overriding + // the values of TCP keepalive time and interval. It also prevents the + // Go stdlib from enabling TCP keepalives by default. + KeepAlive: time.Duration(-1), + // This method is called after the underlying network socket is created, + // but before dialing the socket (or calling its connect() method). The + // combination of unconditionally enabling TCP keepalives here, and + // disabling the overriding of TCP keepalive parameters by setting the + // KeepAlive field to a negative value above, results in OS defaults for + // the TCP keepalive interval and time parameters. + Control: func(_, _ string, c syscall.RawConn) error { + return c.Control(func(fd uintptr) { + windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1) + }) + }, + } +} diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index be5a9c81..b330cced 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -40,7 +40,7 @@ var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { } type itemNode struct { - it interface{} + it any next *itemNode } @@ -49,7 +49,7 @@ type itemList struct { tail *itemNode } -func (il *itemList) enqueue(i interface{}) { +func (il *itemList) enqueue(i any) { n := &itemNode{it: i} if il.tail == nil { il.head, il.tail = n, n @@ -61,11 +61,11 @@ func (il *itemList) enqueue(i interface{}) { // peek returns the first item in the list without removing it from the // list. -func (il *itemList) peek() interface{} { +func (il *itemList) peek() any { return il.head.it } -func (il *itemList) dequeue() interface{} { +func (il *itemList) dequeue() any { if il.head == nil { return nil } @@ -336,7 +336,7 @@ func (c *controlBuffer) put(it cbItem) error { return err } -func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (bool, error) { +func (c *controlBuffer) executeAndPut(f func(it any) bool, it cbItem) (bool, error) { var wakeUp bool c.mu.Lock() if c.err != nil { @@ -373,7 +373,7 @@ func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (b } // Note argument f should never be nil.
-func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) { +func (c *controlBuffer) execute(f func(it any) bool, it any) (bool, error) { c.mu.Lock() if c.err != nil { c.mu.Unlock() @@ -387,7 +387,7 @@ func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bo return true, nil } -func (c *controlBuffer) get(block bool) (interface{}, error) { +func (c *controlBuffer) get(block bool) (any, error) { for { c.mu.Lock() if c.err != nil { @@ -830,7 +830,7 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error { return nil } -func (l *loopyWriter) handle(i interface{}) error { +func (l *loopyWriter) handle(i any) error { switch i := i.(type) { case *incomingWindowUpdate: l.incomingWindowUpdateHandler(i) diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 98f80e3f..a9d70e2a 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -75,11 +75,25 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s return nil, errors.New(msg) } + var localAddr net.Addr + if la := r.Context().Value(http.LocalAddrContextKey); la != nil { + localAddr, _ = la.(net.Addr) + } + var authInfo credentials.AuthInfo + if r.TLS != nil { + authInfo = credentials.TLSInfo{State: *r.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}} + } + p := peer.Peer{ + Addr: strAddr(r.RemoteAddr), + LocalAddr: localAddr, + AuthInfo: authInfo, + } st := &serverHandlerTransport{ rw: w, req: r, closedCh: make(chan struct{}), writes: make(chan func()), + peer: p, contentType: contentType, contentSubtype: contentSubtype, stats: stats, @@ -134,6 +148,8 @@ type serverHandlerTransport struct { headerMD metadata.MD + peer peer.Peer + closeOnce sync.Once closedCh chan struct{} // closed on Close @@ -165,7 +181,13 @@ func (ht *serverHandlerTransport) Close(err error) { }) } -func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } +func (ht *serverHandlerTransport) Peer() *peer.Peer { + return &peer.Peer{ + Addr: ht.peer.Addr, + LocalAddr: ht.peer.LocalAddr, + AuthInfo: ht.peer.AuthInfo, + } +} // strAddr is a net.Addr backed by either a TCP "ip:port" string, or // the empty string if unknown. @@ -220,18 +242,20 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro h.Set("Grpc-Message", encodeGrpcMessage(m)) } + s.hdrMu.Lock() if p := st.Proto(); p != nil && len(p.Details) > 0 { + delete(s.trailer, grpcStatusDetailsBinHeader) stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. panic(err) } - h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes)) + h.Set(grpcStatusDetailsBinHeader, encodeBinHeader(stBytes)) } - if md := s.Trailer(); len(md) > 0 { - for k, vv := range md { + if len(s.trailer) > 0 { + for k, vv := range s.trailer { // Clients don't tolerate reading restricted headers after some non restricted ones were sent. 
if isReservedHeader(k) { continue @@ -243,6 +267,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro } } } + s.hdrMu.Unlock() }) if err == nil { // transport has not been closed @@ -287,7 +312,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { } // writeCustomHeaders sets custom headers set on the stream via SetHeader -// on the first write call (Write, WriteHeader, or WriteStatus). +// on the first write call (Write, WriteHeader, or WriteStatus) func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { h := ht.rw.Header() @@ -344,10 +369,8 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { return err } -func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) { +func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*Stream)) { // With this transport type there will be exactly 1 stream: this HTTP request. - - ctx := ht.req.Context() var cancel context.CancelFunc if ht.timeoutSet { ctx, cancel = context.WithTimeout(ctx, ht.timeout) @@ -367,34 +390,19 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace ht.Close(errors.New("request is done processing")) }() + ctx = metadata.NewIncomingContext(ctx, ht.headerMD) req := ht.req - s := &Stream{ - id: 0, // irrelevant - requestRead: func(int) {}, - cancel: cancel, - buf: newRecvBuffer(), - st: ht, - method: req.URL.Path, - recvCompress: req.Header.Get("grpc-encoding"), - contentSubtype: ht.contentSubtype, - } - pr := &peer.Peer{ - Addr: ht.RemoteAddr(), - } - if req.TLS != nil { - pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}} - } - ctx = metadata.NewIncomingContext(ctx, ht.headerMD) - s.ctx = peer.NewContext(ctx, pr) - for _, sh := range ht.stats { - s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) - inHeader := &stats.InHeader{ - FullMethod: s.method, - RemoteAddr: ht.RemoteAddr(), - Compression: s.recvCompress, - } - sh.HandleRPC(s.ctx, inHeader) + id: 0, // irrelevant + ctx: ctx, + requestRead: func(int) {}, + cancel: cancel, + buf: newRecvBuffer(), + st: ht, + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), + contentSubtype: ht.contentSubtype, + headerWireLength: 0, // won't have access to header wire length until golang/go#18997. 
} s.trReader = &transportReader{ reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 326bf084..c33ac596 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -36,6 +36,7 @@ import ( "golang.org/x/net/http2/hpack" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" icredentials "google.golang.org/grpc/internal/credentials" "google.golang.org/grpc/internal/grpclog" @@ -43,7 +44,7 @@ import ( "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" istatus "google.golang.org/grpc/internal/status" - "google.golang.org/grpc/internal/syscall" + isyscall "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/internal/transport/networktype" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" @@ -58,6 +59,8 @@ import ( // atomically. var clientConnectionCounter uint64 +var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool)) + // http2Client implements the ClientTransport interface with HTTP2. type http2Client struct { lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. @@ -176,7 +179,7 @@ func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error if networkType == "tcp" && useProxy { return proxyDial(ctx, address, grpcUA) } - return (&net.Dialer{}).DialContext(ctx, networkType, address) + return internal.NetDialerWithTCPKeepalive().DialContext(ctx, networkType, address) } func isTemporary(err error) bool { @@ -262,7 +265,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } keepaliveEnabled := false if kp.Time != infinity { - if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + if err = isyscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) } keepaliveEnabled = true @@ -330,7 +333,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts readerDone: make(chan struct{}), writerDone: make(chan struct{}), goAway: make(chan struct{}), - framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize), + framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize), fc: &trInFlow{limit: uint32(icwz)}, scheme: scheme, activeStreams: make(map[uint32]*Stream), @@ -493,8 +496,9 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { func (t *http2Client) getPeer() *peer.Peer { return &peer.Peer{ - Addr: t.remoteAddr, - AuthInfo: t.authInfo, // Can be nil + Addr: t.remoteAddr, + AuthInfo: t.authInfo, // Can be nil + LocalAddr: t.localAddr, } } @@ -566,7 +570,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) } - if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { + if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok { var k string for k, vv := range md { // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. 
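The hunk above also shows how the transport now reaches metadata's unexported helper: internal.FromOutgoingContextRaw is declared as an untyped any variable (see the internal vars at the top of this patch), metadata.go assigns the real function to it from an init function, and http2_client.go asserts it back to its concrete signature. This indirection lets the helper be removed from the public metadata API while remaining callable from the transport. A minimal, self-contained sketch of the same pattern; the names here are illustrative, not the real gRPC symbols:

package main

import "fmt"

// Hook plays the role of internal.FromOutgoingContextRaw: an untyped
// variable, so the consuming package never needs to name the provider's
// concrete function.
var Hook any // func(string) string

// The provider assigns the implementation at init time, the way
// metadata.go registers fromOutgoingContextRaw.
func init() {
	Hook = func(s string) string { return "md:" + s }
}

func main() {
	// The consumer asserts the hook back to its concrete type once,
	// as http2_client.go does for metadataFromOutgoingContextRaw.
	fromRaw := Hook.(func(string) string)
	fmt.Println(fromRaw("example")) // prints "md:example"
}

One caveat the transport code inherits: the assertion panics if the provider's init has not run, so this is only safe because the package graph guarantees the metadata package is imported.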
@@ -762,7 +766,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, firstTry := true var ch chan struct{} transportDrainRequired := false - checkForStreamQuota := func(it interface{}) bool { + checkForStreamQuota := func(it any) bool { if t.streamQuota <= 0 { // Can go negative if server decreases it. if firstTry { t.waitingStreams++ @@ -800,7 +804,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return true } var hdrListSizeErr error - checkForHeaderListSize := func(it interface{}) bool { + checkForHeaderListSize := func(it any) bool { if t.maxSendHeaderListSize == nil { return true } @@ -815,7 +819,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return true } for { - success, err := t.controlBuf.executeAndPut(func(it interface{}) bool { + success, err := t.controlBuf.executeAndPut(func(it any) bool { return checkForHeaderListSize(it) && checkForStreamQuota(it) }, hdr) if err != nil { @@ -927,7 +931,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. rst: rst, rstCode: rstCode, } - addBackStreamQuota := func(interface{}) bool { + addBackStreamQuota := func(any) bool { t.streamQuota++ if t.streamQuota > 0 && t.waitingStreams > 0 { select { @@ -1080,7 +1084,7 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) { // for the transport and the stream based on the current bdp // estimation. func (t *http2Client) updateFlowControl(n uint32) { - updateIWS := func(interface{}) bool { + updateIWS := func(any) bool { t.initialWindowSize = int32(n) t.mu.Lock() for _, s := range t.activeStreams { @@ -1233,7 +1237,7 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { } updateFuncs = append(updateFuncs, updateStreamQuota) } - t.controlBuf.executeAndPut(func(interface{}) bool { + t.controlBuf.executeAndPut(func(any) bool { for _, f := range updateFuncs { f() } @@ -1321,10 +1325,8 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { for streamID, stream := range t.activeStreams { if streamID > id && streamID <= upperLimit { // The stream was unprocessed by the server. - if streamID > id && streamID <= upperLimit { - atomic.StoreUint32(&stream.unprocessed, 1) - streamsToClose = append(streamsToClose, stream) - } + atomic.StoreUint32(&stream.unprocessed, 1) + streamsToClose = append(streamsToClose, stream) } } t.mu.Unlock() @@ -1399,7 +1401,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { mdata = make(map[string][]string) contentTypeErr = "malformed header: missing HTTP content-type" grpcMessage string - statusGen *status.Status recvCompress string httpStatusCode *int httpStatusErr string @@ -1434,12 +1435,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { rawStatusCode = codes.Code(uint32(code)) case "grpc-message": grpcMessage = decodeGrpcMessage(hf.Value) - case "grpc-status-details-bin": - var err error - statusGen, err = decodeGRPCStatusDetails(hf.Value) - if err != nil { - headerError = fmt.Sprintf("transport: malformed grpc-status-details-bin: %v", err) - } case ":status": if hf.Value == "200" { httpStatusErr = "" @@ -1505,14 +1500,15 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } - isHeader := false - - // If headerChan hasn't been closed yet - if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { - s.headerValid = true - if !endStream { - // HEADERS frame block carries a Response-Headers. 
- isHeader = true + // For headers, set them in s.header and close headerChan. For trailers or + // trailers-only, closeStream will set the trailers and close headerChan as + // needed. + if !endStream { + // If headerChan hasn't been closed yet (expected, given we checked it + // above, but something else could have potentially closed the whole + // stream). + if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + s.headerValid = true // These values can be set without any synchronization because // stream goroutine will read it only after seeing a closed // headerChan which we'll close after setting this. @@ -1520,15 +1516,12 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { if len(mdata) > 0 { s.header = mdata } - } else { - // HEADERS frame block carries a Trailers-Only. - s.noHeaders = true + close(s.headerChan) } - close(s.headerChan) } for _, sh := range t.statsHandlers { - if isHeader { + if !endStream { inHeader := &stats.InHeader{ Client: true, WireLength: int(frame.Header().Length), @@ -1550,13 +1543,12 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } - if statusGen == nil { - statusGen = status.New(rawStatusCode, grpcMessage) - } + status := istatus.NewWithProto(rawStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader]) - // if client received END_STREAM from server while stream was still active, send RST_STREAM - rst := s.getState() == streamActive - t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true) + // If client received END_STREAM from server while stream was still active, + // send RST_STREAM. + rstStream := s.getState() == streamActive + t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, status, mdata, true) } // readServerPreface reads and handles the initial settings frame from the diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 79e86ba0..f6bac0e8 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -68,18 +68,15 @@ var serverConnectionCounter uint64 // http2Server implements the ServerTransport interface with HTTP2. type http2Server struct { - lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. - ctx context.Context - done chan struct{} - conn net.Conn - loopy *loopyWriter - readerDone chan struct{} // sync point to enable testing. - writerDone chan struct{} // sync point to enable testing. - remoteAddr net.Addr - localAddr net.Addr - authInfo credentials.AuthInfo // auth info about the connection - inTapHandle tap.ServerInHandle - framer *framer + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + done chan struct{} + conn net.Conn + loopy *loopyWriter + readerDone chan struct{} // sync point to enable testing. + loopyWriterDone chan struct{} + peer peer.Peer + inTapHandle tap.ServerInHandle + framer *framer // The max number of concurrent streams. 
maxStreams uint32 // controlBuf delivers all the control related tasks (e.g., window @@ -165,21 +162,16 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, if config.MaxHeaderListSize != nil { maxHeaderListSize = *config.MaxHeaderListSize } - framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize) + framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize) // Send initial settings as connection preface to client. isettings := []http2.Setting{{ ID: http2.SettingMaxFrameSize, Val: http2MaxFrameLen, }} - // TODO(zhaoq): Have a better way to signal "no limit" because 0 is - // permitted in the HTTP2 spec. - maxStreams := config.MaxStreams - if maxStreams == 0 { - maxStreams = math.MaxUint32 - } else { + if config.MaxStreams != math.MaxUint32 { isettings = append(isettings, http2.Setting{ ID: http2.SettingMaxConcurrentStreams, - Val: maxStreams, + Val: config.MaxStreams, }) } dynamicWindow := true @@ -238,7 +230,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, kp.Timeout = defaultServerKeepaliveTimeout } if kp.Time != infinity { - if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + if err = syscall.SetTCPUserTimeout(rawConn, kp.Timeout); err != nil { return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) } } @@ -248,17 +240,19 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, } done := make(chan struct{}) + peer := peer.Peer{ + Addr: conn.RemoteAddr(), + LocalAddr: conn.LocalAddr(), + AuthInfo: authInfo, + } t := &http2Server{ - ctx: setConnection(context.Background(), rawConn), done: done, conn: conn, - remoteAddr: conn.RemoteAddr(), - localAddr: conn.LocalAddr(), - authInfo: authInfo, + peer: peer, framer: framer, readerDone: make(chan struct{}), - writerDone: make(chan struct{}), - maxStreams: maxStreams, + loopyWriterDone: make(chan struct{}), + maxStreams: config.MaxStreams, inTapHandle: config.InTapHandle, fc: &trInFlow{limit: uint32(icwz)}, state: reachable, @@ -272,8 +266,6 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, bufferPool: newBufferPool(), } t.logger = prefixLoggerForServerTransport(t) - // Add peer information to the http2server context. 
- t.ctx = peer.NewContext(t.ctx, t.getPeer()) t.controlBuf = newControlBuffer(t.done) if dynamicWindow { @@ -282,15 +274,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, updateFlowControl: t.updateFlowControl, } } - for _, sh := range t.stats { - t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - }) - connBegin := &stats.ConnBegin{} - sh.HandleConn(t.ctx, connBegin) - } - t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.peer.Addr, t.peer.LocalAddr)) if err != nil { return nil, err } @@ -339,7 +323,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler t.loopy.run() - close(t.writerDone) + close(t.loopyWriterDone) }() go t.keepalive() return t, nil @@ -347,7 +331,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, // operateHeaders takes action on the decoded headers. Returns an error if fatal // error encountered and transport needs to close, otherwise returns nil. -func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) error { +func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*Stream)) error { // Acquire max stream ID lock for entire duration t.maxStreamMu.Lock() defer t.maxStreamMu.Unlock() @@ -374,10 +358,11 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( buf := newRecvBuffer() s := &Stream{ - id: streamID, - st: t, - buf: buf, - fc: &inFlow{limit: uint32(t.initialWindowSize)}, + id: streamID, + st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + headerWireLength: int(frame.Header().Length), } var ( // if false, content-type was missing or invalid @@ -516,9 +501,9 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( s.state = streamReadDone } if timeoutSet { - s.ctx, s.cancel = context.WithTimeout(t.ctx, timeout) + s.ctx, s.cancel = context.WithTimeout(ctx, timeout) } else { - s.ctx, s.cancel = context.WithCancel(t.ctx) + s.ctx, s.cancel = context.WithCancel(ctx) } // Attach the received metadata to the context. 
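The first hunk below extends the tap handle invocation with the parsed request metadata (Header: mdata), so a tap.ServerInHandle can now inspect headers and reject an RPC before any interceptor or stats handler runs. A sketch of what that enables, assuming a hypothetical auth-token scheme; the metadata key and server wiring are illustrative, and the Header field is only populated by transports carrying this patch:

package main

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/grpc/tap"
)

// requireToken rejects any inbound RPC that carries no "auth-token"
// metadata value, before the RPC reaches interceptors or handlers.
func requireToken(ctx context.Context, info *tap.Info) (context.Context, error) {
	if len(info.Header.Get("auth-token")) == 0 {
		return nil, status.Error(codes.Unauthenticated, "missing auth-token")
	}
	return ctx, nil
}

func main() {
	// InTapHandle installs the check on every incoming stream.
	s := grpc.NewServer(grpc.InTapHandle(requireToken))
	defer s.Stop()
}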
@@ -566,7 +551,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } if t.inTapHandle != nil { var err error - if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil { + if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method, Header: mdata}); err != nil { t.mu.Unlock() if t.logger.V(logLevel) { t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err) @@ -597,19 +582,6 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( s.requestRead = func(n int) { t.adjustWindow(s, uint32(n)) } - s.ctx = traceCtx(s.ctx, s.method) - for _, sh := range t.stats { - s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) - inHeader := &stats.InHeader{ - FullMethod: s.method, - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - Compression: s.recvCompress, - WireLength: int(frame.Header().Length), - Header: mdata.Copy(), - } - sh.HandleRPC(s.ctx, inHeader) - } s.ctxDone = s.ctx.Done() s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) s.trReader = &transportReader{ @@ -635,8 +607,11 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( // HandleStreams receives incoming streams using the given handler. This is // typically run in a separate goroutine. // traceCtx attaches trace to ctx and returns the new context. -func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) { - defer close(t.readerDone) +func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { + defer func() { + <-t.loopyWriterDone + close(t.readerDone) + }() for { t.controlBuf.throttle() frame, err := t.framer.fr.ReadFrame() @@ -670,7 +645,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. } switch frame := frame.(type) { case *http2.MetaHeadersFrame: - if err := t.operateHeaders(frame, handle, traceCtx); err != nil { + if err := t.operateHeaders(ctx, frame, handle); err != nil { t.Close(err) break } @@ -855,7 +830,7 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) { } return nil }) - t.controlBuf.executeAndPut(func(interface{}) bool { + t.controlBuf.executeAndPut(func(any) bool { for _, f := range updateFuncs { f() } @@ -939,7 +914,7 @@ func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) return headerFields } -func (t *http2Server) checkForHeaderListSize(it interface{}) bool { +func (t *http2Server) checkForHeaderListSize(it any) bool { if t.maxSendHeaderListSize == nil { return true } @@ -985,7 +960,12 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { } } if err := t.writeHeaderLocked(s); err != nil { - return status.Convert(err).Err() + switch e := err.(type) { + case ConnectionError: + return status.Error(codes.Unavailable, e.Desc) + default: + return status.Convert(err).Err() + } } return nil } @@ -1058,12 +1038,15 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) if p := st.Proto(); p != nil && len(p.Details) > 0 { + // Do not use the user's grpc-status-details-bin (if present) if we are + // even attempting to set our own. + delete(s.trailer, grpcStatusDetailsBinHeader) stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. 
t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err) } else { - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) + headerFields = append(headerFields, hpack.HeaderField{Name: grpcStatusDetailsBinHeader, Value: encodeBinHeader(stBytes)}) } } @@ -1245,10 +1228,6 @@ func (t *http2Server) Close(err error) { for _, s := range streams { s.cancel() } - for _, sh := range t.stats { - connEnd := &stats.ConnEnd{} - sh.HandleConn(t.ctx, connEnd) - } } // deleteStream deletes the stream s from transport's active streams. @@ -1314,10 +1293,6 @@ func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eo }) } -func (t *http2Server) RemoteAddr() net.Addr { - return t.remoteAddr -} - func (t *http2Server) Drain(debugData string) { t.mu.Lock() defer t.mu.Unlock() @@ -1400,11 +1375,11 @@ func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric { LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), LocalFlowControlWindow: int64(t.fc.getSize()), SocketOptions: channelz.GetSocketOption(t.conn), - LocalAddr: t.localAddr, - RemoteAddr: t.remoteAddr, + LocalAddr: t.peer.LocalAddr, + RemoteAddr: t.peer.Addr, // RemoteName : } - if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { + if au, ok := t.peer.AuthInfo.(credentials.ChannelzSecurityInfo); ok { s.Security = au.GetSecurityValue() } s.RemoteFlowControlWindow = t.getOutFlowWindow() @@ -1436,10 +1411,12 @@ func (t *http2Server) getOutFlowWindow() int64 { } } -func (t *http2Server) getPeer() *peer.Peer { +// Peer returns the peer of the transport. +func (t *http2Server) Peer() *peer.Peer { return &peer.Peer{ - Addr: t.remoteAddr, - AuthInfo: t.authInfo, // Can be nil + Addr: t.peer.Addr, + LocalAddr: t.peer.LocalAddr, + AuthInfo: t.peer.AuthInfo, // Can be nil } } @@ -1464,6 +1441,6 @@ func GetConnection(ctx context.Context) net.Conn { // SetConnection adds the connection to the context to be able to get // information about the destination ip and port for an incoming RPC. This also // allows any unary or streaming interceptors to see the connection. -func setConnection(ctx context.Context, conn net.Conn) context.Context { +func SetConnection(ctx context.Context, conn net.Conn) context.Context { return context.WithValue(ctx, connectionKey{}, conn) } diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index 19cbb18f..dc29d590 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -30,15 +30,13 @@ import ( "net/url" "strconv" "strings" + "sync" "time" "unicode/utf8" - "github.com/golang/protobuf/proto" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" - spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" ) const ( @@ -87,6 +85,8 @@ var ( } ) +var grpcStatusDetailsBinHeader = "grpc-status-details-bin" + // isReservedHeader checks whether hdr belongs to HTTP2 headers // reserved by gRPC protocol. Any other headers are classified as the // user-specified metadata. 
@@ -102,7 +102,6 @@ func isReservedHeader(hdr string) bool { "grpc-message", "grpc-status", "grpc-timeout", - "grpc-status-details-bin", // Intentionally exclude grpc-previous-rpc-attempts and // grpc-retry-pushback-ms, which are "reserved", but their API // intentionally works via metadata. @@ -153,18 +152,6 @@ func decodeMetadataHeader(k, v string) (string, error) { return v, nil } -func decodeGRPCStatusDetails(rawDetails string) (*status.Status, error) { - v, err := decodeBinHeader(rawDetails) - if err != nil { - return nil, err - } - st := &spb.Status{} - if err = proto.Unmarshal(v, st); err != nil { - return nil, err - } - return status.FromProto(st), nil -} - type timeoutUnit uint8 const ( @@ -309,6 +296,7 @@ func decodeGrpcMessageUnchecked(msg string) string { } type bufWriter struct { + pool *sync.Pool buf []byte offset int batchSize int @@ -316,12 +304,17 @@ type bufWriter struct { err error } -func newBufWriter(conn net.Conn, batchSize int) *bufWriter { - return &bufWriter{ - buf: make([]byte, batchSize*2), +func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter { + w := &bufWriter{ batchSize: batchSize, conn: conn, + pool: pool, + } + // A nil pool indicates that a dedicated (non-shared) buffer is used. + if pool == nil { + w.buf = make([]byte, batchSize) } + return w } func (w *bufWriter) Write(b []byte) (n int, err error) { @@ -332,19 +325,34 @@ func (w *bufWriter) Write(b []byte) (n int, err error) { n, err = w.conn.Write(b) return n, toIOError(err) } + if w.buf == nil { + b := w.pool.Get().(*[]byte) + w.buf = *b + } for len(b) > 0 { nn := copy(w.buf[w.offset:], b) b = b[nn:] w.offset += nn n += nn if w.offset >= w.batchSize { - err = w.Flush() + err = w.flushKeepBuffer() } } return n, err } func (w *bufWriter) Flush() error { + err := w.flushKeepBuffer() + // Only release the buffer back to the pool if we are in "shared" mode. + if w.buf != nil && w.pool != nil { + b := w.buf + w.pool.Put(&b) + w.buf = nil + } + return err +} + +func (w *bufWriter) flushKeepBuffer() error { if w.err != nil { return w.err } @@ -381,7 +389,10 @@ type framer struct { fr *http2.Framer } -func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer { +var writeBufferPoolMap map[int]*sync.Pool = make(map[int]*sync.Pool) +var writeBufferMutex sync.Mutex + +func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer { if writeBufferSize < 0 { writeBufferSize = 0 } @@ -389,7 +400,11 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderList if readBufferSize > 0 { r = bufio.NewReaderSize(r, readBufferSize) } - w := newBufWriter(conn, writeBufferSize) + var pool *sync.Pool + if sharedWriteBuffer { + pool = getWriteBufferPool(writeBufferSize) + } + w := newBufWriter(conn, writeBufferSize, pool) f := &framer{ writer: w, fr: http2.NewFramer(w, r), @@ -403,6 +418,24 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderList return f } +func getWriteBufferPool(writeBufferSize int) *sync.Pool { + writeBufferMutex.Lock() + defer writeBufferMutex.Unlock() + size := writeBufferSize * 2 + pool, ok := writeBufferPoolMap[size] + if ok { + return pool + } + pool = &sync.Pool{ + New: func() any { + b := make([]byte, size) + return &b + }, + } + writeBufferPoolMap[size] = pool + return pool +} + // parseDialTarget returns the network and address to pass to dialer.
func parseDialTarget(target string) (string, string) { net := "tcp" diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go index 41596198..24fa1032 100644 --- a/vendor/google.golang.org/grpc/internal/transport/proxy.go +++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go @@ -28,6 +28,8 @@ import ( "net/http" "net/http/httputil" "net/url" + + "google.golang.org/grpc/internal" ) const proxyAuthHeaderKey = "Proxy-Authorization" @@ -112,7 +114,7 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri // proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy // is necessary, dials, does the HTTP CONNECT handshake, and returns the // connection. -func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, err error) { +func proxyDial(ctx context.Context, addr string, grpcUA string) (net.Conn, error) { newAddr := addr proxyURL, err := mapAddress(addr) if err != nil { @@ -122,15 +124,15 @@ func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, newAddr = proxyURL.Host } - conn, err = (&net.Dialer{}).DialContext(ctx, "tcp", newAddr) + conn, err := internal.NetDialerWithTCPKeepalive().DialContext(ctx, "tcp", newAddr) if err != nil { - return + return nil, err } - if proxyURL != nil { + if proxyURL == nil { // proxy is disabled if proxyURL is nil. - conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA) + return conn, err } - return + return doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA) } func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index aa1c8965..b7b8fec1 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -37,16 +37,13 @@ import ( "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" "google.golang.org/grpc/tap" ) -// ErrNoHeaders is used as a signal that a trailers only response was received, -// and is not a real error. -var ErrNoHeaders = errors.New("stream has no headers") - const logLevel = 2 type bufferPool struct { @@ -56,7 +53,7 @@ type bufferPool struct { func newBufferPool() *bufferPool { return &bufferPool{ pool: sync.Pool{ - New: func() interface{} { + New: func() any { return new(bytes.Buffer) }, }, @@ -269,7 +266,8 @@ type Stream struct { // headerValid indicates whether a valid header was received. Only // meaningful after headerChan is closed (always call waitOnHeader() before // reading its value). Not valid on server side. - headerValid bool + headerValid bool + headerWireLength int // Only set on server side. // hdrMu protects header and trailer metadata on the server-side. hdrMu sync.Mutex @@ -390,14 +388,10 @@ func (s *Stream) Header() (metadata.MD, error) { } s.waitOnHeader() - if !s.headerValid { + if !s.headerValid || s.noHeaders { return nil, s.status.Err() } - if s.noHeaders { - return nil, ErrNoHeaders - } - return s.header.Copy(), nil } @@ -433,6 +427,12 @@ func (s *Stream) Context() context.Context { return s.ctx } +// SetContext sets the context of the stream. 
This will be deleted once the +// stats handler callouts all move to gRPC layer. +func (s *Stream) SetContext(ctx context.Context) { + s.ctx = ctx +} + // Method returns the method for the stream. func (s *Stream) Method() string { return s.method @@ -445,6 +445,12 @@ func (s *Stream) Status() *status.Status { return s.status } +// HeaderWireLength returns the size of the headers of the stream as received +// from the wire. Valid only on the server. +func (s *Stream) HeaderWireLength() int { + return s.headerWireLength +} + // SetHeader sets the header metadata. This can be called multiple times. // Server side only. // This should not be called in parallel to other data writes. @@ -559,6 +565,7 @@ type ServerConfig struct { InitialConnWindowSize int32 WriteBufferSize int ReadBufferSize int + SharedWriteBuffer bool ChannelzParentID *channelz.Identifier MaxHeaderListSize *uint32 HeaderTableSize *uint32 @@ -592,6 +599,8 @@ type ConnectOptions struct { WriteBufferSize int // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. ReadBufferSize int + // SharedWriteBuffer indicates whether connections should reuse write buffer + SharedWriteBuffer bool // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. ChannelzParentID *channelz.Identifier // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. @@ -703,7 +712,7 @@ type ClientTransport interface { // Write methods for a given Stream will be called serially. type ServerTransport interface { // HandleStreams receives incoming streams using the given handler. - HandleStreams(func(*Stream), func(context.Context, string) context.Context) + HandleStreams(context.Context, func(*Stream)) // WriteHeader sends the header metadata for the given stream. // WriteHeader may not be called on all streams. @@ -722,8 +731,8 @@ type ServerTransport interface { // handlers will be terminated asynchronously. Close(err error) - // RemoteAddr returns the remote network address. - RemoteAddr() net.Addr + // Peer returns the peer of the server transport. + Peer() *peer.Peer // Drain notifies the client this ServerTransport stops accepting new RPCs. Drain(debugData string) @@ -736,7 +745,7 @@ type ServerTransport interface { } // connectionErrorf creates an ConnectionError with the specified error description. -func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError { +func connectionErrorf(temp bool, e error, format string, a ...any) ConnectionError { return ConnectionError{ Desc: fmt.Sprintf(format, a...), temp: temp, diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index a2cdcaf1..1e9485fd 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -25,8 +25,14 @@ import ( "context" "fmt" "strings" + + "google.golang.org/grpc/internal" ) +func init() { + internal.FromOutgoingContextRaw = fromOutgoingContextRaw +} + // DecodeKeyValue returns k, v, nil. // // Deprecated: use k and v directly instead. @@ -153,14 +159,16 @@ func Join(mds ...MD) MD { type mdIncomingKey struct{} type mdOutgoingKey struct{} -// NewIncomingContext creates a new context with incoming md attached. +// NewIncomingContext creates a new context with incoming md attached. md must +// not be modified after calling this function. 
func NewIncomingContext(ctx context.Context, md MD) context.Context { return context.WithValue(ctx, mdIncomingKey{}, md) } // NewOutgoingContext creates a new context with outgoing md attached. If used // in conjunction with AppendToOutgoingContext, NewOutgoingContext will -// overwrite any previously-appended metadata. +// overwrite any previously-appended metadata. md must not be modified after +// calling this function. func NewOutgoingContext(ctx context.Context, md MD) context.Context { return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md}) } @@ -203,7 +211,8 @@ func FromIncomingContext(ctx context.Context) (MD, bool) { } // ValueFromIncomingContext returns the metadata value corresponding to the metadata -// key from the incoming metadata if it exists. Key must be lower-case. +// key from the incoming metadata if it exists. Keys are matched in a case insensitive +// manner. // // # Experimental // @@ -219,33 +228,29 @@ func ValueFromIncomingContext(ctx context.Context, key string) []string { return copyOf(v) } for k, v := range md { - // We need to manually convert all keys to lower case, because MD is a - // map, and there's no guarantee that the MD attached to the context is - // created using our helper functions. - if strings.ToLower(k) == key { + // Case insenitive comparison: MD is a map, and there's no guarantee + // that the MD attached to the context is created using our helper + // functions. + if strings.EqualFold(k, key) { return copyOf(v) } } return nil } -// the returned slice must not be modified in place func copyOf(v []string) []string { vals := make([]string, len(v)) copy(vals, v) return vals } -// FromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD. +// fromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD. // // Remember to perform strings.ToLower on the keys, for both the returned MD (MD // is a map, there's no guarantee it's created using our helper functions) and // the extra kv pairs (AppendToOutgoingContext doesn't turn them into // lowercase). -// -// This is intended for gRPC-internal use ONLY. Users should use -// FromOutgoingContext instead. -func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { +func fromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) if !ok { return nil, nil, false diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go index e01d219f..a821ff9b 100644 --- a/vendor/google.golang.org/grpc/peer/peer.go +++ b/vendor/google.golang.org/grpc/peer/peer.go @@ -32,6 +32,8 @@ import ( type Peer struct { // Addr is the peer address. Addr net.Addr + // LocalAddr is the local address. + LocalAddr net.Addr // AuthInfo is the authentication information of the transport. // It is nil if there is no transport security being used. AuthInfo credentials.AuthInfo diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index 02f97595..bf56faa7 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -28,31 +28,31 @@ import ( "google.golang.org/grpc/internal/channelz" istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/stats" "google.golang.org/grpc/status" ) // pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick // actions and unblock when there's a picker update. 
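Two caller-visible consequences of the metadata changes above: MD passed to NewIncomingContext or NewOutgoingContext must not be mutated afterwards (copy it first), and ValueFromIncomingContext now matches keys case-insensitively. A short sketch of both points (ValueFromIncomingContext is still marked experimental):

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	md := metadata.Pairs("X-Trace-ID", "abc123") // Pairs lower-cases the key
	ctx := metadata.NewIncomingContext(context.Background(), md)

	// md now belongs to the context; mutate a copy instead.
	md2 := md.Copy()
	md2.Append("x-trace-id", "def456")

	// Lookup works regardless of key casing, even for MD built without
	// the helper functions.
	vals := metadata.ValueFromIncomingContext(ctx, "x-trace-id")
	fmt.Println(vals) // [abc123]
}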
type pickerWrapper struct { - mu sync.Mutex - done bool - idle bool - blockingCh chan struct{} - picker balancer.Picker + mu sync.Mutex + done bool + blockingCh chan struct{} + picker balancer.Picker + statsHandlers []stats.Handler // to record blocking picker calls } -func newPickerWrapper() *pickerWrapper { - return &pickerWrapper{blockingCh: make(chan struct{})} +func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper { + return &pickerWrapper{ + blockingCh: make(chan struct{}), + statsHandlers: statsHandlers, + } } // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. func (pw *pickerWrapper) updatePicker(p balancer.Picker) { pw.mu.Lock() - if pw.done || pw.idle { - // There is a small window where a picker update from the LB policy can - // race with the channel going to idle mode. If the picker is idle here, - // it is because the channel asked it to do so, and therefore it is sage - // to ignore the update from the LB policy. + if pw.done { pw.mu.Unlock() return } @@ -95,6 +95,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. var ch chan struct{} var lastPickErr error + for { pw.mu.Lock() if pw.done { @@ -129,6 +130,20 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. continue } + // If the channel is set, it means that the pick call had to wait for a + // new picker at some point. Either it's the first iteration and this + // function received the first picker, or a picker errored with + // ErrNoSubConnAvailable or errored with failfast set to false, which + // will trigger a continue to the next iteration. In the first case this + // conditional will hit if this call had to block (the channel is set). + // In the second case, the only way it will get to this conditional is + // if there is a new picker. + if ch != nil { + for _, sh := range pw.statsHandlers { + sh.HandleRPC(ctx, &stats.PickerUpdated{}) + } + } + ch = pw.blockingCh p := pw.picker pw.mu.Unlock() @@ -190,23 +205,15 @@ func (pw *pickerWrapper) close() { close(pw.blockingCh) } -func (pw *pickerWrapper) enterIdleMode() { - pw.mu.Lock() - defer pw.mu.Unlock() - if pw.done { - return - } - pw.idle = true -} - -func (pw *pickerWrapper) exitIdleMode() { +// reset clears the pickerWrapper and prepares it for being used again when idle +// mode is exited. +func (pw *pickerWrapper) reset() { pw.mu.Lock() defer pw.mu.Unlock() if pw.done { return } pw.blockingCh = make(chan struct{}) - pw.idle = false } // dropError is a wrapper error that indicates the LB policy wishes to drop the diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index abe266b0..5128f936 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -25,13 +25,18 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal/envconfig" + internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) -// PickFirstBalancerName is the name of the pick_first balancer. -const PickFirstBalancerName = "pick_first" +const ( + // PickFirstBalancerName is the name of the pick_first balancer. 
+ PickFirstBalancerName = "pick_first" + logPrefix = "[pick-first-lb %p] " +) func newPickfirstBuilder() balancer.Builder { return &pickfirstBuilder{} @@ -40,7 +45,9 @@ func newPickfirstBuilder() balancer.Builder { type pickfirstBuilder struct{} func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { - return &pickfirstBalancer{cc: cc} + b := &pickfirstBalancer{cc: cc} + b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) + return b } func (*pickfirstBuilder) Name() string { @@ -57,23 +64,23 @@ type pfConfig struct { } func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - cfg := &pfConfig{} - if err := json.Unmarshal(js, cfg); err != nil { + var cfg pfConfig + if err := json.Unmarshal(js, &cfg); err != nil { return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) } return cfg, nil } type pickfirstBalancer struct { + logger *internalgrpclog.PrefixLogger state connectivity.State cc balancer.ClientConn subConn balancer.SubConn - cfg *pfConfig } func (b *pickfirstBalancer) ResolverError(err error) { - if logger.V(2) { - logger.Infof("pickfirstBalancer: ResolverError called with error: %v", err) + if b.logger.V(2) { + b.logger.Infof("Received error from the name resolver: %v", err) } if b.subConn == nil { b.state = connectivity.TransientFailure @@ -96,35 +103,44 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // The resolver reported an empty address list. Treat it like an error by // calling b.ResolverError. if b.subConn != nil { - // Remove the old subConn. All addresses were removed, so it is no longer - // valid. - b.cc.RemoveSubConn(b.subConn) + // Shut down the old subConn. All addresses were removed, so it is + // no longer valid. + b.subConn.Shutdown() b.subConn = nil } b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } - if state.BalancerConfig != nil { - cfg, ok := state.BalancerConfig.(*pfConfig) - if !ok { - return fmt.Errorf("pickfirstBalancer: received nil or illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) - } - b.cfg = cfg + // We don't have to guard this block with the env var because ParseConfig + // already does so. + cfg, ok := state.BalancerConfig.(pfConfig) + if state.BalancerConfig != nil && !ok { + return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) } - - if envconfig.PickFirstLBConfig && b.cfg != nil && b.cfg.ShuffleAddressList { + if cfg.ShuffleAddressList { + addrs = append([]resolver.Address{}, addrs...) 
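The copy before grpcrand.Shuffle in the hunk below matters: the address slice belongs to resolver state and must not be permuted in place. The same copy-then-shuffle pattern, with the standard library's math/rand standing in for the internal grpcrand package:

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	orig := []string{"a:443", "b:443", "c:443"}

	// Copy first: shuffling a shared slice in place would corrupt the
	// caller's view of it.
	addrs := append([]string{}, orig...)
	rand.Shuffle(len(addrs), func(i, j int) {
		addrs[i], addrs[j] = addrs[j], addrs[i]
	})

	fmt.Println(orig)  // unchanged
	fmt.Println(addrs) // permuted
}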
grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) } + + if b.logger.V(2) { + b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState)) + } + if b.subConn != nil { b.cc.UpdateAddresses(b.subConn, addrs) return nil } - subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{}) + var subConn balancer.SubConn + subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{ + StateListener: func(state balancer.SubConnState) { + b.updateSubConnState(subConn, state) + }, + }) if err != nil { - if logger.V(2) { - logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) + if b.logger.V(2) { + b.logger.Infof("Failed to create new SubConn: %v", err) } b.state = connectivity.TransientFailure b.cc.UpdateState(balancer.State{ @@ -143,13 +159,19 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState return nil } +// UpdateSubConnState is unused as a StateListener is always registered when +// creating SubConns. func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { - if logger.V(2) { - logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", subConn, state) + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state) +} + +func (b *pickfirstBalancer) updateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { + if b.logger.V(2) { + b.logger.Infof("Received SubConn state update: %p, %+v", subConn, state) } if b.subConn != subConn { - if logger.V(2) { - logger.Infof("pickfirstBalancer: ignored state change because subConn is not recognized") + if b.logger.V(2) { + b.logger.Infof("Ignored state change because subConn is not recognized") } return } diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go index cd455478..73bd6336 100644 --- a/vendor/google.golang.org/grpc/preloader.go +++ b/vendor/google.golang.org/grpc/preloader.go @@ -37,7 +37,7 @@ type PreparedMsg struct { } // Encode marshalls and compresses the message using the codec and compressor for the stream. -func (p *PreparedMsg) Encode(s Stream, msg interface{}) error { +func (p *PreparedMsg) Encode(s Stream, msg any) error { ctx := s.Context() rpcInfo, ok := rpcInfoFromContext(ctx) if !ok { diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go new file mode 100644 index 00000000..14aa6f20 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package dns implements a dns resolver to be installed as the default resolver +// in grpc. +// +// Deprecated: this package is imported by grpc and should not need to be +// imported directly by users. 
+package dns + +import ( + "google.golang.org/grpc/internal/resolver/dns" + "google.golang.org/grpc/resolver" +) + +// NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. +// +// Deprecated: import grpc and use resolver.Get("dns") instead. +func NewBuilder() resolver.Builder { + return dns.NewBuilder() +} diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go index efcb7f3e..ada5b9bb 100644 --- a/vendor/google.golang.org/grpc/resolver/map.go +++ b/vendor/google.golang.org/grpc/resolver/map.go @@ -20,7 +20,7 @@ package resolver type addressMapEntry struct { addr Address - value interface{} + value any } // AddressMap is a map of addresses to arbitrary values taking into account @@ -69,7 +69,7 @@ func (l addressMapEntryList) find(addr Address) int { } // Get returns the value for the address in the map, if present. -func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { +func (a *AddressMap) Get(addr Address) (value any, ok bool) { addrKey := toMapKey(&addr) entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { @@ -79,7 +79,7 @@ func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { } // Set updates or adds the value to the address in the map. -func (a *AddressMap) Set(addr Address, value interface{}) { +func (a *AddressMap) Set(addr Address, value any) { addrKey := toMapKey(&addr) entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { @@ -127,8 +127,8 @@ func (a *AddressMap) Keys() []Address { } // Values returns a slice of all current map values. -func (a *AddressMap) Values() []interface{} { - ret := make([]interface{}, 0, a.Len()) +func (a *AddressMap) Values() []any { + ret := make([]any, 0, a.Len()) for _, entryList := range a.m { for _, entry := range entryList { ret = append(ret, entry.value) @@ -136,3 +136,116 @@ func (a *AddressMap) Values() []interface{} { } return ret } + +type endpointNode struct { + addrs map[string]struct{} +} + +// Equal returns whether the unordered set of addrs are the same between the +// endpoint nodes. +func (en *endpointNode) Equal(en2 *endpointNode) bool { + if len(en.addrs) != len(en2.addrs) { + return false + } + for addr := range en.addrs { + if _, ok := en2.addrs[addr]; !ok { + return false + } + } + return true +} + +func toEndpointNode(endpoint Endpoint) endpointNode { + en := make(map[string]struct{}) + for _, addr := range endpoint.Addresses { + en[addr.Addr] = struct{}{} + } + return endpointNode{ + addrs: en, + } +} + +// EndpointMap is a map of endpoints to arbitrary values keyed on only the +// unordered set of address strings within an endpoint. This map is not thread +// safe, thus it is unsafe to access concurrently. Must be created via +// NewEndpointMap; do not construct directly. +type EndpointMap struct { + endpoints map[*endpointNode]any +} + +// NewEndpointMap creates a new EndpointMap. +func NewEndpointMap() *EndpointMap { + return &EndpointMap{ + endpoints: make(map[*endpointNode]any), + } +} + +// Get returns the value for the address in the map, if present. +func (em *EndpointMap) Get(e Endpoint) (value any, ok bool) { + en := toEndpointNode(e) + if endpoint := em.find(en); endpoint != nil { + return em.endpoints[endpoint], true + } + return nil, false +} + +// Set updates or adds the value to the address in the map. 
+func (em *EndpointMap) Set(e Endpoint, value any) { + en := toEndpointNode(e) + if endpoint := em.find(en); endpoint != nil { + em.endpoints[endpoint] = value + return + } + em.endpoints[&en] = value +} + +// Len returns the number of entries in the map. +func (em *EndpointMap) Len() int { + return len(em.endpoints) +} + +// Keys returns a slice of all current map keys, as endpoints specifying the +// addresses present in the endpoint keys, in which uniqueness is determined by +// the unordered set of addresses. Thus, endpoint information returned is not +// the full endpoint data (drops duplicated addresses and attributes) but can be +// used for EndpointMap accesses. +func (em *EndpointMap) Keys() []Endpoint { + ret := make([]Endpoint, 0, len(em.endpoints)) + for en := range em.endpoints { + var endpoint Endpoint + for addr := range en.addrs { + endpoint.Addresses = append(endpoint.Addresses, Address{Addr: addr}) + } + ret = append(ret, endpoint) + } + return ret +} + +// Values returns a slice of all current map values. +func (em *EndpointMap) Values() []any { + ret := make([]any, 0, len(em.endpoints)) + for _, val := range em.endpoints { + ret = append(ret, val) + } + return ret +} + +// find returns a pointer to the endpoint node in em if the endpoint node is +// already present. If not found, nil is returned. The comparisons are done on +// the unordered set of addresses within an endpoint. +func (em EndpointMap) find(e endpointNode) *endpointNode { + for endpoint := range em.endpoints { + if e.Equal(endpoint) { + return endpoint + } + } + return nil +} + +// Delete removes the specified endpoint from the map. +func (em *EndpointMap) Delete(e Endpoint) { + en := toEndpointNode(e) + if entry := em.find(en); entry != nil { + delete(em.endpoints, entry) + } +} diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index 353c10b6..adf89dd9 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -77,25 +77,6 @@ func GetDefaultScheme() string { return defaultScheme } -// AddressType indicates the address type returned by name resolution. -// -// Deprecated: use Attributes in Address instead. -type AddressType uint8 - -const ( - // Backend indicates the address is for a backend server. - // - // Deprecated: use Attributes in Address instead. - Backend AddressType = iota - // GRPCLB indicates the address is for a grpclb load balancer. - // - // Deprecated: to select the GRPCLB load balancing policy, use a service - // config with a corresponding loadBalancingConfig. To supply balancer - // addresses to the GRPCLB load balancing policy, set State.Attributes - // using balancer/grpclb/state.Set. - GRPCLB -) - // Address represents a server the client connects to. // // # Experimental @@ -111,9 +92,6 @@ type Address struct { // the address, instead of the hostname from the Dial target string. In most cases, // this should not be set. // - // If Type is GRPCLB, ServerName should be the name of the remote load - // balancer, not the name of the backend. - // // WARNING: ServerName must only be populated with trusted values. It // is insecure to populate it with data from untrusted inputs since untrusted // values could be used to bypass the authority checks performed by TLS. @@ -126,27 +104,29 @@ type Address struct { // BalancerAttributes contains arbitrary data about this address intended // for consumption by the LB policy. 
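The EndpointMap added above keys on the unordered set of address strings inside an endpoint, so lookups are insensitive to address order (and, per its doc comment, the map is not safe for concurrent use). A usage sketch against that API, with made-up addresses:

package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

func main() {
	em := resolver.NewEndpointMap()

	em.Set(resolver.Endpoint{Addresses: []resolver.Address{
		{Addr: "10.0.0.1:443"},
		{Addr: "10.0.0.2:443"},
	}}, "backend-A")

	// Same address set, different order: hits the same entry.
	v, ok := em.Get(resolver.Endpoint{Addresses: []resolver.Address{
		{Addr: "10.0.0.2:443"},
		{Addr: "10.0.0.1:443"},
	}})
	fmt.Println(v, ok, em.Len()) // backend-A true 1
}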
These attributes do not affect SubConn // creation, connection establishment, handshaking, etc. - BalancerAttributes *attributes.Attributes - - // Type is the type of this address. // - // Deprecated: use Attributes instead. - Type AddressType + // Deprecated: when an Address is inside an Endpoint, this field should not + // be used, and it will eventually be removed entirely. + BalancerAttributes *attributes.Attributes // Metadata is the information associated with Addr, which may be used // to make load balancing decision. // // Deprecated: use Attributes instead. - Metadata interface{} + Metadata any } // Equal returns whether a and o are identical. Metadata is compared directly, // not with any recursive introspection. +// +// This method compares all fields of the address. When used to tell apart +// addresses during subchannel creation or connection establishment, it might be +// more appropriate for the caller to implement custom equality logic. func (a Address) Equal(o Address) bool { return a.Addr == o.Addr && a.ServerName == o.ServerName && a.Attributes.Equal(o.Attributes) && a.BalancerAttributes.Equal(o.BalancerAttributes) && - a.Type == o.Type && a.Metadata == o.Metadata + a.Metadata == o.Metadata } // String returns JSON formatted string representation of the address. @@ -190,11 +170,37 @@ type BuildOptions struct { Dialer func(context.Context, string) (net.Conn, error) } +// An Endpoint is one network endpoint, or server, which may have multiple +// addresses with which it can be accessed. +type Endpoint struct { + // Addresses contains a list of addresses used to access this endpoint. + Addresses []Address + + // Attributes contains arbitrary data about this endpoint intended for + // consumption by the LB policy. + Attributes *attributes.Attributes +} + // State contains the current Resolver state relevant to the ClientConn. type State struct { // Addresses is the latest set of resolved addresses for the target. + // + // If a resolver sets Addresses but does not set Endpoints, one Endpoint + // will be created for each Address before the State is passed to the LB + // policy. The BalancerAttributes of each entry in Addresses will be set + // in Endpoints.Attributes, and be cleared in the Endpoint's Address's + // BalancerAttributes. + // + // Soon, Addresses will be deprecated and replaced fully by Endpoints. Addresses []Address + // Endpoints is the latest set of resolved endpoints for the target. + // + // If a resolver produces a State containing Endpoints but not Addresses, + // it must take care to ensure the LB policies it selects will support + // Endpoints. + Endpoints []Endpoint + // ServiceConfig contains the result from parsing the latest service // config. If it is nil, it indicates no service config is present or the // resolver does not provide service configs. @@ -234,11 +240,6 @@ type ClientConn interface { // // Deprecated: Use UpdateState instead. NewAddress(addresses []Address) - // NewServiceConfig is called by resolver to notify ClientConn a new - // service config. The service config should be provided as a json string. - // - // Deprecated: Use UpdateState instead. - NewServiceConfig(serviceConfig string) // ParseServiceConfig parses the provided service config and returns an // object that provides the parsed config. ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult @@ -254,20 +255,7 @@ type ClientConn interface { // target does not contain a scheme or if the parsed scheme is not registered // (i.e. 
no corresponding resolver available to resolve the endpoint), we will // apply the default scheme, and will attempt to reparse it. -// -// Examples: -// -// - "dns://some_authority/foo.bar" -// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} -// - "foo.bar" -// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"} -// - "unknown_scheme://authority/endpoint" -// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"} type Target struct { - // Deprecated: use URL.Scheme instead. - Scheme string - // Deprecated: use URL.Host instead. - Authority string // URL contains the parsed dial target with an optional default scheme added // to it if the original dial target contained no scheme or contained an // unregistered scheme. Any query params specified in the original dial @@ -293,6 +281,11 @@ func (t Target) Endpoint() string { return strings.TrimPrefix(endpoint, "/") } +// String returns a string representation of Target. +func (t Target) String() string { + return t.URL.String() +} + // Builder creates a resolver that will be used to watch name resolution updates. type Builder interface { // Build creates a new resolver for the given target. @@ -322,9 +315,12 @@ type Resolver interface { Close() } -// UnregisterForTesting removes the resolver builder with the given scheme from the -// resolver map. -// This function is for testing only. -func UnregisterForTesting(scheme string) { - delete(m, scheme) +// AuthorityOverrider is implemented by Builders that wish to override the +// default authority for the ClientConn. +// By default, the authority used is target.Endpoint(). +type AuthorityOverrider interface { + // OverrideAuthority returns the authority to use for a ClientConn with the + // given target. The implementation must generate it without blocking, + // typically in line, and must keep it unchanged. + OverrideAuthority(Target) string } diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go deleted file mode 100644 index b408b368..00000000 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ /dev/null @@ -1,239 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" - "strings" - "sync" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/pretty" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/serviceconfig" -) - -// resolverStateUpdater wraps the single method used by ccResolverWrapper to -// report a state update from the actual resolver implementation. -type resolverStateUpdater interface { - updateResolverState(s resolver.State, err error) error -} - -// ccResolverWrapper is a wrapper on top of cc for resolvers. -// It implements resolver.ClientConn interface. 
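AuthorityOverrider, introduced above, lets a resolver builder replace the default authority (target.Endpoint()) for the whole ClientConn. A sketch of a builder opting in; the scheme, address, and builder body are hypothetical, only the interface shape comes from the change:

package main

import (
	"google.golang.org/grpc/resolver"
)

// exampleBuilder is a hypothetical resolver builder.
type exampleBuilder struct{}

func (exampleBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
	// A real builder would start watching the target here.
	cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "127.0.0.1:50051"}}})
	return nopResolver{}, nil
}

func (exampleBuilder) Scheme() string { return "example" }

// OverrideAuthority must return quickly and deterministically; gRPC uses
// the value as the :authority for every RPC on the ClientConn.
func (exampleBuilder) OverrideAuthority(resolver.Target) string {
	return "my-service.internal"
}

type nopResolver struct{}

func (nopResolver) ResolveNow(resolver.ResolveNowOptions) {}
func (nopResolver) Close()                                {}

func main() {
	// Statically assert the builder opts in to authority overriding.
	var _ resolver.AuthorityOverrider = exampleBuilder{}
	resolver.Register(exampleBuilder{})
}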
-type ccResolverWrapper struct { - // The following fields are initialized when the wrapper is created and are - // read-only afterwards, and therefore can be accessed without a mutex. - cc resolverStateUpdater - channelzID *channelz.Identifier - ignoreServiceConfig bool - opts ccResolverWrapperOpts - serializer *grpcsync.CallbackSerializer // To serialize all incoming calls. - serializerCancel context.CancelFunc // To close the serializer, accessed only from close(). - - // All incoming (resolver --> gRPC) calls are guaranteed to execute in a - // mutually exclusive manner as they are scheduled on the serializer. - // Fields accessed *only* in these serializer callbacks, can therefore be - // accessed without a mutex. - curState resolver.State - - // mu guards access to the below fields. - mu sync.Mutex - closed bool - resolver resolver.Resolver // Accessed only from outgoing calls. -} - -// ccResolverWrapperOpts wraps the arguments to be passed when creating a new -// ccResolverWrapper. -type ccResolverWrapperOpts struct { - target resolver.Target // User specified dial target to resolve. - builder resolver.Builder // Resolver builder to use. - bOpts resolver.BuildOptions // Resolver build options to use. - channelzID *channelz.Identifier // Channelz identifier for the channel. -} - -// newCCResolverWrapper uses the resolver.Builder to build a Resolver and -// returns a ccResolverWrapper object which wraps the newly built resolver. -func newCCResolverWrapper(cc resolverStateUpdater, opts ccResolverWrapperOpts) (*ccResolverWrapper, error) { - ctx, cancel := context.WithCancel(context.Background()) - ccr := &ccResolverWrapper{ - cc: cc, - channelzID: opts.channelzID, - ignoreServiceConfig: opts.bOpts.DisableServiceConfig, - opts: opts, - serializer: grpcsync.NewCallbackSerializer(ctx), - serializerCancel: cancel, - } - - // Cannot hold the lock at build time because the resolver can send an - // update or error inline and these incoming calls grab the lock to schedule - // a callback in the serializer. - r, err := opts.builder.Build(opts.target, ccr, opts.bOpts) - if err != nil { - cancel() - return nil, err - } - - // Any error reported by the resolver at build time that leads to a - // re-resolution request from the balancer is dropped by grpc until we - // return from this function. So, we don't have to handle pending resolveNow - // requests here. - ccr.mu.Lock() - ccr.resolver = r - ccr.mu.Unlock() - - return ccr, nil -} - -func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { - ccr.mu.Lock() - defer ccr.mu.Unlock() - - // ccr.resolver field is set only after the call to Build() returns. But in - // the process of building, the resolver may send an error update which when - // propagated to the balancer may result in a re-resolution request. - if ccr.closed || ccr.resolver == nil { - return - } - ccr.resolver.ResolveNow(o) -} - -func (ccr *ccResolverWrapper) close() { - ccr.mu.Lock() - if ccr.closed { - ccr.mu.Unlock() - return - } - - channelz.Info(logger, ccr.channelzID, "Closing the name resolver") - - // Close the serializer to ensure that no more calls from the resolver are - // handled, before actually closing the resolver. - ccr.serializerCancel() - ccr.closed = true - r := ccr.resolver - ccr.mu.Unlock() - - // Give enqueued callbacks a chance to finish. - <-ccr.serializer.Done - - // Spawn a goroutine to close the resolver (since it may block trying to - // cleanup all allocated resources) and return early. 
- go r.Close() -} - -// serializerScheduleLocked is a convenience method to schedule a function to be -// run on the serializer while holding ccr.mu. -func (ccr *ccResolverWrapper) serializerScheduleLocked(f func(context.Context)) { - ccr.mu.Lock() - ccr.serializer.Schedule(f) - ccr.mu.Unlock() -} - -// UpdateState is called by resolver implementations to report new state to gRPC -// which includes addresses and service config. -func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { - errCh := make(chan error, 1) - ok := ccr.serializer.Schedule(func(context.Context) { - ccr.addChannelzTraceEvent(s) - ccr.curState = s - if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { - errCh <- balancer.ErrBadResolverState - return - } - errCh <- nil - }) - if !ok { - // The only time when Schedule() fail to add the callback to the - // serializer is when the serializer is closed, and this happens only - // when the resolver wrapper is closed. - return nil - } - return <-errCh -} - -// ReportError is called by resolver implementations to report errors -// encountered during name resolution to gRPC. -func (ccr *ccResolverWrapper) ReportError(err error) { - ccr.serializerScheduleLocked(func(_ context.Context) { - channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) - ccr.cc.updateResolverState(resolver.State{}, err) - }) -} - -// NewAddress is called by the resolver implementation to send addresses to -// gRPC. -func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { - ccr.serializerScheduleLocked(func(_ context.Context) { - ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) - ccr.curState.Addresses = addrs - ccr.cc.updateResolverState(ccr.curState, nil) - }) -} - -// NewServiceConfig is called by the resolver implementation to send service -// configs to gRPC. -func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { - ccr.serializerScheduleLocked(func(_ context.Context) { - channelz.Infof(logger, ccr.channelzID, "ccResolverWrapper: got new service config: %s", sc) - if ccr.ignoreServiceConfig { - channelz.Info(logger, ccr.channelzID, "Service config lookups disabled; ignoring config") - return - } - scpr := parseServiceConfig(sc) - if scpr.Err != nil { - channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) - return - } - ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) - ccr.curState.ServiceConfig = scpr - ccr.cc.updateResolverState(ccr.curState, nil) - }) -} - -// ParseServiceConfig is called by resolver implementations to parse a JSON -// representation of the service config. -func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { - return parseServiceConfig(scJSON) -} - -// addChannelzTraceEvent adds a channelz trace event containing the new -// state received from resolver implementations. 
-func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { - var updates []string - var oldSC, newSC *ServiceConfig - var oldOK, newOK bool - if ccr.curState.ServiceConfig != nil { - oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig) - } - if s.ServiceConfig != nil { - newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig) - } - if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { - updates = append(updates, "service config updated") - } - if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { - updates = append(updates, "resolver returned an empty address list") - } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { - updates = append(updates, "resolver returned new addresses") - } - channelz.Infof(logger, ccr.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) -} diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go new file mode 100644 index 00000000..c79bab12 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -0,0 +1,197 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "strings" + "sync" + + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// ccResolverWrapper is a wrapper on top of cc for resolvers. +// It implements resolver.ClientConn interface. +type ccResolverWrapper struct { + // The following fields are initialized when the wrapper is created and are + // read-only afterwards, and therefore can be accessed without a mutex. + cc *ClientConn + ignoreServiceConfig bool + serializer *grpcsync.CallbackSerializer + serializerCancel context.CancelFunc + + resolver resolver.Resolver // only accessed within the serializer + + // The following fields are protected by mu. Caller must take cc.mu before + // taking mu. + mu sync.Mutex + curState resolver.State + closed bool +} + +// newCCResolverWrapper initializes the ccResolverWrapper. It can only be used +// after calling start, which builds the resolver. +func newCCResolverWrapper(cc *ClientConn) *ccResolverWrapper { + ctx, cancel := context.WithCancel(cc.ctx) + return &ccResolverWrapper{ + cc: cc, + ignoreServiceConfig: cc.dopts.disableServiceConfig, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, + } +} + +// start builds the name resolver using the resolver.Builder in cc and returns +// any error encountered. It must always be the first operation performed on +// any newly created ccResolverWrapper, except that close may be called instead. 
+func (ccr *ccResolverWrapper) start() error { + errCh := make(chan error) + ccr.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil { + return + } + opts := resolver.BuildOptions{ + DisableServiceConfig: ccr.cc.dopts.disableServiceConfig, + DialCreds: ccr.cc.dopts.copts.TransportCredentials, + CredsBundle: ccr.cc.dopts.copts.CredsBundle, + Dialer: ccr.cc.dopts.copts.Dialer, + } + var err error + ccr.resolver, err = ccr.cc.resolverBuilder.Build(ccr.cc.parsedTarget, ccr, opts) + errCh <- err + }) + return <-errCh +} + +func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { + ccr.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccr.resolver == nil { + return + } + ccr.resolver.ResolveNow(o) + }) +} + +// close initiates async shutdown of the wrapper. To determine the wrapper has +// finished shutting down, the channel should block on ccr.serializer.Done() +// without cc.mu held. +func (ccr *ccResolverWrapper) close() { + channelz.Info(logger, ccr.cc.channelzID, "Closing the name resolver") + ccr.mu.Lock() + ccr.closed = true + ccr.mu.Unlock() + + ccr.serializer.Schedule(func(context.Context) { + if ccr.resolver == nil { + return + } + ccr.resolver.Close() + ccr.resolver = nil + }) + ccr.serializerCancel() +} + +// UpdateState is called by resolver implementations to report new state to gRPC +// which includes addresses and service config. +func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return nil + } + if s.Endpoints == nil { + s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses)) + for _, a := range s.Addresses { + ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} + ep.Addresses[0].BalancerAttributes = nil + s.Endpoints = append(s.Endpoints, ep) + } + } + ccr.addChannelzTraceEvent(s) + ccr.curState = s + ccr.mu.Unlock() + return ccr.cc.updateResolverStateAndUnlock(s, nil) +} + +// ReportError is called by resolver implementations to report errors +// encountered during name resolution to gRPC. +func (ccr *ccResolverWrapper) ReportError(err error) { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return + } + ccr.mu.Unlock() + channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) + ccr.cc.updateResolverStateAndUnlock(resolver.State{}, err) +} + +// NewAddress is called by the resolver implementation to send addresses to +// gRPC. +func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return + } + s := resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig} + ccr.addChannelzTraceEvent(s) + ccr.curState = s + ccr.mu.Unlock() + ccr.cc.updateResolverStateAndUnlock(s, nil) +} + +// ParseServiceConfig is called by resolver implementations to parse a JSON +// representation of the service config. +func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { + return parseServiceConfig(scJSON) +} + +// addChannelzTraceEvent adds a channelz trace event containing the new +// state received from resolver implementations. 
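UpdateState above backfills Endpoints whenever a resolver sets only Addresses: one single-address Endpoint per Address, with BalancerAttributes hoisted onto the Endpoint and cleared on the Address. The same conversion as a standalone function, mirroring that loop:

package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
	"google.golang.org/grpc/resolver"
)

// toEndpoints mirrors the compatibility shim in UpdateState: one Endpoint
// per Address, with BalancerAttributes moved to Endpoint.Attributes.
func toEndpoints(addrs []resolver.Address) []resolver.Endpoint {
	eps := make([]resolver.Endpoint, 0, len(addrs))
	for _, a := range addrs {
		ep := resolver.Endpoint{
			Addresses:  []resolver.Address{a}, // a is a copy; caller's slice is untouched
			Attributes: a.BalancerAttributes,
		}
		ep.Addresses[0].BalancerAttributes = nil
		eps = append(eps, ep)
	}
	return eps
}

func main() {
	addrs := []resolver.Address{{
		Addr:               "10.0.0.1:443",
		BalancerAttributes: attributes.New("weight", 3),
	}}
	eps := toEndpoints(addrs)
	fmt.Println(len(eps), eps[0].Attributes != nil) // 1 true
}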
+func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { + var updates []string + var oldSC, newSC *ServiceConfig + var oldOK, newOK bool + if ccr.curState.ServiceConfig != nil { + oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig) + } + if s.ServiceConfig != nil { + newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig) + } + if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { + updates = append(updates, "service config updated") + } + if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { + updates = append(updates, "resolver returned an empty address list") + } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { + updates = append(updates, "resolver returned new addresses") + } + channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) +} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 2030736a..a4b6bc68 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -75,7 +75,7 @@ func NewGZIPCompressorWithLevel(level int) (Compressor, error) { } return &gzipCompressor{ pool: sync.Pool{ - New: func() interface{} { + New: func() any { w, err := gzip.NewWriterLevel(io.Discard, level) if err != nil { panic(err) @@ -577,6 +577,9 @@ type parser struct { // The header of a gRPC message. Find more detail at // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md header [5]byte + + // recvBufferPool is the pool of shared receive buffers. + recvBufferPool SharedBufferPool } // recvMsg reads a complete gRPC message from the stream. @@ -610,9 +613,7 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt if int(length) > maxReceiveMessageSize { return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) } - // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead - // of making it for each message: - msg = make([]byte, int(length)) + msg = p.recvBufferPool.Get(int(length)) if _, err := p.r.Read(msg); err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF @@ -625,7 +626,7 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt // encode serializes msg and returns a buffer containing the message, or an // error if it is too large to be transmitted by grpc. If msg is nil, it // generates an empty message. -func encode(c baseCodec, msg interface{}) ([]byte, error) { +func encode(c baseCodec, msg any) ([]byte, error) { if msg == nil { // NOTE: typed nils will not be caught by this check return nil, nil } @@ -639,14 +640,18 @@ func encode(c baseCodec, msg interface{}) ([]byte, error) { return b, nil } -// compress returns the input bytes compressed by compressor or cp. If both -// compressors are nil, returns nil. +// compress returns the input bytes compressed by compressor or cp. +// If both compressors are nil, or if the message has zero length, returns nil, +// indicating no compression was done. // // TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor. 
func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) { if compressor == nil && cp == nil { return nil, nil } + if len(in) == 0 { + return nil, nil + } wrapErr := func(err error) error { return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) } @@ -692,7 +697,7 @@ func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { return hdr, data } -func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload { +func outPayload(client bool, msg any, data, payload []byte, t time.Time) *stats.OutPayload { return &stats.OutPayload{ Client: client, Payload: msg, @@ -726,12 +731,12 @@ type payloadInfo struct { } func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) { - pf, d, err := p.recvMsg(maxReceiveMessageSize) + pf, buf, err := p.recvMsg(maxReceiveMessageSize) if err != nil { return nil, err } if payInfo != nil { - payInfo.compressedLength = len(d) + payInfo.compressedLength = len(buf) } if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { @@ -743,10 +748,10 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, // use this decompressor as the default. if dc != nil { - d, err = dc.Do(bytes.NewReader(d)) - size = len(d) + buf, err = dc.Do(bytes.NewReader(buf)) + size = len(buf) } else { - d, size, err = decompress(compressor, d, maxReceiveMessageSize) + buf, size, err = decompress(compressor, buf, maxReceiveMessageSize) } if err != nil { return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) @@ -757,7 +762,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) } } - return d, nil + return buf, nil } // Using compressor, decompress d, returning data and size. @@ -791,16 +796,18 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize // For the two compressor parameters, both should not be set, but if they are, // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? -func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { - d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) +func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { + buf, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) if err != nil { return err } - if err := c.Unmarshal(d, m); err != nil { + if err := c.Unmarshal(buf, m); err != nil { return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) } if payInfo != nil { - payInfo.uncompressedBytes = d + payInfo.uncompressedBytes = buf + } else { + p.recvBufferPool.Put(&buf) } return nil } @@ -860,19 +867,22 @@ func ErrorDesc(err error) string { // Errorf returns nil if c is OK. // // Deprecated: use status.Errorf instead. 
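recvMsg and msgHeader work against the 5-byte length-prefixed framing from the gRPC HTTP/2 protocol document cited above: one compressed-flag byte followed by a big-endian uint32 payload length. A minimal encoder for that header, as an illustration rather than the vendored helper:

package main

import (
	"encoding/binary"
	"fmt"
)

// frame prepends the 5-byte gRPC message header: byte 0 is the
// compressed flag, bytes 1-4 the big-endian payload length.
func frame(payload []byte, compressed bool) []byte {
	hdr := make([]byte, 5, 5+len(payload))
	if compressed {
		hdr[0] = 1
	}
	binary.BigEndian.PutUint32(hdr[1:], uint32(len(payload)))
	return append(hdr, payload...)
}

func main() {
	msg := frame([]byte("hello"), false)
	fmt.Printf("% x\n", msg) // 00 00 00 00 05 68 65 6c 6c 6f
}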
-func Errorf(c codes.Code, format string, a ...interface{}) error { +func Errorf(c codes.Code, format string, a ...any) error { return status.Errorf(c, format, a...) } +var errContextCanceled = status.Error(codes.Canceled, context.Canceled.Error()) +var errContextDeadline = status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error()) + // toRPCErr converts an error into an error from the status package. func toRPCErr(err error) error { switch err { case nil, io.EOF: return err case context.DeadlineExceeded: - return status.Error(codes.DeadlineExceeded, err.Error()) + return errContextDeadline case context.Canceled: - return status.Error(codes.Canceled, err.Error()) + return errContextCanceled case io.ErrUnexpectedEOF: return status.Error(codes.Internal, err.Error()) } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 81969e7c..e89c5ac6 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -70,9 +70,10 @@ func init() { internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials { return srv.opts.creds } - internal.DrainServerTransports = func(srv *Server, addr string) { - srv.drainServerTransports(addr) + internal.IsRegisteredMethod = func(srv *Server, method string) bool { + return srv.isRegisteredMethod(method) } + internal.ServerFromContext = serverFromContext internal.AddGlobalServerOptions = func(opt ...ServerOption) { globalServerOptions = append(globalServerOptions, opt...) } @@ -81,12 +82,13 @@ func init() { } internal.BinaryLogger = binaryLogger internal.JoinServerOptions = newJoinServerOption + internal.RecvBufferPool = recvBufferPool } var statusOK = status.New(codes.OK, "") var logger = grpclog.Component("core") -type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) +type methodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) // MethodDesc represents an RPC service's method specification. type MethodDesc struct { @@ -99,26 +101,20 @@ type ServiceDesc struct { ServiceName string // The pointer to the service interface. Used to check whether the user // provided implementation satisfies the interface requirements. - HandlerType interface{} + HandlerType any Methods []MethodDesc Streams []StreamDesc - Metadata interface{} + Metadata any } // serviceInfo wraps information about a service. It is very similar to // ServiceDesc and is constructed from it for internal purposes. type serviceInfo struct { // Contains the implementation for the methods in this service. - serviceImpl interface{} + serviceImpl any methods map[string]*MethodDesc streams map[string]*StreamDesc - mdata interface{} -} - -type serverWorkerData struct { - st transport.ServerTransport - wg *sync.WaitGroup - stream *transport.Stream + mdata any } // Server is a gRPC server to serve RPC requests. 
@@ -140,12 +136,14 @@ type Server struct { quit *grpcsync.Event done *grpcsync.Event channelzRemoveOnce sync.Once - serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop + serveWG sync.WaitGroup // counts active Serve goroutines for Stop/GracefulStop + handlersWG sync.WaitGroup // counts active method handler goroutines channelzID *channelz.Identifier czData *channelzData - serverWorkerChannel chan *serverWorkerData + serverWorkerChannel chan func() + serverWorkerChannelClose func() } type serverOptions struct { @@ -170,18 +168,23 @@ type serverOptions struct { initialConnWindowSize int32 writeBufferSize int readBufferSize int + sharedWriteBuffer bool connectionTimeout time.Duration maxHeaderListSize *uint32 headerTableSize *uint32 numServerWorkers uint32 + recvBufferPool SharedBufferPool + waitForHandlers bool } var defaultServerOptions = serverOptions{ + maxConcurrentStreams: math.MaxUint32, maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, maxSendMessageSize: defaultServerMaxSendMessageSize, connectionTimeout: 120 * time.Second, writeBufferSize: defaultWriteBufSize, readBufferSize: defaultReadBufSize, + recvBufferPool: nopBufferPool{}, } var globalServerOptions []ServerOption @@ -233,6 +236,20 @@ func newJoinServerOption(opts ...ServerOption) ServerOption { return &joinServerOption{opts: opts} } +// SharedWriteBuffer allows reusing per-connection transport write buffer. +// If this option is set to true every connection will release the buffer after +// flushing the data on the wire. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func SharedWriteBuffer(val bool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.sharedWriteBuffer = val + }) +} + // WriteBufferSize determines how much data can be batched before doing a write // on the wire. The corresponding memory allocation for this buffer will be // twice the size to keep syscalls low. The default value for this buffer is @@ -273,9 +290,9 @@ func InitialConnWindowSize(s int32) ServerOption { // KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { - if kp.Time > 0 && kp.Time < time.Second { + if kp.Time > 0 && kp.Time < internal.KeepaliveMinServerPingTime { logger.Warning("Adjusting keepalive ping interval to minimum period of 1s") - kp.Time = time.Second + kp.Time = internal.KeepaliveMinServerPingTime } return newFuncServerOption(func(o *serverOptions) { @@ -387,6 +404,9 @@ func MaxSendMsgSize(m int) ServerOption { // MaxConcurrentStreams returns a ServerOption that will apply a limit on the number // of concurrent streams to each ServerTransport. func MaxConcurrentStreams(n uint32) ServerOption { + if n == 0 { + n = math.MaxUint32 + } return newFuncServerOption(func(o *serverOptions) { o.maxConcurrentStreams = n }) @@ -552,6 +572,44 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption { }) } +// WaitForHandlers cause Stop to wait until all outstanding method handlers have +// exited before returning. If false, Stop will return as soon as all +// connections have closed, but method handlers may still be running. By +// default, Stop does not wait for method handlers to return. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
+func WaitForHandlers(w bool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.waitForHandlers = w + }) +} + +// RecvBufferPool returns a ServerOption that configures the server +// to use the provided shared buffer pool for parsing incoming messages. Depending +// on the application's workload, this could result in reduced memory allocation. +// +// If you are unsure about how to implement a memory pool but want to utilize one, +// begin with grpc.NewSharedBufferPool. +// +// Note: The shared buffer pool feature will not be active if any of the following +// options are used: StatsHandler, EnableTracing, or binary logging. In such +// cases, the shared buffer pool will be ignored. +// +// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in +// v1.60.0 or later. +func RecvBufferPool(bufferPool SharedBufferPool) ServerOption { + return recvBufferPool(bufferPool) +} + +func recvBufferPool(bufferPool SharedBufferPool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.recvBufferPool = bufferPool + }) +} + // serverWorkerResetThreshold defines how often the stack must be reset. Every // N requests, by spawning a new goroutine in its place, a worker can reset its // stack so that large stacks don't live in memory forever. 2^16 should allow @@ -567,33 +625,27 @@ const serverWorkerResetThreshold = 1 << 16 // [1] https://github.com/golang/go/issues/18138 func (s *Server) serverWorker() { for completed := 0; completed < serverWorkerResetThreshold; completed++ { - data, ok := <-s.serverWorkerChannel + f, ok := <-s.serverWorkerChannel if !ok { return } - s.handleSingleStream(data) + f() } go s.serverWorker() } -func (s *Server) handleSingleStream(data *serverWorkerData) { - defer data.wg.Done() - s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream)) -} - // initServerWorkers creates worker goroutines and a channel to process incoming // connections to reduce the time spent overall on runtime.morestack. func (s *Server) initServerWorkers() { - s.serverWorkerChannel = make(chan *serverWorkerData) + s.serverWorkerChannel = make(chan func()) + s.serverWorkerChannelClose = grpcsync.OnceFunc(func() { + close(s.serverWorkerChannel) + }) for i := uint32(0); i < s.opts.numServerWorkers; i++ { go s.serverWorker() } } -func (s *Server) stopServerWorkers() { - close(s.serverWorkerChannel) -} - // NewServer creates a gRPC server which has no service registered and has not // started to accept requests yet. func NewServer(opt ...ServerOption) *Server { @@ -632,7 +684,7 @@ func NewServer(opt ...ServerOption) *Server { // printf records an event in s's event log, unless s has been stopped. // REQUIRES s.mu is held. -func (s *Server) printf(format string, a ...interface{}) { +func (s *Server) printf(format string, a ...any) { if s.events != nil { s.events.Printf(format, a...) } @@ -640,7 +692,7 @@ func (s *Server) printf(format string, a ...interface{}) { // errorf records an error in s's event log, unless s has been stopped. // REQUIRES s.mu is held. -func (s *Server) errorf(format string, a ...interface{}) { +func (s *Server) errorf(format string, a ...any) { if s.events != nil { s.events.Errorf(format, a...) } @@ -655,14 +707,14 @@ type ServiceRegistrar interface { // once the server has started serving. // desc describes the service and its methods and handlers. impl is the // service implementation which is passed to the method handlers. 
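A short usage sketch (not from the patch) combining the options added or reworked above; SharedWriteBuffer and WaitForHandlers are experimental, and RecvBufferPool is already deprecated in favor of experimental.WithRecvBufferPool:

package main

import "google.golang.org/grpc"

func main() {
	srv := grpc.NewServer(
		grpc.SharedWriteBuffer(true),                    // experimental: release the write buffer after each flush
		grpc.WaitForHandlers(true),                      // experimental: Stop blocks until handlers return
		grpc.RecvBufferPool(grpc.NewSharedBufferPool()), // deprecated; see experimental.WithRecvBufferPool
		grpc.MaxConcurrentStreams(0),                    // 0 is now normalized to math.MaxUint32
	)
	defer srv.Stop()
}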
- RegisterService(desc *ServiceDesc, impl interface{}) + RegisterService(desc *ServiceDesc, impl any) } // RegisterService registers a service and its implementation to the gRPC // server. It is called from the IDL generated code. This must be called before // invoking Serve. If ss is non-nil (for legacy code), its type is checked to // ensure it implements sd.HandlerType. -func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { +func (s *Server) RegisterService(sd *ServiceDesc, ss any) { if ss != nil { ht := reflect.TypeOf(sd.HandlerType).Elem() st := reflect.TypeOf(ss) @@ -673,7 +725,7 @@ func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { s.register(sd, ss) } -func (s *Server) register(sd *ServiceDesc, ss interface{}) { +func (s *Server) register(sd *ServiceDesc, ss any) { s.mu.Lock() defer s.mu.Unlock() s.printf("RegisterService(%q)", sd.ServiceName) @@ -714,7 +766,7 @@ type MethodInfo struct { type ServiceInfo struct { Methods []MethodInfo // Metadata is the metadata specified in ServiceDesc when registering service. - Metadata interface{} + Metadata any } // GetServiceInfo returns a map from service names to ServiceInfo. @@ -775,6 +827,18 @@ func (l *listenSocket) Close() error { // Serve returns when lis.Accept fails with fatal errors. lis will be closed when // this method returns. // Serve will return a non-nil error unless Stop or GracefulStop is called. +// +// Note: All supported releases of Go (as of December 2023) override the OS +// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive +// with OS defaults for keepalive time and interval, callers need to do the +// following two things: +// - pass a net.Listener created by calling the Listen method on a +// net.ListenConfig with the `KeepAlive` field set to a negative value. This +// will result in the Go standard library not overriding OS defaults for TCP +// keepalive interval and time. But this will also result in the Go standard +// library not enabling TCP keepalives by default. +// - override the Accept method on the passed in net.Listener and set the +// SO_KEEPALIVE socket option to enable TCP keepalives, with OS defaults. func (s *Server) Serve(lis net.Listener) error { s.mu.Lock() s.printf("serving") @@ -882,24 +946,21 @@ func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) { return } + if cc, ok := rawConn.(interface { + PassServerTransport(transport.ServerTransport) + }); ok { + cc.PassServerTransport(st) + } + if !s.addConn(lisAddr, st) { return } go func() { - s.serveStreams(st) + s.serveStreams(context.Background(), st, rawConn) s.removeConn(lisAddr, st) }() } -func (s *Server) drainServerTransports(addr string) { - s.mu.Lock() - conns := s.conns[addr] - for st := range conns { - st.Drain("") - } - s.mu.Unlock() -} - // newHTTP2Transport sets up a http/2 transport (using the // gRPC http2 server transport in transport/http2_server.go). 
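The keepalive note above translates roughly into the following sketch; the address is arbitrary, and the SO_KEEPALIVE-setting Accept wrapper the note also calls for is left as a comment since the patch describes it only in prose:

package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"
)

func main() {
	// A negative KeepAlive stops the Go runtime from overriding the OS
	// defaults, but it also means Go no longer enables TCP keepalives at
	// all; per the note above, an Accept wrapper that sets SO_KEEPALIVE on
	// each accepted conn would still be needed to turn them on.
	lc := net.ListenConfig{KeepAlive: -1}
	lis, err := lc.Listen(context.Background(), "tcp", "localhost:50051")
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer()
	if err := srv.Serve(lis); err != nil {
		log.Fatal(err)
	}
}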
func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { @@ -915,6 +976,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { InitialConnWindowSize: s.opts.initialConnWindowSize, WriteBufferSize: s.opts.writeBufferSize, ReadBufferSize: s.opts.readBufferSize, + SharedWriteBuffer: s.opts.sharedWriteBuffer, ChannelzParentID: s.channelzID, MaxHeaderListSize: s.opts.maxHeaderListSize, HeaderTableSize: s.opts.headerTableSize, @@ -939,33 +1001,44 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { return st } -func (s *Server) serveStreams(st transport.ServerTransport) { - defer st.Close(errors.New("finished serving streams for the server transport")) - var wg sync.WaitGroup +func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, rawConn net.Conn) { + ctx = transport.SetConnection(ctx, rawConn) + ctx = peer.NewContext(ctx, st.Peer()) + for _, sh := range s.opts.statsHandlers { + ctx = sh.TagConn(ctx, &stats.ConnTagInfo{ + RemoteAddr: st.Peer().Addr, + LocalAddr: st.Peer().LocalAddr, + }) + sh.HandleConn(ctx, &stats.ConnBegin{}) + } + + defer func() { + st.Close(errors.New("finished serving streams for the server transport")) + for _, sh := range s.opts.statsHandlers { + sh.HandleConn(ctx, &stats.ConnEnd{}) + } + }() + + streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) + st.HandleStreams(ctx, func(stream *transport.Stream) { + s.handlersWG.Add(1) + streamQuota.acquire() + f := func() { + defer streamQuota.release() + defer s.handlersWG.Done() + s.handleStream(st, stream) + } - st.HandleStreams(func(stream *transport.Stream) { - wg.Add(1) if s.opts.numServerWorkers > 0 { - data := &serverWorkerData{st: st, wg: &wg, stream: stream} select { - case s.serverWorkerChannel <- data: + case s.serverWorkerChannel <- f: return default: // If all stream workers are busy, fallback to the default code path. } } - go func() { - defer wg.Done() - s.handleStream(st, stream, s.traceInfo(st, stream)) - }() - }, func(ctx context.Context, method string) context.Context { - if !EnableTracing { - return ctx - } - tr := trace.New("grpc.Recv."+methodFamily(method), method) - return trace.NewContext(ctx, tr) + go f() }) - wg.Wait() } var _ http.Handler = (*Server)(nil) @@ -1009,31 +1082,7 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } defer s.removeConn(listenerAddressForServeHTTP, st) - s.serveStreams(st) -} - -// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. -// If tracing is not enabled, it returns nil. 
-func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { - if !EnableTracing { - return nil - } - tr, ok := trace.FromContext(stream.Context()) - if !ok { - return nil - } - - trInfo = &traceInfo{ - tr: tr, - firstLine: firstLine{ - client: false, - remoteAddr: st.RemoteAddr(), - }, - } - if dl, ok := stream.Context().Deadline(); ok { - trInfo.firstLine.deadline = time.Until(dl) - } - return trInfo + s.serveStreams(r.Context(), st, nil) } func (s *Server) addConn(addr string, st transport.ServerTransport) bool { @@ -1096,7 +1145,7 @@ func (s *Server) incrCallsFailed() { atomic.AddInt64(&s.czData.callsFailed, 1) } -func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { +func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err != nil { channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err) @@ -1115,7 +1164,7 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str err = t.Write(stream, hdr, payload, opts) if err == nil { for _, sh := range s.opts.statsHandlers { - sh.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + sh.HandleRPC(ctx, outPayload(false, msg, data, payload, time.Now())) } } return err @@ -1143,7 +1192,7 @@ func chainUnaryServerInterceptors(s *Server) { } func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { - return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { + return func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (any, error) { return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) } } @@ -1152,12 +1201,12 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info if curr == len(interceptors)-1 { return finalHandler } - return func(ctx context.Context, req interface{}) (interface{}, error) { + return func(ctx context.Context, req any) (any, error) { return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) } } -func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { +func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { shs := s.opts.statsHandlers if len(shs) != 0 || trInfo != nil || channelz.IsOn() { if channelz.IsOn() { @@ -1171,7 +1220,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. IsClientStream: false, IsServerStream: false, } - sh.HandleRPC(stream.Context(), statsBegin) + sh.HandleRPC(ctx, statsBegin) } if trInfo != nil { trInfo.tr.LazyLog(&trInfo.firstLine, false) @@ -1189,7 +1238,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
defer func() { if trInfo != nil { if err != nil && err != io.EOF { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) trInfo.tr.SetError() } trInfo.tr.Finish() @@ -1203,7 +1252,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if err != nil && err != io.EOF { end.Error = toRPCErr(err) } - sh.HandleRPC(stream.Context(), end) + sh.HandleRPC(ctx, end) } if channelz.IsOn() { @@ -1225,7 +1274,6 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } } if len(binlogs) != 0 { - ctx := stream.Context() md, _ := metadata.FromIncomingContext(ctx) logEntry := &binarylog.ClientHeader{ Header: md, @@ -1296,7 +1344,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if len(shs) != 0 || len(binlogs) != 0 { payInfo = &payloadInfo{} } - d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + d, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) if err != nil { if e := t.WriteStatus(stream, status.Convert(err)); e != nil { channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) @@ -1306,12 +1354,12 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if channelz.IsOn() { t.IncrMsgRecv() } - df := func(v interface{}) error { + df := func(v any) error { if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } for _, sh := range shs { - sh.HandleRPC(stream.Context(), &stats.InPayload{ + sh.HandleRPC(ctx, &stats.InPayload{ RecvTime: time.Now(), Payload: v, Length: len(d), @@ -1325,7 +1373,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Message: d, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), cm) + binlog.Log(ctx, cm) } } if trInfo != nil { @@ -1333,7 +1381,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } return nil } - ctx := NewContextWithServerTransportStream(stream.Context(), stream) + ctx = NewContextWithServerTransportStream(ctx, stream) reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt) if appErr != nil { appStatus, ok := status.FromError(appErr) @@ -1358,7 +1406,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Header: h, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), sh) + binlog.Log(ctx, sh) } } st := &binarylog.ServerTrailer{ @@ -1366,7 +1414,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Err: appErr, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } return appErr @@ -1381,7 +1429,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if stream.SendCompress() != sendCompressorName { comp = encoding.GetCompressor(stream.SendCompress()) } - if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { + if err := s.sendResponse(ctx, t, stream, reply, cp, opts, comp); err != nil { if err == io.EOF { // The entire stream is done (for unary RPC only). return err @@ -1408,8 +1456,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
Err: appErr, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), sh) - binlog.Log(stream.Context(), st) + binlog.Log(ctx, sh) + binlog.Log(ctx, st) } } return err @@ -1423,8 +1471,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Message: reply, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), sh) - binlog.Log(stream.Context(), sm) + binlog.Log(ctx, sh) + binlog.Log(ctx, sm) } } if channelz.IsOn() { @@ -1442,7 +1490,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Err: appErr, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } return t.WriteStatus(stream, statusOK) @@ -1470,7 +1518,7 @@ func chainStreamServerInterceptors(s *Server) { } func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { - return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { + return func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) } } @@ -1479,12 +1527,12 @@ func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, inf if curr == len(interceptors)-1 { return finalHandler } - return func(srv interface{}, stream ServerStream) error { + return func(srv any, stream ServerStream) error { return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) } } -func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { +func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { if channelz.IsOn() { s.incrCallsStarted() } @@ -1498,15 +1546,15 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp IsServerStream: sd.ServerStreams, } for _, sh := range shs { - sh.HandleRPC(stream.Context(), statsBegin) + sh.HandleRPC(ctx, statsBegin) } } - ctx := NewContextWithServerTransportStream(stream.Context(), stream) + ctx = NewContextWithServerTransportStream(ctx, stream) ss := &serverStream{ ctx: ctx, t: t, s: stream, - p: &parser{r: stream}, + p: &parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, codec: s.getCodec(stream.ContentSubtype()), maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, @@ -1520,7 +1568,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if trInfo != nil { ss.mu.Lock() if err != nil && err != io.EOF { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ss.trInfo.tr.SetError() } ss.trInfo.tr.Finish() @@ -1537,7 +1585,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp end.Error = toRPCErr(err) } for _, sh := range shs { - sh.HandleRPC(stream.Context(), end) + sh.HandleRPC(ctx, end) } } @@ -1579,7 +1627,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp logEntry.PeerAddr = peer.Addr } for _, binlog := range ss.binlogs { - binlog.Log(stream.Context(), logEntry) + binlog.Log(ctx, logEntry) } } @@ -1623,7 +1671,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp 
trInfo.tr.LazyLog(&trInfo.firstLine, false) } var appErr error - var server interface{} + var server any if info != nil { server = info.serviceImpl } @@ -1657,7 +1705,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp Err: appErr, } for _, binlog := range ss.binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } t.WriteStatus(ss.s, appStatus) @@ -1675,53 +1723,87 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp Err: appErr, } for _, binlog := range ss.binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } return t.WriteStatus(ss.s, statusOK) } -func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) { + ctx := stream.Context() + ctx = contextWithServer(ctx, s) + var ti *traceInfo + if EnableTracing { + tr := trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()) + ctx = trace.NewContext(ctx, tr) + ti = &traceInfo{ + tr: tr, + firstLine: firstLine{ + client: false, + remoteAddr: t.Peer().Addr, + }, + } + if dl, ok := ctx.Deadline(); ok { + ti.firstLine.deadline = time.Until(dl) + } + } + sm := stream.Method() if sm != "" && sm[0] == '/' { sm = sm[1:] } pos := strings.LastIndex(sm, "/") if pos == -1 { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true) + ti.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ti.tr.SetError() } channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) } - if trInfo != nil { - trInfo.tr.Finish() + if ti != nil { + ti.tr.Finish() } return } service := sm[:pos] method := sm[pos+1:] + md, _ := metadata.FromIncomingContext(ctx) + for _, sh := range s.opts.statsHandlers { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) + sh.HandleRPC(ctx, &stats.InHeader{ + FullMethod: stream.Method(), + RemoteAddr: t.Peer().Addr, + LocalAddr: t.Peer().LocalAddr, + Compression: stream.RecvCompress(), + WireLength: stream.HeaderWireLength(), + Header: md, + }) + } + // To have calls in stream callouts work. Will delete once all stats handler + // calls come from the gRPC layer. + stream.SetContext(ctx) + srv, knownService := s.services[service] if knownService { if md, ok := srv.methods[method]; ok { - s.processUnaryRPC(t, stream, srv, md, trInfo) + s.processUnaryRPC(ctx, t, stream, srv, md, ti) return } if sd, ok := srv.streams[method]; ok { - s.processStreamingRPC(t, stream, srv, sd, trInfo) + s.processStreamingRPC(ctx, t, stream, srv, sd, ti) return } } // Unknown service, or known server unknown method. 
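Because handleStream now tags the RPC and emits stats.InHeader itself, a stats handler registered on the server sees those callbacks directly. A minimal illustrative handler; the headerLogger type and its logging are assumptions, not part of the patch:

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
)

// headerLogger is a hypothetical stats.Handler that only reacts to the
// InHeader events handleStream now emits for every RPC.
type headerLogger struct{}

func (headerLogger) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context   { return ctx }
func (headerLogger) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
func (headerLogger) HandleConn(context.Context, stats.ConnStats)                       {}

func (headerLogger) HandleRPC(_ context.Context, s stats.RPCStats) {
	if h, ok := s.(*stats.InHeader); ok {
		log.Printf("method=%s remote=%v wire-length=%d", h.FullMethod, h.RemoteAddr, h.WireLength)
	}
}

func main() {
	_ = grpc.NewServer(grpc.StatsHandler(headerLogger{}))
}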
if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { - s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) + s.processStreamingRPC(ctx, t, stream, nil, unknownDesc, ti) return } var errDesc string @@ -1730,19 +1812,19 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str } else { errDesc = fmt.Sprintf("unknown method %v for service %v", method, service) } - if trInfo != nil { - trInfo.tr.LazyPrintf("%s", errDesc) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyPrintf("%s", errDesc) + ti.tr.SetError() } if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ti.tr.SetError() } channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) } - if trInfo != nil { - trInfo.tr.Finish() + if ti != nil { + ti.tr.Finish() } } @@ -1797,62 +1879,72 @@ func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream // pending RPCs on the client side will get notified by connection // errors. func (s *Server) Stop() { - s.quit.Fire() + s.stop(false) +} - defer func() { - s.serveWG.Wait() - s.done.Fire() - }() +// GracefulStop stops the gRPC server gracefully. It stops the server from +// accepting new connections and RPCs and blocks until all the pending RPCs are +// finished. +func (s *Server) GracefulStop() { + s.stop(true) +} + +func (s *Server) stop(graceful bool) { + s.quit.Fire() + defer s.done.Fire() s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) s.mu.Lock() - listeners := s.lis - s.lis = nil - conns := s.conns - s.conns = nil - // interrupt GracefulStop if Stop and GracefulStop are called concurrently. - s.cv.Broadcast() + s.closeListenersLocked() + // Wait for serving threads to be ready to exit. Only then can we be sure no + // new conns will be created. s.mu.Unlock() + s.serveWG.Wait() - for lis := range listeners { - lis.Close() + s.mu.Lock() + defer s.mu.Unlock() + + if graceful { + s.drainAllServerTransportsLocked() + } else { + s.closeServerTransportsLocked() } - for _, cs := range conns { - for st := range cs { - st.Close(errors.New("Server.Stop called")) - } + + for len(s.conns) != 0 { + s.cv.Wait() } + s.conns = nil + if s.opts.numServerWorkers > 0 { - s.stopServerWorkers() + // Closing the channel (only once, via grpcsync.OnceFunc) after all the + // connections have been closed above ensures that there are no + // goroutines executing the callback passed to st.HandleStreams (where + // the channel is written to). + s.serverWorkerChannelClose() + } + + if graceful || s.opts.waitForHandlers { + s.handlersWG.Wait() } - s.mu.Lock() if s.events != nil { s.events.Finish() s.events = nil } - s.mu.Unlock() } -// GracefulStop stops the gRPC server gracefully. It stops the server from -// accepting new connections and RPCs and blocks until all the pending RPCs are -// finished. -func (s *Server) GracefulStop() { - s.quit.Fire() - defer s.done.Fire() - - s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) - s.mu.Lock() - if s.conns == nil { - s.mu.Unlock() - return +// s.mu must be held by the caller. 
+func (s *Server) closeServerTransportsLocked() { + for _, conns := range s.conns { + for st := range conns { + st.Close(errors.New("Server.Stop called")) + } } +} - for lis := range s.lis { - lis.Close() - } - s.lis = nil +// s.mu must be held by the caller. +func (s *Server) drainAllServerTransportsLocked() { if !s.drain { for _, conns := range s.conns { for st := range conns { @@ -1861,22 +1953,14 @@ func (s *Server) GracefulStop() { } s.drain = true } +} - // Wait for serving threads to be ready to exit. Only then can we be sure no - // new conns will be created. - s.mu.Unlock() - s.serveWG.Wait() - s.mu.Lock() - - for len(s.conns) != 0 { - s.cv.Wait() - } - s.conns = nil - if s.events != nil { - s.events.Finish() - s.events = nil +// s.mu must be held by the caller. +func (s *Server) closeListenersLocked() { + for lis := range s.lis { + lis.Close() } - s.mu.Unlock() + s.lis = nil } // contentSubtype must be lowercase @@ -1890,11 +1974,50 @@ func (s *Server) getCodec(contentSubtype string) baseCodec { } codec := encoding.GetCodec(contentSubtype) if codec == nil { + logger.Warningf("Unsupported codec %q. Defaulting to %q for now. This will start to fail in future releases.", contentSubtype, proto.Name) return encoding.GetCodec(proto.Name) } return codec } +type serverKey struct{} + +// serverFromContext gets the Server from the context. +func serverFromContext(ctx context.Context) *Server { + s, _ := ctx.Value(serverKey{}).(*Server) + return s +} + +// contextWithServer sets the Server in the context. +func contextWithServer(ctx context.Context, server *Server) context.Context { + return context.WithValue(ctx, serverKey{}, server) +} + +// isRegisteredMethod returns whether the passed in method is registered as a +// method on the server. /service/method and service/method will match if the +// service and method are registered on the server. +func (s *Server) isRegisteredMethod(serviceMethod string) bool { + if serviceMethod != "" && serviceMethod[0] == '/' { + serviceMethod = serviceMethod[1:] + } + pos := strings.LastIndex(serviceMethod, "/") + if pos == -1 { // Invalid method name syntax. + return false + } + service := serviceMethod[:pos] + method := serviceMethod[pos+1:] + srv, knownService := s.services[service] + if knownService { + if _, ok := srv.methods[method]; ok { + return true + } + if _, ok := srv.streams[method]; ok { + return true + } + } + return false +} + // SetHeader sets the header metadata to be sent from the server to the client. // The context provided must be the context passed to the server's handler. // @@ -2052,3 +2175,34 @@ func validateSendCompressor(name, clientCompressors string) error { } return fmt.Errorf("client does not support compressor %q", name) } + +// atomicSemaphore implements a blocking, counting semaphore. acquire should be +// called synchronously; release may be called asynchronously. +type atomicSemaphore struct { + n atomic.Int64 + wait chan struct{} +} + +func (q *atomicSemaphore) acquire() { + if q.n.Add(-1) < 0 { + // We ran out of quota. Block until a release happens. + <-q.wait + } +} + +func (q *atomicSemaphore) release() { + // N.B. the "<= 0" check below should allow for this to work with multiple + // concurrent calls to acquire, but also note that with synchronous calls to + // acquire, as our system does, n will never be less than -1. There are + // fairness issues (queuing) to consider if this was to be generalized. + if q.n.Add(1) <= 0 { + // An acquire was waiting on us. Unblock it. 
+ q.wait <- struct{}{} +} + +func newHandlerQuota(n uint32) *atomicSemaphore { + a := &atomicSemaphore{wait: make(chan struct{}, 1)} + a.n.Store(int64(n)) + return a +} diff --git a/vendor/google.golang.org/grpc/shared_buffer_pool.go b/vendor/google.golang.org/grpc/shared_buffer_pool.go new file mode 100644 index 00000000..48a64cfe --- /dev/null +++ b/vendor/google.golang.org/grpc/shared_buffer_pool.go @@ -0,0 +1,154 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import "sync" + +// SharedBufferPool is a pool of buffers that can be shared, resulting in +// decreased memory allocation. Currently, in gRPC-go, it is only utilized +// for parsing incoming messages. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +type SharedBufferPool interface { + // Get returns a buffer with specified length from the pool. + // + // The returned byte slice may not be zero initialized. + Get(length int) []byte + + // Put returns a buffer to the pool. + Put(*[]byte) } + +// NewSharedBufferPool creates a simple SharedBufferPool with buckets +// of different sizes to optimize memory usage. This prevents the pool from +// wasting large amounts of memory, even when handling messages of varying sizes. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func NewSharedBufferPool() SharedBufferPool { + return &simpleSharedBufferPool{ + pools: [poolArraySize]simpleSharedBufferChildPool{ + newBytesPool(level0PoolMaxSize), + newBytesPool(level1PoolMaxSize), + newBytesPool(level2PoolMaxSize), + newBytesPool(level3PoolMaxSize), + newBytesPool(level4PoolMaxSize), + newBytesPool(0), + }, + } +} + +// simpleSharedBufferPool is a simple implementation of SharedBufferPool.
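A rough usage sketch for the pool defined here: Get may return an uninitialized slice from the matching size bucket, and Put takes a pointer so the backing array can be retained:

package main

import (
	"fmt"

	"google.golang.org/grpc"
)

func main() {
	pool := grpc.NewSharedBufferPool()

	buf := pool.Get(4096) // served from the 4 KB bucket; contents may be uninitialized
	fmt.Println(len(buf)) // 4096

	pool.Put(&buf) // hand the backing array back to its bucket for reuse
}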
+type simpleSharedBufferPool struct { + pools [poolArraySize]simpleSharedBufferChildPool +} + +func (p *simpleSharedBufferPool) Get(size int) []byte { + return p.pools[p.poolIdx(size)].Get(size) +} + +func (p *simpleSharedBufferPool) Put(bs *[]byte) { + p.pools[p.poolIdx(cap(*bs))].Put(bs) +} + +func (p *simpleSharedBufferPool) poolIdx(size int) int { + switch { + case size <= level0PoolMaxSize: + return level0PoolIdx + case size <= level1PoolMaxSize: + return level1PoolIdx + case size <= level2PoolMaxSize: + return level2PoolIdx + case size <= level3PoolMaxSize: + return level3PoolIdx + case size <= level4PoolMaxSize: + return level4PoolIdx + default: + return levelMaxPoolIdx + } +} + +const ( + level0PoolMaxSize = 16 // 16 B + level1PoolMaxSize = level0PoolMaxSize * 16 // 256 B + level2PoolMaxSize = level1PoolMaxSize * 16 // 4 KB + level3PoolMaxSize = level2PoolMaxSize * 16 // 64 KB + level4PoolMaxSize = level3PoolMaxSize * 16 // 1 MB +) + +const ( + level0PoolIdx = iota + level1PoolIdx + level2PoolIdx + level3PoolIdx + level4PoolIdx + levelMaxPoolIdx + poolArraySize +) + +type simpleSharedBufferChildPool interface { + Get(size int) []byte + Put(any) +} + +type bufferPool struct { + sync.Pool + + defaultSize int +} + +func (p *bufferPool) Get(size int) []byte { + bs := p.Pool.Get().(*[]byte) + + if cap(*bs) < size { + p.Pool.Put(bs) + + return make([]byte, size) + } + + return (*bs)[:size] +} + +func newBytesPool(size int) simpleSharedBufferChildPool { + return &bufferPool{ + Pool: sync.Pool{ + New: func() any { + bs := make([]byte, size) + return &bs + }, + }, + defaultSize: size, + } +} + +// nopBufferPool is a buffer pool that just makes new buffers without pooling. +type nopBufferPool struct { +} + +func (nopBufferPool) Get(length int) []byte { + return make([]byte, length) +} + +func (nopBufferPool) Put(*[]byte) { +} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index 7a552a9b..4ab70e2d 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -59,12 +59,22 @@ func (s *Begin) IsClient() bool { return s.Client } func (s *Begin) isRPCStats() {} +// PickerUpdated indicates that the LB policy provided a new picker while the +// RPC was waiting for one. +type PickerUpdated struct{} + +// IsClient indicates if the stats information is from client side. Only Client +// Side interfaces with a Picker, thus always returns true. +func (*PickerUpdated) IsClient() bool { return true } + +func (*PickerUpdated) isRPCStats() {} + // InPayload contains the information for an incoming payload. type InPayload struct { // Client is true if this InPayload is from client side. Client bool // Payload is the payload with original type. - Payload interface{} + Payload any // Data is the serialized message payload. Data []byte @@ -134,7 +144,7 @@ type OutPayload struct { // Client is true if this OutPayload is from client side. Client bool // Payload is the payload with original type. - Payload interface{} + Payload any // Data is the serialized message payload. Data []byte // Length is the size of the uncompressed payload data.
Does not include any diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go index 53910fb7..a93360ef 100644 --- a/vendor/google.golang.org/grpc/status/status.go +++ b/vendor/google.golang.org/grpc/status/status.go @@ -50,7 +50,7 @@ func New(c codes.Code, msg string) *Status { } // Newf returns New(c, fmt.Sprintf(format, a...)). -func Newf(c codes.Code, format string, a ...interface{}) *Status { +func Newf(c codes.Code, format string, a ...any) *Status { return New(c, fmt.Sprintf(format, a...)) } @@ -60,7 +60,7 @@ func Error(c codes.Code, msg string) error { } // Errorf returns Error(c, fmt.Sprintf(format, a...)). -func Errorf(c codes.Code, format string, a ...interface{}) error { +func Errorf(c codes.Code, format string, a ...any) error { return Error(c, fmt.Sprintf(format, a...)) } @@ -77,11 +77,18 @@ func FromProto(s *spb.Status) *Status { // FromError returns a Status representation of err. // // - If err was produced by this package or implements the method `GRPCStatus() -// *Status`, or if err wraps a type satisfying this, the appropriate Status is -// returned. For wrapped errors, the message returned contains the entire -// err.Error() text and not just the wrapped status. +// *Status` and `GRPCStatus()` does not return nil, or if err wraps a type +// satisfying this, the Status from `GRPCStatus()` is returned. For wrapped +// errors, the message returned contains the entire err.Error() text and not +// just the wrapped status. In that case, ok is true. // -// - If err is nil, a Status is returned with codes.OK and no message. +// - If err is nil, a Status is returned with codes.OK and no message, and ok +// is true. +// +// - If err implements the method `GRPCStatus() *Status` and `GRPCStatus()` +// returns nil (which maps to Codes.OK), or if err wraps a type +// satisfying this, a Status is returned with codes.Unknown and err's +// Error() message, and ok is false. // // - Otherwise, err is an error not compatible with this package. In this // case, a Status is returned with codes.Unknown and err's Error() message, @@ -92,11 +99,27 @@ func FromError(err error) (s *Status, ok bool) { } type grpcstatus interface{ GRPCStatus() *Status } if gs, ok := err.(grpcstatus); ok { - return gs.GRPCStatus(), true + grpcStatus := gs.GRPCStatus() + if grpcStatus == nil { + // Error has status nil, which maps to codes.OK. There + // is no sensible behavior for this, so we turn it into + // an error with codes.Unknown and discard the existing + // status. + return New(codes.Unknown, err.Error()), false + } + return grpcStatus, true } var gs grpcstatus if errors.As(err, &gs) { - p := gs.GRPCStatus().Proto() + grpcStatus := gs.GRPCStatus() + if grpcStatus == nil { + // Error wraps an error that has status nil, which maps + // to codes.OK. There is no sensible behavior for this, + // so we turn it into an error with codes.Unknown and + // discard the existing status. 
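A sketch of the new nil-GRPCStatus branch from the caller's side; nilStatusError is a hypothetical type used only to trigger the case described above:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// nilStatusError implements GRPCStatus() but returns a nil *status.Status,
// which nominally maps to codes.OK yet carries no usable status.
type nilStatusError struct{}

func (nilStatusError) Error() string              { return "broken status" }
func (nilStatusError) GRPCStatus() *status.Status { return nil }

func main() {
	s, ok := status.FromError(nilStatusError{})
	fmt.Println(s.Code(), ok) // Unknown false
}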
+ return New(codes.Unknown, err.Error()), false + } + p := grpcStatus.Proto() p.Message = err.Error() return status.FromProto(p), true } diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 10092685..d621f52b 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -31,6 +31,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/encoding" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancerload" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" @@ -47,6 +48,8 @@ import ( "google.golang.org/grpc/status" ) +var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool)) + // StreamHandler defines the handler called by gRPC server to complete the // execution of a streaming RPC. // @@ -54,7 +57,7 @@ import ( // status package, or be one of the context errors. Otherwise, gRPC will use // codes.Unknown as the status code and err.Error() as the status message of the // RPC. -type StreamHandler func(srv interface{}, stream ServerStream) error +type StreamHandler func(srv any, stream ServerStream) error // StreamDesc represents a streaming RPC service's method specification. Used // on the server when registering services and on the client when initiating @@ -79,9 +82,9 @@ type Stream interface { // Deprecated: See ClientStream and ServerStream documentation instead. Context() context.Context // Deprecated: See ClientStream and ServerStream documentation instead. - SendMsg(m interface{}) error + SendMsg(m any) error // Deprecated: See ClientStream and ServerStream documentation instead. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // ClientStream defines the client-side behavior of a streaming RPC. @@ -90,7 +93,9 @@ type Stream interface { // status package. type ClientStream interface { // Header returns the header metadata received from the server if there - // is any. It blocks if the metadata is not ready to read. + // is any. It blocks if the metadata is not ready to read. If the metadata + // is nil and the error is also nil, then the stream was terminated without + // headers, and the status can be discovered by calling RecvMsg. Header() (metadata.MD, error) // Trailer returns the trailer metadata from the server, if there is any. // It must only be called after stream.CloseAndRecv has returned, or @@ -126,7 +131,7 @@ type ClientStream interface { // // It is not safe to modify the message after calling SendMsg. Tracing // libraries and stats handlers may use the message lazily. - SendMsg(m interface{}) error + SendMsg(m any) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the stream completes successfully. On // any other error, the stream is aborted and the error contains the RPC @@ -135,7 +140,7 @@ type ClientStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // NewStream creates a new Stream for the client side. 
This is typically @@ -155,11 +160,6 @@ type ClientStream interface { // If none of the above happen, a goroutine and a context will be leaked, and grpc // will not call the optionally-configured stats handler with a stats.End message. func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { - if err := cc.idlenessMgr.onCallBegin(); err != nil { - return nil, err - } - defer cc.idlenessMgr.onCallEnd() - // allow interceptor to see all applicable call options, which means those // configured as defaults from dial option as well as per-call options opts = combine(cc.dopts.callOptions, opts) @@ -176,7 +176,17 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { - if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { + // Start tracking the RPC for idleness purposes. This is where a stream is + // created for both streaming and unary RPCs, and hence is a good place to + // track active RPC count. + if err := cc.idlenessMgr.OnCallBegin(); err != nil { + return nil, err + } + // Add a calloption, to decrement the active call count, that gets executed + // when the RPC completes. + opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...) + + if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok { // validate md if err := imetadata.Validate(md); err != nil { return nil, status.Error(codes.Internal, err.Error()) @@ -433,7 +443,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) ctx = trace.NewContext(ctx, trInfo.tr) } - if cs.cc.parsedTarget.URL.Scheme == "xds" { + if cs.cc.parsedTarget.URL.Scheme == internal.GRPCResolverSchemeExtraMetadata { // Add extra metadata (metadata that will be added by transport) to context // so the balancer can see them. ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs( @@ -507,7 +517,7 @@ func (a *csAttempt) newStream() error { return toRPCErr(nse.Err) } a.s = s - a.p = &parser{r: s} + a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool} return nil } @@ -788,23 +798,24 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) func (cs *clientStream) Header() (metadata.MD, error) { var m metadata.MD - noHeader := false err := cs.withRetry(func(a *csAttempt) error { var err error m, err = a.s.Header() - if err == transport.ErrNoHeaders { - noHeader = true - return nil - } return toRPCErr(err) }, cs.commitAttemptLocked) + if m == nil && err == nil { + // The stream ended with success. Finish the clientStream. + err = io.EOF + } + if err != nil { cs.finish(err) - return nil, err + // Do not return the error. The user should get it by calling Recv(). + return nil, nil } - if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && !noHeader { + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && m != nil { // Only log if binary log is on and header has not been logged, and // there is actually headers to log. 
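Under the revised Header contract, a nil header with a nil error means the stream ended without headers and the status must be read via RecvMsg. A hedged helper sketch; the package name and signature are illustrative, not part of the patch:

package clientutil

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// headerOrStatus reflects the revised contract: Header returning (nil, nil)
// means the stream terminated without headers, and the actual status is
// surfaced by RecvMsg. The msg argument stands in for a generated type.
func headerOrStatus(stream grpc.ClientStream, msg any) (metadata.MD, error) {
	md, err := stream.Header()
	if err != nil {
		return nil, err
	}
	if md == nil {
		return nil, stream.RecvMsg(msg) // stream already ended; fetch the status
	}
	return md, nil
}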
logEntry := &binarylog.ServerHeader{ @@ -820,6 +831,7 @@ func (cs *clientStream) Header() (metadata.MD, error) { binlog.Log(cs.ctx, logEntry) } } + return m, nil } @@ -860,7 +872,7 @@ func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error cs.buffer = append(cs.buffer, op) } -func (cs *clientStream) SendMsg(m interface{}) (err error) { +func (cs *clientStream) SendMsg(m any) (err error) { defer func() { if err != nil && err != io.EOF { // Call finish on the client stream for errors generated by this SendMsg @@ -904,7 +916,7 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { return err } -func (cs *clientStream) RecvMsg(m interface{}) error { +func (cs *clientStream) RecvMsg(m any) error { if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged { // Call Header() to binary log header if it's not already logged. cs.Header() @@ -928,24 +940,6 @@ func (cs *clientStream) RecvMsg(m interface{}) error { if err != nil || !cs.desc.ServerStreams { // err != nil or non-server-streaming indicates end of stream. cs.finish(err) - - if len(cs.binlogs) != 0 { - // finish will not log Trailer. Log Trailer here. - logEntry := &binarylog.ServerTrailer{ - OnClientSide: true, - Trailer: cs.Trailer(), - Err: err, - } - if logEntry.Err == io.EOF { - logEntry.Err = nil - } - if peer, ok := peer.FromContext(cs.Context()); ok { - logEntry.PeerAddr = peer.Addr - } - for _, binlog := range cs.binlogs { - binlog.Log(cs.ctx, logEntry) - } - } } return err } @@ -1001,18 +995,30 @@ func (cs *clientStream) finish(err error) { } } } + cs.mu.Unlock() - // For binary logging. only log cancel in finish (could be caused by RPC ctx - // canceled or ClientConn closed). Trailer will be logged in RecvMsg. - // - // Only one of cancel or trailer needs to be logged. In the cases where - // users don't call RecvMsg, users must have already canceled the RPC. - if len(cs.binlogs) != 0 && status.Code(err) == codes.Canceled { - c := &binarylog.Cancel{ - OnClientSide: true, - } - for _, binlog := range cs.binlogs { - binlog.Log(cs.ctx, c) + // Only one of cancel or trailer needs to be logged. 
+ if len(cs.binlogs) != 0 { + switch err { + case errContextCanceled, errContextDeadline, ErrClientConnClosing: + c := &binarylog.Cancel{ + OnClientSide: true, + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, c) + } + default: + logEntry := &binarylog.ServerTrailer{ + OnClientSide: true, + Trailer: cs.Trailer(), + Err: err, + } + if peer, ok := peer.FromContext(cs.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, logEntry) + } } } if err == nil { @@ -1028,7 +1034,7 @@ func (cs *clientStream) finish(err error) { cs.cancel() } -func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { +func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error { cs := a.cs if a.trInfo != nil { a.mu.Lock() @@ -1055,7 +1061,7 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { return nil } -func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { +func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { cs := a.cs if len(a.statsHandlers) != 0 && payInfo == nil { payInfo = &payloadInfo{} @@ -1270,7 +1276,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin return nil, err } as.s = s - as.p = &parser{r: s} + as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool} ac.incrCallsStarted() if desc != unaryStreamDesc { // Listen on stream context to cleanup when the stream context is @@ -1348,7 +1354,7 @@ func (as *addrConnStream) Context() context.Context { return as.s.Context() } -func (as *addrConnStream) SendMsg(m interface{}) (err error) { +func (as *addrConnStream) SendMsg(m any) (err error) { defer func() { if err != nil && err != io.EOF { // Call finish on the client stream for errors generated by this SendMsg @@ -1393,7 +1399,7 @@ func (as *addrConnStream) SendMsg(m interface{}) (err error) { return nil } -func (as *addrConnStream) RecvMsg(m interface{}) (err error) { +func (as *addrConnStream) RecvMsg(m any) (err error) { defer func() { if err != nil || !as.desc.ServerStreams { // err != nil or non-server-streaming indicates end of stream. @@ -1512,7 +1518,7 @@ type ServerStream interface { // // It is not safe to modify the message after calling SendMsg. Tracing // libraries and stats handlers may use the message lazily. - SendMsg(m interface{}) error + SendMsg(m any) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the client has performed a CloseSend. On // any non-EOF error, the stream is aborted and the error contains the @@ -1521,7 +1527,7 @@ type ServerStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // serverStream implements a server side Stream. 
@@ -1602,7 +1608,7 @@ func (ss *serverStream) SetTrailer(md metadata.MD) { ss.s.SetTrailer(md) } -func (ss *serverStream) SendMsg(m interface{}) (err error) { +func (ss *serverStream) SendMsg(m any) (err error) { defer func() { if ss.trInfo != nil { ss.mu.Lock() @@ -1610,7 +1616,7 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { if err == nil { ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) } else { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ss.trInfo.tr.SetError() } } @@ -1677,7 +1683,7 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { return nil } -func (ss *serverStream) RecvMsg(m interface{}) (err error) { +func (ss *serverStream) RecvMsg(m any) (err error) { defer func() { if ss.trInfo != nil { ss.mu.Lock() @@ -1685,7 +1691,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { if err == nil { ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) } else if err != io.EOF { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ss.trInfo.tr.SetError() } } @@ -1757,7 +1763,7 @@ func MethodFromServerStream(stream ServerStream) (string, bool) { // prepareMsg returns the hdr, payload and data // using the compressors passed or using the // passed preparedmsg -func prepareMsg(m interface{}, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { +func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { if preparedMsg, ok := m.(*PreparedMsg); ok { return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil } diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go index bfa5dfa4..07f01257 100644 --- a/vendor/google.golang.org/grpc/tap/tap.go +++ b/vendor/google.golang.org/grpc/tap/tap.go @@ -27,6 +27,8 @@ package tap import ( "context" + + "google.golang.org/grpc/metadata" ) // Info defines the relevant information needed by the handles. @@ -34,6 +36,10 @@ type Info struct { // FullMethodName is the string of grpc method (in the format of // /package.service/method). FullMethodName string + + // Header contains the header metadata received. + Header metadata.MD + // TODO: More to be added. } diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go index 07a2d26b..9ded7932 100644 --- a/vendor/google.golang.org/grpc/trace.go +++ b/vendor/google.golang.org/grpc/trace.go @@ -97,8 +97,8 @@ func truncate(x string, l int) string { // payload represents an RPC request or response payload. type payload struct { - sent bool // whether this is an outgoing payload - msg interface{} // e.g. a proto.Message + sent bool // whether this is an outgoing payload + msg any // e.g. a proto.Message // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here? } @@ -111,7 +111,7 @@ func (p payload) String() string { type fmtStringer struct { format string - a []interface{} + a []any } func (f *fmtStringer) String() string { diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 0f1f8b9b..1ad1ba2a 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
-const Version = "1.56.1" +const Version = "1.61.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index a8e4732b..5da38a40 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -35,7 +35,6 @@ if [[ "$1" = "-install" ]]; then # Install the pinned versions as defined in module tools. pushd ./test/tools go install \ - golang.org/x/lint/golint \ golang.org/x/tools/cmd/goimports \ honnef.co/go/tools/cmd/staticcheck \ github.com/client9/misspell/cmd/misspell @@ -77,12 +76,19 @@ fi not grep 'func Test[^(]' *_test.go not grep 'func Test[^(]' test/*.go +# - Check for typos in test function names +git grep 'func (s) ' -- "*_test.go" | not grep -v 'func (s) Test' +git grep 'func [A-Z]' -- "*_test.go" | not grep -v 'func Test\|Benchmark\|Example' + # - Do not import x/net/context. not git grep -l 'x/net/context' -- "*.go" # - Do not import math/rand for real library code. Use internal/grpcrand for # thread safety. -git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test' +git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^interop/stress\|grpcrand\|^benchmark\|wrr_test' + +# - Do not use "interface{}"; use "any" instead. +git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc\|grpc_testing_not_regenerate' # - Do not call grpclog directly. Use grpclog.Component instead. git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' @@ -90,13 +96,15 @@ git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpc # - Ensure all ptypes proto packages are renamed when importing. not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" +# - Ensure all usages of grpc_testing package are renamed when importing. +not git grep "\(import \|^\s*\)\"google.golang.org/grpc/interop/grpc_testing" -- "*.go" + # - Ensure all xds proto imports are renamed to *pb or *grpc. git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "' misspell -error . -# - gofmt, goimports, golint (with exceptions for generated code), go vet, -# go mod tidy. +# - gofmt, goimports, go vet, go mod tidy. # Perform these checks on each module inside gRPC. for MOD_FILE in $(find . -name 'go.mod'); do MOD_DIR=$(dirname ${MOD_FILE}) @@ -104,105 +112,79 @@ for MOD_FILE in $(find . -name 'go.mod'); do go vet -all ./... | fail_on_output gofmt -s -d -l . 2>&1 | fail_on_output goimports -l . 2>&1 | not grep -vE "\.pb\.go" - golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:" - go mod tidy -compat=1.17 + go mod tidy -compat=1.19 git status --porcelain 2>&1 | fail_on_output || \ (git status; git --no-pager diff; exit 1) popd done # - Collection of static analysis checks -# -# TODO(dfawley): don't use deprecated functions in examples or first-party -# plugins. -# TODO(dfawley): enable ST1019 (duplicate imports) but allow for protobufs. SC_OUT="$(mktemp)" -staticcheck -go 1.19 -checks 'inherit,-ST1015,-ST1019,-SA1019' ./... > "${SC_OUT}" || true -# Error if anything other than deprecation warnings are printed. -not grep -v "is deprecated:.*SA1019" "${SC_OUT}" -# Only ignore the following deprecated types/fields/functions. 
-not grep -Fv '.CredsBundle -.HeaderMap -.Metadata is deprecated: use Attributes -.NewAddress -.NewServiceConfig -.Type is deprecated: use Attributes -BuildVersion is deprecated -balancer.ErrTransientFailure -balancer.Picker -extDesc.Filename is deprecated -github.com/golang/protobuf/jsonpb is deprecated -grpc.CallCustomCodec -grpc.Code -grpc.Compressor -grpc.CustomCodec -grpc.Decompressor -grpc.MaxMsgSize -grpc.MethodConfig -grpc.NewGZIPCompressor -grpc.NewGZIPDecompressor -grpc.RPCCompressor -grpc.RPCDecompressor -grpc.ServiceConfig -grpc.WithCompressor -grpc.WithDecompressor -grpc.WithDialer -grpc.WithMaxMsgSize -grpc.WithServiceConfig -grpc.WithTimeout -http.CloseNotifier -info.SecurityVersion -proto is deprecated -proto.InternalMessageInfo is deprecated -proto.EnumName is deprecated -proto.ErrInternalBadWireType is deprecated -proto.FileDescriptor is deprecated -proto.Marshaler is deprecated -proto.MessageType is deprecated -proto.RegisterEnum is deprecated -proto.RegisterFile is deprecated -proto.RegisterType is deprecated -proto.RegisterExtension is deprecated -proto.RegisteredExtension is deprecated -proto.RegisteredExtensions is deprecated -proto.RegisterMapType is deprecated -proto.Unmarshaler is deprecated -resolver.Backend -resolver.GRPCLB +staticcheck -go 1.19 -checks 'all' ./... > "${SC_OUT}" || true + +# Error for anything other than checks that need exclusions. +grep -v "(ST1000)" "${SC_OUT}" | grep -v "(SA1019)" | grep -v "(ST1003)" | not grep -v "(ST1019)\|\(other import of\)" + +# Exclude underscore checks for generated code. +grep "(ST1003)" "${SC_OUT}" | not grep -v '\(.pb.go:\)\|\(code_string_test.go:\)\|\(grpc_testing_not_regenerate\)' + +# Error for duplicate imports not including grpc protos. +grep "(ST1019)\|\(other import of\)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused +channelz/grpc_channelz_v1" +go-control-plane/envoy +grpclb/grpc_lb_v1" +health/grpc_health_v1" +interop/grpc_testing" +orca/v3" +proto/grpc_gcp" +proto/grpc_lookup_v1" +reflection/grpc_reflection_v1" +reflection/grpc_reflection_v1alpha" +XXXXX PleaseIgnoreUnused' + +# Error for any package comments not in generated code. +grep "(ST1000)" "${SC_OUT}" | not grep -v "\.pb\.go:" + +# Only ignore the following deprecated types/fields/functions and exclude +# generated code. +grep "(SA1019)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused +XXXXX Protobuf related deprecation errors: +"github.com/golang/protobuf +.pb.go: +grpc_testing_not_regenerate +: ptypes. +proto.RegisterType +XXXXX gRPC internal usage deprecation errors: +"google.golang.org/grpc +: grpc. +: v1alpha. +: v1alphareflectionpb. +BalancerAttributes is deprecated: +CredsBundle is deprecated: +Metadata is deprecated: use Attributes instead. +NewSubConn is deprecated: +OverrideServerName is deprecated: +RemoveSubConn is deprecated: +SecurityVersion is deprecated: Target is deprecated: Use the Target field in the BuildOptions instead. -xxx_messageInfo_ -' "${SC_OUT}" - -# - special golint on package comments. -lint_package_comment_per_package() { - # Number of files in this go package. - fileCount=$(go list -f '{{len .GoFiles}}' $1) - if [ ${fileCount} -eq 0 ]; then - return 0 - fi - # Number of package errors generated by golint. - lintPackageCommentErrorsCount=$(golint --min_confidence 0 $1 | grep -c "should have a package comment") - # golint complains about every file that's missing the package comment. 
If the - # number of files for this package is greater than the number of errors, there's - # at least one file with package comment, good. Otherwise, fail. - if [ ${fileCount} -le ${lintPackageCommentErrorsCount} ]; then - echo "Package $1 (with ${fileCount} files) is missing package comment" - return 1 - fi -} -lint_package_comment() { - set +ex - - count=0 - for i in $(go list ./...); do - lint_package_comment_per_package "$i" - ((count += $?)) - done - - set -ex - return $count -} -lint_package_comment +UpdateAddresses is deprecated: +UpdateSubConnState is deprecated: +balancer.ErrTransientFailure is deprecated: +grpc/reflection/v1alpha/reflection.proto +XXXXX xDS deprecated fields we support +.ExactMatch +.PrefixMatch +.SafeRegexMatch +.SuffixMatch +GetContainsMatch +GetExactMatch +GetMatchSubjectAltNames +GetPrefixMatch +GetSafeRegexMatch +GetSuffixMatch +GetTlsCertificateCertificateProviderInstance +GetValidationContextCertificateProviderInstance +XXXXX PleaseIgnoreUnused' echo SUCCESS diff --git a/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go b/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go new file mode 100644 index 00000000..2ef36bbc --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go @@ -0,0 +1,160 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protodelim marshals and unmarshals varint size-delimited messages. +package protodelim + +import ( + "bufio" + "encoding/binary" + "fmt" + "io" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/proto" +) + +// MarshalOptions is a configurable varint size-delimited marshaler. +type MarshalOptions struct{ proto.MarshalOptions } + +// MarshalTo writes a varint size-delimited wire-format message to w. +// If w returns an error, MarshalTo returns it unchanged. +func (o MarshalOptions) MarshalTo(w io.Writer, m proto.Message) (int, error) { + msgBytes, err := o.MarshalOptions.Marshal(m) + if err != nil { + return 0, err + } + + sizeBytes := protowire.AppendVarint(nil, uint64(len(msgBytes))) + sizeWritten, err := w.Write(sizeBytes) + if err != nil { + return sizeWritten, err + } + msgWritten, err := w.Write(msgBytes) + if err != nil { + return sizeWritten + msgWritten, err + } + return sizeWritten + msgWritten, nil +} + +// MarshalTo writes a varint size-delimited wire-format message to w +// with the default options. +// +// See the documentation for [MarshalOptions.MarshalTo]. +func MarshalTo(w io.Writer, m proto.Message) (int, error) { + return MarshalOptions{}.MarshalTo(w, m) +} + +// UnmarshalOptions is a configurable varint size-delimited unmarshaler. +type UnmarshalOptions struct { + proto.UnmarshalOptions + + // MaxSize is the maximum size in wire-format bytes of a single message. + // Unmarshaling a message larger than MaxSize will return an error. + // A zero MaxSize will default to 4 MiB. + // Setting MaxSize to -1 disables the limit. + MaxSize int64 +} + +const defaultMaxSize = 4 << 20 // 4 MiB, corresponds to the default gRPC max request/response size + +// SizeTooLargeError is an error that is returned when the unmarshaler encounters a message size +// that is larger than its configured [UnmarshalOptions.MaxSize]. +type SizeTooLargeError struct { + // Size is the varint size of the message encountered + // that was larger than the provided MaxSize. 
+ Size uint64 + + // MaxSize is the MaxSize limit configured in UnmarshalOptions, which Size exceeded. + MaxSize uint64 +} + +func (e *SizeTooLargeError) Error() string { + return fmt.Sprintf("message size %d exceeded unmarshaler's maximum configured size %d", e.Size, e.MaxSize) +} + +// Reader is the interface expected by [UnmarshalFrom]. +// It is implemented by *[bufio.Reader]. +type Reader interface { + io.Reader + io.ByteReader +} + +// UnmarshalFrom parses and consumes a varint size-delimited wire-format message +// from r. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +// +// The error is [io.EOF] error only if no bytes are read. +// If an EOF happens after reading some but not all the bytes, +// UnmarshalFrom returns a non-io.EOF error. +// In particular if r returns a non-io.EOF error, UnmarshalFrom returns it unchanged, +// and if only a size is read with no subsequent message, [io.ErrUnexpectedEOF] is returned. +func (o UnmarshalOptions) UnmarshalFrom(r Reader, m proto.Message) error { + var sizeArr [binary.MaxVarintLen64]byte + sizeBuf := sizeArr[:0] + for i := range sizeArr { + b, err := r.ReadByte() + if err != nil { + // Immediate EOF is unexpected. + if err == io.EOF && i != 0 { + break + } + return err + } + sizeBuf = append(sizeBuf, b) + if b < 0x80 { + break + } + } + size, n := protowire.ConsumeVarint(sizeBuf) + if n < 0 { + return protowire.ParseError(n) + } + + maxSize := o.MaxSize + if maxSize == 0 { + maxSize = defaultMaxSize + } + if maxSize != -1 && size > uint64(maxSize) { + return errors.Wrap(&SizeTooLargeError{Size: size, MaxSize: uint64(maxSize)}, "") + } + + var b []byte + var err error + if br, ok := r.(*bufio.Reader); ok { + // Use the []byte from the bufio.Reader instead of having to allocate one. + // This reduces CPU usage and allocated bytes. + b, err = br.Peek(int(size)) + if err == nil { + defer br.Discard(int(size)) + } else { + b = nil + } + } + if b == nil { + b = make([]byte, size) + _, err = io.ReadFull(r, b) + } + + if err == io.EOF { + return io.ErrUnexpectedEOF + } + if err != nil { + return err + } + if err := o.Unmarshal(b, m); err != nil { + return err + } + return nil +} + +// UnmarshalFrom parses and consumes a varint size-delimited wire-format message +// from r with the default options. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +// +// See the documentation for [UnmarshalOptions.UnmarshalFrom]. +func UnmarshalFrom(r Reader, m proto.Message) error { + return UnmarshalOptions{}.UnmarshalFrom(r, m) +} diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go index 5f28148d..f4790237 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -11,6 +11,7 @@ import ( "strconv" "strings" + "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/json" "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/errors" @@ -23,7 +24,7 @@ import ( "google.golang.org/protobuf/reflect/protoregistry" ) -// Unmarshal reads the given []byte into the given proto.Message. +// Unmarshal reads the given []byte into the given [proto.Message]. // The provided message must be mutable (e.g., a non-nil pointer to a message). 
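For context on the newly vendored protodelim package above, here is a minimal, illustrative round trip through MarshalTo and UnmarshalFrom. The choice of durationpb.Duration and the 1 MiB MaxSize are arbitrary for the sketch, not anything this patch prescribes.

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"time"

	"google.golang.org/protobuf/encoding/protodelim"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	var buf bytes.Buffer

	// Write two size-delimited messages back to back.
	for _, d := range []time.Duration{time.Second, 2 * time.Minute} {
		if _, err := protodelim.MarshalTo(&buf, durationpb.New(d)); err != nil {
			panic(err)
		}
	}

	// *bufio.Reader satisfies protodelim.Reader, letting UnmarshalFrom
	// borrow its internal buffer instead of allocating per message.
	r := bufio.NewReader(&buf)
	opts := protodelim.UnmarshalOptions{MaxSize: 1 << 20} // illustrative 1 MiB cap
	for {
		msg := &durationpb.Duration{}
		err := opts.UnmarshalFrom(r, msg)
		if err == io.EOF {
			break // per the doc comment: io.EOF only if no bytes were read
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(msg.AsDuration())
	}
}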
func Unmarshal(b []byte, m proto.Message) error { return UnmarshalOptions{}.Unmarshal(b, m) @@ -37,7 +38,7 @@ type UnmarshalOptions struct { // required fields will not return an error. AllowPartial bool - // If DiscardUnknown is set, unknown fields are ignored. + // If DiscardUnknown is set, unknown fields and enum name values are ignored. DiscardUnknown bool // Resolver is used for looking up types when unmarshaling @@ -47,9 +48,13 @@ type UnmarshalOptions struct { protoregistry.MessageTypeResolver protoregistry.ExtensionTypeResolver } + + // RecursionLimit limits how deeply messages may be nested. + // If zero, a default limit is applied. + RecursionLimit int } -// Unmarshal reads the given []byte and populates the given proto.Message +// Unmarshal reads the given []byte and populates the given [proto.Message] // using options in the UnmarshalOptions object. // It will clear the message first before setting the fields. // If it returns an error, the given message may be partially set. @@ -67,6 +72,9 @@ func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error { if o.Resolver == nil { o.Resolver = protoregistry.GlobalTypes } + if o.RecursionLimit == 0 { + o.RecursionLimit = protowire.DefaultRecursionLimit + } dec := decoder{json.NewDecoder(b), o} if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil { @@ -114,6 +122,10 @@ func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { // unmarshalMessage unmarshals a message into the given protoreflect.Message. func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) error { + d.opts.RecursionLimit-- + if d.opts.RecursionLimit < 0 { + return errors.New("exceeded max recursion depth") + } if unmarshal := wellKnownTypeUnmarshaler(m.Descriptor().FullName()); unmarshal != nil { return unmarshal(d, m) } @@ -266,7 +278,9 @@ func (d decoder) unmarshalSingular(m protoreflect.Message, fd protoreflect.Field if err != nil { return err } - m.Set(fd, val) + if val.IsValid() { + m.Set(fd, val) + } return nil } @@ -329,7 +343,7 @@ func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect. } case protoreflect.EnumKind: - if v, ok := unmarshalEnum(tok, fd); ok { + if v, ok := unmarshalEnum(tok, fd, d.opts.DiscardUnknown); ok { return v, nil } @@ -474,7 +488,7 @@ func unmarshalBytes(tok json.Token) (protoreflect.Value, bool) { return protoreflect.ValueOfBytes(b), true } -func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.Value, bool) { +func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor, discardUnknown bool) (protoreflect.Value, bool) { switch tok.Kind() { case json.String: // Lookup EnumNumber based on name. 
@@ -482,6 +496,9 @@ func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflec
 		if enumVal := fd.Enum().Values().ByName(protoreflect.Name(s)); enumVal != nil {
 			return protoreflect.ValueOfEnum(enumVal.Number()), true
 		}
+		if discardUnknown {
+			return protoreflect.Value{}, true
+		}
 
 	case json.Number:
 		if n, ok := tok.Int(32); ok {
@@ -542,7 +559,9 @@ func (d decoder) unmarshalList(list protoreflect.List, fd protoreflect.FieldDesc
 			if err != nil {
 				return err
 			}
-			list.Append(val)
+			if val.IsValid() {
+				list.Append(val)
+			}
 		}
 	}
 
@@ -609,8 +628,9 @@ Loop:
 		if err != nil {
 			return err
 		}
-
-		mmap.Set(pkey, pval)
+		if pval.IsValid() {
+			mmap.Set(pkey, pval)
+		}
 	}
 
 	return nil
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go
index 21d5d2cb..ae71007c 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go
@@ -6,6 +6,6 @@
 // format. It follows the guide at
 // https://protobuf.dev/programming-guides/proto3#json.
 //
-// This package produces a different output than the standard "encoding/json"
+// This package produces a different output than the standard [encoding/json]
 // package, which does not operate correctly on protocol buffer messages.
 package protojson
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
index 66b95870..3f75098b 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
@@ -31,7 +31,7 @@ func Format(m proto.Message) string {
 	return MarshalOptions{Multiline: true}.Format(m)
 }
 
-// Marshal writes the given proto.Message in JSON format using default options.
+// Marshal writes the given [proto.Message] in JSON format using default options.
 // Do not depend on the output being stable. It may change over time across
 // different versions of the program.
 func Marshal(m proto.Message) ([]byte, error) {
@@ -81,6 +81,25 @@ type MarshalOptions struct {
 	//  ╚═══════╧════════════════════════════╝
 	EmitUnpopulated bool
 
+	// EmitDefaultValues specifies whether to emit default-valued primitive fields,
+	// empty lists, and empty maps. The fields affected are as follows:
+	//  ╔═══════╤════════════════════════════════════════╗
+	//  ║ JSON  │ Protobuf field                         ║
+	//  ╠═══════╪════════════════════════════════════════╣
+	//  ║ false │ non-optional scalar boolean fields     ║
+	//  ║ 0     │ non-optional scalar numeric fields     ║
+	//  ║ ""    │ non-optional scalar string/byte fields ║
+	//  ║ []    │ empty repeated fields                  ║
+	//  ║ {}    │ empty map fields                       ║
+	//  ╚═══════╧════════════════════════════════════════╝
+	//
+	// Behaves similarly to EmitUnpopulated, but does not emit "null"-value fields,
+	// i.e. presence-sensing fields that are omitted will remain omitted to preserve
+	// presence-sensing.
+	// EmitUnpopulated takes precedence over EmitDefaultValues since the former generates
+	// a strict superset of the latter.
+	EmitDefaultValues bool
+
 	// Resolver is used for looking up types when expanding google.protobuf.Any
 	// messages. If nil, this defaults to using protoregistry.GlobalTypes.
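As a sketch of the distinction the new EmitDefaultValues option draws against EmitUnpopulated (the printed output shapes are indicative only; descriptorpb.FileDescriptorProto is used merely because it ships with the protobuf module):

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	m := &descriptorpb.FileDescriptorProto{} // a proto2 message: scalar fields track presence

	// EmitUnpopulated renders unset presence-sensing scalars as explicit nulls.
	withNulls, err := protojson.MarshalOptions{EmitUnpopulated: true}.Marshal(m)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(withNulls)) // e.g. {"name":null,...,"dependency":[],...}

	// EmitDefaultValues emits empty lists/maps and zero-valued non-optional
	// scalars, but leaves omitted presence-sensing fields omitted.
	defaults, err := protojson.MarshalOptions{EmitDefaultValues: true}.Marshal(m)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(defaults)) // e.g. {"dependency":[],"messageType":[],...}
}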
Resolver interface { @@ -102,7 +121,7 @@ func (o MarshalOptions) Format(m proto.Message) string { return string(b) } -// Marshal marshals the given proto.Message in the JSON format using options in +// Marshal marshals the given [proto.Message] in the JSON format using options in // MarshalOptions. Do not depend on the output being stable. It may change over // time across different versions of the program. func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { @@ -178,7 +197,11 @@ func (m typeURLFieldRanger) Range(f func(protoreflect.FieldDescriptor, protorefl // unpopulatedFieldRanger wraps a protoreflect.Message and modifies its Range // method to additionally iterate over unpopulated fields. -type unpopulatedFieldRanger struct{ protoreflect.Message } +type unpopulatedFieldRanger struct { + protoreflect.Message + + skipNull bool +} func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { fds := m.Descriptor().Fields() @@ -192,6 +215,9 @@ func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, proto isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid() isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil if isProto2Scalar || isSingularMessage { + if m.skipNull { + continue + } v = protoreflect.Value{} // use invalid value to emit null } if !f(fd, v) { @@ -217,8 +243,11 @@ func (e encoder) marshalMessage(m protoreflect.Message, typeURL string) error { defer e.EndObject() var fields order.FieldRanger = m - if e.opts.EmitUnpopulated { - fields = unpopulatedFieldRanger{m} + switch { + case e.opts.EmitUnpopulated: + fields = unpopulatedFieldRanger{Message: m, skipNull: false} + case e.opts.EmitDefaultValues: + fields = unpopulatedFieldRanger{Message: m, skipNull: true} } if typeURL != "" { fields = typeURLFieldRanger{fields, typeURL} diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go index 6c37d417..25329b76 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go @@ -176,7 +176,7 @@ func (d decoder) unmarshalAny(m protoreflect.Message) error { // Use another decoder to parse the unread bytes for @type field. This // avoids advancing a read from current decoder because the current JSON // object may contain the fields of the embedded type. - dec := decoder{d.Clone(), UnmarshalOptions{}} + dec := decoder{d.Clone(), UnmarshalOptions{RecursionLimit: d.opts.RecursionLimit}} tok, err := findTypeURL(dec) switch err { case errEmptyObject: @@ -308,48 +308,25 @@ Loop: // array) in order to advance the read to the next JSON value. It relies on // the decoder returning an error if the types are not in valid sequence. func (d decoder) skipJSONValue() error { - tok, err := d.Read() - if err != nil { - return err - } - // Only need to continue reading for objects and arrays. - switch tok.Kind() { - case json.ObjectOpen: - for { - tok, err := d.Read() - if err != nil { - return err - } - switch tok.Kind() { - case json.ObjectClose: - return nil - case json.Name: - // Skip object field value. 
- if err := d.skipJSONValue(); err != nil { - return err - } - } + var open int + for { + tok, err := d.Read() + if err != nil { + return err } - - case json.ArrayOpen: - for { - tok, err := d.Peek() - if err != nil { - return err - } - switch tok.Kind() { - case json.ArrayClose: - d.Read() - return nil - default: - // Skip array item. - if err := d.skipJSONValue(); err != nil { - return err - } + switch tok.Kind() { + case json.ObjectClose, json.ArrayClose: + open-- + case json.ObjectOpen, json.ArrayOpen: + open++ + if open > d.opts.RecursionLimit { + return errors.New("exceeded max recursion depth") } } + if open == 0 { + return nil + } } - return nil } // unmarshalAnyValue unmarshals the given custom-type message from the JSON diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index 4921b2d4..a45f112b 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -21,7 +21,7 @@ import ( "google.golang.org/protobuf/reflect/protoregistry" ) -// Unmarshal reads the given []byte into the given proto.Message. +// Unmarshal reads the given []byte into the given [proto.Message]. // The provided message must be mutable (e.g., a non-nil pointer to a message). func Unmarshal(b []byte, m proto.Message) error { return UnmarshalOptions{}.Unmarshal(b, m) @@ -51,7 +51,7 @@ type UnmarshalOptions struct { } } -// Unmarshal reads the given []byte and populates the given proto.Message +// Unmarshal reads the given []byte and populates the given [proto.Message] // using options in the UnmarshalOptions object. // The provided message must be mutable (e.g., a non-nil pointer to a message). func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error { @@ -739,7 +739,9 @@ func (d decoder) skipValue() error { case text.ListClose: return nil case text.MessageOpen: - return d.skipMessageValue() + if err := d.skipMessageValue(); err != nil { + return err + } default: // Skip items. This will not validate whether skipped values are // of the same type or not, same behavior as C++ diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go index 722a7b41..95967e81 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go @@ -33,7 +33,7 @@ func Format(m proto.Message) string { return MarshalOptions{Multiline: true}.Format(m) } -// Marshal writes the given proto.Message in textproto format using default +// Marshal writes the given [proto.Message] in textproto format using default // options. Do not depend on the output being stable. It may change over time // across different versions of the program. func Marshal(m proto.Message) ([]byte, error) { @@ -97,7 +97,7 @@ func (o MarshalOptions) Format(m proto.Message) string { return string(b) } -// Marshal writes the given proto.Message in textproto format using options in +// Marshal writes the given [proto.Message] in textproto format using options in // MarshalOptions object. Do not depend on the output being stable. It may // change over time across different versions of the program. 
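The new RecursionLimit option and the rewritten skipJSONValue above combine to bound how deeply a skipped unknown value may nest. A hedged sketch of the effect (the limit, the payload, and the use of FileDescriptorProto as a target message are illustrative choices):

package main

import (
	"fmt"
	"strings"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// An unknown field whose value is 1000 nested JSON arrays.
	payload := `{"unknown":` + strings.Repeat("[", 1000) + strings.Repeat("]", 1000) + `}`

	opts := protojson.UnmarshalOptions{
		DiscardUnknown: true, // skip "unknown" instead of erroring on it...
		RecursionLimit: 100,  // ...but refuse to skip past 100 nesting levels
	}
	err := opts.Unmarshal([]byte(payload), &descriptorpb.FileDescriptorProto{})
	fmt.Println(err) // expected: an "exceeded max recursion depth" error
}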
func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go index f4b4686c..e942bc98 100644 --- a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go +++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go @@ -6,7 +6,7 @@ // See https://protobuf.dev/programming-guides/encoding. // // For marshaling and unmarshaling entire protobuf messages, -// use the "google.golang.org/protobuf/proto" package instead. +// use the [google.golang.org/protobuf/proto] package instead. package protowire import ( @@ -87,7 +87,7 @@ func ParseError(n int) error { // ConsumeField parses an entire field record (both tag and value) and returns // the field number, the wire type, and the total length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). // // The total length includes the tag header and the end group marker (if the // field is a group). @@ -104,8 +104,8 @@ func ConsumeField(b []byte) (Number, Type, int) { } // ConsumeFieldValue parses a field value and returns its length. -// This assumes that the field Number and wire Type have already been parsed. -// This returns a negative length upon an error (see ParseError). +// This assumes that the field [Number] and wire [Type] have already been parsed. +// This returns a negative length upon an error (see [ParseError]). // // When parsing a group, the length includes the end group marker and // the end group is verified to match the starting field number. @@ -164,7 +164,7 @@ func AppendTag(b []byte, num Number, typ Type) []byte { } // ConsumeTag parses b as a varint-encoded tag, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeTag(b []byte) (Number, Type, int) { v, n := ConsumeVarint(b) if n < 0 { @@ -263,7 +263,7 @@ func AppendVarint(b []byte, v uint64) []byte { } // ConsumeVarint parses b as a varint-encoded uint64, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeVarint(b []byte) (v uint64, n int) { var y uint64 if len(b) <= 0 { @@ -384,7 +384,7 @@ func AppendFixed32(b []byte, v uint32) []byte { } // ConsumeFixed32 parses b as a little-endian uint32, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeFixed32(b []byte) (v uint32, n int) { if len(b) < 4 { return 0, errCodeTruncated @@ -412,7 +412,7 @@ func AppendFixed64(b []byte, v uint64) []byte { } // ConsumeFixed64 parses b as a little-endian uint64, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeFixed64(b []byte) (v uint64, n int) { if len(b) < 8 { return 0, errCodeTruncated @@ -432,7 +432,7 @@ func AppendBytes(b []byte, v []byte) []byte { } // ConsumeBytes parses b as a length-prefixed bytes value, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). 
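The [ParseError] doc links touched throughout the protowire hunks above all refer to the same convention: each Consume* function reports failure as a negative length, which ParseError converts into an error value. A self-contained sketch of that convention:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// Encode field number 1 as a varint with value 150.
	b := protowire.AppendTag(nil, 1, protowire.VarintType)
	b = protowire.AppendVarint(b, 150)

	num, typ, n := protowire.ConsumeTag(b)
	if n < 0 {
		panic(protowire.ParseError(n)) // negative length signals a parse failure
	}
	v, n2 := protowire.ConsumeVarint(b[n:])
	if n2 < 0 {
		panic(protowire.ParseError(n2))
	}
	fmt.Println(num, typ, v) // 1 0 150
}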
func ConsumeBytes(b []byte) (v []byte, n int) { m, n := ConsumeVarint(b) if n < 0 { @@ -456,7 +456,7 @@ func AppendString(b []byte, v string) []byte { } // ConsumeString parses b as a length-prefixed bytes value, reporting its length. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeString(b []byte) (v string, n int) { bb, n := ConsumeBytes(b) return string(bb), n @@ -471,7 +471,7 @@ func AppendGroup(b []byte, num Number, v []byte) []byte { // ConsumeGroup parses b as a group value until the trailing end group marker, // and verifies that the end marker matches the provided num. The value v // does not contain the end marker, while the length does contain the end marker. -// This returns a negative length upon an error (see ParseError). +// This returns a negative length upon an error (see [ParseError]). func ConsumeGroup(num Number, b []byte) (v []byte, n int) { n = ConsumeFieldValue(num, StartGroupType, b) if n < 0 { @@ -495,8 +495,8 @@ func SizeGroup(num Number, n int) int { return n + SizeTag(num) } -// DecodeTag decodes the field Number and wire Type from its unified form. -// The Number is -1 if the decoded field number overflows int32. +// DecodeTag decodes the field [Number] and wire [Type] from its unified form. +// The [Number] is -1 if the decoded field number overflows int32. // Other than overflow, this does not check for field number validity. func DecodeTag(x uint64) (Number, Type) { // NOTE: MessageSet allows for larger field numbers than normal. @@ -506,7 +506,7 @@ func DecodeTag(x uint64) (Number, Type) { return Number(x >> 3), Type(x & 7) } -// EncodeTag encodes the field Number and wire Type into its unified form. +// EncodeTag encodes the field [Number] and wire [Type] into its unified form. func EncodeTag(num Number, typ Type) uint64 { return uint64(num)<<3 | uint64(typ&7) } diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go index db5248e1..a45625c8 100644 --- a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go +++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go @@ -83,7 +83,13 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { case protoreflect.FileImports: for i := 0; i < vs.Len(); i++ { var rs records - rs.Append(reflect.ValueOf(vs.Get(i)), "Path", "Package", "IsPublic", "IsWeak") + rv := reflect.ValueOf(vs.Get(i)) + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Path"), "Path"}, + {rv.MethodByName("Package"), "Package"}, + {rv.MethodByName("IsPublic"), "IsPublic"}, + {rv.MethodByName("IsWeak"), "IsWeak"}, + }...) ss = append(ss, "{"+rs.Join()+"}") } return start + joinStrings(ss, allowMulti) + end @@ -92,34 +98,26 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { for i := 0; i < vs.Len(); i++ { m := reflect.ValueOf(vs).MethodByName("Get") v := m.Call([]reflect.Value{reflect.ValueOf(i)})[0].Interface() - ss = append(ss, formatDescOpt(v.(protoreflect.Descriptor), false, allowMulti && !isEnumValue)) + ss = append(ss, formatDescOpt(v.(protoreflect.Descriptor), false, allowMulti && !isEnumValue, nil)) } return start + joinStrings(ss, allowMulti && isEnumValue) + end } } -// descriptorAccessors is a list of accessors to print for each descriptor. 
-// -// Do not print all accessors since some contain redundant information, -// while others are pointers that we do not want to follow since the descriptor -// is actually a cyclic graph. -// -// Using a list allows us to print the accessors in a sensible order. -var descriptorAccessors = map[reflect.Type][]string{ - reflect.TypeOf((*protoreflect.FileDescriptor)(nil)).Elem(): {"Path", "Package", "Imports", "Messages", "Enums", "Extensions", "Services"}, - reflect.TypeOf((*protoreflect.MessageDescriptor)(nil)).Elem(): {"IsMapEntry", "Fields", "Oneofs", "ReservedNames", "ReservedRanges", "RequiredNumbers", "ExtensionRanges", "Messages", "Enums", "Extensions"}, - reflect.TypeOf((*protoreflect.FieldDescriptor)(nil)).Elem(): {"Number", "Cardinality", "Kind", "HasJSONName", "JSONName", "HasPresence", "IsExtension", "IsPacked", "IsWeak", "IsList", "IsMap", "MapKey", "MapValue", "HasDefault", "Default", "ContainingOneof", "ContainingMessage", "Message", "Enum"}, - reflect.TypeOf((*protoreflect.OneofDescriptor)(nil)).Elem(): {"Fields"}, // not directly used; must keep in sync with formatDescOpt - reflect.TypeOf((*protoreflect.EnumDescriptor)(nil)).Elem(): {"Values", "ReservedNames", "ReservedRanges"}, - reflect.TypeOf((*protoreflect.EnumValueDescriptor)(nil)).Elem(): {"Number"}, - reflect.TypeOf((*protoreflect.ServiceDescriptor)(nil)).Elem(): {"Methods"}, - reflect.TypeOf((*protoreflect.MethodDescriptor)(nil)).Elem(): {"Input", "Output", "IsStreamingClient", "IsStreamingServer"}, +type methodAndName struct { + method reflect.Value + name string } func FormatDesc(s fmt.State, r rune, t protoreflect.Descriptor) { - io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#')))) + io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#')), nil)) } -func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { + +func InternalFormatDescOptForTesting(t protoreflect.Descriptor, isRoot, allowMulti bool, record func(string)) string { + return formatDescOpt(t, isRoot, allowMulti, record) +} + +func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool, record func(string)) string { rv := reflect.ValueOf(t) rt := rv.MethodByName("ProtoType").Type().In(0) @@ -129,26 +127,60 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { } _, isFile := t.(protoreflect.FileDescriptor) - rs := records{allowMulti: allowMulti} + rs := records{ + allowMulti: allowMulti, + record: record, + } if t.IsPlaceholder() { if isFile { - rs.Append(rv, "Path", "Package", "IsPlaceholder") + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Path"), "Path"}, + {rv.MethodByName("Package"), "Package"}, + {rv.MethodByName("IsPlaceholder"), "IsPlaceholder"}, + }...) } else { - rs.Append(rv, "FullName", "IsPlaceholder") + rs.Append(rv, []methodAndName{ + {rv.MethodByName("FullName"), "FullName"}, + {rv.MethodByName("IsPlaceholder"), "IsPlaceholder"}, + }...) } } else { switch { case isFile: - rs.Append(rv, "Syntax") + rs.Append(rv, methodAndName{rv.MethodByName("Syntax"), "Syntax"}) case isRoot: - rs.Append(rv, "Syntax", "FullName") + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Syntax"), "Syntax"}, + {rv.MethodByName("FullName"), "FullName"}, + }...) 
default: - rs.Append(rv, "Name") + rs.Append(rv, methodAndName{rv.MethodByName("Name"), "Name"}) } switch t := t.(type) { case protoreflect.FieldDescriptor: - for _, s := range descriptorAccessors[rt] { - switch s { + accessors := []methodAndName{ + {rv.MethodByName("Number"), "Number"}, + {rv.MethodByName("Cardinality"), "Cardinality"}, + {rv.MethodByName("Kind"), "Kind"}, + {rv.MethodByName("HasJSONName"), "HasJSONName"}, + {rv.MethodByName("JSONName"), "JSONName"}, + {rv.MethodByName("HasPresence"), "HasPresence"}, + {rv.MethodByName("IsExtension"), "IsExtension"}, + {rv.MethodByName("IsPacked"), "IsPacked"}, + {rv.MethodByName("IsWeak"), "IsWeak"}, + {rv.MethodByName("IsList"), "IsList"}, + {rv.MethodByName("IsMap"), "IsMap"}, + {rv.MethodByName("MapKey"), "MapKey"}, + {rv.MethodByName("MapValue"), "MapValue"}, + {rv.MethodByName("HasDefault"), "HasDefault"}, + {rv.MethodByName("Default"), "Default"}, + {rv.MethodByName("ContainingOneof"), "ContainingOneof"}, + {rv.MethodByName("ContainingMessage"), "ContainingMessage"}, + {rv.MethodByName("Message"), "Message"}, + {rv.MethodByName("Enum"), "Enum"}, + } + for _, s := range accessors { + switch s.name { case "MapKey": if k := t.MapKey(); k != nil { rs.recs = append(rs.recs, [2]string{"MapKey", k.Kind().String()}) @@ -157,20 +189,20 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { if v := t.MapValue(); v != nil { switch v.Kind() { case protoreflect.EnumKind: - rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Enum().FullName())}) + rs.AppendRecs("MapValue", [2]string{"MapValue", string(v.Enum().FullName())}) case protoreflect.MessageKind, protoreflect.GroupKind: - rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Message().FullName())}) + rs.AppendRecs("MapValue", [2]string{"MapValue", string(v.Message().FullName())}) default: - rs.recs = append(rs.recs, [2]string{"MapValue", v.Kind().String()}) + rs.AppendRecs("MapValue", [2]string{"MapValue", v.Kind().String()}) } } case "ContainingOneof": if od := t.ContainingOneof(); od != nil { - rs.recs = append(rs.recs, [2]string{"Oneof", string(od.Name())}) + rs.AppendRecs("ContainingOneof", [2]string{"Oneof", string(od.Name())}) } case "ContainingMessage": if t.IsExtension() { - rs.recs = append(rs.recs, [2]string{"Extendee", string(t.ContainingMessage().FullName())}) + rs.AppendRecs("ContainingMessage", [2]string{"Extendee", string(t.ContainingMessage().FullName())}) } case "Message": if !t.IsMap() { @@ -187,13 +219,61 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { ss = append(ss, string(fs.Get(i).Name())) } if len(ss) > 0 { - rs.recs = append(rs.recs, [2]string{"Fields", "[" + joinStrings(ss, false) + "]"}) + rs.AppendRecs("Fields", [2]string{"Fields", "[" + joinStrings(ss, false) + "]"}) } - default: - rs.Append(rv, descriptorAccessors[rt]...) + + case protoreflect.FileDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Path"), "Path"}, + {rv.MethodByName("Package"), "Package"}, + {rv.MethodByName("Imports"), "Imports"}, + {rv.MethodByName("Messages"), "Messages"}, + {rv.MethodByName("Enums"), "Enums"}, + {rv.MethodByName("Extensions"), "Extensions"}, + {rv.MethodByName("Services"), "Services"}, + }...) 
+ + case protoreflect.MessageDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("IsMapEntry"), "IsMapEntry"}, + {rv.MethodByName("Fields"), "Fields"}, + {rv.MethodByName("Oneofs"), "Oneofs"}, + {rv.MethodByName("ReservedNames"), "ReservedNames"}, + {rv.MethodByName("ReservedRanges"), "ReservedRanges"}, + {rv.MethodByName("RequiredNumbers"), "RequiredNumbers"}, + {rv.MethodByName("ExtensionRanges"), "ExtensionRanges"}, + {rv.MethodByName("Messages"), "Messages"}, + {rv.MethodByName("Enums"), "Enums"}, + {rv.MethodByName("Extensions"), "Extensions"}, + }...) + + case protoreflect.EnumDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Values"), "Values"}, + {rv.MethodByName("ReservedNames"), "ReservedNames"}, + {rv.MethodByName("ReservedRanges"), "ReservedRanges"}, + }...) + + case protoreflect.EnumValueDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Number"), "Number"}, + }...) + + case protoreflect.ServiceDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Methods"), "Methods"}, + }...) + + case protoreflect.MethodDescriptor: + rs.Append(rv, []methodAndName{ + {rv.MethodByName("Input"), "Input"}, + {rv.MethodByName("Output"), "Output"}, + {rv.MethodByName("IsStreamingClient"), "IsStreamingClient"}, + {rv.MethodByName("IsStreamingServer"), "IsStreamingServer"}, + }...) } - if rv.MethodByName("GoType").IsValid() { - rs.Append(rv, "GoType") + if m := rv.MethodByName("GoType"); m.IsValid() { + rs.Append(rv, methodAndName{m, "GoType"}) } } return start + rs.Join() + end @@ -202,19 +282,34 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { type records struct { recs [][2]string allowMulti bool + + // record is a function that will be called for every Append() or + // AppendRecs() call, to be used for testing with the + // InternalFormatDescOptForTesting function. 
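The internal/descfmt rewrite above replaces a string-keyed accessor table with precomputed methodAndName pairs, so each reflect method is resolved once and the cached reflect.Value is invoked directly rather than re-resolved by name at print time. A stripped-down illustration of that pattern (the example type here is invented):

package main

import (
	"fmt"
	"reflect"
)

type methodAndName struct {
	method reflect.Value
	name   string
}

type example struct{}

func (example) Path() string    { return "a/b.proto" }
func (example) Package() string { return "a.b" }

func main() {
	rv := reflect.ValueOf(example{})
	// Resolve each accessor exactly once.
	accessors := []methodAndName{
		{rv.MethodByName("Path"), "Path"},
		{rv.MethodByName("Package"), "Package"},
	}
	for _, a := range accessors {
		// a.method was cached above; Call(nil) invokes the niladic accessor.
		fmt.Printf("%s: %v\n", a.name, a.method.Call(nil)[0])
	}
}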
+ record func(string) } -func (rs *records) Append(v reflect.Value, accessors ...string) { +func (rs *records) AppendRecs(fieldName string, newRecs [2]string) { + if rs.record != nil { + rs.record(fieldName) + } + rs.recs = append(rs.recs, newRecs) +} + +func (rs *records) Append(v reflect.Value, accessors ...methodAndName) { for _, a := range accessors { + if rs.record != nil { + rs.record(a.name) + } var rv reflect.Value - if m := v.MethodByName(a); m.IsValid() { - rv = m.Call(nil)[0] + if a.method.IsValid() { + rv = a.method.Call(nil)[0] } if v.Kind() == reflect.Struct && !rv.IsValid() { - rv = v.FieldByName(a) + rv = v.FieldByName(a.name) } if !rv.IsValid() { - panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a)) + panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a.name)) } if _, ok := rv.Interface().(protoreflect.Value); ok { rv = rv.MethodByName("Interface").Call(nil)[0] @@ -261,7 +356,7 @@ func (rs *records) Append(v reflect.Value, accessors ...string) { default: s = fmt.Sprint(v) } - rs.recs = append(rs.recs, [2]string{a, s}) + rs.recs = append(rs.recs, [2]string{a.name, s}) } } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index 7c3689ba..193c68e8 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -21,11 +21,26 @@ import ( "google.golang.org/protobuf/reflect/protoregistry" ) +// Edition is an Enum for proto2.Edition +type Edition int32 + +// These values align with the value of Enum in descriptor.proto which allows +// direct conversion between the proto enum and this enum. +const ( + EditionUnknown Edition = 0 + EditionProto2 Edition = 998 + EditionProto3 Edition = 999 + Edition2023 Edition = 1000 + EditionUnsupported Edition = 100000 +) + // The types in this file may have a suffix: // • L0: Contains fields common to all descriptors (except File) and // must be initialized up front. // • L1: Contains fields specific to a descriptor and -// must be initialized up front. +// must be initialized up front. If the associated proto uses Editions, the +// Editions features must always be resolved. If not explicitly set, the +// appropriate default must be resolved and set. // • L2: Contains fields that are lazily initialized when constructing // from the raw file descriptor. When constructing as a literal, the L2 // fields must be initialized up front. 
@@ -44,6 +59,7 @@ type ( } FileL1 struct { Syntax protoreflect.Syntax + Edition Edition // Only used if Syntax == Editions Path string Package protoreflect.FullName @@ -51,12 +67,35 @@ type ( Messages Messages Extensions Extensions Services Services + + EditionFeatures FileEditionFeatures } FileL2 struct { Options func() protoreflect.ProtoMessage Imports FileImports Locations SourceLocations } + + FileEditionFeatures struct { + // IsFieldPresence is true if field_presence is EXPLICIT + // https://protobuf.dev/editions/features/#field_presence + IsFieldPresence bool + // IsOpenEnum is true if enum_type is OPEN + // https://protobuf.dev/editions/features/#enum_type + IsOpenEnum bool + // IsPacked is true if repeated_field_encoding is PACKED + // https://protobuf.dev/editions/features/#repeated_field_encoding + IsPacked bool + // IsUTF8Validated is true if utf_validation is VERIFY + // https://protobuf.dev/editions/features/#utf8_validation + IsUTF8Validated bool + // IsDelimitedEncoded is true if message_encoding is DELIMITED + // https://protobuf.dev/editions/features/#message_encoding + IsDelimitedEncoded bool + // IsJSONCompliant is true if json_format is ALLOW + // https://protobuf.dev/editions/features/#json_format + IsJSONCompliant bool + } ) func (fd *File) ParentFile() protoreflect.FileDescriptor { return fd } @@ -210,6 +249,9 @@ type ( ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields Enum protoreflect.EnumDescriptor Message protoreflect.MessageDescriptor + + // Edition features. + Presence bool } Oneof struct { @@ -273,6 +315,9 @@ func (fd *Field) HasJSONName() bool { return fd.L1.StringNam func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) } func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) } func (fd *Field) HasPresence() bool { + if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions { + return fd.L1.Presence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil + } return fd.L1.Cardinality != protoreflect.Repeated && (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil) } func (fd *Field) HasOptionalKeyword() bool { diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go index 136f1b21..8f94230e 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -12,6 +12,12 @@ import ( const File_google_protobuf_descriptor_proto = "google/protobuf/descriptor.proto" +// Full and short names for google.protobuf.Edition. +const ( + Edition_enum_fullname = "google.protobuf.Edition" + Edition_enum_name = "Edition" +) + // Names for google.protobuf.FileDescriptorSet. const ( FileDescriptorSet_message_name protoreflect.Name = "FileDescriptorSet" @@ -81,7 +87,7 @@ const ( FileDescriptorProto_Options_field_number protoreflect.FieldNumber = 8 FileDescriptorProto_SourceCodeInfo_field_number protoreflect.FieldNumber = 9 FileDescriptorProto_Syntax_field_number protoreflect.FieldNumber = 12 - FileDescriptorProto_Edition_field_number protoreflect.FieldNumber = 13 + FileDescriptorProto_Edition_field_number protoreflect.FieldNumber = 14 ) // Names for google.protobuf.DescriptorProto. 
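The HasPresence change above only alters what fields in editions-syntax files report; callers keep querying presence through protoreflect as before. A small sketch (FileDescriptorProto is a proto2 message, so its optional scalar fields report presence while its repeated fields do not):

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	fields := (&descriptorpb.FileDescriptorProto{}).ProtoReflect().Descriptor().Fields()
	for i := 0; i < fields.Len(); i++ {
		fd := fields.Get(i)
		fmt.Printf("%-20s HasPresence=%v\n", fd.Name(), fd.HasPresence())
	}
}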
@@ -184,10 +190,12 @@ const ( const ( ExtensionRangeOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" ExtensionRangeOptions_Declaration_field_name protoreflect.Name = "declaration" + ExtensionRangeOptions_Features_field_name protoreflect.Name = "features" ExtensionRangeOptions_Verification_field_name protoreflect.Name = "verification" ExtensionRangeOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.uninterpreted_option" ExtensionRangeOptions_Declaration_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.declaration" + ExtensionRangeOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.features" ExtensionRangeOptions_Verification_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.verification" ) @@ -195,6 +203,7 @@ const ( const ( ExtensionRangeOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ExtensionRangeOptions_Declaration_field_number protoreflect.FieldNumber = 2 + ExtensionRangeOptions_Features_field_number protoreflect.FieldNumber = 50 ExtensionRangeOptions_Verification_field_number protoreflect.FieldNumber = 3 ) @@ -212,29 +221,26 @@ const ( // Field names for google.protobuf.ExtensionRangeOptions.Declaration. const ( - ExtensionRangeOptions_Declaration_Number_field_name protoreflect.Name = "number" - ExtensionRangeOptions_Declaration_FullName_field_name protoreflect.Name = "full_name" - ExtensionRangeOptions_Declaration_Type_field_name protoreflect.Name = "type" - ExtensionRangeOptions_Declaration_IsRepeated_field_name protoreflect.Name = "is_repeated" - ExtensionRangeOptions_Declaration_Reserved_field_name protoreflect.Name = "reserved" - ExtensionRangeOptions_Declaration_Repeated_field_name protoreflect.Name = "repeated" + ExtensionRangeOptions_Declaration_Number_field_name protoreflect.Name = "number" + ExtensionRangeOptions_Declaration_FullName_field_name protoreflect.Name = "full_name" + ExtensionRangeOptions_Declaration_Type_field_name protoreflect.Name = "type" + ExtensionRangeOptions_Declaration_Reserved_field_name protoreflect.Name = "reserved" + ExtensionRangeOptions_Declaration_Repeated_field_name protoreflect.Name = "repeated" - ExtensionRangeOptions_Declaration_Number_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.number" - ExtensionRangeOptions_Declaration_FullName_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.full_name" - ExtensionRangeOptions_Declaration_Type_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.type" - ExtensionRangeOptions_Declaration_IsRepeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.is_repeated" - ExtensionRangeOptions_Declaration_Reserved_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.reserved" - ExtensionRangeOptions_Declaration_Repeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.repeated" + ExtensionRangeOptions_Declaration_Number_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.number" + ExtensionRangeOptions_Declaration_FullName_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.full_name" + ExtensionRangeOptions_Declaration_Type_field_fullname protoreflect.FullName = 
"google.protobuf.ExtensionRangeOptions.Declaration.type" + ExtensionRangeOptions_Declaration_Reserved_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.reserved" + ExtensionRangeOptions_Declaration_Repeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.repeated" ) // Field numbers for google.protobuf.ExtensionRangeOptions.Declaration. const ( - ExtensionRangeOptions_Declaration_Number_field_number protoreflect.FieldNumber = 1 - ExtensionRangeOptions_Declaration_FullName_field_number protoreflect.FieldNumber = 2 - ExtensionRangeOptions_Declaration_Type_field_number protoreflect.FieldNumber = 3 - ExtensionRangeOptions_Declaration_IsRepeated_field_number protoreflect.FieldNumber = 4 - ExtensionRangeOptions_Declaration_Reserved_field_number protoreflect.FieldNumber = 5 - ExtensionRangeOptions_Declaration_Repeated_field_number protoreflect.FieldNumber = 6 + ExtensionRangeOptions_Declaration_Number_field_number protoreflect.FieldNumber = 1 + ExtensionRangeOptions_Declaration_FullName_field_number protoreflect.FieldNumber = 2 + ExtensionRangeOptions_Declaration_Type_field_number protoreflect.FieldNumber = 3 + ExtensionRangeOptions_Declaration_Reserved_field_number protoreflect.FieldNumber = 5 + ExtensionRangeOptions_Declaration_Repeated_field_number protoreflect.FieldNumber = 6 ) // Names for google.protobuf.FieldDescriptorProto. @@ -478,6 +484,7 @@ const ( FileOptions_PhpNamespace_field_name protoreflect.Name = "php_namespace" FileOptions_PhpMetadataNamespace_field_name protoreflect.Name = "php_metadata_namespace" FileOptions_RubyPackage_field_name protoreflect.Name = "ruby_package" + FileOptions_Features_field_name protoreflect.Name = "features" FileOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" FileOptions_JavaPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_package" @@ -500,6 +507,7 @@ const ( FileOptions_PhpNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_namespace" FileOptions_PhpMetadataNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_metadata_namespace" FileOptions_RubyPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.ruby_package" + FileOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.features" FileOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.uninterpreted_option" ) @@ -525,6 +533,7 @@ const ( FileOptions_PhpNamespace_field_number protoreflect.FieldNumber = 41 FileOptions_PhpMetadataNamespace_field_number protoreflect.FieldNumber = 44 FileOptions_RubyPackage_field_number protoreflect.FieldNumber = 45 + FileOptions_Features_field_number protoreflect.FieldNumber = 50 FileOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -547,6 +556,7 @@ const ( MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated" MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry" MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts" + MessageOptions_Features_field_name protoreflect.Name = "features" MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format" @@ -554,6 +564,7 @@ const ( 
MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated" MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry" MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated_legacy_json_field_conflicts" + MessageOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.features" MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option" ) @@ -564,6 +575,7 @@ const ( MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3 MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7 MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 11 + MessageOptions_Features_field_number protoreflect.FieldNumber = 12 MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -584,8 +596,9 @@ const ( FieldOptions_Weak_field_name protoreflect.Name = "weak" FieldOptions_DebugRedact_field_name protoreflect.Name = "debug_redact" FieldOptions_Retention_field_name protoreflect.Name = "retention" - FieldOptions_Target_field_name protoreflect.Name = "target" FieldOptions_Targets_field_name protoreflect.Name = "targets" + FieldOptions_EditionDefaults_field_name protoreflect.Name = "edition_defaults" + FieldOptions_Features_field_name protoreflect.Name = "features" FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype" @@ -597,8 +610,9 @@ const ( FieldOptions_Weak_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.weak" FieldOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.debug_redact" FieldOptions_Retention_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.retention" - FieldOptions_Target_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.target" FieldOptions_Targets_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.targets" + FieldOptions_EditionDefaults_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.edition_defaults" + FieldOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.features" FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option" ) @@ -613,8 +627,9 @@ const ( FieldOptions_Weak_field_number protoreflect.FieldNumber = 10 FieldOptions_DebugRedact_field_number protoreflect.FieldNumber = 16 FieldOptions_Retention_field_number protoreflect.FieldNumber = 17 - FieldOptions_Target_field_number protoreflect.FieldNumber = 18 FieldOptions_Targets_field_number protoreflect.FieldNumber = 19 + FieldOptions_EditionDefaults_field_number protoreflect.FieldNumber = 20 + FieldOptions_Features_field_number protoreflect.FieldNumber = 21 FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -642,6 +657,27 @@ const ( FieldOptions_OptionTargetType_enum_name = "OptionTargetType" ) +// Names for google.protobuf.FieldOptions.EditionDefault. 
+const ( + FieldOptions_EditionDefault_message_name protoreflect.Name = "EditionDefault" + FieldOptions_EditionDefault_message_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault" +) + +// Field names for google.protobuf.FieldOptions.EditionDefault. +const ( + FieldOptions_EditionDefault_Edition_field_name protoreflect.Name = "edition" + FieldOptions_EditionDefault_Value_field_name protoreflect.Name = "value" + + FieldOptions_EditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault.edition" + FieldOptions_EditionDefault_Value_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault.value" +) + +// Field numbers for google.protobuf.FieldOptions.EditionDefault. +const ( + FieldOptions_EditionDefault_Edition_field_number protoreflect.FieldNumber = 3 + FieldOptions_EditionDefault_Value_field_number protoreflect.FieldNumber = 2 +) + // Names for google.protobuf.OneofOptions. const ( OneofOptions_message_name protoreflect.Name = "OneofOptions" @@ -650,13 +686,16 @@ const ( // Field names for google.protobuf.OneofOptions. const ( + OneofOptions_Features_field_name protoreflect.Name = "features" OneofOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + OneofOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.OneofOptions.features" OneofOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.OneofOptions.uninterpreted_option" ) // Field numbers for google.protobuf.OneofOptions. const ( + OneofOptions_Features_field_number protoreflect.FieldNumber = 1 OneofOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -671,11 +710,13 @@ const ( EnumOptions_AllowAlias_field_name protoreflect.Name = "allow_alias" EnumOptions_Deprecated_field_name protoreflect.Name = "deprecated" EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts" + EnumOptions_Features_field_name protoreflect.Name = "features" EnumOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" EnumOptions_AllowAlias_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias" EnumOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated" EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated_legacy_json_field_conflicts" + EnumOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.features" EnumOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option" ) @@ -684,6 +725,7 @@ const ( EnumOptions_AllowAlias_field_number protoreflect.FieldNumber = 2 EnumOptions_Deprecated_field_number protoreflect.FieldNumber = 3 EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 6 + EnumOptions_Features_field_number protoreflect.FieldNumber = 7 EnumOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -696,15 +738,21 @@ const ( // Field names for google.protobuf.EnumValueOptions. 
const ( EnumValueOptions_Deprecated_field_name protoreflect.Name = "deprecated" + EnumValueOptions_Features_field_name protoreflect.Name = "features" + EnumValueOptions_DebugRedact_field_name protoreflect.Name = "debug_redact" EnumValueOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" EnumValueOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.deprecated" + EnumValueOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.features" + EnumValueOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.debug_redact" EnumValueOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.uninterpreted_option" ) // Field numbers for google.protobuf.EnumValueOptions. const ( EnumValueOptions_Deprecated_field_number protoreflect.FieldNumber = 1 + EnumValueOptions_Features_field_number protoreflect.FieldNumber = 2 + EnumValueOptions_DebugRedact_field_number protoreflect.FieldNumber = 3 EnumValueOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -716,15 +764,18 @@ const ( // Field names for google.protobuf.ServiceOptions. const ( + ServiceOptions_Features_field_name protoreflect.Name = "features" ServiceOptions_Deprecated_field_name protoreflect.Name = "deprecated" ServiceOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + ServiceOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.features" ServiceOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.deprecated" ServiceOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.uninterpreted_option" ) // Field numbers for google.protobuf.ServiceOptions. const ( + ServiceOptions_Features_field_number protoreflect.FieldNumber = 34 ServiceOptions_Deprecated_field_number protoreflect.FieldNumber = 33 ServiceOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -739,10 +790,12 @@ const ( const ( MethodOptions_Deprecated_field_name protoreflect.Name = "deprecated" MethodOptions_IdempotencyLevel_field_name protoreflect.Name = "idempotency_level" + MethodOptions_Features_field_name protoreflect.Name = "features" MethodOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" MethodOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.deprecated" MethodOptions_IdempotencyLevel_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.idempotency_level" + MethodOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.features" MethodOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.uninterpreted_option" ) @@ -750,6 +803,7 @@ const ( const ( MethodOptions_Deprecated_field_number protoreflect.FieldNumber = 33 MethodOptions_IdempotencyLevel_field_number protoreflect.FieldNumber = 34 + MethodOptions_Features_field_number protoreflect.FieldNumber = 35 MethodOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -816,6 +870,120 @@ const ( UninterpretedOption_NamePart_IsExtension_field_number protoreflect.FieldNumber = 2 ) +// Names for google.protobuf.FeatureSet. 
+const ( + FeatureSet_message_name protoreflect.Name = "FeatureSet" + FeatureSet_message_fullname protoreflect.FullName = "google.protobuf.FeatureSet" +) + +// Field names for google.protobuf.FeatureSet. +const ( + FeatureSet_FieldPresence_field_name protoreflect.Name = "field_presence" + FeatureSet_EnumType_field_name protoreflect.Name = "enum_type" + FeatureSet_RepeatedFieldEncoding_field_name protoreflect.Name = "repeated_field_encoding" + FeatureSet_Utf8Validation_field_name protoreflect.Name = "utf8_validation" + FeatureSet_MessageEncoding_field_name protoreflect.Name = "message_encoding" + FeatureSet_JsonFormat_field_name protoreflect.Name = "json_format" + + FeatureSet_FieldPresence_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.field_presence" + FeatureSet_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enum_type" + FeatureSet_RepeatedFieldEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding" + FeatureSet_Utf8Validation_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation" + FeatureSet_MessageEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding" + FeatureSet_JsonFormat_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.json_format" +) + +// Field numbers for google.protobuf.FeatureSet. +const ( + FeatureSet_FieldPresence_field_number protoreflect.FieldNumber = 1 + FeatureSet_EnumType_field_number protoreflect.FieldNumber = 2 + FeatureSet_RepeatedFieldEncoding_field_number protoreflect.FieldNumber = 3 + FeatureSet_Utf8Validation_field_number protoreflect.FieldNumber = 4 + FeatureSet_MessageEncoding_field_number protoreflect.FieldNumber = 5 + FeatureSet_JsonFormat_field_number protoreflect.FieldNumber = 6 +) + +// Full and short names for google.protobuf.FeatureSet.FieldPresence. +const ( + FeatureSet_FieldPresence_enum_fullname = "google.protobuf.FeatureSet.FieldPresence" + FeatureSet_FieldPresence_enum_name = "FieldPresence" +) + +// Full and short names for google.protobuf.FeatureSet.EnumType. +const ( + FeatureSet_EnumType_enum_fullname = "google.protobuf.FeatureSet.EnumType" + FeatureSet_EnumType_enum_name = "EnumType" +) + +// Full and short names for google.protobuf.FeatureSet.RepeatedFieldEncoding. +const ( + FeatureSet_RepeatedFieldEncoding_enum_fullname = "google.protobuf.FeatureSet.RepeatedFieldEncoding" + FeatureSet_RepeatedFieldEncoding_enum_name = "RepeatedFieldEncoding" +) + +// Full and short names for google.protobuf.FeatureSet.Utf8Validation. +const ( + FeatureSet_Utf8Validation_enum_fullname = "google.protobuf.FeatureSet.Utf8Validation" + FeatureSet_Utf8Validation_enum_name = "Utf8Validation" +) + +// Full and short names for google.protobuf.FeatureSet.MessageEncoding. +const ( + FeatureSet_MessageEncoding_enum_fullname = "google.protobuf.FeatureSet.MessageEncoding" + FeatureSet_MessageEncoding_enum_name = "MessageEncoding" +) + +// Full and short names for google.protobuf.FeatureSet.JsonFormat. +const ( + FeatureSet_JsonFormat_enum_fullname = "google.protobuf.FeatureSet.JsonFormat" + FeatureSet_JsonFormat_enum_name = "JsonFormat" +) + +// Names for google.protobuf.FeatureSetDefaults. +const ( + FeatureSetDefaults_message_name protoreflect.Name = "FeatureSetDefaults" + FeatureSetDefaults_message_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults" +) + +// Field names for google.protobuf.FeatureSetDefaults. 
+const ( + FeatureSetDefaults_Defaults_field_name protoreflect.Name = "defaults" + FeatureSetDefaults_MinimumEdition_field_name protoreflect.Name = "minimum_edition" + FeatureSetDefaults_MaximumEdition_field_name protoreflect.Name = "maximum_edition" + + FeatureSetDefaults_Defaults_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.defaults" + FeatureSetDefaults_MinimumEdition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.minimum_edition" + FeatureSetDefaults_MaximumEdition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.maximum_edition" +) + +// Field numbers for google.protobuf.FeatureSetDefaults. +const ( + FeatureSetDefaults_Defaults_field_number protoreflect.FieldNumber = 1 + FeatureSetDefaults_MinimumEdition_field_number protoreflect.FieldNumber = 4 + FeatureSetDefaults_MaximumEdition_field_number protoreflect.FieldNumber = 5 +) + +// Names for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. +const ( + FeatureSetDefaults_FeatureSetEditionDefault_message_name protoreflect.Name = "FeatureSetEditionDefault" + FeatureSetDefaults_FeatureSetEditionDefault_message_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault" +) + +// Field names for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. +const ( + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_name protoreflect.Name = "edition" + FeatureSetDefaults_FeatureSetEditionDefault_Features_field_name protoreflect.Name = "features" + + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition" + FeatureSetDefaults_FeatureSetEditionDefault_Features_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.features" +) + +// Field numbers for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault. +const ( + FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number protoreflect.FieldNumber = 3 + FeatureSetDefaults_FeatureSetEditionDefault_Features_field_number protoreflect.FieldNumber = 2 +) + // Names for google.protobuf.SourceCodeInfo. 
const ( SourceCodeInfo_message_name protoreflect.Name = "SourceCodeInfo" diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go index 1a509b63..f55dc01e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go @@ -162,11 +162,20 @@ func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.BoolSlice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growBoolSlice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -732,11 +741,20 @@ func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOption func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growInt32Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -1138,11 +1156,20 @@ func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growInt32Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -1544,11 +1571,20 @@ func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growUint32Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -1950,11 +1986,20 @@ func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOption func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growInt64Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -2356,11 +2401,20 @@ func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 
0x80 { + count++ + } + } + if count > 0 { + p.growInt64Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -2762,11 +2816,20 @@ func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := 0 + for _, v := range b { + if v < 0x80 { + count++ + } + } + if count > 0 { + p.growUint64Slice(count) + } + s := *sp for len(b) > 0 { var v uint64 var n int @@ -3145,11 +3208,15 @@ func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpt func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed32() + if count > 0 { + p.growInt32Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { @@ -3461,11 +3528,15 @@ func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpti func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed32() + if count > 0 { + p.growUint32Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { @@ -3777,11 +3848,15 @@ func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOption func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Float32Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed32() + if count > 0 { + p.growFloat32Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed32(b) if n < 0 { @@ -4093,11 +4168,15 @@ func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpt func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Int64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed64() + if count > 0 { + p.growInt64Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { @@ -4409,11 +4488,15 @@ func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpti func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Uint64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed64() + if count > 0 { + p.growUint64Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { @@ -4725,11 +4808,15 @@ func appendDoubleSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio func 
consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { sp := p.Float64Slice() if wtyp == protowire.BytesType { - s := *sp b, n := protowire.ConsumeBytes(b) if n < 0 { return out, errDecode } + count := len(b) / protowire.SizeFixed64() + if count > 0 { + p.growFloat64Slice(count) + } + s := *sp for len(b) > 0 { v, n := protowire.ConsumeFixed64(b) if n < 0 { diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go index 61c483fa..2ab2c629 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -206,13 +206,18 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName // Obtain a list of oneof wrapper types. var oneofWrappers []reflect.Type - for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} { - if fn, ok := t.MethodByName(method); ok { - for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { - if vs, ok := v.Interface().([]interface{}); ok { - for _, v := range vs { - oneofWrappers = append(oneofWrappers, reflect.TypeOf(v)) - } + methods := make([]reflect.Method, 0, 2) + if m, ok := t.MethodByName("XXX_OneofFuncs"); ok { + methods = append(methods, m) + } + if m, ok := t.MethodByName("XXX_OneofWrappers"); ok { + methods = append(methods, m) + } + for _, fn := range methods { + for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { + if vs, ok := v.Interface().([]interface{}); ok { + for _, v := range vs { + oneofWrappers = append(oneofWrappers, reflect.TypeOf(v)) } } } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index 4f5fb67a..629bacdc 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -192,12 +192,17 @@ fieldLoop: // Derive a mapping of oneof wrappers to fields. oneofWrappers := mi.OneofWrappers - for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} { - if fn, ok := reflect.PtrTo(t).MethodByName(method); ok { - for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { - if vs, ok := v.Interface().([]interface{}); ok { - oneofWrappers = vs - } + methods := make([]reflect.Method, 0, 2) + if m, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok { + methods = append(methods, m) + } + if m, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok { + methods = append(methods, m) + } + for _, fn := range methods { + for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { + if vs, ok := v.Interface().([]interface{}); ok { + oneofWrappers = vs } } } diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go index 4c491bdf..517e9443 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go @@ -159,6 +159,42 @@ func (p pointer) SetPointer(v pointer) { p.v.Elem().Set(v.v) } +func growSlice(p pointer, addCap int) { + // TODO: Once we only support Go 1.20 and newer, use reflect.Grow. 
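The consume*Slice rewrites above replace append-driven slice growth with one pre-sized allocation: the final byte of every encoded varint has its high bit clear, so the element count of a packed payload can be determined in a single linear pass before any decoding, and fixed-width fields need no scan at all (count = len(b) / protowire.SizeFixed32() or SizeFixed64()). A self-contained sketch of the counting trick against the public protowire API; the payload values are invented, and the real code grows internal pointer-backed slices rather than a local one:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// Build a packed payload of three varints: 1, 300, 7.
	var b []byte
	for _, v := range []uint64{1, 300, 7} {
		b = protowire.AppendVarint(b, v)
	}

	// Every byte with the continuation (high) bit clear ends one varint,
	// so this counts elements without decoding them.
	count := 0
	for _, v := range b {
		if v < 0x80 {
			count++
		}
	}

	s := make([]uint64, 0, count) // one allocation up front
	for len(b) > 0 {
		v, n := protowire.ConsumeVarint(b)
		if n < 0 {
			panic(protowire.ParseError(n))
		}
		s = append(s, v)
		b = b[n:]
	}
	fmt.Println(count, s) // 3 [1 300 7]
}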
+ in := p.v.Elem() + out := reflect.MakeSlice(in.Type(), in.Len(), in.Len()+addCap) + reflect.Copy(out, in) + p.v.Elem().Set(out) +} + +func (p pointer) growBoolSlice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growInt32Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growUint32Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growInt64Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growUint64Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growFloat64Slice(addCap int) { + growSlice(p, addCap) +} + +func (p pointer) growFloat32Slice(addCap int) { + growSlice(p, addCap) +} + func (Export) MessageStateOf(p Pointer) *messageState { panic("not supported") } func (ms *messageState) pointer() pointer { panic("not supported") } func (ms *messageState) messageInfo() *MessageInfo { panic("not supported") } diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index ee0e0573..4b020e31 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -138,6 +138,46 @@ func (p pointer) SetPointer(v pointer) { *(*unsafe.Pointer)(p.p) = (unsafe.Pointer)(v.p) } +func (p pointer) growBoolSlice(addCap int) { + sp := p.BoolSlice() + s := make([]bool, 0, addCap+len(*sp)) + s = s[:len(*sp)] + copy(s, *sp) + *sp = s +} + +func (p pointer) growInt32Slice(addCap int) { + sp := p.Int32Slice() + s := make([]int32, 0, addCap+len(*sp)) + s = s[:len(*sp)] + copy(s, *sp) + *sp = s +} + +func (p pointer) growUint32Slice(addCap int) { + p.growInt32Slice(addCap) +} + +func (p pointer) growFloat32Slice(addCap int) { + p.growInt32Slice(addCap) +} + +func (p pointer) growInt64Slice(addCap int) { + sp := p.Int64Slice() + s := make([]int64, 0, addCap+len(*sp)) + s = s[:len(*sp)] + copy(s, *sp) + *sp = s +} + +func (p pointer) growUint64Slice(addCap int) { + p.growInt64Slice(addCap) +} + +func (p pointer) growFloat64Slice(addCap int) { + p.growInt64Slice(addCap) +} + // Static check that MessageState does not exceed the size of a pointer. const _ = uint(unsafe.Sizeof(unsafe.Pointer(nil)) - unsafe.Sizeof(MessageState{})) diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go similarity index 96% rename from vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go rename to vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go index 61a84d34..a008acd0 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine -// +build !purego,!appengine +//go:build !purego && !appengine && !go1.21 +// +build !purego,!appengine,!go1.21 package strs diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go new file mode 100644 index 00000000..60166f2b --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go @@ -0,0 +1,74 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !purego && !appengine && go1.21 +// +build !purego,!appengine,go1.21 + +package strs + +import ( + "unsafe" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +// UnsafeString returns an unsafe string reference of b. +// The caller must treat the input slice as immutable. +// +// WARNING: Use carefully. The returned result must not leak to the end user +// unless the input slice is provably immutable. +func UnsafeString(b []byte) string { + return unsafe.String(unsafe.SliceData(b), len(b)) +} + +// UnsafeBytes returns an unsafe bytes slice reference of s. +// The caller must treat returned slice as immutable. +// +// WARNING: Use carefully. The returned result must not leak to the end user. +func UnsafeBytes(s string) []byte { + return unsafe.Slice(unsafe.StringData(s), len(s)) +} + +// Builder builds a set of strings with shared lifetime. +// This differs from strings.Builder, which is for building a single string. +type Builder struct { + buf []byte +} + +// AppendFullName is equivalent to protoreflect.FullName.Append, +// but optimized for large batches where each name has a shared lifetime. +func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName { + n := len(prefix) + len(".") + len(name) + if len(prefix) == 0 { + n -= len(".") + } + sb.grow(n) + sb.buf = append(sb.buf, prefix...) + sb.buf = append(sb.buf, '.') + sb.buf = append(sb.buf, name...) + return protoreflect.FullName(sb.last(n)) +} + +// MakeString is equivalent to string(b), but optimized for large batches +// with a shared lifetime. +func (sb *Builder) MakeString(b []byte) string { + sb.grow(len(b)) + sb.buf = append(sb.buf, b...) + return sb.last(len(b)) +} + +func (sb *Builder) grow(n int) { + if cap(sb.buf)-len(sb.buf) >= n { + return + } + + // Unlike strings.Builder, we do not need to copy over the contents + // of the old buffer since our builder provides no API for + // retrieving previously created strings. + sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n)) +} + +func (sb *Builder) last(n int) string { + return UnsafeString(sb.buf[len(sb.buf)-n:]) +} diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index 0999f29d..d8f48faf 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -51,7 +51,7 @@ import ( // 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 31 + Minor = 32 Patch = 0 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go index 48d47946..e5b03b56 100644 --- a/vendor/google.golang.org/protobuf/proto/decode.go +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -69,7 +69,7 @@ func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error { // UnmarshalState parses a wire-format message and places the result in m. // // This method permits fine-grained control over the unmarshaler. -// Most users should use Unmarshal instead. +// Most users should use [Unmarshal] instead. 
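The new strings_unsafe_go121.go file moves the zero-copy conversions onto unsafe.String and unsafe.SliceData (standard library since Go 1.20; the file itself is gated on the go1.21 build tag). A minimal sketch of the same pattern and of the aliasing hazard behind the "treat the input as immutable" warnings; the helper name is ours, not the package's:

package main

import (
	"fmt"
	"unsafe"
)

// unsafeString mirrors the shape of strs.UnsafeString: a zero-copy
// []byte-to-string view. The returned string aliases b's backing array,
// so b must never be mutated afterwards.
func unsafeString(b []byte) string {
	return unsafe.String(unsafe.SliceData(b), len(b))
}

func main() {
	b := []byte("hello")
	s := unsafeString(b)
	fmt.Println(s) // hello
	b[0] = 'H'     // the mutation is visible through s: this is the bug to avoid
	fmt.Println(s) // Hello
}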
func (o UnmarshalOptions) UnmarshalState(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { if o.RecursionLimit == 0 { o.RecursionLimit = protowire.DefaultRecursionLimit diff --git a/vendor/google.golang.org/protobuf/proto/doc.go b/vendor/google.golang.org/protobuf/proto/doc.go index ec71e717..80ed16a0 100644 --- a/vendor/google.golang.org/protobuf/proto/doc.go +++ b/vendor/google.golang.org/protobuf/proto/doc.go @@ -18,27 +18,27 @@ // This package contains functions to convert to and from the wire format, // an efficient binary serialization of protocol buffers. // -// • Size reports the size of a message in the wire format. +// - [Size] reports the size of a message in the wire format. // -// • Marshal converts a message to the wire format. -// The MarshalOptions type provides more control over wire marshaling. +// - [Marshal] converts a message to the wire format. +// The [MarshalOptions] type provides more control over wire marshaling. // -// • Unmarshal converts a message from the wire format. -// The UnmarshalOptions type provides more control over wire unmarshaling. +// - [Unmarshal] converts a message from the wire format. +// The [UnmarshalOptions] type provides more control over wire unmarshaling. // // # Basic message operations // -// • Clone makes a deep copy of a message. +// - [Clone] makes a deep copy of a message. // -// • Merge merges the content of a message into another. +// - [Merge] merges the content of a message into another. // -// • Equal compares two messages. For more control over comparisons -// and detailed reporting of differences, see package -// "google.golang.org/protobuf/testing/protocmp". +// - [Equal] compares two messages. For more control over comparisons +// and detailed reporting of differences, see package +// [google.golang.org/protobuf/testing/protocmp]. // -// • Reset clears the content of a message. +// - [Reset] clears the content of a message. // -// • CheckInitialized reports whether all required fields in a message are set. +// - [CheckInitialized] reports whether all required fields in a message are set. // // # Optional scalar constructors // @@ -46,9 +46,9 @@ // as pointers to a value. For example, an optional string field has the // Go type *string. // -// • Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, and String -// take a value and return a pointer to a new instance of it, -// to simplify construction of optional field values. +// - [Bool], [Int32], [Int64], [Uint32], [Uint64], [Float32], [Float64], and [String] +// take a value and return a pointer to a new instance of it, +// to simplify construction of optional field values. // // Generated enum types usually have an Enum method which performs the // same operation. @@ -57,29 +57,29 @@ // // # Extension accessors // -// • HasExtension, GetExtension, SetExtension, and ClearExtension -// access extension field values in a protocol buffer message. +// - [HasExtension], [GetExtension], [SetExtension], and [ClearExtension] +// access extension field values in a protocol buffer message. // // Extension fields are only supported in proto2. // // # Related packages // -// • Package "google.golang.org/protobuf/encoding/protojson" converts messages to -// and from JSON. +// - Package [google.golang.org/protobuf/encoding/protojson] converts messages to +// and from JSON. // -// • Package "google.golang.org/protobuf/encoding/prototext" converts messages to -// and from the text format. 
+// - Package [google.golang.org/protobuf/encoding/prototext] converts messages to +// and from the text format. // -// • Package "google.golang.org/protobuf/reflect/protoreflect" provides a -// reflection interface for protocol buffer data types. +// - Package [google.golang.org/protobuf/reflect/protoreflect] provides a +// reflection interface for protocol buffer data types. // -// • Package "google.golang.org/protobuf/testing/protocmp" provides features -// to compare protocol buffer messages with the "github.com/google/go-cmp/cmp" -// package. +// - Package [google.golang.org/protobuf/testing/protocmp] provides features +// to compare protocol buffer messages with the [github.com/google/go-cmp/cmp] +// package. // -// • Package "google.golang.org/protobuf/types/dynamicpb" provides a dynamic -// message type, suitable for working with messages where the protocol buffer -// type is only known at runtime. +// - Package [google.golang.org/protobuf/types/dynamicpb] provides a dynamic +// message type, suitable for working with messages where the protocol buffer +// type is only known at runtime. // // This module contains additional packages for more specialized use cases. // Consult the individual package documentation for details. diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go index bf7f816d..4fed202f 100644 --- a/vendor/google.golang.org/protobuf/proto/encode.go +++ b/vendor/google.golang.org/protobuf/proto/encode.go @@ -129,7 +129,7 @@ func (o MarshalOptions) MarshalAppend(b []byte, m Message) ([]byte, error) { // MarshalState returns the wire-format encoding of a message. // // This method permits fine-grained control over the marshaler. -// Most users should use Marshal instead. +// Most users should use [Marshal] instead. func (o MarshalOptions) MarshalState(in protoiface.MarshalInput) (protoiface.MarshalOutput, error) { return o.marshal(in.Buf, in.Message) } diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go index 5f293cda..17899a3a 100644 --- a/vendor/google.golang.org/protobuf/proto/extension.go +++ b/vendor/google.golang.org/protobuf/proto/extension.go @@ -26,7 +26,7 @@ func HasExtension(m Message, xt protoreflect.ExtensionType) bool { } // ClearExtension clears an extension field such that subsequent -// HasExtension calls return false. +// [HasExtension] calls return false. // It panics if m is invalid or if xt does not extend m. func ClearExtension(m Message, xt protoreflect.ExtensionType) { m.ProtoReflect().Clear(xt.TypeDescriptor()) diff --git a/vendor/google.golang.org/protobuf/proto/merge.go b/vendor/google.golang.org/protobuf/proto/merge.go index d761ab33..3c6fe578 100644 --- a/vendor/google.golang.org/protobuf/proto/merge.go +++ b/vendor/google.golang.org/protobuf/proto/merge.go @@ -21,7 +21,7 @@ import ( // The unknown fields of src are appended to the unknown fields of dst. // // It is semantically equivalent to unmarshaling the encoded form of src -// into dst with the UnmarshalOptions.Merge option specified. +// into dst with the [UnmarshalOptions.Merge] option specified. func Merge(dst, src Message) { // TODO: Should nil src be treated as semantically equivalent to a // untyped, read-only, empty message? What about a nil dst? 
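The merge.go doc fix links [UnmarshalOptions.Merge]; as a reminder of the semantics being documented, a small self-contained Merge round trip using a message type the module already ships (field values invented):

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	dst := &descriptorpb.FileDescriptorProto{Name: proto.String("a.proto")}
	src := &descriptorpb.FileDescriptorProto{Package: proto.String("pkg")}
	proto.Merge(dst, src) // populated scalar fields of src win; unset ones leave dst alone
	fmt.Println(dst.GetName(), dst.GetPackage()) // a.proto pkg
}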
diff --git a/vendor/google.golang.org/protobuf/proto/proto.go b/vendor/google.golang.org/protobuf/proto/proto.go index 1f0d183b..7543ee6b 100644 --- a/vendor/google.golang.org/protobuf/proto/proto.go +++ b/vendor/google.golang.org/protobuf/proto/proto.go @@ -15,18 +15,20 @@ import ( // protobuf module that accept a Message, except where otherwise specified. // // This is the v2 interface definition for protobuf messages. -// The v1 interface definition is "github.com/golang/protobuf/proto".Message. +// The v1 interface definition is [github.com/golang/protobuf/proto.Message]. // -// To convert a v1 message to a v2 message, -// use "github.com/golang/protobuf/proto".MessageV2. -// To convert a v2 message to a v1 message, -// use "github.com/golang/protobuf/proto".MessageV1. +// - To convert a v1 message to a v2 message, +// use [google.golang.org/protobuf/protoadapt.MessageV2Of]. +// - To convert a v2 message to a v1 message, +// use [google.golang.org/protobuf/protoadapt.MessageV1Of]. type Message = protoreflect.ProtoMessage -// Error matches all errors produced by packages in the protobuf module. +// Error matches all errors produced by packages in the protobuf module +// according to [errors.Is]. // -// That is, errors.Is(err, Error) reports whether an error is produced -// by this module. +// Example usage: +// +// if errors.Is(err, proto.Error) { ... } var Error error func init() { diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go index e4dfb120..baa0cc62 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go @@ -3,11 +3,11 @@ // license that can be found in the LICENSE file. // Package protodesc provides functionality for converting -// FileDescriptorProto messages to/from protoreflect.FileDescriptor values. +// FileDescriptorProto messages to/from [protoreflect.FileDescriptor] values. // // The google.protobuf.FileDescriptorProto is a protobuf message that describes // the type information for a .proto file in a form that is easily serializable. -// The protoreflect.FileDescriptor is a more structured representation of +// The [protoreflect.FileDescriptor] is a more structured representation of // the FileDescriptorProto message where references and remote dependencies // can be directly followed. package protodesc @@ -24,11 +24,11 @@ import ( "google.golang.org/protobuf/types/descriptorpb" ) -// Resolver is the resolver used by NewFile to resolve dependencies. +// Resolver is the resolver used by [NewFile] to resolve dependencies. // The enums and messages provided must belong to some parent file, // which is also registered. // -// It is implemented by protoregistry.Files. +// It is implemented by [protoregistry.Files]. type Resolver interface { FindFileByPath(string) (protoreflect.FileDescriptor, error) FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error) @@ -61,19 +61,19 @@ type FileOptions struct { AllowUnresolvable bool } -// NewFile creates a new protoreflect.FileDescriptor from the provided -// file descriptor message. See FileOptions.New for more information. +// NewFile creates a new [protoreflect.FileDescriptor] from the provided +// file descriptor message. See [FileOptions.New] for more information. 
func NewFile(fd *descriptorpb.FileDescriptorProto, r Resolver) (protoreflect.FileDescriptor, error) { return FileOptions{}.New(fd, r) } -// NewFiles creates a new protoregistry.Files from the provided -// FileDescriptorSet message. See FileOptions.NewFiles for more information. +// NewFiles creates a new [protoregistry.Files] from the provided +// FileDescriptorSet message. See [FileOptions.NewFiles] for more information. func NewFiles(fd *descriptorpb.FileDescriptorSet) (*protoregistry.Files, error) { return FileOptions{}.NewFiles(fd) } -// New creates a new protoreflect.FileDescriptor from the provided +// New creates a new [protoreflect.FileDescriptor] from the provided // file descriptor message. The file must represent a valid proto file according // to protobuf semantics. The returned descriptor is a deep copy of the input. // @@ -93,9 +93,15 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot f.L1.Syntax = protoreflect.Proto2 case "proto3": f.L1.Syntax = protoreflect.Proto3 + case "editions": + f.L1.Syntax = protoreflect.Editions + f.L1.Edition = fromEditionProto(fd.GetEdition()) default: return nil, errors.New("invalid syntax: %q", fd.GetSyntax()) } + if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < SupportedEditionsMinimum || fd.GetEdition() > SupportedEditionsMaximum) { + return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition()) + } f.L1.Path = fd.GetName() if f.L1.Path == "" { return nil, errors.New("file path must be populated") @@ -108,6 +114,9 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot opts = proto.Clone(opts).(*descriptorpb.FileOptions) f.L2.Options = func() protoreflect.ProtoMessage { return opts } } + if f.L1.Syntax == protoreflect.Editions { + initFileDescFromFeatureSet(f, fd.GetOptions().GetFeatures()) + } f.L2.Imports = make(filedesc.FileImports, len(fd.GetDependency())) for _, i := range fd.GetPublicDependency() { @@ -231,7 +240,7 @@ func (is importSet) importPublic(imps protoreflect.FileImports) { } } -// NewFiles creates a new protoregistry.Files from the provided +// NewFiles creates a new [protoregistry.Files] from the provided // FileDescriptorSet message. The descriptor set must include only // valid files according to protobuf semantics. The returned descriptors // are a deep copy of the input. diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go index 37efda1a..aff6fd49 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -137,6 +137,30 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc if fd.JsonName != nil { f.L1.StringName.InitJSON(fd.GetJsonName()) } + + if f.Base.L0.ParentFile.Syntax() == protoreflect.Editions { + f.L1.Presence = resolveFeatureHasFieldPresence(f.Base.L0.ParentFile, fd) + // We reuse the existing field because the old option `[packed = + // true]` is mutually exclusive with the editions feature. + if fd.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REPEATED { + f.L1.HasPacked = true + f.L1.IsPacked = resolveFeatureRepeatedFieldEncodingPacked(f.Base.L0.ParentFile, fd) + } + + // We pretend this option is always explicitly set because the only + // use of HasEnforceUTF8 is to determine whether to use EnforceUTF8 + // or to return the appropriate default. 
+		// When using editions we either parse the option or resolve the
+		// appropriate default here (instead of later when this option is
+		// requested from the descriptor).
+		// In proto2/proto3 syntax HasEnforceUTF8 might be false.
+		f.L1.HasEnforceUTF8 = true
+		f.L1.EnforceUTF8 = resolveFeatureEnforceUTF8(f.Base.L0.ParentFile, fd)
+
+		if f.L1.Kind == protoreflect.MessageKind && resolveFeatureDelimitedEncoding(f.Base.L0.ParentFile, fd) {
+			f.L1.Kind = protoreflect.GroupKind
+		}
+	}
 	}
 	return fs, nil
 }
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
new file mode 100644
index 00000000..7352926c
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
@@ -0,0 +1,177 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protodesc
+
+import (
+	_ "embed"
+	"fmt"
+	"os"
+	"sync"
+
+	"google.golang.org/protobuf/internal/filedesc"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/types/descriptorpb"
+)
+
+const (
+	SupportedEditionsMinimum = descriptorpb.Edition_EDITION_PROTO2
+	SupportedEditionsMaximum = descriptorpb.Edition_EDITION_2023
+)
+
+//go:embed editions_defaults.binpb
+var binaryEditionDefaults []byte
+var defaults = &descriptorpb.FeatureSetDefaults{}
+var defaultsCacheMu sync.Mutex
+var defaultsCache = make(map[filedesc.Edition]*descriptorpb.FeatureSet)
+
+func init() {
+	err := proto.Unmarshal(binaryEditionDefaults, defaults)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "unmarshal editions defaults: %v\n", err)
+		os.Exit(1)
+	}
+}
+
+func fromEditionProto(epb descriptorpb.Edition) filedesc.Edition {
+	return filedesc.Edition(epb)
+}
+
+func toEditionProto(ed filedesc.Edition) descriptorpb.Edition {
+	switch ed {
+	case filedesc.EditionUnknown:
+		return descriptorpb.Edition_EDITION_UNKNOWN
+	case filedesc.EditionProto2:
+		return descriptorpb.Edition_EDITION_PROTO2
+	case filedesc.EditionProto3:
+		return descriptorpb.Edition_EDITION_PROTO3
+	case filedesc.Edition2023:
+		return descriptorpb.Edition_EDITION_2023
+	default:
+		panic(fmt.Sprintf("unknown value for edition: %v", ed))
+	}
+}
+
+func getFeatureSetFor(ed filedesc.Edition) *descriptorpb.FeatureSet {
+	defaultsCacheMu.Lock()
+	defer defaultsCacheMu.Unlock()
+	if def, ok := defaultsCache[ed]; ok {
+		return def
+	}
+	edpb := toEditionProto(ed)
+	if defaults.GetMinimumEdition() > edpb || defaults.GetMaximumEdition() < edpb {
+		// This should never happen, because protodesc.(FileOptions).New would fail when
+		// initializing the file descriptor.
+		// This most likely means the embedded defaults were not updated.
+		fmt.Fprintf(os.Stderr, "internal error: unsupported edition %v (did you forget to update the embedded defaults (i.e. the bootstrap descriptor proto)?)\n", edpb)
+		os.Exit(1)
+	}
+	fs := defaults.GetDefaults()[0].GetFeatures()
+	// Using a linear search for now.
+	// Editions are guaranteed to be sorted and thus we could use a binary search.
+	// Given that there are only a handful of editions (with one more per year)
+	// there is not much reason to use a binary search.
+ for _, def := range defaults.GetDefaults() { + if def.GetEdition() <= edpb { + fs = def.GetFeatures() + } else { + break + } + } + defaultsCache[ed] = fs + return fs +} + +func resolveFeatureHasFieldPresence(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool { + fs := fieldDesc.GetOptions().GetFeatures() + if fs == nil || fs.FieldPresence == nil { + return fileDesc.L1.EditionFeatures.IsFieldPresence + } + return fs.GetFieldPresence() == descriptorpb.FeatureSet_LEGACY_REQUIRED || + fs.GetFieldPresence() == descriptorpb.FeatureSet_EXPLICIT +} + +func resolveFeatureRepeatedFieldEncodingPacked(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool { + fs := fieldDesc.GetOptions().GetFeatures() + if fs == nil || fs.RepeatedFieldEncoding == nil { + return fileDesc.L1.EditionFeatures.IsPacked + } + return fs.GetRepeatedFieldEncoding() == descriptorpb.FeatureSet_PACKED +} + +func resolveFeatureEnforceUTF8(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool { + fs := fieldDesc.GetOptions().GetFeatures() + if fs == nil || fs.Utf8Validation == nil { + return fileDesc.L1.EditionFeatures.IsUTF8Validated + } + return fs.GetUtf8Validation() == descriptorpb.FeatureSet_VERIFY +} + +func resolveFeatureDelimitedEncoding(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool { + fs := fieldDesc.GetOptions().GetFeatures() + if fs == nil || fs.MessageEncoding == nil { + return fileDesc.L1.EditionFeatures.IsDelimitedEncoded + } + return fs.GetMessageEncoding() == descriptorpb.FeatureSet_DELIMITED +} + +// initFileDescFromFeatureSet initializes editions related fields in fd based +// on fs. If fs is nil it is assumed to be an empty featureset and all fields +// will be initialized with the appropriate default. fd.L1.Edition must be set +// before calling this function. 
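getFeatureSetFor, just above, resolves an edition's defaults by scanning the sorted defaults list and keeping the last entry whose edition does not exceed the requested one. A toy restatement of only that selection rule; the plain ints 998/999/1000 are assumptions standing in for the EDITION_PROTO2, EDITION_PROTO3 and EDITION_2023 enum values:

package main

import "fmt"

// pickDefaults mirrors the linear scan in getFeatureSetFor: entries are
// sorted by edition, and the applicable one is the last whose edition is
// less than or equal to the requested edition.
func pickDefaults(sortedEditions []int, want int) int {
	chosen := sortedEditions[0]
	for _, e := range sortedEditions {
		if e <= want {
			chosen = e
		} else {
			break
		}
	}
	return chosen
}

func main() {
	editions := []int{998, 999, 1000}
	fmt.Println(pickDefaults(editions, 1000)) // 1000
	fmt.Println(pickDefaults(editions, 999))  // 999
}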
+func initFileDescFromFeatureSet(fd *filedesc.File, fs *descriptorpb.FeatureSet) {
+	dfs := getFeatureSetFor(fd.L1.Edition)
+	if fs == nil {
+		fs = &descriptorpb.FeatureSet{}
+	}
+
+	var fieldPresence descriptorpb.FeatureSet_FieldPresence
+	if fp := fs.FieldPresence; fp != nil {
+		fieldPresence = *fp
+	} else {
+		fieldPresence = *dfs.FieldPresence
+	}
+	fd.L1.EditionFeatures.IsFieldPresence = fieldPresence == descriptorpb.FeatureSet_LEGACY_REQUIRED ||
+		fieldPresence == descriptorpb.FeatureSet_EXPLICIT
+
+	var enumType descriptorpb.FeatureSet_EnumType
+	if et := fs.EnumType; et != nil {
+		enumType = *et
+	} else {
+		enumType = *dfs.EnumType
+	}
+	fd.L1.EditionFeatures.IsOpenEnum = enumType == descriptorpb.FeatureSet_OPEN
+
+	var repeatedFieldEncoding descriptorpb.FeatureSet_RepeatedFieldEncoding
+	if rfe := fs.RepeatedFieldEncoding; rfe != nil {
+		repeatedFieldEncoding = *rfe
+	} else {
+		repeatedFieldEncoding = *dfs.RepeatedFieldEncoding
+	}
+	fd.L1.EditionFeatures.IsPacked = repeatedFieldEncoding == descriptorpb.FeatureSet_PACKED
+
+	var isUTF8Validated descriptorpb.FeatureSet_Utf8Validation
+	if utf8val := fs.Utf8Validation; utf8val != nil {
+		isUTF8Validated = *utf8val
+	} else {
+		isUTF8Validated = *dfs.Utf8Validation
+	}
+	fd.L1.EditionFeatures.IsUTF8Validated = isUTF8Validated == descriptorpb.FeatureSet_VERIFY
+
+	var messageEncoding descriptorpb.FeatureSet_MessageEncoding
+	if me := fs.MessageEncoding; me != nil {
+		messageEncoding = *me
+	} else {
+		messageEncoding = *dfs.MessageEncoding
+	}
+	fd.L1.EditionFeatures.IsDelimitedEncoded = messageEncoding == descriptorpb.FeatureSet_DELIMITED
+
+	var jsonFormat descriptorpb.FeatureSet_JsonFormat
+	if jf := fs.JsonFormat; jf != nil {
+		jsonFormat = *jf
+	} else {
+		jsonFormat = *dfs.JsonFormat
+	}
+	fd.L1.EditionFeatures.IsJSONCompliant = jsonFormat == descriptorpb.FeatureSet_ALLOW
+}
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions_defaults.binpb b/vendor/google.golang.org/protobuf/reflect/protodesc/editions_defaults.binpb
new file mode 100644
index 00000000..1a8610a8
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions_defaults.binpb
@@ -0,0 +1,4 @@
+
+  (0æ
+  (0ç
+  (0è æ(è
\ No newline at end of file
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
index a7c5ceff..9d6e0542 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
@@ -16,7 +16,7 @@ import (
 	"google.golang.org/protobuf/types/descriptorpb"
 )
 
-// ToFileDescriptorProto copies a protoreflect.FileDescriptor into a
+// ToFileDescriptorProto copies a [protoreflect.FileDescriptor] into a
 // google.protobuf.FileDescriptorProto message.
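initFileDescFromFeatureSet repeats a single pattern six times: take the explicitly set FeatureSet member when present, otherwise fall back to the edition-wide default resolved above. A condensed sketch of that fallback; the generic helper is hypothetical, not upstream API:

package main

import "fmt"

// resolveFeature prefers an explicitly set feature value and otherwise
// falls back to the edition default, like each block in
// initFileDescFromFeatureSet.
func resolveFeature[T any](explicit *T, editionDefault T) T {
	if explicit != nil {
		return *explicit
	}
	return editionDefault
}

func main() {
	packedDefault := true // suppose this edition defaults to PACKED
	fmt.Println(resolveFeature[bool](nil, packedDefault)) // true: inherited
	off := false
	fmt.Println(resolveFeature(&off, packedDefault)) // false: explicit override
}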
func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileDescriptorProto { p := &descriptorpb.FileDescriptorProto{ @@ -70,13 +70,13 @@ func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileD for i, exts := 0, file.Extensions(); i < exts.Len(); i++ { p.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i))) } - if syntax := file.Syntax(); syntax != protoreflect.Proto2 { + if syntax := file.Syntax(); syntax != protoreflect.Proto2 && syntax.IsValid() { p.Syntax = proto.String(file.Syntax().String()) } return p } -// ToDescriptorProto copies a protoreflect.MessageDescriptor into a +// ToDescriptorProto copies a [protoreflect.MessageDescriptor] into a // google.protobuf.DescriptorProto message. func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.DescriptorProto { p := &descriptorpb.DescriptorProto{ @@ -119,7 +119,7 @@ func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.Des return p } -// ToFieldDescriptorProto copies a protoreflect.FieldDescriptor into a +// ToFieldDescriptorProto copies a [protoreflect.FieldDescriptor] into a // google.protobuf.FieldDescriptorProto message. func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.FieldDescriptorProto { p := &descriptorpb.FieldDescriptorProto{ @@ -168,7 +168,7 @@ func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.Fi return p } -// ToOneofDescriptorProto copies a protoreflect.OneofDescriptor into a +// ToOneofDescriptorProto copies a [protoreflect.OneofDescriptor] into a // google.protobuf.OneofDescriptorProto message. func ToOneofDescriptorProto(oneof protoreflect.OneofDescriptor) *descriptorpb.OneofDescriptorProto { return &descriptorpb.OneofDescriptorProto{ @@ -177,7 +177,7 @@ func ToOneofDescriptorProto(oneof protoreflect.OneofDescriptor) *descriptorpb.On } } -// ToEnumDescriptorProto copies a protoreflect.EnumDescriptor into a +// ToEnumDescriptorProto copies a [protoreflect.EnumDescriptor] into a // google.protobuf.EnumDescriptorProto message. func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumDescriptorProto { p := &descriptorpb.EnumDescriptorProto{ @@ -200,7 +200,7 @@ func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumD return p } -// ToEnumValueDescriptorProto copies a protoreflect.EnumValueDescriptor into a +// ToEnumValueDescriptorProto copies a [protoreflect.EnumValueDescriptor] into a // google.protobuf.EnumValueDescriptorProto message. func ToEnumValueDescriptorProto(value protoreflect.EnumValueDescriptor) *descriptorpb.EnumValueDescriptorProto { return &descriptorpb.EnumValueDescriptorProto{ @@ -210,7 +210,7 @@ func ToEnumValueDescriptorProto(value protoreflect.EnumValueDescriptor) *descrip } } -// ToServiceDescriptorProto copies a protoreflect.ServiceDescriptor into a +// ToServiceDescriptorProto copies a [protoreflect.ServiceDescriptor] into a // google.protobuf.ServiceDescriptorProto message. func ToServiceDescriptorProto(service protoreflect.ServiceDescriptor) *descriptorpb.ServiceDescriptorProto { p := &descriptorpb.ServiceDescriptorProto{ @@ -223,7 +223,7 @@ func ToServiceDescriptorProto(service protoreflect.ServiceDescriptor) *descripto return p } -// ToMethodDescriptorProto copies a protoreflect.MethodDescriptor into a +// ToMethodDescriptorProto copies a [protoreflect.MethodDescriptor] into a // google.protobuf.MethodDescriptorProto message. 
func ToMethodDescriptorProto(method protoreflect.MethodDescriptor) *descriptorpb.MethodDescriptorProto { p := &descriptorpb.MethodDescriptorProto{ diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go index 55aa1492..ec6572df 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go @@ -10,46 +10,46 @@ // // # Protocol Buffer Descriptors // -// Protobuf descriptors (e.g., EnumDescriptor or MessageDescriptor) +// Protobuf descriptors (e.g., [EnumDescriptor] or [MessageDescriptor]) // are immutable objects that represent protobuf type information. // They are wrappers around the messages declared in descriptor.proto. // Protobuf descriptors alone lack any information regarding Go types. // -// Enums and messages generated by this module implement Enum and ProtoMessage, +// Enums and messages generated by this module implement [Enum] and [ProtoMessage], // where the Descriptor and ProtoReflect.Descriptor accessors respectively // return the protobuf descriptor for the values. // // The protobuf descriptor interfaces are not meant to be implemented by // user code since they might need to be extended in the future to support // additions to the protobuf language. -// The "google.golang.org/protobuf/reflect/protodesc" package converts between +// The [google.golang.org/protobuf/reflect/protodesc] package converts between // google.protobuf.DescriptorProto messages and protobuf descriptors. // // # Go Type Descriptors // -// A type descriptor (e.g., EnumType or MessageType) is a constructor for +// A type descriptor (e.g., [EnumType] or [MessageType]) is a constructor for // a concrete Go type that represents the associated protobuf descriptor. // There is commonly a one-to-one relationship between protobuf descriptors and // Go type descriptors, but it can potentially be a one-to-many relationship. // -// Enums and messages generated by this module implement Enum and ProtoMessage, +// Enums and messages generated by this module implement [Enum] and [ProtoMessage], // where the Type and ProtoReflect.Type accessors respectively // return the protobuf descriptor for the values. // -// The "google.golang.org/protobuf/types/dynamicpb" package can be used to +// The [google.golang.org/protobuf/types/dynamicpb] package can be used to // create Go type descriptors from protobuf descriptors. // // # Value Interfaces // -// The Enum and Message interfaces provide a reflective view over an +// The [Enum] and [Message] interfaces provide a reflective view over an // enum or message instance. For enums, it provides the ability to retrieve // the enum value number for any concrete enum type. For messages, it provides // the ability to access or manipulate fields of the message. // -// To convert a proto.Message to a protoreflect.Message, use the +// To convert a [google.golang.org/protobuf/proto.Message] to a [protoreflect.Message], use the // former's ProtoReflect method. Since the ProtoReflect method is new to the // v2 message interface, it may not be present on older message implementations. -// The "github.com/golang/protobuf/proto".MessageReflect function can be used +// The [github.com/golang/protobuf/proto.MessageReflect] function can be used // to obtain a reflective view on older messages. 
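The rewritten package documentation centers on moving between proto.Message and protoreflect.Message; a minimal round trip through ProtoReflect and Interface, using a message type bundled with the module:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	var m proto.Message = &descriptorpb.FileDescriptorProto{Name: proto.String("a.proto")}
	r := m.ProtoReflect()                  // proto.Message -> protoreflect.Message
	fmt.Println(r.Descriptor().FullName()) // google.protobuf.FileDescriptorProto
	fmt.Println(r.Interface() == m)        // and back again: true
}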
// // # Relationships @@ -71,12 +71,12 @@ // │ │ // └────────────────── Type() ───────┘ // -// • An EnumType describes a concrete Go enum type. +// • An [EnumType] describes a concrete Go enum type. // It has an EnumDescriptor and can construct an Enum instance. // -// • An EnumDescriptor describes an abstract protobuf enum type. +// • An [EnumDescriptor] describes an abstract protobuf enum type. // -// • An Enum is a concrete enum instance. Generated enums implement Enum. +// • An [Enum] is a concrete enum instance. Generated enums implement Enum. // // ┌──────────────── New() ─────────────────┠// │ │ @@ -90,24 +90,26 @@ // │ │ // └─────────────────── Type() ─────────┘ // -// • A MessageType describes a concrete Go message type. -// It has a MessageDescriptor and can construct a Message instance. -// Just as how Go's reflect.Type is a reflective description of a Go type, -// a MessageType is a reflective description of a Go type for a protobuf message. +// • A [MessageType] describes a concrete Go message type. +// It has a [MessageDescriptor] and can construct a [Message] instance. +// Just as how Go's [reflect.Type] is a reflective description of a Go type, +// a [MessageType] is a reflective description of a Go type for a protobuf message. // -// • A MessageDescriptor describes an abstract protobuf message type. -// It has no understanding of Go types. In order to construct a MessageType -// from just a MessageDescriptor, you can consider looking up the message type -// in the global registry using protoregistry.GlobalTypes.FindMessageByName -// or constructing a dynamic MessageType using dynamicpb.NewMessageType. +// • A [MessageDescriptor] describes an abstract protobuf message type. +// It has no understanding of Go types. In order to construct a [MessageType] +// from just a [MessageDescriptor], you can consider looking up the message type +// in the global registry using the FindMessageByName method on +// [google.golang.org/protobuf/reflect/protoregistry.GlobalTypes] +// or constructing a dynamic [MessageType] using +// [google.golang.org/protobuf/types/dynamicpb.NewMessageType]. // -// • A Message is a reflective view over a concrete message instance. -// Generated messages implement ProtoMessage, which can convert to a Message. -// Just as how Go's reflect.Value is a reflective view over a Go value, -// a Message is a reflective view over a concrete protobuf message instance. -// Using Go reflection as an analogy, the ProtoReflect method is similar to -// calling reflect.ValueOf, and the Message.Interface method is similar to -// calling reflect.Value.Interface. +// • A [Message] is a reflective view over a concrete message instance. +// Generated messages implement [ProtoMessage], which can convert to a [Message]. +// Just as how Go's [reflect.Value] is a reflective view over a Go value, +// a [Message] is a reflective view over a concrete protobuf message instance. +// Using Go reflection as an analogy, the [ProtoMessage.ProtoReflect] method is similar to +// calling [reflect.ValueOf], and the [Message.Interface] method is similar to +// calling [reflect.Value.Interface]. // // ┌── TypeDescriptor() ──┠┌───── Descriptor() ─────┠// │ V │ V @@ -119,15 +121,15 @@ // │ │ // └────── implements ────────┘ // -// • An ExtensionType describes a concrete Go implementation of an extension. -// It has an ExtensionTypeDescriptor and can convert to/from -// abstract Values and Go values. +// • An [ExtensionType] describes a concrete Go implementation of an extension. 
+// It has an [ExtensionTypeDescriptor] and can convert to/from +// an abstract [Value] and a Go value. // -// • An ExtensionTypeDescriptor is an ExtensionDescriptor -// which also has an ExtensionType. +// • An [ExtensionTypeDescriptor] is an [ExtensionDescriptor] +// which also has an [ExtensionType]. // -// • An ExtensionDescriptor describes an abstract protobuf extension field and -// may not always be an ExtensionTypeDescriptor. +// • An [ExtensionDescriptor] describes an abstract protobuf extension field and +// may not always be an [ExtensionTypeDescriptor]. package protoreflect import ( @@ -142,7 +144,7 @@ type doNotImplement pragma.DoNotImplement // ProtoMessage is the top-level interface that all proto messages implement. // This is declared in the protoreflect package to avoid a cyclic dependency; -// use the proto.Message type instead, which aliases this type. +// use the [google.golang.org/protobuf/proto.Message] type instead, which aliases this type. type ProtoMessage interface{ ProtoReflect() Message } // Syntax is the language version of the proto file. @@ -151,8 +153,9 @@ type Syntax syntax type syntax int8 // keep exact type opaque as the int type may change const ( - Proto2 Syntax = 2 - Proto3 Syntax = 3 + Proto2 Syntax = 2 + Proto3 Syntax = 3 + Editions Syntax = 4 ) // IsValid reports whether the syntax is valid. @@ -436,7 +439,7 @@ type Names interface { // FullName is a qualified name that uniquely identifies a proto declaration. // A qualified name is the concatenation of the proto package along with the // fully-declared name (i.e., name of parent preceding the name of the child), -// with a '.' delimiter placed between each Name. +// with a '.' delimiter placed between each [Name]. // // This should not have any leading or trailing dots. type FullName string // e.g., "google.protobuf.Field.Kind" @@ -480,7 +483,7 @@ func isLetterDigit(c byte) bool { } // Name returns the short name, which is the last identifier segment. -// A single segment FullName is the Name itself. +// A single segment FullName is the [Name] itself. 
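With Editions joining Proto2 and Proto3 in the Syntax enum, a tiny probe of the three values; whether IsValid accepts Editions depends on how far the vendored version's editions support goes, so no output is asserted:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	for _, s := range []protoreflect.Syntax{protoreflect.Proto2, protoreflect.Proto3, protoreflect.Editions} {
		fmt.Printf("%v valid=%v\n", s, s.IsValid())
	}
}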
func (n FullName) Name() Name { if i := strings.LastIndexByte(string(n), '.'); i >= 0 { return Name(n[i+1:]) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go index 717b106f..0c045db6 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -35,7 +35,7 @@ func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte { b = p.appendSingularField(b, "source_code_info", (*SourcePath).appendSourceCodeInfo) case 12: b = p.appendSingularField(b, "syntax", nil) - case 13: + case 14: b = p.appendSingularField(b, "edition", nil) } return b @@ -180,6 +180,8 @@ func (p *SourcePath) appendFileOptions(b []byte) []byte { b = p.appendSingularField(b, "php_metadata_namespace", nil) case 45: b = p.appendSingularField(b, "ruby_package", nil) + case 50: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -240,6 +242,8 @@ func (p *SourcePath) appendMessageOptions(b []byte) []byte { b = p.appendSingularField(b, "map_entry", nil) case 11: b = p.appendSingularField(b, "deprecated_legacy_json_field_conflicts", nil) + case 12: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -285,6 +289,8 @@ func (p *SourcePath) appendEnumOptions(b []byte) []byte { b = p.appendSingularField(b, "deprecated", nil) case 6: b = p.appendSingularField(b, "deprecated_legacy_json_field_conflicts", nil) + case 7: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -330,6 +336,8 @@ func (p *SourcePath) appendServiceOptions(b []byte) []byte { return b } switch (*p)[0] { + case 34: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 33: b = p.appendSingularField(b, "deprecated", nil) case 999: @@ -361,16 +369,39 @@ func (p *SourcePath) appendFieldOptions(b []byte) []byte { b = p.appendSingularField(b, "debug_redact", nil) case 17: b = p.appendSingularField(b, "retention", nil) - case 18: - b = p.appendSingularField(b, "target", nil) case 19: b = p.appendRepeatedField(b, "targets", nil) + case 20: + b = p.appendRepeatedField(b, "edition_defaults", (*SourcePath).appendFieldOptions_EditionDefault) + case 21: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } return b } +func (p *SourcePath) appendFeatureSet(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "field_presence", nil) + case 2: + b = p.appendSingularField(b, "enum_type", nil) + case 3: + b = p.appendSingularField(b, "repeated_field_encoding", nil) + case 4: + b = p.appendSingularField(b, "utf8_validation", nil) + case 5: + b = p.appendSingularField(b, "message_encoding", nil) + case 6: + b = p.appendSingularField(b, "json_format", nil) + } + return b +} + func (p *SourcePath) appendUninterpretedOption(b []byte) []byte { if len(*p) == 0 { return b @@ -422,6 +453,8 @@ func (p *SourcePath) appendExtensionRangeOptions(b []byte) []byte { b = p.appendRepeatedField(b, 
"uninterpreted_option", (*SourcePath).appendUninterpretedOption) case 2: b = p.appendRepeatedField(b, "declaration", (*SourcePath).appendExtensionRangeOptions_Declaration) + case 50: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 3: b = p.appendSingularField(b, "verification", nil) } @@ -433,6 +466,8 @@ func (p *SourcePath) appendOneofOptions(b []byte) []byte { return b } switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -446,6 +481,10 @@ func (p *SourcePath) appendEnumValueOptions(b []byte) []byte { switch (*p)[0] { case 1: b = p.appendSingularField(b, "deprecated", nil) + case 2: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) + case 3: + b = p.appendSingularField(b, "debug_redact", nil) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -461,12 +500,27 @@ func (p *SourcePath) appendMethodOptions(b []byte) []byte { b = p.appendSingularField(b, "deprecated", nil) case 34: b = p.appendSingularField(b, "idempotency_level", nil) + case 35: + b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } return b } +func (p *SourcePath) appendFieldOptions_EditionDefault(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 3: + b = p.appendSingularField(b, "edition", nil) + case 2: + b = p.appendSingularField(b, "value", nil) + } + return b +} + func (p *SourcePath) appendUninterpretedOption_NamePart(b []byte) []byte { if len(*p) == 0 { return b @@ -491,8 +545,6 @@ func (p *SourcePath) appendExtensionRangeOptions_Declaration(b []byte) []byte { b = p.appendSingularField(b, "full_name", nil) case 3: b = p.appendSingularField(b, "type", nil) - case 4: - b = p.appendSingularField(b, "is_repeated", nil) case 5: b = p.appendSingularField(b, "reserved", nil) case 6: diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go index 3867470d..60ff62b4 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -12,7 +12,7 @@ package protoreflect // exactly identical. However, it is possible for the same semantically // identical proto type to be represented by multiple type descriptors. // -// For example, suppose we have t1 and t2 which are both MessageDescriptors. +// For example, suppose we have t1 and t2 which are both an [MessageDescriptor]. // If t1 == t2, then the types are definitely equal and all accessors return // the same information. However, if t1 != t2, then it is still possible that // they still represent the same proto type (e.g., t1.FullName == t2.FullName). @@ -115,7 +115,7 @@ type Descriptor interface { // corresponds with the google.protobuf.FileDescriptorProto message. // // Top-level declarations: -// EnumDescriptor, MessageDescriptor, FieldDescriptor, and/or ServiceDescriptor. +// [EnumDescriptor], [MessageDescriptor], [FieldDescriptor], and/or [ServiceDescriptor]. type FileDescriptor interface { Descriptor // Descriptor.FullName is identical to Package @@ -180,8 +180,8 @@ type FileImport struct { // corresponds with the google.protobuf.DescriptorProto message. 
// // Nested declarations: -// FieldDescriptor, OneofDescriptor, FieldDescriptor, EnumDescriptor, -// and/or MessageDescriptor. +// [FieldDescriptor], [OneofDescriptor], [ExtensionDescriptor], [EnumDescriptor], +// and/or [MessageDescriptor]. type MessageDescriptor interface { Descriptor @@ -214,7 +214,7 @@ type MessageDescriptor interface { ExtensionRanges() FieldRanges // ExtensionRangeOptions returns the ith extension range options. // - // To avoid a dependency cycle, this method returns a proto.Message value, + // To avoid a dependency cycle, this method returns a proto.Message value, // which always contains a google.protobuf.ExtensionRangeOptions message. // This method returns a typed nil-pointer if no options are present. // The caller must import the descriptorpb package to use this. @@ -231,9 +231,9 @@ } type isMessageDescriptor interface{ ProtoType(MessageDescriptor) } -// MessageType encapsulates a MessageDescriptor with a concrete Go implementation. +// MessageType encapsulates a [MessageDescriptor] with a concrete Go implementation. // It is recommended that implementations of this interface also implement the -// MessageFieldTypes interface. +// [MessageFieldTypes] interface. type MessageType interface { // New returns a newly allocated empty message. // It may return nil for synthetic messages representing a map entry. @@ -249,19 +249,19 @@ Descriptor() MessageDescriptor } -// MessageFieldTypes extends a MessageType by providing type information +// MessageFieldTypes extends a [MessageType] by providing type information // regarding enums and messages referenced by the message fields. type MessageFieldTypes interface { MessageType - // Enum returns the EnumType for the ith field in Descriptor.Fields. + // Enum returns the EnumType for the ith field in MessageDescriptor.Fields. // It returns nil if the ith field is not an enum kind. // It panics if out of bounds. // // Invariant: mt.Enum(i).Descriptor() == mt.Descriptor().Fields(i).Enum() Enum(i int) EnumType - // Message returns the MessageType for the ith field in Descriptor.Fields. + // Message returns the MessageType for the ith field in MessageDescriptor.Fields. // It returns nil if the ith field is not a message or group kind. // It panics if out of bounds. // @@ -286,8 +286,8 @@ type MessageDescriptors interface { // corresponds with the google.protobuf.FieldDescriptorProto message. // // It is used for both normal fields defined within the parent message -// (e.g., MessageDescriptor.Fields) and fields that extend some remote message -// (e.g., FileDescriptor.Extensions or MessageDescriptor.Extensions). +// (e.g., [MessageDescriptor.Fields]) and fields that extend some remote message +// (e.g., [FileDescriptor.Extensions] or [MessageDescriptor.Extensions]). type FieldDescriptor interface { Descriptor @@ -344,7 +344,7 @@ // IsMap reports whether this field represents a map, // where the value type for the associated field is a Map. // It is equivalent to checking whether Cardinality is Repeated, - // that the Kind is MessageKind, and that Message.IsMapEntry reports true. + // that the Kind is MessageKind, and that MessageDescriptor.IsMapEntry reports true. IsMap() bool // MapKey returns the field descriptor for the key in the map entry. @@ -419,7 +419,7 @@ type OneofDescriptor interface { // IsSynthetic reports whether this is a synthetic oneof created to support // proto3 optional semantics.
If true, Fields contains exactly one field - // with HasOptionalKeyword specified. + // with FieldDescriptor.HasOptionalKeyword specified. IsSynthetic() bool // Fields is a list of fields belonging to this oneof. @@ -442,10 +442,10 @@ type OneofDescriptors interface { doNotImplement } -// ExtensionDescriptor is an alias of FieldDescriptor for documentation. +// ExtensionDescriptor is an alias of [FieldDescriptor] for documentation. type ExtensionDescriptor = FieldDescriptor -// ExtensionTypeDescriptor is an ExtensionDescriptor with an associated ExtensionType. +// ExtensionTypeDescriptor is an [ExtensionDescriptor] with an associated [ExtensionType]. type ExtensionTypeDescriptor interface { ExtensionDescriptor @@ -470,12 +470,12 @@ type ExtensionDescriptors interface { doNotImplement } -// ExtensionType encapsulates an ExtensionDescriptor with a concrete +// ExtensionType encapsulates an [ExtensionDescriptor] with a concrete // Go implementation. The nested field descriptor must be for an extension field. // // While a normal field is a member of the parent message that it is declared -// within (see Descriptor.Parent), an extension field is a member of some other -// target message (see ExtensionDescriptor.Extendee) and may have no +// within (see [Descriptor.Parent]), an extension field is a member of some other +// target message (see [FieldDescriptor.ContainingMessage]) and may have no // relationship with the parent. However, the full name of an extension field is // relative to the parent that it is declared within. // @@ -532,7 +532,7 @@ // corresponds with the google.protobuf.EnumDescriptorProto message. // // Nested declarations: -// EnumValueDescriptor. +// [EnumValueDescriptor]. type EnumDescriptor interface { Descriptor @@ -548,7 +548,7 @@ type EnumDescriptor interface { } type isEnumDescriptor interface{ ProtoType(EnumDescriptor) } -// EnumType encapsulates an EnumDescriptor with a concrete Go implementation. +// EnumType encapsulates an [EnumDescriptor] with a concrete Go implementation. type EnumType interface { // New returns an instance of this enum type with its value set to n. New(n EnumNumber) Enum @@ -610,7 +610,7 @@ type EnumValueDescriptors interface { // ServiceDescriptor describes a service and // corresponds with the google.protobuf.ServiceDescriptorProto message. // -// Nested declarations: MethodDescriptor. +// Nested declarations: [MethodDescriptor]. type ServiceDescriptor interface { Descriptor diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go index 37601b78..a7b0d06f 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go @@ -27,16 +27,16 @@ type Enum interface { // Message is a reflective interface for a concrete message value, // encapsulating both type and value information for the message. // -// Accessor/mutators for individual fields are keyed by FieldDescriptor. +// Accessors/mutators for individual fields are keyed by [FieldDescriptor]. // For non-extension fields, the descriptor must exactly match the // field known by the parent message.
-// For extension fields, the descriptor must implement ExtensionTypeDescriptor, -// extend the parent message (i.e., have the same message FullName), and +// For extension fields, the descriptor must implement [ExtensionTypeDescriptor], +// extend the parent message (i.e., have the same message [FullName]), and // be within the parent's extension range. // -// Each field Value can be a scalar or a composite type (Message, List, or Map). -// See Value for the Go types associated with a FieldDescriptor. -// Providing a Value that is invalid or of an incorrect type panics. +// Each field [Value] can be a scalar or a composite type ([Message], [List], or [Map]). +// See [Value] for the Go types associated with a [FieldDescriptor]. +// Providing a [Value] that is invalid or of an incorrect type panics. type Message interface { // Descriptor returns the message descriptor, which contains only the protobuf // type information for the message. @@ -152,7 +152,7 @@ // This method may return nil. // // The returned methods type is identical to - // "google.golang.org/protobuf/runtime/protoiface".Methods. + // google.golang.org/protobuf/runtime/protoiface.Methods. // Consult the protoiface package documentation for details. ProtoMethods() *methods } @@ -175,8 +175,8 @@ func (b RawFields) IsValid() bool { } // List is a zero-indexed, ordered list. -// The element Value type is determined by FieldDescriptor.Kind. -// Providing a Value that is invalid or of an incorrect type panics. +// The element [Value] type is determined by [FieldDescriptor.Kind]. +// Providing a [Value] that is invalid or of an incorrect type panics. type List interface { // Len reports the number of entries in the List. // Get, Set, and Truncate panic with out of bound indexes. @@ -226,9 +226,9 @@ } // Map is an unordered, associative map. -// The entry MapKey type is determined by FieldDescriptor.MapKey.Kind. -// The entry Value type is determined by FieldDescriptor.MapValue.Kind. -// Providing a MapKey or Value that is invalid or of an incorrect type panics. +// The entry [MapKey] type is determined by [FieldDescriptor.MapKey].Kind. +// The entry [Value] type is determined by [FieldDescriptor.MapValue].Kind. +// Providing a [MapKey] or [Value] that is invalid or of an incorrect type panics. type Map interface { // Len reports the number of elements in the map. Len() int diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go index 59165254..654599d4 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go @@ -24,19 +24,19 @@ import ( // Unlike the == operator, a NaN is equal to another NaN. // // - Enums are equal if they contain the same number. -// Since Value does not contain an enum descriptor, +// Since [Value] does not contain an enum descriptor, // enum values do not consider the type of the enum. // // - Other scalar values are equal if they contain the same value. // -// - Message values are equal if they belong to the same message descriptor, +// - [Message] values are equal if they belong to the same message descriptor, // have the same set of populated known and extension field values, // and the same set of unknown field values.
// -// - Lists are equal if they are the same length and +// - [List] values are equal if they are the same length and // each corresponding element is equal. // -// - Maps are equal if they have the same set of keys and +// - [Map] values are equal if they have the same set of keys and // the corresponding value for each key is equal. func (v1 Value) Equal(v2 Value) bool { return equalValue(v1, v2) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go index 08e5ef73..16030973 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go @@ -11,7 +11,7 @@ import ( // Value is a union where only one Go type may be set at a time. // The Value is used to represent all possible values a field may take. -// The following shows which Go type is used to represent each proto Kind: +// The following shows which Go type is used to represent each proto [Kind]: // // ╔════════════╤═════════════════════════════════════╗ // ║ Go type │ Protobuf kind ║ @@ -31,22 +31,22 @@ // // Multiple protobuf Kinds may be represented by a single Go type if the type // can losslessly represent the information for the proto kind. For example, -// Int64Kind, Sint64Kind, and Sfixed64Kind are all represented by int64, +// [Int64Kind], [Sint64Kind], and [Sfixed64Kind] are all represented by int64, // but use different integer encoding methods. // -// The List or Map types are used if the field cardinality is repeated. -// A field is a List if FieldDescriptor.IsList reports true. -// A field is a Map if FieldDescriptor.IsMap reports true. +// The [List] or [Map] types are used if the field cardinality is repeated. +// A field is a [List] if [FieldDescriptor.IsList] reports true. +// A field is a [Map] if [FieldDescriptor.IsMap] reports true. // // Converting to/from a Value and a concrete Go value panics on type mismatch. -// For example, ValueOf("hello").Int() panics because this attempts to +// For example, [ValueOf]("hello").Int() panics because this attempts to // retrieve an int64 from a string. // -// List, Map, and Message Values are called "composite" values. +// [List], [Map], and [Message] Values are called "composite" values. // // A composite Value may alias (reference) memory at some location, // such that changes to the Value update that location. -// A composite value acquired with a Mutable method, such as Message.Mutable, +// A composite value acquired with a Mutable method, such as [Message.Mutable], // always references the source object. // // For example: @@ -65,7 +65,7 @@ // // appending to the List here may or may not modify the message. // list.Append(protoreflect.ValueOfInt32(0)) // -// Some operations, such as Message.Get, may return an "empty, read-only" +// Some operations, such as [Message.Get], may return an "empty, read-only" // composite Value. Modifying an empty, read-only value panics. type Value value @@ -306,7 +306,7 @@ func (v Value) Float() float64 { } } -// String returns v as a string. Since this method implements fmt.Stringer, +// String returns v as a string. Since this method implements [fmt.Stringer], // this returns the formatted string value for any non-string type.
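The equality rules listed above are easy to check in isolation. A short sketch (not from this patch) of the two less obvious cases, NaN and enum numbers:

    package main

    import (
        "fmt"
        "math"

        "google.golang.org/protobuf/reflect/protoreflect"
    )

    func main() {
        // Unlike the == operator on float64, Value.Equal treats NaN as equal to NaN.
        nan1 := protoreflect.ValueOfFloat64(math.NaN())
        nan2 := protoreflect.ValueOfFloat64(math.NaN())
        fmt.Println(nan1.Equal(nan2)) // true

        // Enum Values carry no enum descriptor, so only the number is compared.
        e1 := protoreflect.ValueOfEnum(protoreflect.EnumNumber(1))
        e2 := protoreflect.ValueOfEnum(protoreflect.EnumNumber(1))
        fmt.Println(e1.Equal(e2)) // true, even if the numbers came from different enum types
    }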
func (v Value) String() string { switch v.typ { @@ -327,7 +327,7 @@ func (v Value) Bytes() []byte { } } -// Enum returns v as a EnumNumber and panics if the type is not a EnumNumber. +// Enum returns v as an [EnumNumber] and panics if the type is not an [EnumNumber]. func (v Value) Enum() EnumNumber { switch v.typ { case enumType: @@ -337,7 +337,7 @@ func (v Value) Enum() EnumNumber { } } -// Message returns v as a Message and panics if the type is not a Message. +// Message returns v as a [Message] and panics if the type is not a [Message]. func (v Value) Message() Message { switch vi := v.getIface().(type) { case Message: @@ -347,7 +347,7 @@ func (v Value) Message() Message { } } -// List returns v as a List and panics if the type is not a List. +// List returns v as a [List] and panics if the type is not a [List]. func (v Value) List() List { switch vi := v.getIface().(type) { case List: @@ -357,7 +357,7 @@ func (v Value) List() List { } } -// Map returns v as a Map and panics if the type is not a Map. +// Map returns v as a [Map] and panics if the type is not a [Map]. func (v Value) Map() Map { switch vi := v.getIface().(type) { case Map: @@ -367,7 +367,7 @@ func (v Value) Map() Map { } } -// MapKey returns v as a MapKey and panics for invalid MapKey types. +// MapKey returns v as a [MapKey] and panics for invalid [MapKey] types. func (v Value) MapKey() MapKey { switch v.typ { case boolType, int32Type, int64Type, uint32Type, uint64Type, stringType: @@ -378,8 +378,8 @@ } // MapKey is used to index maps, where the Go type of the MapKey must match -// the specified key Kind (see MessageDescriptor.IsMapEntry). -// The following shows what Go type is used to represent each proto Kind: +// the specified key [Kind] (see [MessageDescriptor.IsMapEntry]). +// The following shows what Go type is used to represent each proto [Kind]: // // ╔═════════╤═════════════════════════════════════╗ // ║ Go type │ Protobuf kind ║ @@ -392,13 +392,13 @@ // ║ string │ StringKind ║ // ╚═════════╧═════════════════════════════════════╝ // -// A MapKey is constructed and accessed through a Value: +// A MapKey is constructed and accessed through a [Value]: // // k := ValueOf("hash").MapKey() // convert string to MapKey // s := k.String() // convert MapKey to string // -// The MapKey is a strict subset of valid types used in Value; -// converting a Value to a MapKey with an invalid type panics. +// The MapKey is a strict subset of valid types used in [Value]; +// converting a [Value] to a MapKey with an invalid type panics. type MapKey value // IsValid reports whether k is populated with a value. @@ -426,13 +426,13 @@ func (k MapKey) Uint() uint64 { return Value(k).Uint() } -// String returns k as a string. Since this method implements fmt.Stringer, +// String returns k as a string. Since this method implements [fmt.Stringer], // this returns the formatted string value for any non-string type. func (k MapKey) String() string { return Value(k).String() } -// Value returns k as a Value. +// Value returns k as a [Value].
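A small sketch (not from this patch) of the MapKey round-trip described above; the commented-out line shows the panicking case for a kind that is not a valid map key:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/reflect/protoreflect"
    )

    func main() {
        // A MapKey is constructed from a Value and converts back via Value().
        k := protoreflect.ValueOfString("hash").MapKey()
        fmt.Println(k.String())          // hash
        fmt.Println(k.Value().IsValid()) // true

        // Float64 is not among the valid map key kinds, so this would panic:
        // protoreflect.ValueOfFloat64(1.5).MapKey()
    }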
func (k MapKey) Value() Value { return Value(k) } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go similarity index 97% rename from vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go rename to vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go index 702ddf22..b1fdbe3e 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine -// +build !purego,!appengine +//go:build !purego && !appengine && !go1.21 +// +build !purego,!appengine,!go1.21 package protoreflect diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go new file mode 100644 index 00000000..43547011 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go @@ -0,0 +1,87 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !purego && !appengine && go1.21 +// +build !purego,!appengine,go1.21 + +package protoreflect + +import ( + "unsafe" + + "google.golang.org/protobuf/internal/pragma" +) + +type ( + ifaceHeader struct { + _ [0]interface{} // if interfaces have greater alignment than unsafe.Pointer, this will enforce it. + Type unsafe.Pointer + Data unsafe.Pointer + } +) + +var ( + nilType = typeOf(nil) + boolType = typeOf(*new(bool)) + int32Type = typeOf(*new(int32)) + int64Type = typeOf(*new(int64)) + uint32Type = typeOf(*new(uint32)) + uint64Type = typeOf(*new(uint64)) + float32Type = typeOf(*new(float32)) + float64Type = typeOf(*new(float64)) + stringType = typeOf(*new(string)) + bytesType = typeOf(*new([]byte)) + enumType = typeOf(*new(EnumNumber)) +) + +// typeOf returns a pointer to the Go type information. +// The pointer is comparable and equal if and only if the types are identical. +func typeOf(t interface{}) unsafe.Pointer { + return (*ifaceHeader)(unsafe.Pointer(&t)).Type +} + +// value is a union where only one type can be represented at a time. +// The struct is 24B large on 64-bit systems and requires the minimum storage +// necessary to represent each possible type. +// +// The Go GC needs to be able to scan variables containing pointers. +// As such, pointers and non-pointers cannot be intermixed. +type value struct { + pragma.DoNotCompare // 0B + + // typ stores the type of the value as a pointer to the Go type. + typ unsafe.Pointer // 8B + + // ptr stores the data pointer for a String, Bytes, or interface value. + ptr unsafe.Pointer // 8B + + // num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or + // Enum value as a raw uint64. + // + // It is also used to store the length of a String or Bytes value; + // the capacity is ignored. 
+ num uint64 // 8B +} + +func valueOfString(v string) Value { + return Value{typ: stringType, ptr: unsafe.Pointer(unsafe.StringData(v)), num: uint64(len(v))} +} +func valueOfBytes(v []byte) Value { + return Value{typ: bytesType, ptr: unsafe.Pointer(unsafe.SliceData(v)), num: uint64(len(v))} +} +func valueOfIface(v interface{}) Value { + p := (*ifaceHeader)(unsafe.Pointer(&v)) + return Value{typ: p.Type, ptr: p.Data} +} + +func (v Value) getString() string { + return unsafe.String((*byte)(v.ptr), v.num) +} +func (v Value) getBytes() []byte { + return unsafe.Slice((*byte)(v.ptr), v.num) +} +func (v Value) getIface() (x interface{}) { + *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} + return x +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go index aeb55977..6267dc52 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go +++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go @@ -5,12 +5,12 @@ // Package protoregistry provides data structures to register and look up // protobuf descriptor types. // -// The Files registry contains file descriptors and provides the ability +// The [Files] registry contains file descriptors and provides the ability // to iterate over the files or look up a specific descriptor within the files. -// Files only contains protobuf descriptors and has no understanding of Go +// [Files] only contains protobuf descriptors and has no understanding of Go // type information that may be associated with each descriptor. // -// The Types registry contains descriptor types for which there is a known +// The [Types] registry contains descriptor types for which there is a known // Go type associated with that descriptor. It provides the ability to iterate // over the registered types or look up a type by name. package protoregistry @@ -218,7 +218,7 @@ func (r *Files) checkGenProtoConflict(path string) { // FindDescriptorByName looks up a descriptor by the full name. // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Files) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) { if r == nil { return nil, NotFound @@ -310,7 +310,7 @@ func (s *nameSuffix) Pop() (name protoreflect.Name) { // FindFileByPath looks up a file by the path. // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. // This returns an error if multiple files have the same path. func (r *Files) FindFileByPath(path string) (protoreflect.FileDescriptor, error) { if r == nil { @@ -431,7 +431,7 @@ func rangeTopLevelDescriptors(fd protoreflect.FileDescriptor, f func(protoreflec // A compliant implementation must deterministically return the same type // if no error is encountered. // -// The Types type implements this interface. +// The [Types] type implements this interface. type MessageTypeResolver interface { // FindMessageByName looks up a message by its full name. // E.g., "google.protobuf.Any" @@ -451,7 +451,7 @@ type MessageTypeResolver interface { // A compliant implementation must deterministically return the same type // if no error is encountered. // -// The Types type implements this interface. +// The [Types] type implements this interface. type ExtensionTypeResolver interface { // FindExtensionByName looks up an extension field by the field's full name.
// Note that this is the full name of the field as determined by @@ -590,7 +590,7 @@ func (r *Types) register(kind string, desc protoreflect.Descriptor, typ interfac // FindEnumByName looks up an enum by its full name. // E.g., "google.protobuf.Field.Kind". // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Types) FindEnumByName(enum protoreflect.FullName) (protoreflect.EnumType, error) { if r == nil { return nil, NotFound @@ -611,7 +611,7 @@ func (r *Types) FindEnumByName(enum protoreflect.FullName) (protoreflect.EnumTyp // FindMessageByName looks up a message by its full name, // e.g. "google.protobuf.Any". // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { if r == nil { return nil, NotFound @@ -632,7 +632,7 @@ func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.M // FindMessageByURL looks up a message by a URL identifier. // See documentation on google.protobuf.Any.type_url for the URL format. // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) { // This function is similar to FindMessageByName but // truncates anything before and including '/' in the URL. @@ -662,7 +662,7 @@ func (r *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) { // where the extension is declared and is unrelated to the full name of the // message being extended. // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Types) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { if r == nil { return nil, NotFound @@ -703,7 +703,7 @@ func (r *Types) FindExtensionByName(field protoreflect.E // FindExtensionByNumber looks up an extension field by the field number // within some parent message, identified by full name. // -// This returns (nil, NotFound) if not found. +// This returns (nil, [NotFound]) if not found. func (r *Types) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { if r == nil { return nil, NotFound diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index 04c00f73..38daa858 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -48,6 +48,94 @@ import ( sync "sync" ) +// The full set of known editions. +type Edition int32 + +const ( + // A placeholder for an unknown edition value. + Edition_EDITION_UNKNOWN Edition = 0 + // Legacy syntax "editions". These pre-date editions, but behave much like + // distinct editions. These can't be used to specify the edition of proto + // files, but feature definitions must supply proto2/proto3 defaults for + // backwards compatibility. + Edition_EDITION_PROTO2 Edition = 998 + Edition_EDITION_PROTO3 Edition = 999 + // Editions that have been released. The specific values are arbitrary and + // should not be depended on, but they will always be time-ordered for easy + // comparison. + Edition_EDITION_2023 Edition = 1000 + // Placeholder editions for testing feature resolution.
These should not be + // used or relied on outside of tests. + Edition_EDITION_1_TEST_ONLY Edition = 1 + Edition_EDITION_2_TEST_ONLY Edition = 2 + Edition_EDITION_99997_TEST_ONLY Edition = 99997 + Edition_EDITION_99998_TEST_ONLY Edition = 99998 + Edition_EDITION_99999_TEST_ONLY Edition = 99999 +) + +// Enum value maps for Edition. +var ( + Edition_name = map[int32]string{ + 0: "EDITION_UNKNOWN", + 998: "EDITION_PROTO2", + 999: "EDITION_PROTO3", + 1000: "EDITION_2023", + 1: "EDITION_1_TEST_ONLY", + 2: "EDITION_2_TEST_ONLY", + 99997: "EDITION_99997_TEST_ONLY", + 99998: "EDITION_99998_TEST_ONLY", + 99999: "EDITION_99999_TEST_ONLY", + } + Edition_value = map[string]int32{ + "EDITION_UNKNOWN": 0, + "EDITION_PROTO2": 998, + "EDITION_PROTO3": 999, + "EDITION_2023": 1000, + "EDITION_1_TEST_ONLY": 1, + "EDITION_2_TEST_ONLY": 2, + "EDITION_99997_TEST_ONLY": 99997, + "EDITION_99998_TEST_ONLY": 99998, + "EDITION_99999_TEST_ONLY": 99999, + } +) + +func (x Edition) Enum() *Edition { + p := new(Edition) + *p = x + return p +} + +func (x Edition) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Edition) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor() +} + +func (Edition) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[0] +} + +func (x Edition) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *Edition) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = Edition(num) + return nil +} + +// Deprecated: Use Edition.Descriptor instead. +func (Edition) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{0} +} + // The verification state of the extension range. type ExtensionRangeOptions_VerificationState int32 @@ -80,11 +168,11 @@ func (x ExtensionRangeOptions_VerificationState) String() string { } func (ExtensionRangeOptions_VerificationState) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() } func (ExtensionRangeOptions_VerificationState) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[0] + return &file_google_protobuf_descriptor_proto_enumTypes[1] } func (x ExtensionRangeOptions_VerificationState) Number() protoreflect.EnumNumber { @@ -125,9 +213,10 @@ const ( FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 // Tag-delimited aggregate. - // Group type is deprecated and not supported in proto3. However, Proto3 + // Group type is deprecated and not supported after proto2. However, Proto3 // implementations should still be able to parse the group wire format and - // treat group fields as unknown fields. + // treat group fields as unknown fields. In Editions, the group wire format + // can be enabled via the `message_encoding` feature. FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 // Length-delimited aggregate. // New in version 2.
@@ -195,11 +284,11 @@ func (x FieldDescriptorProto_Type) String() string { } func (FieldDescriptorProto_Type) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() } func (FieldDescriptorProto_Type) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[1] + return &file_google_protobuf_descriptor_proto_enumTypes[2] } func (x FieldDescriptorProto_Type) Number() protoreflect.EnumNumber { @@ -226,21 +315,24 @@ type FieldDescriptorProto_Label int32 const ( // 0 is reserved for errors FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 - FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 + // The required label is only allowed in proto2. In proto3 and Editions + // it's explicitly prohibited. In Editions, the `field_presence` feature + // can be used to get this behavior. + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 ) // Enum value maps for FieldDescriptorProto_Label. var ( FieldDescriptorProto_Label_name = map[int32]string{ 1: "LABEL_OPTIONAL", - 2: "LABEL_REQUIRED", 3: "LABEL_REPEATED", + 2: "LABEL_REQUIRED", } FieldDescriptorProto_Label_value = map[string]int32{ "LABEL_OPTIONAL": 1, - "LABEL_REQUIRED": 2, "LABEL_REPEATED": 3, + "LABEL_REQUIRED": 2, } ) @@ -255,11 +347,11 @@ func (x FieldDescriptorProto_Label) String() string { } func (FieldDescriptorProto_Label) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() } func (FieldDescriptorProto_Label) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[2] + return &file_google_protobuf_descriptor_proto_enumTypes[3] } func (x FieldDescriptorProto_Label) Number() protoreflect.EnumNumber { @@ -316,11 +408,11 @@ func (x FileOptions_OptimizeMode) String() string { } func (FileOptions_OptimizeMode) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() } func (FileOptions_OptimizeMode) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[3] + return &file_google_protobuf_descriptor_proto_enumTypes[4] } func (x FileOptions_OptimizeMode) Number() protoreflect.EnumNumber { @@ -382,11 +474,11 @@ func (x FieldOptions_CType) String() string { } func (FieldOptions_CType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() } func (FieldOptions_CType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[4] + return &file_google_protobuf_descriptor_proto_enumTypes[5] } func (x FieldOptions_CType) Number() protoreflect.EnumNumber { @@ -444,11 +536,11 @@ func (x FieldOptions_JSType) String() string { } func (FieldOptions_JSType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() } func (FieldOptions_JSType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[5] + return
&file_google_protobuf_descriptor_proto_enumTypes[6] } func (x FieldOptions_JSType) Number() protoreflect.EnumNumber { @@ -506,11 +598,11 @@ func (x FieldOptions_OptionRetention) String() string { } func (FieldOptions_OptionRetention) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() } func (FieldOptions_OptionRetention) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[6] + return &file_google_protobuf_descriptor_proto_enumTypes[7] } func (x FieldOptions_OptionRetention) Number() protoreflect.EnumNumber { @@ -590,11 +682,11 @@ func (x FieldOptions_OptionTargetType) String() string { } func (FieldOptions_OptionTargetType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() } func (FieldOptions_OptionTargetType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[7] + return &file_google_protobuf_descriptor_proto_enumTypes[8] } func (x FieldOptions_OptionTargetType) Number() protoreflect.EnumNumber { @@ -652,11 +744,11 @@ func (x MethodOptions_IdempotencyLevel) String() string { } func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor() } func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[8] + return &file_google_protobuf_descriptor_proto_enumTypes[9] } func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber { @@ -678,6 +770,363 @@ func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17, 0} } +type FeatureSet_FieldPresence int32 + +const ( + FeatureSet_FIELD_PRESENCE_UNKNOWN FeatureSet_FieldPresence = 0 + FeatureSet_EXPLICIT FeatureSet_FieldPresence = 1 + FeatureSet_IMPLICIT FeatureSet_FieldPresence = 2 + FeatureSet_LEGACY_REQUIRED FeatureSet_FieldPresence = 3 +) + +// Enum value maps for FeatureSet_FieldPresence. +var ( + FeatureSet_FieldPresence_name = map[int32]string{ + 0: "FIELD_PRESENCE_UNKNOWN", + 1: "EXPLICIT", + 2: "IMPLICIT", + 3: "LEGACY_REQUIRED", + } + FeatureSet_FieldPresence_value = map[string]int32{ + "FIELD_PRESENCE_UNKNOWN": 0, + "EXPLICIT": 1, + "IMPLICIT": 2, + "LEGACY_REQUIRED": 3, + } +) + +func (x FeatureSet_FieldPresence) Enum() *FeatureSet_FieldPresence { + p := new(FeatureSet_FieldPresence) + *p = x + return p +} + +func (x FeatureSet_FieldPresence) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_FieldPresence) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor() +} + +func (FeatureSet_FieldPresence) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[10] +} + +func (x FeatureSet_FieldPresence) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. 
+func (x *FeatureSet_FieldPresence) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_FieldPresence(num) + return nil +} + +// Deprecated: Use FeatureSet_FieldPresence.Descriptor instead. +func (FeatureSet_FieldPresence) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0} +} + +type FeatureSet_EnumType int32 + +const ( + FeatureSet_ENUM_TYPE_UNKNOWN FeatureSet_EnumType = 0 + FeatureSet_OPEN FeatureSet_EnumType = 1 + FeatureSet_CLOSED FeatureSet_EnumType = 2 +) + +// Enum value maps for FeatureSet_EnumType. +var ( + FeatureSet_EnumType_name = map[int32]string{ + 0: "ENUM_TYPE_UNKNOWN", + 1: "OPEN", + 2: "CLOSED", + } + FeatureSet_EnumType_value = map[string]int32{ + "ENUM_TYPE_UNKNOWN": 0, + "OPEN": 1, + "CLOSED": 2, + } +) + +func (x FeatureSet_EnumType) Enum() *FeatureSet_EnumType { + p := new(FeatureSet_EnumType) + *p = x + return p +} + +func (x FeatureSet_EnumType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_EnumType) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor() +} + +func (FeatureSet_EnumType) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[11] +} + +func (x FeatureSet_EnumType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FeatureSet_EnumType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_EnumType(num) + return nil +} + +// Deprecated: Use FeatureSet_EnumType.Descriptor instead. +func (FeatureSet_EnumType) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 1} +} + +type FeatureSet_RepeatedFieldEncoding int32 + +const ( + FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN FeatureSet_RepeatedFieldEncoding = 0 + FeatureSet_PACKED FeatureSet_RepeatedFieldEncoding = 1 + FeatureSet_EXPANDED FeatureSet_RepeatedFieldEncoding = 2 +) + +// Enum value maps for FeatureSet_RepeatedFieldEncoding. +var ( + FeatureSet_RepeatedFieldEncoding_name = map[int32]string{ + 0: "REPEATED_FIELD_ENCODING_UNKNOWN", + 1: "PACKED", + 2: "EXPANDED", + } + FeatureSet_RepeatedFieldEncoding_value = map[string]int32{ + "REPEATED_FIELD_ENCODING_UNKNOWN": 0, + "PACKED": 1, + "EXPANDED": 2, + } +) + +func (x FeatureSet_RepeatedFieldEncoding) Enum() *FeatureSet_RepeatedFieldEncoding { + p := new(FeatureSet_RepeatedFieldEncoding) + *p = x + return p +} + +func (x FeatureSet_RepeatedFieldEncoding) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_RepeatedFieldEncoding) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor() +} + +func (FeatureSet_RepeatedFieldEncoding) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[12] +} + +func (x FeatureSet_RepeatedFieldEncoding) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. 
+func (x *FeatureSet_RepeatedFieldEncoding) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_RepeatedFieldEncoding(num) + return nil +} + +// Deprecated: Use FeatureSet_RepeatedFieldEncoding.Descriptor instead. +func (FeatureSet_RepeatedFieldEncoding) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 2} +} + +type FeatureSet_Utf8Validation int32 + +const ( + FeatureSet_UTF8_VALIDATION_UNKNOWN FeatureSet_Utf8Validation = 0 + FeatureSet_NONE FeatureSet_Utf8Validation = 1 + FeatureSet_VERIFY FeatureSet_Utf8Validation = 2 +) + +// Enum value maps for FeatureSet_Utf8Validation. +var ( + FeatureSet_Utf8Validation_name = map[int32]string{ + 0: "UTF8_VALIDATION_UNKNOWN", + 1: "NONE", + 2: "VERIFY", + } + FeatureSet_Utf8Validation_value = map[string]int32{ + "UTF8_VALIDATION_UNKNOWN": 0, + "NONE": 1, + "VERIFY": 2, + } +) + +func (x FeatureSet_Utf8Validation) Enum() *FeatureSet_Utf8Validation { + p := new(FeatureSet_Utf8Validation) + *p = x + return p +} + +func (x FeatureSet_Utf8Validation) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_Utf8Validation) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor() +} + +func (FeatureSet_Utf8Validation) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[13] +} + +func (x FeatureSet_Utf8Validation) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FeatureSet_Utf8Validation) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_Utf8Validation(num) + return nil +} + +// Deprecated: Use FeatureSet_Utf8Validation.Descriptor instead. +func (FeatureSet_Utf8Validation) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 3} +} + +type FeatureSet_MessageEncoding int32 + +const ( + FeatureSet_MESSAGE_ENCODING_UNKNOWN FeatureSet_MessageEncoding = 0 + FeatureSet_LENGTH_PREFIXED FeatureSet_MessageEncoding = 1 + FeatureSet_DELIMITED FeatureSet_MessageEncoding = 2 +) + +// Enum value maps for FeatureSet_MessageEncoding. +var ( + FeatureSet_MessageEncoding_name = map[int32]string{ + 0: "MESSAGE_ENCODING_UNKNOWN", + 1: "LENGTH_PREFIXED", + 2: "DELIMITED", + } + FeatureSet_MessageEncoding_value = map[string]int32{ + "MESSAGE_ENCODING_UNKNOWN": 0, + "LENGTH_PREFIXED": 1, + "DELIMITED": 2, + } +) + +func (x FeatureSet_MessageEncoding) Enum() *FeatureSet_MessageEncoding { + p := new(FeatureSet_MessageEncoding) + *p = x + return p +} + +func (x FeatureSet_MessageEncoding) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_MessageEncoding) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor() +} + +func (FeatureSet_MessageEncoding) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[14] +} + +func (x FeatureSet_MessageEncoding) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. 
+func (x *FeatureSet_MessageEncoding) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_MessageEncoding(num) + return nil +} + +// Deprecated: Use FeatureSet_MessageEncoding.Descriptor instead. +func (FeatureSet_MessageEncoding) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 4} +} + +type FeatureSet_JsonFormat int32 + +const ( + FeatureSet_JSON_FORMAT_UNKNOWN FeatureSet_JsonFormat = 0 + FeatureSet_ALLOW FeatureSet_JsonFormat = 1 + FeatureSet_LEGACY_BEST_EFFORT FeatureSet_JsonFormat = 2 +) + +// Enum value maps for FeatureSet_JsonFormat. +var ( + FeatureSet_JsonFormat_name = map[int32]string{ + 0: "JSON_FORMAT_UNKNOWN", + 1: "ALLOW", + 2: "LEGACY_BEST_EFFORT", + } + FeatureSet_JsonFormat_value = map[string]int32{ + "JSON_FORMAT_UNKNOWN": 0, + "ALLOW": 1, + "LEGACY_BEST_EFFORT": 2, + } +) + +func (x FeatureSet_JsonFormat) Enum() *FeatureSet_JsonFormat { + p := new(FeatureSet_JsonFormat) + *p = x + return p +} + +func (x FeatureSet_JsonFormat) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_JsonFormat) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor() +} + +func (FeatureSet_JsonFormat) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[15] +} + +func (x FeatureSet_JsonFormat) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FeatureSet_JsonFormat) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_JsonFormat(num) + return nil +} + +// Deprecated: Use FeatureSet_JsonFormat.Descriptor instead. +func (FeatureSet_JsonFormat) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 5} +} + // Represents the identified object's effect on the element in the original // .proto file. type GeneratedCodeInfo_Annotation_Semantic int32 @@ -716,11 +1165,11 @@ func (x GeneratedCodeInfo_Annotation_Semantic) String() string { } func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor() } func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[9] + return &file_google_protobuf_descriptor_proto_enumTypes[16] } func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber { @@ -739,7 +1188,7 @@ func (x *GeneratedCodeInfo_Annotation_Semantic) UnmarshalJSON(b []byte) error { // Deprecated: Use GeneratedCodeInfo_Annotation_Semantic.Descriptor instead. func (GeneratedCodeInfo_Annotation_Semantic) EnumDescriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0, 0} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22, 0, 0} } // The protocol compiler can output a FileDescriptorSet containing the .proto @@ -822,8 +1271,8 @@ type FileDescriptorProto struct { // // If `edition` is present, this value must be "editions". Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` - // The edition of the proto file, which is an opaque string. 
- Edition *string `protobuf:"bytes,13,opt,name=edition" json:"edition,omitempty"` + // The edition of the proto file. + Edition *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` } func (x *FileDescriptorProto) Reset() { @@ -942,11 +1391,11 @@ func (x *FileDescriptorProto) GetSyntax() string { return "" } -func (x *FileDescriptorProto) GetEdition() string { +func (x *FileDescriptorProto) GetEdition() Edition { if x != nil && x.Edition != nil { return *x.Edition } - return "" + return Edition_EDITION_UNKNOWN } // Describes a message type. @@ -1079,13 +1528,14 @@ type ExtensionRangeOptions struct { // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - // go/protobuf-stripping-extension-declarations - // Like Metadata, but we use a repeated field to hold all extension - // declarations. This should avoid the size increases of transforming a large - // extension range into small ranges in generated binaries. + // For external users: DO NOT USE. We are in the process of open sourcing + // extension declaration and executing internal cleanups before it can be + // used externally. Declaration []*ExtensionRangeOptions_Declaration `protobuf:"bytes,2,rep,name=declaration" json:"declaration,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"` // The verification state of the range. - // TODO(b/278783756): flip the default to DECLARATION once all empty ranges + // TODO: flip the default to DECLARATION once all empty ranges // are marked as UNVERIFIED. Verification *ExtensionRangeOptions_VerificationState `protobuf:"varint,3,opt,name=verification,enum=google.protobuf.ExtensionRangeOptions_VerificationState,def=1" json:"verification,omitempty"` } @@ -1141,6 +1591,13 @@ func (x *ExtensionRangeOptions) GetDeclaration() []*ExtensionRangeOptions_Declar return nil } +func (x *ExtensionRangeOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *ExtensionRangeOptions) GetVerification() ExtensionRangeOptions_VerificationState { if x != nil && x.Verification != nil { return *x.Verification @@ -1772,6 +2229,8 @@ type FileOptions struct { // is empty. When this option is not set, the package name will be used for // determining the ruby package. RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. // See the documentation for the "Options" section above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` @@ -1963,6 +2422,13 @@ func (x *FileOptions) GetRubyPackage() string { return "" } +func (x *FileOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *FileOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2039,11 +2505,13 @@ type MessageOptions struct { // This should only be used as a temporary measure against broken builds due // to the change in behavior for JSON field name conflicts. 
// - // TODO(b/261750190) This is legacy behavior we plan to remove once downstream + // TODO This is legacy behavior we plan to remove once downstream // teams have had time to migrate. // // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,11,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,12,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2123,6 +2591,13 @@ func (x *MessageOptions) GetDeprecatedLegacyJsonFieldConflicts() bool { return false } +func (x *MessageOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2147,7 +2622,9 @@ type FieldOptions struct { // a more efficient representation on the wire. Rather than repeatedly // writing the tag and type for each element, the entire array is encoded as // a single length-delimited blob. In proto3, only explicit setting it to - // false will avoid using packed encoding. + // false will avoid using packed encoding. This option is prohibited in + // Editions, but the `repeated_field_encoding` feature can be used to control + // the behavior. Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` // The jstype option determines the JavaScript type used for values of the // field. The option is permitted only for 64 bit integral and fixed types @@ -2205,11 +2682,12 @@ type FieldOptions struct { Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` // Indicate that the field value should not be printed out when using debug // formats, e.g. when the field contains sensitive credentials. - DebugRedact *bool `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` - Retention *FieldOptions_OptionRetention `protobuf:"varint,17,opt,name=retention,enum=google.protobuf.FieldOptions_OptionRetention" json:"retention,omitempty"` - // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. - Target *FieldOptions_OptionTargetType `protobuf:"varint,18,opt,name=target,enum=google.protobuf.FieldOptions_OptionTargetType" json:"target,omitempty"` - Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"` + DebugRedact *bool `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` + Retention *FieldOptions_OptionRetention `protobuf:"varint,17,opt,name=retention,enum=google.protobuf.FieldOptions_OptionRetention" json:"retention,omitempty"` + Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"` + EditionDefaults []*FieldOptions_EditionDefault `protobuf:"bytes,20,rep,name=edition_defaults,json=editionDefaults" json:"edition_defaults,omitempty"` + // Any features defined in the specific edition. 
+ Features *FeatureSet `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2320,17 +2798,23 @@ func (x *FieldOptions) GetRetention() FieldOptions_OptionRetention { return FieldOptions_RETENTION_UNKNOWN } -// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. -func (x *FieldOptions) GetTarget() FieldOptions_OptionTargetType { - if x != nil && x.Target != nil { - return *x.Target +func (x *FieldOptions) GetTargets() []FieldOptions_OptionTargetType { + if x != nil { + return x.Targets } - return FieldOptions_TARGET_TYPE_UNKNOWN + return nil } -func (x *FieldOptions) GetTargets() []FieldOptions_OptionTargetType { +func (x *FieldOptions) GetEditionDefaults() []*FieldOptions_EditionDefault { if x != nil { - return x.Targets + return x.EditionDefaults + } + return nil +} + +func (x *FieldOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features } return nil } @@ -2348,6 +2832,8 @@ type OneofOptions struct { unknownFields protoimpl.UnknownFields extensionFields protoimpl.ExtensionFields + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,1,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2384,6 +2870,13 @@ func (*OneofOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{13} } +func (x *OneofOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2409,11 +2902,13 @@ type EnumOptions struct { // and strips underscored from the fields before comparison in proto3 only. // The new behavior takes `json_name` into account and applies to proto2 as // well. - // TODO(b/261750190) Remove this legacy behavior once downstream teams have + // TODO Remove this legacy behavior once downstream teams have // had time to migrate. // // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,6,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,7,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2477,6 +2972,13 @@ func (x *EnumOptions) GetDeprecatedLegacyJsonFieldConflicts() bool { return false } +func (x *EnumOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2495,13 +2997,20 @@ type EnumValueOptions struct { // for the enum value, or it will be completely ignored; in the very least, // this is a formalization for deprecating enum values. Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"` + // Indicate that fields annotated with this enum value should not be printed + // out when using debug formats, e.g. when the field contains sensitive + // credentials. + DebugRedact *bool `protobuf:"varint,3,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } // Default values for EnumValueOptions fields. const ( - Default_EnumValueOptions_Deprecated = bool(false) + Default_EnumValueOptions_Deprecated = bool(false) + Default_EnumValueOptions_DebugRedact = bool(false) ) func (x *EnumValueOptions) Reset() { @@ -2543,6 +3052,20 @@ func (x *EnumValueOptions) GetDeprecated() bool { return Default_EnumValueOptions_Deprecated } +func (x *EnumValueOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + +func (x *EnumValueOptions) GetDebugRedact() bool { + if x != nil && x.DebugRedact != nil { + return *x.DebugRedact + } + return Default_EnumValueOptions_DebugRedact +} + func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2556,6 +3079,8 @@ type ServiceOptions struct { unknownFields protoimpl.UnknownFields extensionFields protoimpl.ExtensionFields + // Any features defined in the specific edition. + Features *FeatureSet `protobuf:"bytes,34,opt,name=features" json:"features,omitempty"` // Is this service deprecated? // Depending on the target platform, this can emit Deprecated annotations // for the service, or it will be completely ignored; in the very least, @@ -2602,6 +3127,13 @@ func (*ServiceOptions) Descriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{16} } +func (x *ServiceOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *ServiceOptions) GetDeprecated() bool { if x != nil && x.Deprecated != nil { return *x.Deprecated @@ -2628,6 +3160,8 @@ type MethodOptions struct { // this is a formalization for deprecating methods. Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` + // Any features defined in the specific edition. 
+ Features *FeatureSet `protobuf:"bytes,35,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2684,6 +3218,13 @@ func (x *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { return Default_MethodOptions_IdempotencyLevel } +func (x *MethodOptions) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + func (x *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2763,35 +3304,200 @@ func (x *UninterpretedOption) GetPositiveIntValue() uint64 { if x != nil && x.PositiveIntValue != nil { return *x.PositiveIntValue } - return 0 + return 0 +} + +func (x *UninterpretedOption) GetNegativeIntValue() int64 { + if x != nil && x.NegativeIntValue != nil { + return *x.NegativeIntValue + } + return 0 +} + +func (x *UninterpretedOption) GetDoubleValue() float64 { + if x != nil && x.DoubleValue != nil { + return *x.DoubleValue + } + return 0 +} + +func (x *UninterpretedOption) GetStringValue() []byte { + if x != nil { + return x.StringValue + } + return nil +} + +func (x *UninterpretedOption) GetAggregateValue() string { + if x != nil && x.AggregateValue != nil { + return *x.AggregateValue + } + return "" +} + +// TODO Enums in C++ gencode (and potentially other languages) are +// not well scoped. This means that each of the feature enums below can clash +// with each other. The short names we've chosen maximize call-site +// readability, but leave us very open to this scenario. A future feature will +// be designed and implemented to handle this, hopefully before we ever hit a +// conflict here. 
+type FeatureSet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + FieldPresence *FeatureSet_FieldPresence `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"` + EnumType *FeatureSet_EnumType `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"` + RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"` + Utf8Validation *FeatureSet_Utf8Validation `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"` + MessageEncoding *FeatureSet_MessageEncoding `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"` + JsonFormat *FeatureSet_JsonFormat `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"` +} + +func (x *FeatureSet) Reset() { + *x = FeatureSet{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FeatureSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FeatureSet) ProtoMessage() {} + +func (x *FeatureSet) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FeatureSet.ProtoReflect.Descriptor instead. +func (*FeatureSet) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19} +} + +func (x *FeatureSet) GetFieldPresence() FeatureSet_FieldPresence { + if x != nil && x.FieldPresence != nil { + return *x.FieldPresence + } + return FeatureSet_FIELD_PRESENCE_UNKNOWN +} + +func (x *FeatureSet) GetEnumType() FeatureSet_EnumType { + if x != nil && x.EnumType != nil { + return *x.EnumType + } + return FeatureSet_ENUM_TYPE_UNKNOWN +} + +func (x *FeatureSet) GetRepeatedFieldEncoding() FeatureSet_RepeatedFieldEncoding { + if x != nil && x.RepeatedFieldEncoding != nil { + return *x.RepeatedFieldEncoding + } + return FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN +} + +func (x *FeatureSet) GetUtf8Validation() FeatureSet_Utf8Validation { + if x != nil && x.Utf8Validation != nil { + return *x.Utf8Validation + } + return FeatureSet_UTF8_VALIDATION_UNKNOWN +} + +func (x *FeatureSet) GetMessageEncoding() FeatureSet_MessageEncoding { + if x != nil && x.MessageEncoding != nil { + return *x.MessageEncoding + } + return FeatureSet_MESSAGE_ENCODING_UNKNOWN +} + +func (x *FeatureSet) GetJsonFormat() FeatureSet_JsonFormat { + if x != nil && x.JsonFormat != nil { + return *x.JsonFormat + } + return FeatureSet_JSON_FORMAT_UNKNOWN +} + +// A compiled specification for the defaults of a set of features. These +// messages are generated from FeatureSet extensions and can be used to seed +// feature resolution. 
The resolution with this object becomes a simple search +// for the closest matching edition, followed by proto merges. +type FeatureSetDefaults struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Defaults []*FeatureSetDefaults_FeatureSetEditionDefault `protobuf:"bytes,1,rep,name=defaults" json:"defaults,omitempty"` + // The minimum supported edition (inclusive) when this was constructed. + // Editions before this will not have defaults. + MinimumEdition *Edition `protobuf:"varint,4,opt,name=minimum_edition,json=minimumEdition,enum=google.protobuf.Edition" json:"minimum_edition,omitempty"` + // The maximum known edition (inclusive) when this was constructed. Editions + // after this will not have reliable defaults. + MaximumEdition *Edition `protobuf:"varint,5,opt,name=maximum_edition,json=maximumEdition,enum=google.protobuf.Edition" json:"maximum_edition,omitempty"` +} + +func (x *FeatureSetDefaults) Reset() { + *x = FeatureSetDefaults{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (x *UninterpretedOption) GetNegativeIntValue() int64 { - if x != nil && x.NegativeIntValue != nil { - return *x.NegativeIntValue - } - return 0 +func (x *FeatureSetDefaults) String() string { + return protoimpl.X.MessageStringOf(x) } -func (x *UninterpretedOption) GetDoubleValue() float64 { - if x != nil && x.DoubleValue != nil { - return *x.DoubleValue +func (*FeatureSetDefaults) ProtoMessage() {} + +func (x *FeatureSetDefaults) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return 0 + return mi.MessageOf(x) } -func (x *UninterpretedOption) GetStringValue() []byte { +// Deprecated: Use FeatureSetDefaults.ProtoReflect.Descriptor instead. 
+func (*FeatureSetDefaults) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20} +} + +func (x *FeatureSetDefaults) GetDefaults() []*FeatureSetDefaults_FeatureSetEditionDefault { if x != nil { - return x.StringValue + return x.Defaults } return nil } -func (x *UninterpretedOption) GetAggregateValue() string { - if x != nil && x.AggregateValue != nil { - return *x.AggregateValue +func (x *FeatureSetDefaults) GetMinimumEdition() Edition { + if x != nil && x.MinimumEdition != nil { + return *x.MinimumEdition } - return "" + return Edition_EDITION_UNKNOWN +} + +func (x *FeatureSetDefaults) GetMaximumEdition() Edition { + if x != nil && x.MaximumEdition != nil { + return *x.MaximumEdition + } + return Edition_EDITION_UNKNOWN } // Encapsulates information about the original source file from which a @@ -2855,7 +3561,7 @@ type SourceCodeInfo struct { func (x *SourceCodeInfo) Reset() { *x = SourceCodeInfo{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2868,7 +3574,7 @@ func (x *SourceCodeInfo) String() string { func (*SourceCodeInfo) ProtoMessage() {} func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2881,7 +3587,7 @@ func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use SourceCodeInfo.ProtoReflect.Descriptor instead. func (*SourceCodeInfo) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{21} } func (x *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { @@ -2907,7 +3613,7 @@ type GeneratedCodeInfo struct { func (x *GeneratedCodeInfo) Reset() { *x = GeneratedCodeInfo{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2920,7 +3626,7 @@ func (x *GeneratedCodeInfo) String() string { func (*GeneratedCodeInfo) ProtoMessage() {} func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2933,7 +3639,7 @@ func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use GeneratedCodeInfo.ProtoReflect.Descriptor instead. 
func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22} } func (x *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { @@ -2956,7 +3662,7 @@ type DescriptorProto_ExtensionRange struct { func (x *DescriptorProto_ExtensionRange) Reset() { *x = DescriptorProto_ExtensionRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2969,7 +3675,7 @@ func (x *DescriptorProto_ExtensionRange) String() string { func (*DescriptorProto_ExtensionRange) ProtoMessage() {} func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3021,7 +3727,7 @@ type DescriptorProto_ReservedRange struct { func (x *DescriptorProto_ReservedRange) Reset() { *x = DescriptorProto_ReservedRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3034,7 +3740,7 @@ func (x *DescriptorProto_ReservedRange) String() string { func (*DescriptorProto_ReservedRange) ProtoMessage() {} func (x *DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3078,10 +3784,6 @@ type ExtensionRangeOptions_Declaration struct { // Metadata.type, Declaration.type must have a leading dot for messages // and enums. Type *string `protobuf:"bytes,3,opt,name=type" json:"type,omitempty"` - // Deprecated. Please use "repeated". - // - // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. - IsRepeated *bool `protobuf:"varint,4,opt,name=is_repeated,json=isRepeated" json:"is_repeated,omitempty"` // If true, indicates that the number is reserved in the extension range, // and any extension field with the number will fail to compile. Set this // when a declared extension field is deleted. 
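The hunks above introduce the editions model into the vendored descriptor.pb.go: FileDescriptorProto's edition moves from a string at field 13 to a typed Edition enum at field 14, each *Options message gains a `features` FeatureSet, and FeatureSetDefaults carries per-edition defaults that, per its doc comment, are resolved by "a simple search for the closest matching edition, followed by proto merges". Below is a minimal, illustrative sketch of that resolution against the getters added in this file — not the library's actual resolution logic. It assumes the google.golang.org/protobuf/proto helpers (Clone, Merge, String), the generated Enum() pointer helper that protoc-gen-go emits for every enum, and that the enum's numeric order tracks edition order, as the strict-ascending-order requirement on `defaults` suggests.

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

// resolveFeatures walks the (strictly ascending) defaults and keeps the last
// entry whose edition is at or before the target, then layers explicit
// overrides on top with proto.Merge — the "simple search ... followed by
// proto merges" described in the FeatureSetDefaults comment. It returns nil
// when the edition predates the minimum supported edition, i.e. no defaults
// entry matches.
func resolveFeatures(defs *descriptorpb.FeatureSetDefaults, edition descriptorpb.Edition, overrides *descriptorpb.FeatureSet) *descriptorpb.FeatureSet {
	var base *descriptorpb.FeatureSet
	for _, d := range defs.GetDefaults() {
		if d.GetEdition() > edition { // assumes numeric enum order tracks edition order
			break
		}
		base = d.GetFeatures()
	}
	if base == nil {
		return nil // edition is older than minimum_edition; no reliable defaults
	}
	// Clone before merging so the shared defaults are never mutated.
	resolved := proto.Clone(base).(*descriptorpb.FeatureSet)
	if overrides != nil {
		proto.Merge(resolved, overrides)
	}
	return resolved
}

func main() {
	// edition is now field 14 and a typed enum; the generated Enum() helper
	// returns the pointer that the optional field requires.
	fd := &descriptorpb.FileDescriptorProto{
		Name:    proto.String("example.proto"),
		Edition: descriptorpb.Edition_EDITION_UNKNOWN.Enum(),
	}
	// GetEdition defaults to Edition_EDITION_UNKNOWN when unset, where the
	// old string-typed getter returned "".
	fmt.Println(fd.GetEdition())
}

Note that proto.Merge only models the additive case: a FeatureSet override can replace inherited values but cannot unset them, which is consistent with every FeatureSet field being an optional enum with an explicit *_UNKNOWN zero value.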
@@ -3094,7 +3796,7 @@ type ExtensionRangeOptions_Declaration struct { func (x *ExtensionRangeOptions_Declaration) Reset() { *x = ExtensionRangeOptions_Declaration{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3107,7 +3809,7 @@ func (x *ExtensionRangeOptions_Declaration) String() string { func (*ExtensionRangeOptions_Declaration) ProtoMessage() {} func (x *ExtensionRangeOptions_Declaration) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3144,14 +3846,6 @@ func (x *ExtensionRangeOptions_Declaration) GetType() string { return "" } -// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. -func (x *ExtensionRangeOptions_Declaration) GetIsRepeated() bool { - if x != nil && x.IsRepeated != nil { - return *x.IsRepeated - } - return false -} - func (x *ExtensionRangeOptions_Declaration) GetReserved() bool { if x != nil && x.Reserved != nil { return *x.Reserved @@ -3184,7 +3878,7 @@ type EnumDescriptorProto_EnumReservedRange struct { func (x *EnumDescriptorProto_EnumReservedRange) Reset() { *x = EnumDescriptorProto_EnumReservedRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3197,7 +3891,7 @@ func (x *EnumDescriptorProto_EnumReservedRange) String() string { func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3227,6 +3921,61 @@ func (x *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { return 0 } +type FieldOptions_EditionDefault struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` // Textproto value. +} + +func (x *FieldOptions_EditionDefault) Reset() { + *x = FieldOptions_EditionDefault{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldOptions_EditionDefault) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldOptions_EditionDefault) ProtoMessage() {} + +func (x *FieldOptions_EditionDefault) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldOptions_EditionDefault.ProtoReflect.Descriptor instead. 
+func (*FieldOptions_EditionDefault) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 0} +} + +func (x *FieldOptions_EditionDefault) GetEdition() Edition { + if x != nil && x.Edition != nil { + return *x.Edition + } + return Edition_EDITION_UNKNOWN +} + +func (x *FieldOptions_EditionDefault) GetValue() string { + if x != nil && x.Value != nil { + return *x.Value + } + return "" +} + // The name of the uninterpreted option. Each string represents a segment in // a dot-separated name. is_extension is true iff a segment represents an // extension (denoted with parentheses in options specs in .proto files). @@ -3244,7 +3993,7 @@ type UninterpretedOption_NamePart struct { func (x *UninterpretedOption_NamePart) Reset() { *x = UninterpretedOption_NamePart{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + mi := &file_google_protobuf_descriptor_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3257,7 +4006,7 @@ func (x *UninterpretedOption_NamePart) String() string { func (*UninterpretedOption_NamePart) ProtoMessage() {} func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + mi := &file_google_protobuf_descriptor_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3287,6 +4036,65 @@ func (x *UninterpretedOption_NamePart) GetIsExtension() bool { return false } +// A map from every known edition with a unique set of defaults to its +// defaults. Not all editions may be contained here. For a given edition, +// the defaults at the closest matching edition ordered at or before it should +// be used. This field must be in strict ascending order by edition. +type FeatureSetDefaults_FeatureSetEditionDefault struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` + Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"` +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() { + *x = FeatureSetDefaults_FeatureSetEditionDefault{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FeatureSetDefaults_FeatureSetEditionDefault.ProtoReflect.Descriptor instead. 
+func (*FeatureSetDefaults_FeatureSetEditionDefault) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0} +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetEdition() Edition { + if x != nil && x.Edition != nil { + return *x.Edition + } + return Edition_EDITION_UNKNOWN +} + +func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetFeatures() *FeatureSet { + if x != nil { + return x.Features + } + return nil +} + type SourceCodeInfo_Location struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3388,7 +4196,7 @@ type SourceCodeInfo_Location struct { func (x *SourceCodeInfo_Location) Reset() { *x = SourceCodeInfo_Location{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3401,7 +4209,7 @@ func (x *SourceCodeInfo_Location) String() string { func (*SourceCodeInfo_Location) ProtoMessage() {} func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3414,7 +4222,7 @@ func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { // Deprecated: Use SourceCodeInfo_Location.ProtoReflect.Descriptor instead. func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{21, 0} } func (x *SourceCodeInfo_Location) GetPath() []int32 { @@ -3475,7 +4283,7 @@ type GeneratedCodeInfo_Annotation struct { func (x *GeneratedCodeInfo_Annotation) Reset() { *x = GeneratedCodeInfo_Annotation{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3488,7 +4296,7 @@ func (x *GeneratedCodeInfo_Annotation) String() string { func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3501,7 +4309,7 @@ func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { // Deprecated: Use GeneratedCodeInfo_Annotation.ProtoReflect.Descriptor instead. 
func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { - return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0} + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22, 0} } func (x *GeneratedCodeInfo_Annotation) GetPath() []int32 { @@ -3550,7 +4358,7 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69, - 0x6c, 0x65, 0x22, 0xfe, 0x04, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x6c, 0x65, 0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, @@ -3588,527 +4396,687 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, 0x0a, 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, - 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, - 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 
0x6d, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, + 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, + 0x0a, 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, + 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, - 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, - 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, - 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, - 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, + 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, + 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x45, 0x6e, 
0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, + 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, - 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, - 0xad, 0x04, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, - 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, - 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, - 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x68, - 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 
0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65, 0x72, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a, 0x0a, - 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xb3, 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, - 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, - 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, - 0x65, 0x12, 0x23, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0a, 0x69, 0x73, 0x52, 0x65, - 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x22, 0x34, - 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, - 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, - 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, - 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, - 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, - 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, - 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, - 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, - 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, - 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 
0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, - 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, - 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, - 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, - 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, - 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, - 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, - 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, - 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, - 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, - 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, - 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, - 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, - 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, - 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, - 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, - 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, - 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, - 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, - 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, - 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, - 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, - 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x12, - 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45, - 0x44, 0x10, 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 
0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, - 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, - 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, - 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, - 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, - 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, - 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, - 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, + 0x5f, 0x64, 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, + 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x07, 0x6f, 
0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, - 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, - 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, + 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xc7, 0x04, 0x0a, 0x15, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 
0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, + 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, + 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x12, 0x68, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x3a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x52, 0x0c, 0x76, + 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x94, 0x01, 0x0a, 0x0b, + 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4a, 0x04, 0x08, 0x04, + 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, 0x43, 0x4c, 0x41, + 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, 0x4e, 0x56, 0x45, + 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, + 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 
0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, + 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, + 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, + 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, + 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, + 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, + 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, + 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, + 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, + 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, + 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, + 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, + 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 
0x09, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, + 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, + 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, + 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, + 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, + 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, + 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45, + 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, + 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, + 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, + 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, + 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, + 0x61, 0x72, 
0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, + 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, - 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, - 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, - 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, - 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, + 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, + 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 
0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0xca, + 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, + 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, + 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, + 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, + 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, + 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, + 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, + 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, + 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, + 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, + 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, + 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 
0x72, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, + 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, + 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, + 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, + 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, + 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, + 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, + 0x70, 0x68, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, + 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, + 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, + 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, + 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, + 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, + 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, + 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 
0x0a, + 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, + 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91, 0x09, 0x0a, 0x0b, 0x46, - 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, - 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, - 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, - 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, - 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, - 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, - 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, - 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, - 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, - 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, - 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, - 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, - 0x6d, 0x69, 0x7a, 
0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, - 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, - 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, - 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, - 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, - 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, - 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x2a, 0x20, - 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, 0x70, 0x68, 0x70, 0x47, - 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, - 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, - 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, - 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, - 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, - 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, - 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, - 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, - 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, - 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, - 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, - 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, - 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 
0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, - 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, - 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, - 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, - 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, - 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, - 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, - 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, - 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xbb, - 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, - 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, - 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, - 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, - 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, - 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, - 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, - 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 
0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, - 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, - 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, - 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x85, 0x09, 0x0a, - 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, - 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, - 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, - 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, - 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, - 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, - 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, - 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, - 0x0f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, - 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, - 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, - 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, - 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, - 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, - 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, - 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 
0x6f, - 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, - 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, - 0x12, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x42, 0x02, 0x18, 0x01, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, - 0x74, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, - 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, - 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, - 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, - 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, - 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, - 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, - 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, - 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, - 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, - 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, - 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, - 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, - 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, - 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, - 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, - 0x10, 0x03, 0x12, 
0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, - 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, - 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, - 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, - 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, - 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, - 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, - 0x08, 0x04, 0x10, 0x05, 0x22, 0x73, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, - 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x98, 0x02, 0x0a, 0x0b, 0x45, 0x6e, - 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, - 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, - 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, + 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, + 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, + 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, + 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, + 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xf4, 0x03, 0x0a, 0x0e, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, + 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, + 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, + 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, + 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 
0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, + 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, - 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, - 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, - 0x08, 0x05, 0x10, 0x06, 0x22, 0x9e, 0x01, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, - 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, - 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9c, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, - 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, + 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, + 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, + 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, + 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 
0x64, 0x4c, 0x65, + 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, + 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, - 0x80, 0x80, 0x80, 0x02, 0x22, 0xe0, 0x02, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, - 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, - 0x11, 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, - 0x65, 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, - 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, - 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, - 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, - 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, - 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, - 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, - 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, - 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, - 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, - 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, + 0x80, 0x80, 0x80, 
0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, + 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, + 0x10, 0x0a, 0x22, 0xad, 0x0a, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, + 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, + 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, + 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, + 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, + 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, + 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, + 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, + 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, + 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, + 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, + 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, + 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, + 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, + 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x2f, 0x0a, + 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, + 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, + 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, + 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, + 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, + 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, + 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, + 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, + 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, + 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, + 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, + 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 
0x41, 0x4e, 0x47, 0x45, 0x10, + 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, + 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, + 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, + 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, + 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, + 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, + 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, + 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, + 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, + 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, + 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, + 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, + 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, + 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, + 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, + 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, + 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, + 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 
0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, + 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, + 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x81, 0x02, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, + 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, + 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, + 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, + 0x64, 0x61, 0x63, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, + 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, + 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, + 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x13, 
0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, + 0x02, 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, + 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, + 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, + 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, + 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, + 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, + 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, + 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, + 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, + 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, + 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, + 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, + 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, + 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, + 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, + 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 
0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, + 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, + 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, + 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, + 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, + 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, + 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, + 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, + 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, + 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, + 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xfc, 0x09, 0x0a, 0x0a, 0x46, + 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x8b, 0x01, 0x0a, 0x0e, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x39, 0x88, + 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, + 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, + 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, + 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0x52, 0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x50, + 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x66, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, + 0x42, 0x23, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0b, 0x12, 0x06, + 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4f, 0x50, + 0x45, 0x4e, 0x18, 0xe7, 0x07, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x92, 0x01, 0x0a, 0x17, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 
0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, + 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x42, 0x27, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, + 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, + 0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0x52, 0x15, 0x72, + 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x12, 0x78, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x23, 0x88, 0x01, 0x01, 0x98, 0x01, + 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0xe6, 0x07, + 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0x52, 0x0e, + 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x78, + 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, + 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, + 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x20, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, + 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, + 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7c, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, + 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, - 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, - 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, - 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, - 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, - 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 
0x52, - 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, - 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, - 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, - 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, - 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, - 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, - 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, - 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, - 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, - 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, - 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, - 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, - 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, - 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, - 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, - 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, - 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, - 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, - 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 
0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, - 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, - 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, - 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, - 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, - 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, - 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, - 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, - 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, - 0x02, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, - 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, + 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x33, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, + 0x98, 0x01, 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, + 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0a, + 0x12, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0x52, 0x0a, 0x6a, 0x73, 0x6f, 0x6e, + 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, + 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45, 0x4c, 0x44, + 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, + 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, 0x02, 0x12, + 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, + 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x10, + 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 
0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x02, 0x22, 0x56, 0x0a, + 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, + 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, + 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, + 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x50, + 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e, + 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x43, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x54, 0x46, 0x38, 0x5f, + 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, + 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x01, 0x12, 0x0a, + 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x02, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, + 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, + 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, + 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, + 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, + 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, + 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, + 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, + 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, + 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0xe9, + 0x07, 0x2a, 0x06, 0x08, 0xe9, 0x07, 0x10, 0xea, 0x07, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90, + 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8, 0x07, 0x22, 0xfe, 0x02, 0x0a, 0x12, 0x46, 0x65, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, + 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, + 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, + 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, + 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, + 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 
0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x1a, 0x87, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, + 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, + 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, + 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, + 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, + 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, + 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, + 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, + 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, + 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, + 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, + 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, + 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, + 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, + 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, + 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 
0x74, 0x68, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, + 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, + 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, + 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, + 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, + 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, + 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, + 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 0xea, 0x01, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, + 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, + 0xe7, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, + 0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x17, + 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, 0x45, 0x53, 0x54, + 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, + 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, + 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, + 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, + 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, + 0x10, 0x9f, 0x8d, 0x06, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, + 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, + 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, + 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, + 0x74, 0x69, 
0x6f, 0x6e, } var ( @@ -4123,103 +5091,136 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { return file_google_protobuf_descriptor_proto_rawDescData } -var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 10) -var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 28) +var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 17) +var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 32) var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ - (ExtensionRangeOptions_VerificationState)(0), // 0: google.protobuf.ExtensionRangeOptions.VerificationState - (FieldDescriptorProto_Type)(0), // 1: google.protobuf.FieldDescriptorProto.Type - (FieldDescriptorProto_Label)(0), // 2: google.protobuf.FieldDescriptorProto.Label - (FileOptions_OptimizeMode)(0), // 3: google.protobuf.FileOptions.OptimizeMode - (FieldOptions_CType)(0), // 4: google.protobuf.FieldOptions.CType - (FieldOptions_JSType)(0), // 5: google.protobuf.FieldOptions.JSType - (FieldOptions_OptionRetention)(0), // 6: google.protobuf.FieldOptions.OptionRetention - (FieldOptions_OptionTargetType)(0), // 7: google.protobuf.FieldOptions.OptionTargetType - (MethodOptions_IdempotencyLevel)(0), // 8: google.protobuf.MethodOptions.IdempotencyLevel - (GeneratedCodeInfo_Annotation_Semantic)(0), // 9: google.protobuf.GeneratedCodeInfo.Annotation.Semantic - (*FileDescriptorSet)(nil), // 10: google.protobuf.FileDescriptorSet - (*FileDescriptorProto)(nil), // 11: google.protobuf.FileDescriptorProto - (*DescriptorProto)(nil), // 12: google.protobuf.DescriptorProto - (*ExtensionRangeOptions)(nil), // 13: google.protobuf.ExtensionRangeOptions - (*FieldDescriptorProto)(nil), // 14: google.protobuf.FieldDescriptorProto - (*OneofDescriptorProto)(nil), // 15: google.protobuf.OneofDescriptorProto - (*EnumDescriptorProto)(nil), // 16: google.protobuf.EnumDescriptorProto - (*EnumValueDescriptorProto)(nil), // 17: google.protobuf.EnumValueDescriptorProto - (*ServiceDescriptorProto)(nil), // 18: google.protobuf.ServiceDescriptorProto - (*MethodDescriptorProto)(nil), // 19: google.protobuf.MethodDescriptorProto - (*FileOptions)(nil), // 20: google.protobuf.FileOptions - (*MessageOptions)(nil), // 21: google.protobuf.MessageOptions - (*FieldOptions)(nil), // 22: google.protobuf.FieldOptions - (*OneofOptions)(nil), // 23: google.protobuf.OneofOptions - (*EnumOptions)(nil), // 24: google.protobuf.EnumOptions - (*EnumValueOptions)(nil), // 25: google.protobuf.EnumValueOptions - (*ServiceOptions)(nil), // 26: google.protobuf.ServiceOptions - (*MethodOptions)(nil), // 27: google.protobuf.MethodOptions - (*UninterpretedOption)(nil), // 28: google.protobuf.UninterpretedOption - (*SourceCodeInfo)(nil), // 29: google.protobuf.SourceCodeInfo - (*GeneratedCodeInfo)(nil), // 30: google.protobuf.GeneratedCodeInfo - (*DescriptorProto_ExtensionRange)(nil), // 31: google.protobuf.DescriptorProto.ExtensionRange - (*DescriptorProto_ReservedRange)(nil), // 32: google.protobuf.DescriptorProto.ReservedRange - (*ExtensionRangeOptions_Declaration)(nil), // 33: google.protobuf.ExtensionRangeOptions.Declaration - (*EnumDescriptorProto_EnumReservedRange)(nil), // 34: google.protobuf.EnumDescriptorProto.EnumReservedRange - (*UninterpretedOption_NamePart)(nil), // 35: google.protobuf.UninterpretedOption.NamePart - (*SourceCodeInfo_Location)(nil), // 36: google.protobuf.SourceCodeInfo.Location - (*GeneratedCodeInfo_Annotation)(nil), // 37: 
google.protobuf.GeneratedCodeInfo.Annotation + (Edition)(0), // 0: google.protobuf.Edition + (ExtensionRangeOptions_VerificationState)(0), // 1: google.protobuf.ExtensionRangeOptions.VerificationState + (FieldDescriptorProto_Type)(0), // 2: google.protobuf.FieldDescriptorProto.Type + (FieldDescriptorProto_Label)(0), // 3: google.protobuf.FieldDescriptorProto.Label + (FileOptions_OptimizeMode)(0), // 4: google.protobuf.FileOptions.OptimizeMode + (FieldOptions_CType)(0), // 5: google.protobuf.FieldOptions.CType + (FieldOptions_JSType)(0), // 6: google.protobuf.FieldOptions.JSType + (FieldOptions_OptionRetention)(0), // 7: google.protobuf.FieldOptions.OptionRetention + (FieldOptions_OptionTargetType)(0), // 8: google.protobuf.FieldOptions.OptionTargetType + (MethodOptions_IdempotencyLevel)(0), // 9: google.protobuf.MethodOptions.IdempotencyLevel + (FeatureSet_FieldPresence)(0), // 10: google.protobuf.FeatureSet.FieldPresence + (FeatureSet_EnumType)(0), // 11: google.protobuf.FeatureSet.EnumType + (FeatureSet_RepeatedFieldEncoding)(0), // 12: google.protobuf.FeatureSet.RepeatedFieldEncoding + (FeatureSet_Utf8Validation)(0), // 13: google.protobuf.FeatureSet.Utf8Validation + (FeatureSet_MessageEncoding)(0), // 14: google.protobuf.FeatureSet.MessageEncoding + (FeatureSet_JsonFormat)(0), // 15: google.protobuf.FeatureSet.JsonFormat + (GeneratedCodeInfo_Annotation_Semantic)(0), // 16: google.protobuf.GeneratedCodeInfo.Annotation.Semantic + (*FileDescriptorSet)(nil), // 17: google.protobuf.FileDescriptorSet + (*FileDescriptorProto)(nil), // 18: google.protobuf.FileDescriptorProto + (*DescriptorProto)(nil), // 19: google.protobuf.DescriptorProto + (*ExtensionRangeOptions)(nil), // 20: google.protobuf.ExtensionRangeOptions + (*FieldDescriptorProto)(nil), // 21: google.protobuf.FieldDescriptorProto + (*OneofDescriptorProto)(nil), // 22: google.protobuf.OneofDescriptorProto + (*EnumDescriptorProto)(nil), // 23: google.protobuf.EnumDescriptorProto + (*EnumValueDescriptorProto)(nil), // 24: google.protobuf.EnumValueDescriptorProto + (*ServiceDescriptorProto)(nil), // 25: google.protobuf.ServiceDescriptorProto + (*MethodDescriptorProto)(nil), // 26: google.protobuf.MethodDescriptorProto + (*FileOptions)(nil), // 27: google.protobuf.FileOptions + (*MessageOptions)(nil), // 28: google.protobuf.MessageOptions + (*FieldOptions)(nil), // 29: google.protobuf.FieldOptions + (*OneofOptions)(nil), // 30: google.protobuf.OneofOptions + (*EnumOptions)(nil), // 31: google.protobuf.EnumOptions + (*EnumValueOptions)(nil), // 32: google.protobuf.EnumValueOptions + (*ServiceOptions)(nil), // 33: google.protobuf.ServiceOptions + (*MethodOptions)(nil), // 34: google.protobuf.MethodOptions + (*UninterpretedOption)(nil), // 35: google.protobuf.UninterpretedOption + (*FeatureSet)(nil), // 36: google.protobuf.FeatureSet + (*FeatureSetDefaults)(nil), // 37: google.protobuf.FeatureSetDefaults + (*SourceCodeInfo)(nil), // 38: google.protobuf.SourceCodeInfo + (*GeneratedCodeInfo)(nil), // 39: google.protobuf.GeneratedCodeInfo + (*DescriptorProto_ExtensionRange)(nil), // 40: google.protobuf.DescriptorProto.ExtensionRange + (*DescriptorProto_ReservedRange)(nil), // 41: google.protobuf.DescriptorProto.ReservedRange + (*ExtensionRangeOptions_Declaration)(nil), // 42: google.protobuf.ExtensionRangeOptions.Declaration + (*EnumDescriptorProto_EnumReservedRange)(nil), // 43: google.protobuf.EnumDescriptorProto.EnumReservedRange + (*FieldOptions_EditionDefault)(nil), // 44: google.protobuf.FieldOptions.EditionDefault + 
(*UninterpretedOption_NamePart)(nil), // 45: google.protobuf.UninterpretedOption.NamePart + (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 46: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + (*SourceCodeInfo_Location)(nil), // 47: google.protobuf.SourceCodeInfo.Location + (*GeneratedCodeInfo_Annotation)(nil), // 48: google.protobuf.GeneratedCodeInfo.Annotation } var file_google_protobuf_descriptor_proto_depIdxs = []int32{ - 11, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto - 12, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto - 16, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 18, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto - 14, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 20, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions - 29, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo - 14, // 7: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto - 14, // 8: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 12, // 9: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto - 16, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 31, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange - 15, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto - 21, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions - 32, // 14: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange - 28, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 33, // 16: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration - 0, // 17: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState - 2, // 18: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label - 1, // 19: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type - 22, // 20: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions - 23, // 21: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions - 17, // 22: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto - 24, // 23: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions - 34, // 24: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange - 25, // 25: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions - 19, // 26: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto - 26, // 27: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions - 27, // 28: 
google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions - 3, // 29: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode - 28, // 30: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 28, // 31: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 4, // 32: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType - 5, // 33: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType - 6, // 34: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention - 7, // 35: google.protobuf.FieldOptions.target:type_name -> google.protobuf.FieldOptions.OptionTargetType - 7, // 36: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType - 28, // 37: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 28, // 38: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 28, // 39: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 28, // 40: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 28, // 41: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 8, // 42: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel - 28, // 43: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 35, // 44: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart - 36, // 45: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location - 37, // 46: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation - 13, // 47: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions - 9, // 48: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic - 49, // [49:49] is the sub-list for method output_type - 49, // [49:49] is the sub-list for method input_type - 49, // [49:49] is the sub-list for extension type_name - 49, // [49:49] is the sub-list for extension extendee - 0, // [0:49] is the sub-list for field type_name + 18, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto + 19, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto + 23, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 25, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto + 21, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 27, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions + 38, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo + 0, // 7: google.protobuf.FileDescriptorProto.edition:type_name -> google.protobuf.Edition + 21, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto + 21, // 9: 
google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 19, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto + 23, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 40, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange + 22, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto + 28, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions + 41, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange + 35, // 16: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 42, // 17: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration + 36, // 18: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet + 1, // 19: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState + 3, // 20: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label + 2, // 21: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type + 29, // 22: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions + 30, // 23: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions + 24, // 24: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto + 31, // 25: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions + 43, // 26: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange + 32, // 27: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions + 26, // 28: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto + 33, // 29: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions + 34, // 30: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions + 4, // 31: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode + 36, // 32: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 33: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 34: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 35: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 5, // 36: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType + 6, // 37: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType + 7, // 38: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention + 8, // 39: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType + 44, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault + 36, // 41: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 42: 
google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 43: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 44: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 45: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 46: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 47: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 48: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 36, // 49: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 50: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 9, // 51: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel + 36, // 52: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet + 35, // 53: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 45, // 54: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 10, // 55: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence + 11, // 56: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType + 12, // 57: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding + 13, // 58: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation + 14, // 59: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding + 15, // 60: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat + 46, // 61: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + 0, // 62: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition + 0, // 63: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition + 47, // 64: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location + 48, // 65: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 20, // 66: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 0, // 67: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition + 0, // 68: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition + 36, // 69: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.features:type_name -> google.protobuf.FeatureSet + 16, // 70: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic + 71, // [71:71] is the sub-list for method output_type + 71, // [71:71] is the sub-list for method input_type + 71, // [71:71] is the sub-list for extension type_name + 71, // [71:71] is the sub-list for extension extendee + 0, // [0:71] is the sub-list for field type_name } func init() { file_google_protobuf_descriptor_proto_init() } @@ -4475,19 +5476,21 @@ func 
file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SourceCodeInfo); i { + switch v := v.(*FeatureSet); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields + case 3: + return &v.extensionFields default: return nil } } file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GeneratedCodeInfo); i { + switch v := v.(*FeatureSetDefaults); i { case 0: return &v.state case 1: @@ -4499,7 +5502,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DescriptorProto_ExtensionRange); i { + switch v := v.(*SourceCodeInfo); i { case 0: return &v.state case 1: @@ -4511,7 +5514,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DescriptorProto_ReservedRange); i { + switch v := v.(*GeneratedCodeInfo); i { case 0: return &v.state case 1: @@ -4523,7 +5526,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExtensionRangeOptions_Declaration); i { + switch v := v.(*DescriptorProto_ExtensionRange); i { case 0: return &v.state case 1: @@ -4535,7 +5538,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { + switch v := v.(*DescriptorProto_ReservedRange); i { case 0: return &v.state case 1: @@ -4547,7 +5550,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UninterpretedOption_NamePart); i { + switch v := v.(*ExtensionRangeOptions_Declaration); i { case 0: return &v.state case 1: @@ -4559,7 +5562,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SourceCodeInfo_Location); i { + switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { case 0: return &v.state case 1: @@ -4571,6 +5574,54 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldOptions_EditionDefault); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UninterpretedOption_NamePart); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v interface{}, i int) 
interface{} { + switch v := v.(*SourceCodeInfo_Location); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GeneratedCodeInfo_Annotation); i { case 0: return &v.state @@ -4588,8 +5639,8 @@ func file_google_protobuf_descriptor_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc, - NumEnums: 10, - NumMessages: 28, + NumEnums: 17, + NumMessages: 32, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 580b232f..9de51be5 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -237,7 +237,8 @@ type Any struct { // // Note: this functionality is not currently available in the official // protobuf release, and it is not used for type URLs beginning with - // type.googleapis.com. + // type.googleapis.com. As of May 2023, there are no widely used type server + // implementations and no plans to implement one. // // Schemes other than `http`, `https` (or the empty scheme) might be // used with implementation specific semantics. diff --git a/vendor/howett.net/plist/.gitignore b/vendor/howett.net/plist/.gitignore new file mode 100644 index 00000000..3743b346 --- /dev/null +++ b/vendor/howett.net/plist/.gitignore @@ -0,0 +1,16 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +*.wasm + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ diff --git a/vendor/howett.net/plist/README.md b/vendor/howett.net/plist/README.md index a13e29a3..d751c062 100644 --- a/vendor/howett.net/plist/README.md +++ b/vendor/howett.net/plist/README.md @@ -1,4 +1,4 @@ -# plist - A pure Go property list transcoder [![coverage report](https://gitlab.howett.net/go/plist/badges/master/coverage.svg)](https://gitlab.howett.net/go/plist/commits/master) +# plist - A pure Go property list transcoder [![coverage report](https://gitlab.howett.net/go/plist/badges/main/coverage.svg)](https://gitlab.howett.net/go/plist/commits/main) ## INSTALL ``` $ go get howett.net/plist diff --git a/vendor/howett.net/plist/bplist_parser.go b/vendor/howett.net/plist/bplist_parser.go index 1825b570..8bb164e1 100644 --- a/vendor/howett.net/plist/bplist_parser.go +++ b/vendor/howett.net/plist/bplist_parser.go @@ -137,7 +137,10 @@ func (p *bplistParser) parseSizedInteger(off offset, nbytes int) (lo uint64, hi case 16: lo, hi = binary.BigEndian.Uint64(p.buffer[off+8:]), binary.BigEndian.Uint64(p.buffer[off:]) default: - panic(errors.New("illegal integer size")) + if nbytes > 8 { + panic(errors.New("illegal integer size")) + } + lo, hi = binary.BigEndian.Uint64(p.buffer[off-(8-offset(nbytes)):]) & ((1<<(nbytes*8))-1), 0 } newOffset = off + offset(nbytes) diff --git a/vendor/howett.net/plist/cfvalue.go b/vendor/howett.net/plist/cfvalue.go --- a/vendor/howett.net/plist/cfvalue.go +++ b/vendor/howett.net/plist/cfvalue.go @@ -3,9 +3,14 @@ package plist import ( "hash/crc32" "sort" + "strconv" "time" ) +// magic value used in the non-binary encoding of UIDs +// (stored as a dictionary mapping CF$UID->integer) +const cfUIDMagic = "CF$UID" + type cfValue interface { typeName() string hash() interface{} } @@ -41,6 +46,24 @@ func (p *cfDictionary) sort() { sort.Sort(p) } +func (p *cfDictionary) maybeUID(lax bool) cfValue { + if len(p.keys) == 1 && p.keys[0] == "CF$UID" && len(p.values) == 1 { + pval := p.values[0] + if integer, ok := pval.(*cfNumber); ok { + return
cfUID(integer.value) + } + // Openstep only has cfString. Act like the unmarshaller a bit. + if lax { + if str, ok := pval.(cfString); ok { + if i, err := strconv.ParseUint(string(str), 10, 64); err == nil { + return cfUID(i) + } + } + } + } + return p +} + type cfArray struct { values []cfValue } @@ -115,6 +138,16 @@ func (p cfUID) hash() interface{} { return p } +func (p cfUID) toDict() *cfDictionary { + return &cfDictionary{ + keys: []string{cfUIDMagic}, + values: []cfValue{&cfNumber{ + signed: false, + value: uint64(p), + }}, + } +} + type cfData []byte func (cfData) typeName() string { diff --git a/vendor/howett.net/plist/text_generator.go b/vendor/howett.net/plist/text_generator.go index 53078ba5..d71f02bb 100644 --- a/vendor/howett.net/plist/text_generator.go +++ b/vendor/howett.net/plist/text_generator.go @@ -197,6 +197,8 @@ func (p *textPlistGenerator) writePlistValue(pval cfValue) { } else { io.WriteString(p.writer, p.plistQuotedString(time.Time(pval).In(time.UTC).Format(textPlistTimeLayout))) } + case cfUID: + p.writePlistValue(pval.toDict()) } } diff --git a/vendor/howett.net/plist/text_parser.go b/vendor/howett.net/plist/text_parser.go index 7e49d6f7..56401832 100644 --- a/vendor/howett.net/plist/text_parser.go +++ b/vendor/howett.net/plist/text_parser.go @@ -1,6 +1,12 @@ +// Parser for text plist formats. +// @see https://github.com/apple/swift-corelibs-foundation/blob/master/CoreFoundation/Parsing.subproj/CFOldStylePList.c +// @see https://github.com/gnustep/libs-base/blob/master/Source/NSPropertyList.m +// This parser also handles strings files. + package plist import ( + "encoding/base64" "encoding/binary" "errors" "fmt" @@ -90,6 +96,8 @@ func (p *textPlistParser) parseDocument() (pval cfValue, parseError error) { p.error("garbage after end of document") } + // Try parsing as .strings. + // See -[NSDictionary propertyListFromStringsFileFormat:]. p.start = 0 p.pos = 0 val = p.parseDictionary(true) @@ -257,9 +265,9 @@ func (p *textPlistParser) parseEscape() string { s = `\` case '"': s = `"` - case 'x': + case 'x': // This is our extension. s = string(rune(p.parseHexDigits(2))) - case 'u', 'U': + case 'u', 'U': // 'u' is a GNUstep extension. s = string(rune(p.parseHexDigits(4))) case '0', '1', '2', '3', '4', '5', '6', '7': p.backup() // we've already consumed one of the digits @@ -312,7 +320,7 @@ func (p *textPlistParser) parseUnquotedString() cfString { } // the { has already been consumed -func (p *textPlistParser) parseDictionary(ignoreEof bool) *cfDictionary { +func (p *textPlistParser) parseDictionary(ignoreEof bool) cfValue { //p.ignore() // ignore the { var keypv cfValue keys := make([]string, 0, 32) @@ -344,6 +352,9 @@ outer: var val cfValue n := p.next() if n == ';' { + // This is supposed to be .strings-specific. + // GNUstep parses this as an empty string. + // Apple copies the key like we do. 
val = keypv } else if n == '=' { // whitespace is consumed within @@ -362,7 +373,8 @@ outer: values = append(values, val) } - return &cfDictionary{keys: keys, values: values} + dict := &cfDictionary{keys: keys, values: values} + return dict.maybeUID(p.format == OpenStepFormat) } // the ( has already been consumed @@ -398,18 +410,45 @@ outer: // the <* have already been consumed func (p *textPlistParser) parseGNUStepValue() cfValue { typ := p.next() + + if typ == '>' || typ == eof { // <*>, <*EOF + p.error("invalid GNUStep extended value") + } + + if typ != 'I' && typ != 'R' && typ != 'B' && typ != 'D' { + // early out: no need to collect the value if we'll fail to understand it + p.error("unknown GNUStep extended value type `" + string(typ) + "'") + } + + if p.peek() == '"' { // <*x" + p.next() + } + p.ignore() p.scanUntil('>') - if typ == eof || typ == '>' || p.empty() || p.peek() == eof { - p.error("invalid GNUStep extended value") + if p.peek() == eof { // <*xEOF or <*x"EOF + p.error("unterminated GNUStep extended value") + } + + if p.empty() { // <*x>, <*x""> + p.error("empty GNUStep extended value") } v := p.emit() p.next() // consume the > + if v[len(v)-1] == '"' { + // GNUStep tolerates malformed quoted values, as in <*I5"> and <*I"5> + // It purportedly does so by stripping the trailing quote + v = v[:len(v)-1] + } + switch typ { case 'I': + if len(v) == 0 { + p.error("truncated GNUStep extended value") + } if v[0] == '-' { n := mustParseInt(v, 10, 64) return &cfNumber{signed: true, value: uint64(n)} @@ -421,6 +460,9 @@ func (p *textPlistParser) parseGNUStepValue() cfValue { n := mustParseFloat(v, 64) return &cfReal{wide: true, value: n} // TODO(DH) 32/64 case 'B': + if len(v) == 0 { + p.error("truncated GNUStep extended value") + } b := v[0] == 'Y' return cfBoolean(b) case 'D': @@ -431,10 +473,33 @@ func (p *textPlistParser) parseGNUStepValue() cfValue { return cfDate(t.In(time.UTC)) } - p.error("invalid GNUStep type " + string(typ)) + // We should never get here; we checked the type above return nil } +// the <[ have already been consumed +func (p *textPlistParser) parseGNUStepBase64() cfData { + p.ignore() + p.scanUntil(']') + v := p.emit() + + if p.next() != ']' { + p.error("invalid GNUStep base64 data (expected ']')") + } + + if p.next() != '>' { + p.error("invalid GNUStep base64 data (expected '>')") + } + + // Emulate NSDataBase64DecodingIgnoreUnknownCharacters + filtered := strings.Map(base64ValidChars.Map, v) + data, err := base64.StdEncoding.DecodeString(filtered) + if err != nil { + p.error("invalid GNUStep base64 data: " + err.Error()) + } + return cfData(data) +} + // The < has already been consumed func (p *textPlistParser) parseHexData() cfData { buf := make([]byte, 256) @@ -452,7 +517,9 @@ func (p *textPlistParser) parseHexData() cfData { } p.ignore() return cfData(buf[:i]) - case ' ', '\t', '\n', '\r', '\u2028', '\u2029': // more lax than apple here: skip spaces + // Apple and GNUstep both want these in pairs. We are a bit more lax. + // GS accepts comments too, but that seems like a lot of work. 
+ case ' ', '\t', '\n', '\r', '\u2028', '\u2029': continue } @@ -487,13 +554,17 @@ func (p *textPlistParser) parsePlistValue() cfValue { case eof: return &cfDictionary{} case '<': - if p.next() == '*' { + switch p.next() { + case '*': p.format = GNUStepFormat return p.parseGNUStepValue() + case '[': + p.format = GNUStepFormat + return p.parseGNUStepBase64() + default: + p.backup() + return p.parseHexData() } - - p.backup() - return p.parseHexData() case '"': return p.parseQuotedString() case '{': diff --git a/vendor/howett.net/plist/text_tables.go b/vendor/howett.net/plist/text_tables.go index 319c55c5..2bdd7ba9 100644 --- a/vendor/howett.net/plist/text_tables.go +++ b/vendor/howett.net/plist/text_tables.go @@ -2,6 +2,14 @@ package plist type characterSet [4]uint64 +func (s *characterSet) Map(ch rune) rune { + if s.Contains(ch) { + return ch + } else { + return -1 + } +} + func (s *characterSet) Contains(ch rune) bool { return ch >= 0 && ch <= 255 && s.ContainsByte(byte(ch)) } @@ -21,8 +29,9 @@ var gsQuotable = characterSet{ } // 7f instead of 3f in the top line: CFOldStylePlist.c says . is valid, but they quote it. +// ef instead og 6f in the top line: ' will be quoted var osQuotable = characterSet{ - 0xf4007f6fffffffff, + 0xf4007fefffffffff, 0xf8000001f8000001, 0xffffffffffffffff, 0xffffffffffffffff, @@ -41,3 +50,12 @@ var newlineCharacterSet = characterSet{ 0x0000000000000000, 0x0000000000000000, } + +// Bitmap of characters that are valid in base64-encoded strings. +// Used to filter out non-b64 characters to emulate NSDataBase64DecodingIgnoreUnknownCharacters +var base64ValidChars = characterSet{ + 0x23ff880000000000, + 0x07fffffe07fffffe, + 0x0000000000000000, + 0x0000000000000000, +} diff --git a/vendor/howett.net/plist/unmarshal.go b/vendor/howett.net/plist/unmarshal.go index 06f5d6fc..63b4b1d5 100644 --- a/vendor/howett.net/plist/unmarshal.go +++ b/vendor/howett.net/plist/unmarshal.go @@ -172,11 +172,25 @@ func (p *Decoder) unmarshal(pval cfValue, val reflect.Value) { panic(incompatibleTypeError) } case cfData: - if val.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Uint8 { - val.SetBytes([]byte(pval)) - } else { + if val.Kind() != reflect.Slice && val.Kind() != reflect.Array { + panic(incompatibleTypeError) + } + + if typ.Elem().Kind() != reflect.Uint8 { panic(incompatibleTypeError) } + + b := []byte(pval) + switch val.Kind() { + case reflect.Slice: + val.SetBytes(b) + case reflect.Array: + if val.Len() < len(b) { + panic(fmt.Errorf("plist: attempted to unmarshal %d bytes into a byte array of size %d", len(b), val.Len())) + } + sval := reflect.ValueOf(b) + reflect.Copy(val, sval) + } case cfUID: if val.Type() == uidType { val.SetUint(uint64(pval)) diff --git a/vendor/howett.net/plist/xml_generator.go b/vendor/howett.net/plist/xml_generator.go index 0b59ed7f..30597c16 100644 --- a/vendor/howett.net/plist/xml_generator.go +++ b/vendor/howett.net/plist/xml_generator.go @@ -24,10 +24,6 @@ const ( xmlRealTag = "real" xmlStringTag = "string" xmlTrueTag = "true" - - // magic value used in the XML encoding of UIDs - // (stored as a dictionary mapping CF$UID->integer) - xmlCFUIDMagic = "CF$UID" ) func formatXMLFloat(f float64) string { @@ -145,10 +141,7 @@ func (p *xmlPlistGenerator) writePlistValue(pval cfValue) { case *cfArray: p.writeArray(pval) case cfUID: - p.openTag(xmlDictTag) - p.element(xmlKeyTag, xmlCFUIDMagic) - p.element(xmlIntegerTag, strconv.FormatUint(uint64(pval), 10)) - p.closeTag(xmlDictTag) + p.writePlistValue(pval.toDict()) } } diff --git 
a/vendor/howett.net/plist/xml_parser.go b/vendor/howett.net/plist/xml_parser.go index 8d8cfd19..7415ef3e 100644 --- a/vendor/howett.net/plist/xml_parser.go +++ b/vendor/howett.net/plist/xml_parser.go @@ -177,13 +177,8 @@ func (p *xmlPlistParser) parseXMLElement(element xml.StartElement) cfValue { } } - if len(keys) == 1 && keys[0] == "CF$UID" && len(values) == 1 { - if integer, ok := values[0].(*cfNumber); ok { - return cfUID(integer.value) - } - } - - return &cfDictionary{keys: keys, values: values} + dict := &cfDictionary{keys: keys, values: values} + return dict.maybeUID(false) case "array": p.ntags++ values := make([]cfValue, 0, 10) diff --git a/vendor/modules.txt b/vendor/modules.txt index b8186ebd..9c748023 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,12 +1,13 @@ -# cloud.google.com/go/cloudsqlconn v1.4.0 +# cloud.google.com/go/cloudsqlconn v1.6.0 ## explicit; go 1.20 cloud.google.com/go/cloudsqlconn cloud.google.com/go/cloudsqlconn/errtype +cloud.google.com/go/cloudsqlconn/instance cloud.google.com/go/cloudsqlconn/internal/cloudsql cloud.google.com/go/cloudsqlconn/internal/trace cloud.google.com/go/cloudsqlconn/mysql/mysql cloud.google.com/go/cloudsqlconn/postgres/pgxv4 -# cloud.google.com/go/compute v1.20.1 +# cloud.google.com/go/compute v1.23.3 ## explicit; go 1.19 cloud.google.com/go/compute/internal # cloud.google.com/go/compute/metadata v0.2.3 @@ -18,7 +19,7 @@ github.com/99designs/go-keychain # github.com/99designs/keyring v1.2.2 ## explicit; go 1.19 github.com/99designs/keyring -# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 +# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/azcore github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud @@ -27,6 +28,7 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared @@ -36,9 +38,7 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming github.com/Azure/azure-sdk-for-go/sdk/azcore/to github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing -# github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 -## explicit; go 1.18 -# github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 +# github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/internal/diag github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo @@ -47,7 +47,7 @@ github.com/Azure/azure-sdk-for-go/sdk/internal/log github.com/Azure/azure-sdk-for-go/sdk/internal/poller github.com/Azure/azure-sdk-for-go/sdk/internal/temporal github.com/Azure/azure-sdk-for-go/sdk/internal/uuid -# github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 +# github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob @@ -62,12 +62,12 @@ github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas 
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service -# github.com/ClickHouse/ch-go v0.52.1 -## explicit; go 1.18 +# github.com/ClickHouse/ch-go v0.61.2 +## explicit; go 1.21 github.com/ClickHouse/ch-go/compress github.com/ClickHouse/ch-go/proto -# github.com/ClickHouse/clickhouse-go/v2 v2.10.1 -## explicit; go 1.18 +# github.com/ClickHouse/clickhouse-go/v2 v2.17.1 +## explicit; go 1.20 github.com/ClickHouse/clickhouse-go/v2 github.com/ClickHouse/clickhouse-go/v2/contributors github.com/ClickHouse/clickhouse-go/v2/ext @@ -80,45 +80,41 @@ github.com/ClickHouse/clickhouse-go/v2/resources # github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c ## explicit github.com/JohnCGriffin/overflow -# github.com/andybalholm/brotli v1.0.5 -## explicit; go 1.12 +# github.com/andybalholm/brotli v1.1.0 +## explicit; go 1.13 github.com/andybalholm/brotli -# github.com/apache/arrow/go/v12 v12.0.0 -## explicit; go 1.18 -github.com/apache/arrow/go/v12/arrow -github.com/apache/arrow/go/v12/arrow/array -github.com/apache/arrow/go/v12/arrow/arrio -github.com/apache/arrow/go/v12/arrow/bitutil -github.com/apache/arrow/go/v12/arrow/compute -github.com/apache/arrow/go/v12/arrow/compute/internal/exec -github.com/apache/arrow/go/v12/arrow/compute/internal/kernels -github.com/apache/arrow/go/v12/arrow/decimal128 -github.com/apache/arrow/go/v12/arrow/decimal256 -github.com/apache/arrow/go/v12/arrow/encoded -github.com/apache/arrow/go/v12/arrow/endian -github.com/apache/arrow/go/v12/arrow/float16 -github.com/apache/arrow/go/v12/arrow/internal -github.com/apache/arrow/go/v12/arrow/internal/debug -github.com/apache/arrow/go/v12/arrow/internal/dictutils -github.com/apache/arrow/go/v12/arrow/internal/flatbuf -github.com/apache/arrow/go/v12/arrow/ipc -github.com/apache/arrow/go/v12/arrow/memory -github.com/apache/arrow/go/v12/arrow/memory/internal/cgoalloc -github.com/apache/arrow/go/v12/arrow/memory/mallocator -github.com/apache/arrow/go/v12/arrow/scalar -github.com/apache/arrow/go/v12/internal/bitutils -github.com/apache/arrow/go/v12/internal/hashing -github.com/apache/arrow/go/v12/internal/utils -github.com/apache/arrow/go/v12/parquet -github.com/apache/arrow/go/v12/parquet/compress -github.com/apache/arrow/go/v12/parquet/internal/debug -github.com/apache/arrow/go/v12/parquet/internal/gen-go/parquet -# github.com/apache/thrift v0.16.0 -## explicit; go 1.16 -github.com/apache/thrift/lib/go/thrift -# github.com/aws/aws-sdk-go v1.40.45 -## explicit; go 1.11 +github.com/andybalholm/brotli/matchfinder +# github.com/apache/arrow/go/v14 v14.0.2 +## explicit; go 1.20 +github.com/apache/arrow/go/v14/arrow +github.com/apache/arrow/go/v14/arrow/array +github.com/apache/arrow/go/v14/arrow/arrio +github.com/apache/arrow/go/v14/arrow/bitutil +github.com/apache/arrow/go/v14/arrow/compute +github.com/apache/arrow/go/v14/arrow/compute/exec +github.com/apache/arrow/go/v14/arrow/compute/internal/kernels +github.com/apache/arrow/go/v14/arrow/decimal128 +github.com/apache/arrow/go/v14/arrow/decimal256 +github.com/apache/arrow/go/v14/arrow/encoded +github.com/apache/arrow/go/v14/arrow/endian +github.com/apache/arrow/go/v14/arrow/float16 +github.com/apache/arrow/go/v14/arrow/internal +github.com/apache/arrow/go/v14/arrow/internal/debug +github.com/apache/arrow/go/v14/arrow/internal/dictutils +github.com/apache/arrow/go/v14/arrow/internal/flatbuf +github.com/apache/arrow/go/v14/arrow/ipc +github.com/apache/arrow/go/v14/arrow/memory +github.com/apache/arrow/go/v14/arrow/memory/internal/cgoalloc 
+github.com/apache/arrow/go/v14/arrow/memory/mallocator +github.com/apache/arrow/go/v14/arrow/scalar +github.com/apache/arrow/go/v14/internal/bitutils +github.com/apache/arrow/go/v14/internal/hashing +github.com/apache/arrow/go/v14/internal/json +github.com/apache/arrow/go/v14/internal/utils +# github.com/aws/aws-sdk-go v1.50.6 +## explicit; go 1.19 github.com/aws/aws-sdk-go/aws +github.com/aws/aws-sdk-go/aws/auth/bearer github.com/aws/aws-sdk-go/aws/awserr github.com/aws/aws-sdk-go/aws/awsutil github.com/aws/aws-sdk-go/aws/client @@ -158,78 +154,90 @@ github.com/aws/aws-sdk-go/service/athena github.com/aws/aws-sdk-go/service/athena/athenaiface github.com/aws/aws-sdk-go/service/sso github.com/aws/aws-sdk-go/service/sso/ssoiface +github.com/aws/aws-sdk-go/service/ssooidc github.com/aws/aws-sdk-go/service/sts github.com/aws/aws-sdk-go/service/sts/stsiface -# github.com/aws/aws-sdk-go-v2 v1.17.7 -## explicit; go 1.15 +# github.com/aws/aws-sdk-go-v2 v1.24.1 +## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/aws github.com/aws/aws-sdk-go-v2/aws/arn github.com/aws/aws-sdk-go-v2/aws/defaults github.com/aws/aws-sdk-go-v2/aws/middleware +github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics github.com/aws/aws-sdk-go-v2/aws/protocol/xml github.com/aws/aws-sdk-go-v2/aws/ratelimit github.com/aws/aws-sdk-go-v2/aws/retry github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4 github.com/aws/aws-sdk-go-v2/aws/signer/v4 github.com/aws/aws-sdk-go-v2/aws/transport/http +github.com/aws/aws-sdk-go-v2/internal/auth +github.com/aws/aws-sdk-go-v2/internal/auth/smithy github.com/aws/aws-sdk-go-v2/internal/awsutil +github.com/aws/aws-sdk-go-v2/internal/context +github.com/aws/aws-sdk-go-v2/internal/endpoints +github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn github.com/aws/aws-sdk-go-v2/internal/rand github.com/aws/aws-sdk-go-v2/internal/sdk github.com/aws/aws-sdk-go-v2/internal/sdkio github.com/aws/aws-sdk-go-v2/internal/strings github.com/aws/aws-sdk-go-v2/internal/sync/singleflight github.com/aws/aws-sdk-go-v2/internal/timeconv -# github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 -## explicit; go 1.15 +# github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 +## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi -# github.com/aws/aws-sdk-go-v2/credentials v1.13.18 -## explicit; go 1.15 +# github.com/aws/aws-sdk-go-v2/credentials v1.16.16 +## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/credentials -# github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.59 -## explicit; go 1.15 +# github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.15 +## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/feature/s3/manager -# github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.31 -## explicit; go 1.15 +# github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 +## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/internal/configsources -# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.25 -## explicit; go 1.15 +# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 +## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 -# github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.23 -## explicit; go 1.15 +# github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.10 +## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/internal/v4a github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4 -# 
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 -## explicit; go 1.15 +# github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 +## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding -# github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.26 -## explicit; go 1.15 +# github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.10 +## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/service/internal/checksum -# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.25 -## explicit; go 1.15 +# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 +## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url -# github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.14.0 -## explicit; go 1.15 +# github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.10 +## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/service/internal/s3shared github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config -# github.com/aws/aws-sdk-go-v2/service/s3 v1.31.0 -## explicit; go 1.15 +# github.com/aws/aws-sdk-go-v2/service/s3 v1.48.1 +## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/service/s3 github.com/aws/aws-sdk-go-v2/service/s3/internal/arn github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints github.com/aws/aws-sdk-go-v2/service/s3/types -# github.com/aws/smithy-go v1.13.5 -## explicit; go 1.15 +# github.com/aws/smithy-go v1.19.0 +## explicit; go 1.19 github.com/aws/smithy-go +github.com/aws/smithy-go/auth github.com/aws/smithy-go/auth/bearer +github.com/aws/smithy-go/container/private/cache +github.com/aws/smithy-go/container/private/cache/lru github.com/aws/smithy-go/context github.com/aws/smithy-go/document github.com/aws/smithy-go/encoding github.com/aws/smithy-go/encoding/httpbinding github.com/aws/smithy-go/encoding/xml +github.com/aws/smithy-go/endpoints +github.com/aws/smithy-go/endpoints/private/rulesfn github.com/aws/smithy-go/internal/sync/singleflight github.com/aws/smithy-go/io github.com/aws/smithy-go/logging @@ -250,8 +258,8 @@ github.com/cenkalti/backoff # github.com/cespare/xxhash/v2 v2.2.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 -# github.com/danieljoos/wincred v1.1.2 -## explicit; go 1.13 +# github.com/danieljoos/wincred v1.2.1 +## explicit; go 1.18 github.com/danieljoos/wincred # github.com/denisenkom/go-mssqldb v0.12.3 ## explicit; go 1.13 @@ -260,7 +268,7 @@ github.com/denisenkom/go-mssqldb/internal/cp github.com/denisenkom/go-mssqldb/internal/decimal github.com/denisenkom/go-mssqldb/internal/querytext github.com/denisenkom/go-mssqldb/msdsn -# github.com/dvsekhvalnov/jose2go v1.5.0 +# github.com/dvsekhvalnov/jose2go v1.6.0 ## explicit; go 1.15 github.com/dvsekhvalnov/jose2go github.com/dvsekhvalnov/jose2go/aes @@ -270,8 +278,8 @@ github.com/dvsekhvalnov/jose2go/compact github.com/dvsekhvalnov/jose2go/kdf github.com/dvsekhvalnov/jose2go/keys/ecc github.com/dvsekhvalnov/jose2go/padding -# github.com/elastic/go-sysinfo v1.8.1 -## explicit; go 1.17 +# github.com/elastic/go-sysinfo v1.11.2 +## explicit; go 1.18 github.com/elastic/go-sysinfo github.com/elastic/go-sysinfo/internal/registry github.com/elastic/go-sysinfo/providers/aix @@ -280,13 +288,16 @@ github.com/elastic/go-sysinfo/providers/linux github.com/elastic/go-sysinfo/providers/shared github.com/elastic/go-sysinfo/providers/windows github.com/elastic/go-sysinfo/types -# 
github.com/elastic/go-windows v1.0.0 +# github.com/elastic/go-windows v1.0.1 ## explicit github.com/elastic/go-windows +# github.com/felixge/httpsnoop v1.0.4 +## explicit; go 1.13 +github.com/felixge/httpsnoop # github.com/form3tech-oss/jwt-go v3.2.5+incompatible ## explicit github.com/form3tech-oss/jwt-go -# github.com/gabriel-vasile/mimetype v1.4.2 +# github.com/gabriel-vasile/mimetype v1.4.3 ## explicit; go 1.20 github.com/gabriel-vasile/mimetype github.com/gabriel-vasile/mimetype/internal/charset @@ -295,16 +306,23 @@ github.com/gabriel-vasile/mimetype/internal/magic # github.com/go-faster/city v1.0.1 ## explicit; go 1.17 github.com/go-faster/city -# github.com/go-faster/errors v0.6.1 -## explicit; go 1.18 +# github.com/go-faster/errors v0.7.1 +## explicit; go 1.20 github.com/go-faster/errors # github.com/go-kit/log v0.2.1 ## explicit; go 1.17 github.com/go-kit/log github.com/go-kit/log/level -# github.com/go-logfmt/logfmt v0.5.1 +# github.com/go-logfmt/logfmt v0.6.0 ## explicit; go 1.17 github.com/go-logfmt/logfmt +# github.com/go-logr/logr v1.4.1 +## explicit; go 1.18 +github.com/go-logr/logr +github.com/go-logr/logr/funcr +# github.com/go-logr/stdr v1.2.2 +## explicit; go 1.16 +github.com/go-logr/stdr # github.com/go-sql-driver/mysql v1.7.1 ## explicit; go 1.13 github.com/go-sql-driver/mysql @@ -318,7 +336,7 @@ github.com/gobwas/glob/syntax/ast github.com/gobwas/glob/syntax/lexer github.com/gobwas/glob/util/runes github.com/gobwas/glob/util/strings -# github.com/goccy/go-json v0.10.0 +# github.com/goccy/go-json v0.10.2 ## explicit; go 1.12 github.com/goccy/go-json github.com/goccy/go-json/internal/decoder @@ -349,14 +367,11 @@ github.com/golang/protobuf/ptypes github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes/duration github.com/golang/protobuf/ptypes/timestamp -# github.com/golang/snappy v0.0.4 -## explicit -github.com/golang/snappy -# github.com/google/flatbuffers v23.1.21+incompatible +# github.com/google/flatbuffers v23.5.26+incompatible ## explicit github.com/google/flatbuffers/go -# github.com/google/s2a-go v0.1.4 -## explicit; go 1.16 +# github.com/google/s2a-go v0.1.7 +## explicit; go 1.19 github.com/google/s2a-go github.com/google/s2a-go/fallback github.com/google/s2a-go/internal/authinfo @@ -376,19 +391,21 @@ github.com/google/s2a-go/internal/v2 github.com/google/s2a-go/internal/v2/certverifier github.com/google/s2a-go/internal/v2/remotesigner github.com/google/s2a-go/internal/v2/tlsconfigstore +github.com/google/s2a-go/retry github.com/google/s2a-go/stream -# github.com/google/uuid v1.3.0 +# github.com/google/uuid v1.6.0 ## explicit github.com/google/uuid -# github.com/googleapis/enterprise-certificate-proxy v0.2.5 +# github.com/googleapis/enterprise-certificate-proxy v0.3.2 ## explicit; go 1.19 github.com/googleapis/enterprise-certificate-proxy/client github.com/googleapis/enterprise-certificate-proxy/client/util -# github.com/googleapis/gax-go/v2 v2.11.0 +# github.com/googleapis/gax-go/v2 v2.12.0 ## explicit; go 1.19 github.com/googleapis/gax-go/v2 github.com/googleapis/gax-go/v2/apierror github.com/googleapis/gax-go/v2/apierror/internal/proto +github.com/googleapis/gax-go/v2/callctx github.com/googleapis/gax-go/v2/internal # github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c ## explicit @@ -396,7 +413,7 @@ github.com/gsterjov/go-libsecret # github.com/jackc/chunkreader/v2 v2.0.1 ## explicit; go 1.12 github.com/jackc/chunkreader/v2 -# github.com/jackc/pgconn v1.14.0 +# github.com/jackc/pgconn v1.14.1 ## explicit; go 1.12 
github.com/jackc/pgconn github.com/jackc/pgconn/internal/ctxwatch @@ -410,10 +427,10 @@ github.com/jackc/pgpassfile # github.com/jackc/pgproto3/v2 v2.3.2 ## explicit; go 1.12 github.com/jackc/pgproto3/v2 -# github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a +# github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 ## explicit; go 1.14 github.com/jackc/pgservicefile -# github.com/jackc/pgtype v1.14.0 +# github.com/jackc/pgtype v1.14.1 ## explicit; go 1.13 github.com/jackc/pgtype # github.com/jackc/pgx/v4 v4.18.1 @@ -431,22 +448,16 @@ github.com/jmoiron/sqlx/reflectx # github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 ## explicit github.com/joeshaw/multierror -# github.com/klauspost/asmfmt v1.3.2 -## explicit; go 1.16 -github.com/klauspost/asmfmt -github.com/klauspost/asmfmt/cmd/asmfmt -# github.com/klauspost/compress v1.15.15 -## explicit; go 1.17 +# github.com/klauspost/compress v1.17.5 +## explicit; go 1.19 github.com/klauspost/compress -github.com/klauspost/compress/flate github.com/klauspost/compress/fse -github.com/klauspost/compress/gzip github.com/klauspost/compress/huff0 github.com/klauspost/compress/internal/cpuinfo github.com/klauspost/compress/internal/snapref github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd/internal/xxhash -# github.com/klauspost/cpuid/v2 v2.2.3 +# github.com/klauspost/cpuid/v2 v2.2.6 ## explicit; go 1.15 github.com/klauspost/cpuid/v2 # github.com/lib/pq v1.10.9 @@ -454,49 +465,40 @@ github.com/klauspost/cpuid/v2 github.com/lib/pq github.com/lib/pq/oid github.com/lib/pq/scram -# github.com/matttproud/golang_protobuf_extensions v1.0.4 -## explicit; go 1.9 -github.com/matttproud/golang_protobuf_extensions/pbutil -# github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 -## explicit -github.com/minio/asm2plan9s -# github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 -## explicit -github.com/minio/c2goasm # github.com/mtibben/percent v0.2.1 ## explicit; go 1.14 github.com/mtibben/percent -# github.com/paulmach/orb v0.9.0 +# github.com/paulmach/orb v0.11.1 ## explicit; go 1.15 github.com/paulmach/orb -# github.com/pierrec/lz4/v4 v4.1.17 +# github.com/pierrec/lz4/v4 v4.1.21 ## explicit; go 1.14 github.com/pierrec/lz4/v4 github.com/pierrec/lz4/v4/internal/lz4block github.com/pierrec/lz4/v4/internal/lz4errors github.com/pierrec/lz4/v4/internal/lz4stream github.com/pierrec/lz4/v4/internal/xxh32 -# github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 +# github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c ## explicit; go 1.14 github.com/pkg/browser # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors -# github.com/prometheus/client_golang v1.17.0 +# github.com/prometheus/client_golang v1.18.0 ## explicit; go 1.19 github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/promhttp -# github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 -## explicit; go 1.18 +# github.com/prometheus/client_model v0.5.0 +## explicit; go 1.19 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.44.0 -## explicit; go 1.18 +# github.com/prometheus/common v0.46.0 +## explicit; go 1.20 github.com/prometheus/common/expfmt github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg github.com/prometheus/common/model github.com/prometheus/common/version -# github.com/prometheus/procfs v0.11.1 +# github.com/prometheus/procfs v0.12.0 ## explicit; go 1.19 
github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs @@ -512,19 +514,19 @@ github.com/segmentio/asm/cpu/arm github.com/segmentio/asm/cpu/arm64 github.com/segmentio/asm/cpu/cpuid github.com/segmentio/asm/cpu/x86 -# github.com/segmentio/go-athena v0.0.0-20181208004937-dfa5f1818930 +# github.com/segmentio/go-athena v0.0.0-20230626212750-5fac08ed8dab ## explicit github.com/segmentio/go-athena # github.com/shopspring/decimal v1.3.1 ## explicit; go 1.13 github.com/shopspring/decimal -# github.com/sirupsen/logrus v1.9.0 +# github.com/sirupsen/logrus v1.9.3 ## explicit; go 1.13 github.com/sirupsen/logrus -# github.com/snowflakedb/gosnowflake v1.6.22 +# github.com/snowflakedb/gosnowflake v1.7.2 ## explicit; go 1.19 github.com/snowflakedb/gosnowflake -# github.com/vertica/vertica-sql-go v1.3.2 +# github.com/vertica/vertica-sql-go v1.3.3 ## explicit; go 1.13 github.com/vertica/vertica-sql-go github.com/vertica/vertica-sql-go/common @@ -553,17 +555,32 @@ go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate -# go.opentelemetry.io/otel v1.13.0 -## explicit; go 1.18 +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 +## explicit; go 1.20 +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil +# go.opentelemetry.io/otel v1.22.0 +## explicit; go 1.20 +go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute +go.opentelemetry.io/otel/baggage go.opentelemetry.io/otel/codes go.opentelemetry.io/otel/internal go.opentelemetry.io/otel/internal/attribute -# go.opentelemetry.io/otel/trace v1.13.0 -## explicit; go 1.18 +go.opentelemetry.io/otel/internal/baggage +go.opentelemetry.io/otel/internal/global +go.opentelemetry.io/otel/propagation +go.opentelemetry.io/otel/semconv/v1.20.0 +# go.opentelemetry.io/otel/metric v1.22.0 +## explicit; go 1.20 +go.opentelemetry.io/otel/metric +go.opentelemetry.io/otel/metric/embedded +# go.opentelemetry.io/otel/trace v1.22.0 +## explicit; go 1.20 go.opentelemetry.io/otel/trace -# golang.org/x/crypto v0.14.0 -## explicit; go 1.17 +go.opentelemetry.io/otel/trace/embedded +# golang.org/x/crypto v0.18.0 +## explicit; go 1.18 golang.org/x/crypto/chacha20 golang.org/x/crypto/chacha20poly1305 golang.org/x/crypto/cryptobyte @@ -574,19 +591,18 @@ golang.org/x/crypto/internal/poly1305 golang.org/x/crypto/md4 golang.org/x/crypto/ocsp golang.org/x/crypto/pbkdf2 -# golang.org/x/exp v0.0.0-20230206171751-46f607a40771 -## explicit; go 1.18 +# golang.org/x/exp v0.0.0-20240119083558-1b970713d09a +## explicit; go 1.20 golang.org/x/exp/constraints golang.org/x/exp/maps golang.org/x/exp/slices -# golang.org/x/mod v0.8.0 -## explicit; go 1.17 +# golang.org/x/mod v0.14.0 +## explicit; go 1.18 golang.org/x/mod/internal/lazyregexp golang.org/x/mod/module golang.org/x/mod/semver -# golang.org/x/net v0.17.0 -## explicit; go 1.17 -golang.org/x/net/context +# golang.org/x/net v0.20.0 +## explicit; go 1.18 golang.org/x/net/html golang.org/x/net/html/atom golang.org/x/net/http/httpguts @@ -597,31 +613,32 @@ golang.org/x/net/internal/socks golang.org/x/net/internal/timeseries golang.org/x/net/proxy golang.org/x/net/trace -# golang.org/x/oauth2 v0.10.0 -## explicit; go 1.17 +# golang.org/x/oauth2 v0.16.0 +## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/authhandler golang.org/x/oauth2/google golang.org/x/oauth2/google/internal/externalaccount 
+golang.org/x/oauth2/google/internal/externalaccountauthorizeduser +golang.org/x/oauth2/google/internal/stsexchange golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt -# golang.org/x/sync v0.3.0 -## explicit; go 1.17 +# golang.org/x/sync v0.6.0 +## explicit; go 1.18 golang.org/x/sync/errgroup -# golang.org/x/sys v0.13.0 -## explicit; go 1.17 +# golang.org/x/sys v0.16.0 +## explicit; go 1.18 golang.org/x/sys/cpu -golang.org/x/sys/execabs golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/term v0.13.0 -## explicit; go 1.17 +# golang.org/x/term v0.16.0 +## explicit; go 1.18 golang.org/x/term -# golang.org/x/text v0.13.0 -## explicit; go 1.17 +# golang.org/x/text v0.14.0 +## explicit; go 1.18 golang.org/x/text/cases golang.org/x/text/internal golang.org/x/text/internal/language @@ -635,10 +652,10 @@ golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm golang.org/x/text/width -# golang.org/x/time v0.3.0 -## explicit +# golang.org/x/time v0.5.0 +## explicit; go 1.18 golang.org/x/time/rate -# golang.org/x/tools v0.6.0 +# golang.org/x/tools v0.17.0 ## explicit; go 1.18 golang.org/x/tools/cmd/goimports golang.org/x/tools/cmd/stringer @@ -646,11 +663,12 @@ golang.org/x/tools/go/ast/astutil golang.org/x/tools/go/gcexportdata golang.org/x/tools/go/internal/packagesdriver golang.org/x/tools/go/packages +golang.org/x/tools/go/types/objectpath golang.org/x/tools/internal/event golang.org/x/tools/internal/event/core golang.org/x/tools/internal/event/keys golang.org/x/tools/internal/event/label -golang.org/x/tools/internal/fastwalk +golang.org/x/tools/internal/event/tag golang.org/x/tools/internal/gcimporter golang.org/x/tools/internal/gocommand golang.org/x/tools/internal/gopathwalk @@ -660,11 +678,12 @@ golang.org/x/tools/internal/pkgbits golang.org/x/tools/internal/tokeninternal golang.org/x/tools/internal/typeparams golang.org/x/tools/internal/typesinternal -# golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 -## explicit; go 1.17 +golang.org/x/tools/internal/versions +# golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 +## explicit; go 1.18 golang.org/x/xerrors golang.org/x/xerrors/internal -# google.golang.org/api v0.130.0 +# google.golang.org/api v0.160.0 ## explicit; go 1.19 google.golang.org/api/googleapi google.golang.org/api/googleapi/transport @@ -678,7 +697,7 @@ google.golang.org/api/option/internaloption google.golang.org/api/sqladmin/v1beta4 google.golang.org/api/transport/http google.golang.org/api/transport/http/internal/propagation -# google.golang.org/appengine v1.6.7 +# google.golang.org/appengine v1.6.8 ## explicit; go 1.11 google.golang.org/appengine google.golang.org/appengine/internal @@ -688,17 +707,15 @@ google.golang.org/appengine/internal/datastore google.golang.org/appengine/internal/log google.golang.org/appengine/internal/modules google.golang.org/appengine/internal/remote_api -google.golang.org/appengine/internal/socket google.golang.org/appengine/internal/urlfetch -google.golang.org/appengine/socket google.golang.org/appengine/urlfetch -# google.golang.org/genproto/googleapis/rpc v0.0.0-20230629202037-9506855d4529 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe ## explicit; go 1.19 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.56.1 -## explicit; go 1.17 +# 
google.golang.org/grpc v1.61.0 +## explicit; go 1.19 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff @@ -728,10 +745,12 @@ google.golang.org/grpc/internal/grpclog google.golang.org/grpc/internal/grpcrand google.golang.org/grpc/internal/grpcsync google.golang.org/grpc/internal/grpcutil +google.golang.org/grpc/internal/idle google.golang.org/grpc/internal/metadata google.golang.org/grpc/internal/pretty google.golang.org/grpc/internal/resolver google.golang.org/grpc/internal/resolver/dns +google.golang.org/grpc/internal/resolver/dns/internal google.golang.org/grpc/internal/resolver/passthrough google.golang.org/grpc/internal/resolver/unix google.golang.org/grpc/internal/serviceconfig @@ -743,12 +762,14 @@ google.golang.org/grpc/keepalive google.golang.org/grpc/metadata google.golang.org/grpc/peer google.golang.org/grpc/resolver +google.golang.org/grpc/resolver/dns google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.31.0 -## explicit; go 1.11 +# google.golang.org/protobuf v1.32.0 +## explicit; go 1.17 +google.golang.org/protobuf/encoding/protodelim google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext google.golang.org/protobuf/encoding/protowire @@ -787,6 +808,6 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# howett.net/plist v0.0.0-20181124034731-591f970eefbb -## explicit +# howett.net/plist v1.0.1 +## explicit; go 1.12 howett.net/plist From 0b6ff699f86fb55103728e751ffd56c9249b8d74 Mon Sep 17 00:00:00 2001 From: Philipp Defner Date: Tue, 30 Jan 2024 12:46:27 +0100 Subject: [PATCH 2/3] Force > 1.21 for toolchain in go.mod --- .github/workflows/go.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index a624c176..595daeec 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -12,9 +12,9 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: - go-version: '^1.21.5' + go-version: '>=1.21' - name: Build run: go build -v ./... From 62446f092a4392c472fa6fa150004c66d7efa5d5 Mon Sep 17 00:00:00 2001 From: Philipp Defner Date: Tue, 30 Jan 2024 12:55:04 +0100 Subject: [PATCH 3/3] Remove toolchain --- go.mod | 2 -- 1 file changed, 2 deletions(-) diff --git a/go.mod b/go.mod index acda7351..be3144c3 100644 --- a/go.mod +++ b/go.mod @@ -2,8 +2,6 @@ module github.com/justwatchcom/sql_exporter go 1.21 -toolchain go1.21.5 - require ( cloud.google.com/go/cloudsqlconn v1.6.0 github.com/ClickHouse/clickhouse-go/v2 v2.17.1
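
A note on the toolchain handling in the last two patches: since Go 1.21, the "toolchain" directive in go.mod pins the exact toolchain used to build the module, while the "go" directive only declares the minimum language version. Removing the directive leaves toolchain selection to the locally installed Go plus the GOTOOLCHAIN setting. The sketch below shows the resulting module header and the default selection behaviour; it assumes nothing beyond what the diffs above show (module path and go version) and standard Go 1.21 tooling, with the require block elided.

	// go.mod after PATCH 3/3: no toolchain line, so any Go >= 1.21 may build it
	module github.com/justwatchcom/sql_exporter

	go 1.21

	# GOTOOLCHAIN defaults to "auto": the local toolchain is used unless go.mod
	# or a dependency requires a newer one, in which case the go command
	# downloads it on demand.
	$ go env GOTOOLCHAIN
	auto

Together with go-version: '>=1.21' from PATCH 2/3, setup-go then resolves the newest stable release satisfying the constraint instead of the caret range anchored at 1.21.5.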